# This file is part of pipe_base.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations

"""Module defining GraphBuilder class and related methods.
"""

__all__ = ['GraphBuilder']

# -------------------------------
# Imports of standard modules --
# -------------------------------
import copy
import itertools
from collections import ChainMap
from dataclasses import dataclass
from typing import Set, List, Dict, Optional, Iterable
import logging

# -----------------------------
# Imports for other modules --
# -----------------------------
from .pipeline import PipelineDatasetTypes, TaskDatasetTypes, Pipeline, TaskDef
from .graph import QuantumGraph, QuantumGraphTaskNodes
from lsst.daf.butler import Quantum, DatasetRef, DimensionGraph, DataId, DimensionUniverse, DatasetType
from lsst.daf.butler.core.utils import NamedKeyDict
from lsst.daf.butler.sql import DataIdQueryBuilder, SingleDatasetQueryBuilder

# ----------------------------------
# Local non-exported definitions --
# ----------------------------------

_LOG = logging.getLogger(__name__.partition(".")[2])
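# Note: stripping everything up to and including the first "." from
# ``__name__`` drops the top-level package component (presumably ``lsst``),
# so messages from this module are logged under e.g. ``pipe.base.graphBuilder``.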

 

 

@dataclass
class _DatasetScaffolding:
    """Helper class aggregating information about a `DatasetType`, used when
    constructing a `QuantumGraph`.

    `_DatasetScaffolding` does not hold the `DatasetType` instance itself
    because it is usually used as the value type in `_DatasetScaffoldingDict`,
    which uses `DatasetType` instances as keys.

    See `_PipelineScaffolding` for a top-down description of the full
    scaffolding data structure.

    Parameters
    ----------
    dimensions : `DimensionGraph`
        Dimensions of the `DatasetType`, expanded to include implied
        dependencies.
    """
    def __init__(self, dimensions: DimensionGraph):
        self.dimensions = dimensions
        self.producer = None
        self.consumers = {}
        self.dataIds = set()
        self.refs = []

    __slots__ = ("dimensions", "producer", "consumers", "dataIds", "refs")

    dimensions: DimensionGraph
    """The dimensions of the dataset type, expanded to include implied
    dependencies.

    Set during `_PipelineScaffolding` construction.
    """

    producer: Optional[_TaskScaffolding]
    """The scaffolding object for the Task that produces this dataset.

    Set during `_PipelineScaffolding` construction.
    """

    consumers: Dict[str, _TaskScaffolding]
    """The scaffolding objects for the Tasks that consume this dataset,
    keyed by their label in the `Pipeline`.

    Set during `_PipelineScaffolding` construction.
    """

    dataIds: Set[DataId]
    """Data IDs for all instances of this dataset type in the graph.

    These data IDs cover the full set of implied-expanded dimensions (i.e.
    the `dimensions` attribute of this instance), which is a superset of the
    dimensions used in `DatasetRef` instances (e.g. in ``refs``).

    Populated after construction by `_PipelineScaffolding.fillDataIds`.
    """

    refs: List[DatasetRef]
    """References for all instances of this dataset type in the graph.

    Populated after construction by `_PipelineScaffolding.fillDatasetRefs`.
    """

 

 

class _DatasetScaffoldingDict(NamedKeyDict):
    """Custom dictionary that maps `DatasetType` to `_DatasetScaffolding`.

    See `_PipelineScaffolding` for a top-down description of the full
    scaffolding data structure.

    Parameters
    ----------
    args
        Positional arguments are forwarded to the `dict` constructor.
    universe : `DimensionUniverse`
        Universe of all possible dimensions.
    """
    def __init__(self, *args, universe: DimensionUniverse):
        super().__init__(*args)
        self.universe = universe

    @classmethod
    def fromDatasetTypes(cls, datasetTypes: Iterable[DatasetType], *,
                         universe: DimensionUniverse) -> _DatasetScaffoldingDict:
        """Construct a dictionary from a flat iterable of `DatasetType` keys.

        Parameters
        ----------
        datasetTypes : `iterable` of `DatasetType`
            DatasetTypes to use as keys for the dict.  Values will be
            constructed from the dimensions of the keys.
        universe : `DimensionUniverse`
            Universe of all possible dimensions.

        Returns
        -------
        dictionary : `_DatasetScaffoldingDict`
            A new dictionary instance.
        """
        return cls(((datasetType, _DatasetScaffolding(datasetType.dimensions.implied(only=False)))
                    for datasetType in datasetTypes),
                   universe=universe)

    @classmethod
    def fromSubset(cls, datasetTypes: Iterable[DatasetType], first: _DatasetScaffoldingDict,
                   *rest) -> _DatasetScaffoldingDict:
        """Return a new dictionary by extracting items corresponding to the
        given keys from one or more existing dictionaries.

        Parameters
        ----------
        datasetTypes : `iterable` of `DatasetType`
            DatasetTypes to use as keys for the dict.  Values will be obtained
            by lookups against ``first`` and ``rest``.
        first : `_DatasetScaffoldingDict`
            Another dictionary from which to extract values.
        rest
            Additional dictionaries from which to extract values.

        Returns
        -------
        dictionary : `_DatasetScaffoldingDict`
            A new dictionary instance.
        """
        combined = ChainMap(first, *rest)
        return cls(((datasetType, combined[datasetType]) for datasetType in datasetTypes),
                   universe=first.universe)

    @property
    def dimensions(self) -> DimensionGraph:
        """The union of all dimensions used by all dataset types in this
        dictionary, including implied dependencies (`DimensionGraph`).
        """
        base = self.universe.empty
        if len(self) == 0:
            return base
        return base.union(*(scaffolding.dimensions for scaffolding in self.values()), implied=True)

    def unpackRefs(self) -> NamedKeyDict:
        """Unpack nested single-element `DatasetRef` lists into a new
        dictionary.

        This method assumes that each `_DatasetScaffolding.refs` list contains
        exactly one `DatasetRef`, as is the case for all "init" datasets.

        Returns
        -------
        dictionary : `NamedKeyDict`
            Dictionary mapping `DatasetType` to `DatasetRef`, with both
            `DatasetType` instances and string names usable as keys.
        """

        return NamedKeyDict((datasetType, scaffolding.refs[0]) for datasetType, scaffolding in self.items())

 

 

@dataclass
class _TaskScaffolding:
    """Helper class aggregating information about a `PipelineTask`, used when
    constructing a `QuantumGraph`.

    See `_PipelineScaffolding` for a top-down description of the full
    scaffolding data structure.

    Parameters
    ----------
    taskDef : `TaskDef`
        Data structure that identifies the task class and its config.
    parent : `_PipelineScaffolding`
        The parent data structure that will hold the instance being
        constructed.
    datasetTypes : `TaskDatasetTypes`
        Data structure that categorizes the dataset types used by this task.

    Raises
    ------
    GraphBuilderError
        Raised if the task's dimensions are not a subset of the union of the
        pipeline's dataset dimensions.
    """
    def __init__(self, taskDef: TaskDef, parent: _PipelineScaffolding, datasetTypes: TaskDatasetTypes):
        universe = parent.dimensions.universe
        self.taskDef = taskDef
        self.dimensions = universe.extract(taskDef.connections.dimensions, implied=True)
        if not self.dimensions.issubset(parent.dimensions):
            raise GraphBuilderError(f"Task with label '{taskDef.label}' has dimensions "
                                    f"{self.dimensions.toSet()} that are not a subset of "
                                    f"the pipeline dimensions {parent.dimensions.toSet()}.")
        # Initialize _DatasetScaffoldingDicts as subsets of the one or two
        # corresponding dicts in the parent _PipelineScaffolding.
        self.initInputs = _DatasetScaffoldingDict.fromSubset(datasetTypes.initInputs,
                                                             parent.initInputs, parent.initIntermediates)
        self.initOutputs = _DatasetScaffoldingDict.fromSubset(datasetTypes.initOutputs,
                                                              parent.initIntermediates, parent.initOutputs)
        self.inputs = _DatasetScaffoldingDict.fromSubset(datasetTypes.inputs,
                                                         parent.inputs, parent.intermediates)
        self.outputs = _DatasetScaffoldingDict.fromSubset(datasetTypes.outputs,
                                                          parent.intermediates, parent.outputs)
        self.prerequisites = _DatasetScaffoldingDict.fromSubset(datasetTypes.prerequisites,
                                                                parent.prerequisites)
        # Add backreferences to the _DatasetScaffolding objects that point to
        # this Task.
        for dataset in itertools.chain(self.initInputs.values(), self.inputs.values(),
                                       self.prerequisites.values()):
            dataset.consumers[self.taskDef.label] = self
        for dataset in itertools.chain(self.initOutputs.values(), self.outputs.values()):
            assert dataset.producer is None
            dataset.producer = self
        self.dataIds = set()
        self.quanta = []

    taskDef: TaskDef
    """Data structure that identifies the task class and its config
    (`TaskDef`).
    """

    dimensions: DimensionGraph
    """The dimensions of a single `Quantum` of this task, expanded to include
    implied dependencies (`DimensionGraph`).
    """

    initInputs: _DatasetScaffoldingDict
    """Dictionary containing information about datasets used to construct this
    task (`_DatasetScaffoldingDict`).
    """

    initOutputs: _DatasetScaffoldingDict
    """Dictionary containing information about datasets produced as a
    side-effect of constructing this task (`_DatasetScaffoldingDict`).
    """

    inputs: _DatasetScaffoldingDict
    """Dictionary containing information about datasets used as regular,
    graph-constraining inputs to this task (`_DatasetScaffoldingDict`).
    """

    outputs: _DatasetScaffoldingDict
    """Dictionary containing information about datasets produced by this task
    (`_DatasetScaffoldingDict`).
    """

    prerequisites: _DatasetScaffoldingDict
    """Dictionary containing information about input datasets that must be
    present in the repository before any Pipeline containing this task is run
    (`_DatasetScaffoldingDict`).
    """

    dataIds: Set[DataId]
    """Data IDs for all quanta for this task in the graph (`set` of `DataId`).

    Populated after construction by `_PipelineScaffolding.fillDataIds`.
    """

    quanta: List[Quantum]
    """All quanta for this task in the graph (`list` of `Quantum`).

    Populated after construction by `_PipelineScaffolding.fillQuanta`.
    """

    def addQuantum(self, quantum: Quantum):
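        """Add a quantum for this task, validating its predicted inputs first.

        Parameters
        ----------
        quantum : `Quantum`
            Quantum to add; its predicted inputs are passed through the task
            connections' ``adjustQuantum`` check before it is recorded in
            ``self.quanta``.
        """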

        config = self.taskDef.config
        connectionClass = config.connections.ConnectionsClass
        connectionInstance = connectionClass(config=config)
        # This will raise if one of the check conditions is not met, which is
        # the intended behavior.
        result = connectionInstance.adjustQuantum(quantum.predictedInputs)
        quantum._predictedInputs = NamedKeyDict(result)

        # If the function has reached this far, add the quantum.
        self.quanta.append(quantum)

 

    def makeQuantumGraphTaskNodes(self) -> QuantumGraphTaskNodes:
        """Create a `QuantumGraphTaskNodes` instance from the information in
        ``self``.

        Returns
        -------
        nodes : `QuantumGraphTaskNodes`
            The `QuantumGraph` elements corresponding to this task.
        """
        return QuantumGraphTaskNodes(
            taskDef=self.taskDef,
            quanta=self.quanta,
            initInputs=self.initInputs.unpackRefs(),
            initOutputs=self.initOutputs.unpackRefs(),
        )

 

 

@dataclass
class _PipelineScaffolding:
    """A helper data structure that organizes the information involved in
    constructing a `QuantumGraph` for a `Pipeline`.

    Parameters
    ----------
    pipeline : `Pipeline`
        Sequence of tasks from which a graph is to be constructed.  Must
        have nested task classes already imported.
    universe : `DimensionUniverse`
        Universe of all possible dimensions.

    Raises
    ------
    GraphBuilderError
        Raised if a task's dimensions are not a subset of the union of the
        pipeline's dataset dimensions.

    Notes
    -----
    The scaffolding data structure contains nested data structures for both
    tasks (`_TaskScaffolding`) and datasets (`_DatasetScaffolding`), with the
    latter held by `_DatasetScaffoldingDict`.  The dataset data structures are
    shared between the pipeline-level structure (which aggregates all datasets
    and categorizes them from the perspective of the complete pipeline) and
    the individual tasks that use them as inputs and outputs.

    `QuantumGraph` construction proceeds in five steps, with each
    corresponding to a different `_PipelineScaffolding` method:

    1. When `_PipelineScaffolding` is constructed, we extract and categorize
       the DatasetTypes used by the pipeline (delegating to
       `PipelineDatasetTypes.fromPipeline`), then use these to construct the
       nested `_TaskScaffolding` and `_DatasetScaffolding` objects.

    2. In `fillDataIds`, we construct and run the "Big Join Query", which
       returns related tuples of all dimensions used to identify any regular
       input, output, and intermediate datasets (not prerequisites).  We then
       iterate over these tuples of related dimensions, identifying the
       subsets that correspond to distinct data IDs for each task and dataset
       type.

    3. In `fillDatasetRefs`, we run follow-up queries against all of the
       dataset data IDs previously identified, populating the
       `_DatasetScaffolding.refs` lists - except for those for prerequisite
       datasets, which cannot be resolved until distinct quanta are
       identified.

    4. In `fillQuanta`, we extract subsets from the lists of `DatasetRef` into
       the inputs and outputs for each `Quantum` and search for prerequisite
       datasets, populating `_TaskScaffolding.quanta`.

    5. In `makeQuantumGraph`, we construct a `QuantumGraph` from the lists of
       per-task quanta identified in the previous step.
    """

    def __init__(self, pipeline, *, universe):
        self.tasks = []
        # Aggregate and categorize the DatasetTypes in the Pipeline.
        datasetTypes = PipelineDatasetTypes.fromPipeline(pipeline, universe=universe)
        # Construct dictionaries that map those DatasetTypes to structures
        # that will (later) hold additional information about them.
        for attr in ("initInputs", "initIntermediates", "initOutputs",
                     "inputs", "intermediates", "outputs", "prerequisites"):
            setattr(self, attr, _DatasetScaffoldingDict.fromDatasetTypes(getattr(datasetTypes, attr),
                                                                         universe=universe))
        # Aggregate all dimensions for all non-init, non-prerequisite
        # DatasetTypes.  These are the ones we'll include in the big join
        # query.
        self.dimensions = self.inputs.dimensions.union(self.inputs.dimensions,
                                                       self.intermediates.dimensions,
                                                       self.outputs.dimensions, implied=True)
        # Construct scaffolding nodes for each Task, and add backreferences
        # to the Task from each DatasetScaffolding node.  Note that there's
        # only one scaffolding node for each DatasetType, shared by
        # _PipelineScaffolding and all _TaskScaffoldings that reference it.
        self.tasks = [_TaskScaffolding(taskDef=taskDef, parent=self, datasetTypes=taskDatasetTypes)
                      for taskDef, taskDatasetTypes in zip(pipeline, datasetTypes.byTask.values())]

 

    tasks: List[_TaskScaffolding]
    """Scaffolding data structures for each task in the pipeline
    (`list` of `_TaskScaffolding`).
    """

    initInputs: _DatasetScaffoldingDict
    """Datasets consumed but not produced when constructing the tasks in this
    pipeline (`_DatasetScaffoldingDict`).
    """

    initIntermediates: _DatasetScaffoldingDict
    """Datasets that are both consumed and produced when constructing the
    tasks in this pipeline (`_DatasetScaffoldingDict`).
    """

    initOutputs: _DatasetScaffoldingDict
    """Datasets produced but not consumed when constructing the tasks in this
    pipeline (`_DatasetScaffoldingDict`).
    """

    inputs: _DatasetScaffoldingDict
    """Datasets that are consumed but not produced when running this pipeline
    (`_DatasetScaffoldingDict`).
    """

    intermediates: _DatasetScaffoldingDict
    """Datasets that are both produced and consumed when running this pipeline
    (`_DatasetScaffoldingDict`).
    """

    outputs: _DatasetScaffoldingDict
    """Datasets produced but not consumed when running this pipeline
    (`_DatasetScaffoldingDict`).
    """

    prerequisites: _DatasetScaffoldingDict
    """Datasets that are consumed when running this pipeline and looked up
    per-Quantum when generating the graph (`_DatasetScaffoldingDict`).
    """

    dimensions: DimensionGraph
    """All dimensions used by any regular input, intermediate, or output
    (not prerequisite) dataset; the set of dimensions used in the "Big Join
    Query" (`DimensionGraph`).

    This is required to be a superset of all task quantum dimensions.
    """

 

    def fillDataIds(self, registry, originInfo, userQuery):
        """Query for the data IDs that connect nodes in the `QuantumGraph`.

        This method populates `_TaskScaffolding.dataIds` and
        `_DatasetScaffolding.dataIds` (except for those in `prerequisites`).

        Parameters
        ----------
        registry : `lsst.daf.butler.Registry`
            Registry for the data repository; used for all data ID queries.
        originInfo : `lsst.daf.butler.DatasetOriginInfo`
            Object holding the input and output collections for each
            `DatasetType`.
        userQuery : `str`, optional
            User-provided expression to limit the data IDs processed.
        """
        # Initialization datasets always have empty data IDs.
        emptyDataId = DataId(dimensions=registry.dimensions.empty)
        for scaffolding in itertools.chain(self.initInputs.values(),
                                           self.initIntermediates.values(),
                                           self.initOutputs.values()):
            scaffolding.dataIds.add(emptyDataId)
        # We'll run one big query for the data IDs for task dimensions and
        # regular inputs and outputs.
        query = DataIdQueryBuilder.fromDimensions(registry, self.dimensions)
        # Limit the query to data IDs for which the input datasets exist in
        # the input collections.
        for datasetType in self.inputs:
            query.requireDataset(datasetType, originInfo.getInputCollections(datasetType.name))
        # Add the user expression, if any.
        if userQuery:
            query.whereParsedExpression(userQuery)
        # Execute the query and populate the data IDs in the _TaskScaffolding
        # and _DatasetScaffolding objects, extracting the subsets of the
        # common data ID from the query corresponding to the dimensions of
        # each.  By using sets, we remove duplicates caused by query rows in
        # which the dimensions that change are not relevant for that task or
        # dataset type.  For example, if the Big Join Query involves the
        # dimensions (instrument, visit, detector, skymap, tract, patch), we
        # extract "calexp" data IDs from the instrument, visit, and detector
        # values only, and rely on `set.add` to avoid duplications due to
        # result rows in which only skymap, tract, and patch are varying.
        # The Big Join Query is defined such that only visit+detector and
        # tract+patch combinations that represent spatial overlaps are
        # included in the results.
        for commonDataId in query.execute():
            for taskScaffolding in self.tasks:
                dataId = DataId(commonDataId, dimensions=taskScaffolding.dimensions)
                taskScaffolding.dataIds.add(dataId)
            for datasetType, scaffolding in itertools.chain(self.inputs.items(),
                                                            self.intermediates.items(),
                                                            self.outputs.items()):
                dataId = DataId(commonDataId, dimensions=scaffolding.dimensions)
                scaffolding.dataIds.add(dataId)

 

    def fillDatasetRefs(self, registry, originInfo, *, skipExisting=True, clobberExisting=False):
        """Perform follow-up queries for each dataset data ID produced in
        `fillDataIds`.

        This method populates `_DatasetScaffolding.refs` (except for those in
        `prerequisites`).

        Parameters
        ----------
        registry : `lsst.daf.butler.Registry`
            Registry for the data repository; used for all data ID queries.
        originInfo : `lsst.daf.butler.DatasetOriginInfo`
            Object holding the input and output collections for each
            `DatasetType`.
        skipExisting : `bool`, optional
            If `True` (default), a Quantum is not created if all its outputs
            already exist.
        clobberExisting : `bool`, optional
            If `True`, overwrite any outputs that already exist.  Cannot be
            `True` if ``skipExisting`` is.

        Raises
        ------
        ValueError
            Raised if both ``skipExisting`` and ``clobberExisting`` are
            `True`.
        OutputExistsError
            Raised if an output dataset already exists in the output
            collection and both ``skipExisting`` and ``clobberExisting`` are
            `False`.  The case where some but not all of a quantum's outputs
            are present and ``skipExisting`` is `True` cannot be identified at
            this stage, and is handled by `fillQuanta` instead.
        """
        if clobberExisting and skipExisting:
            raise ValueError("clobberExisting and skipExisting cannot both be true.")
        # Look up input and initInput datasets in the input collection(s).
        for datasetType, scaffolding in itertools.chain(self.initInputs.items(), self.inputs.items()):
            for dataId in scaffolding.dataIds:
                # TODO: we only need to use SingleDatasetQueryBuilder here
                # because it provides multi-collection search support.  There
                # should be a way to do that directly with Registry, and it
                # should probably operate by just doing an unordered
                # collection search and resolving the order in Python.
                builder = SingleDatasetQueryBuilder.fromCollections(
                    registry, datasetType,
                    collections=originInfo.getInputCollections(datasetType.name)
                )
                builder.whereDataId(dataId)
                ref = builder.executeOne(expandDataId=True)
                if ref is None:
                    # Data IDs have been expanded to include implied
                    # dimensions, which is not what we want for the
                    # DatasetRef.  Constructing a new DataId shrinks them
                    # back down.
                    ref = DatasetRef(datasetType, DataId(dataId, dimensions=datasetType.dimensions))
                scaffolding.refs.append(ref)
        # Look up [init] intermediate and output datasets in the output
        # collection, unless clobberExisting is True (in which case we don't
        # care if these already exist).
        for datasetType, scaffolding in itertools.chain(self.initIntermediates.items(),
                                                        self.initOutputs.items(),
                                                        self.intermediates.items(),
                                                        self.outputs.items()):
            collection = originInfo.getOutputCollection(datasetType.name)
            for dataId in scaffolding.dataIds:
                # TODO: we could easily support per-DatasetType clobberExisting
                # and skipExisting (it might make sense to put them in
                # originInfo), and I could imagine that being useful - it's
                # probably required in order to support writing initOutputs
                # before QuantumGraph generation.
                if clobberExisting:
                    ref = None
                else:
                    ref = registry.find(collection=collection, datasetType=datasetType, dataId=dataId)
                if ref is None:
                    # Data IDs have been expanded to include implied
                    # dimensions, which is not what we want for the
                    # DatasetRef.  Constructing a new DataId shrinks them
                    # back down.
                    ref = DatasetRef(datasetType, DataId(dataId, dimensions=datasetType.dimensions))
                elif not skipExisting:
                    raise OutputExistsError(f"Output dataset {datasetType.name} already exists in "
                                            f"output collection {collection} with data ID {dataId}.")
                scaffolding.refs.append(ref)
        # Prerequisite dataset lookups are deferred until fillQuanta.

 

    def fillQuanta(self, registry, originInfo, *, skipExisting=True):
        """Define quanta for each task by splitting up the datasets associated
        with each task data ID.

        This method populates `_TaskScaffolding.quanta`.

        Parameters
        ----------
        registry : `lsst.daf.butler.Registry`
            Registry for the data repository; used for all data ID queries.
        originInfo : `lsst.daf.butler.DatasetOriginInfo`
            Object holding the input and output collections for each
            `DatasetType`.
        skipExisting : `bool`, optional
            If `True` (default), a Quantum is not created if all its outputs
            already exist.
        """
        for task in self.tasks:
            for quantumDataId in task.dataIds:
                # Identify the (regular) inputs that correspond to the Quantum
                # with this data ID.  These are those whose data IDs have the
                # same values for all dimensions they have in common.  We do
                # this with data IDs expanded to include implied dimensions,
                # which is why _DatasetScaffolding.dimensions is expanded even
                # though DatasetType.dimensions is not.
                inputs = NamedKeyDict()
                for datasetType, scaffolding in task.inputs.items():
                    inputs[datasetType] = [ref for ref, dataId in zip(scaffolding.refs, scaffolding.dataIds)
                                           if quantumDataId.matches(dataId)]
                # Same for outputs.
                outputs = NamedKeyDict()
                allOutputsPresent = True
                for datasetType, scaffolding in task.outputs.items():
                    outputs[datasetType] = []
                    for ref, dataId in zip(scaffolding.refs, scaffolding.dataIds):
                        if quantumDataId.matches(dataId):
                            if ref.id is None:
                                allOutputsPresent = False
                            else:
                                assert skipExisting, "Existing outputs should have already been identified."
                                if not allOutputsPresent:
                                    raise OutputExistsError(f"Output {datasetType.name} with data ID "
                                                            f"{dataId} already exists, but other outputs "
                                                            f"for task with label {task.taskDef.label} "
                                                            f"and data ID {quantumDataId} do not.")
                            outputs[datasetType].append(ref)
                if allOutputsPresent and skipExisting:
                    continue

                # Look up prerequisite datasets in the input collection(s).
                # These may have dimensions that extend beyond those we
                # queried for originally, because we want to permit those data
                # ID values to differ across quanta and dataset types.  For
                # example, the same quantum may have a flat and bias with a
                # different calibration_label, or a refcat with a skypix value
                # that overlaps the quantum's data ID's region, but not the
                # user expression used for the initial query.
                for datasetType, scaffolding in task.prerequisites.items():
                    builder = SingleDatasetQueryBuilder.fromCollections(
                        registry, datasetType,
                        collections=originInfo.getInputCollections(datasetType.name)
                    )
                    if not datasetType.dimensions.issubset(quantumDataId.dimensions()):
                        builder.relateDimensions(quantumDataId.dimensions(), addResultColumns=False)
                    builder.whereDataId(quantumDataId)
                    refs = list(builder.execute(expandDataId=True))
                    if len(refs) == 0:
                        raise PrerequisiteMissingError(
                            f"No instances of prerequisite dataset {datasetType.name} found for task "
                            f"with label {task.taskDef.label} and quantum data ID {quantumDataId}."
                        )
                    inputs[datasetType] = refs
                task.addQuantum(
                    Quantum(
                        taskName=task.taskDef.taskName,
                        taskClass=task.taskDef.taskClass,
                        dataId=quantumDataId,
                        initInputs=task.initInputs.unpackRefs(),
                        predictedInputs=inputs,
                        outputs=outputs,
                    )
                )

 

    def makeQuantumGraph(self):
        """Create a `QuantumGraph` from the quanta already present in
        the scaffolding data structure.
        """
        graph = QuantumGraph(task.makeQuantumGraphTaskNodes() for task in self.tasks)
        graph.initInputs = self.initInputs.unpackRefs()
        graph.initOutputs = self.initOutputs.unpackRefs()
        graph.initIntermediates = self.initIntermediates.unpackRefs()
        return graph


# ------------------------
# Exported definitions --
# ------------------------

 

 

class GraphBuilderError(Exception):
    """Base class for exceptions generated by the graph builder.
    """
    pass


class OutputExistsError(GraphBuilderError):
    """Exception generated when output datasets already exist.
    """
    pass


class PrerequisiteMissingError(GraphBuilderError):
    """Exception generated when a prerequisite dataset does not exist.
    """
    pass

 

 

class GraphBuilder(object):
    """GraphBuilder class is responsible for building a task execution graph
    from a Pipeline.

    Parameters
    ----------
    taskFactory : `TaskFactory`
        Factory object used to load/instantiate PipelineTasks.
    registry : `~lsst.daf.butler.Registry`
        Data butler registry instance.
    skipExisting : `bool`, optional
        If `True` (default), a Quantum is not created if all its outputs
        already exist.
    clobberExisting : `bool`, optional
        If `True`, overwrite any outputs that already exist.  Cannot be
        `True` if ``skipExisting`` is.
    """

    def __init__(self, taskFactory, registry, skipExisting=True, clobberExisting=False):
        self.taskFactory = taskFactory
        self.registry = registry
        self.dimensions = registry.dimensions
        self.skipExisting = skipExisting
        self.clobberExisting = clobberExisting

    def _loadTaskClass(self, taskDef):
        """Make sure the task class is loaded.

        Load the task class and update the task name to make sure it is
        fully qualified, but do not update the original taskDef in the
        Pipeline.

        Parameters
        ----------
        taskDef : `TaskDef`
            Task definition, possibly without its task class loaded.

        Returns
        -------
        taskDef : `TaskDef`
            `TaskDef` instance; may be the same as the parameter if the task
            class is already loaded.
        """
        if taskDef.taskClass is None:
            tClass, tName = self.taskFactory.loadTaskClass(taskDef.taskName)
            taskDef = copy.copy(taskDef)
            taskDef.taskClass = tClass
            taskDef.taskName = tName
        return taskDef

    def makeGraph(self, pipeline, originInfo, userQuery):
        """Create an execution graph for a pipeline.

        Parameters
        ----------
        pipeline : `Pipeline`
            Pipeline definition, task names/classes and their configs.
        originInfo : `~lsst.daf.butler.DatasetOriginInfo`
            Object which provides names of the input/output collections.
        userQuery : `str`
            String which defines the user-provided selection for the
            registry; should be empty or `None` if there are no restrictions
            on data selection.

        Returns
        -------
        graph : `QuantumGraph`
            Execution graph for the pipeline.

        Raises
        ------
        UserExpressionError
            Raised when the user expression cannot be parsed.
        OutputExistsError
            Raised when output datasets already exist.
        Exception
            Other exception types may be raised by underlying registry
            classes.
        """
        # Make sure all task classes are loaded, creating a new Pipeline
        # to avoid modifying the input one.
        # TODO: in the future, it would be preferable for `Pipeline` to
        # guarantee that its Task classes have been imported to avoid this
        # sort of two-stage initialization.
        pipeline = Pipeline([self._loadTaskClass(taskDef) for taskDef in pipeline])

        scaffolding = _PipelineScaffolding(pipeline, universe=self.registry.dimensions)

        scaffolding.fillDataIds(self.registry, originInfo, userQuery)
        scaffolding.fillDatasetRefs(self.registry, originInfo,
                                    skipExisting=self.skipExisting,
                                    clobberExisting=self.clobberExisting)
        scaffolding.fillQuanta(self.registry, originInfo,
                               skipExisting=self.skipExisting)

        return scaffolding.makeQuantumGraph()
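

# A minimal usage sketch.  The names below (taskFactory, registry,
# originInfo, pipeline) and the query string are illustrative assumptions;
# the caller must supply a concrete TaskFactory, a butler Registry, a
# DatasetOriginInfo, and a Pipeline whose task classes can be imported:
#
#     builder = GraphBuilder(taskFactory, registry, skipExisting=True)
#     quantumGraph = builder.makeGraph(pipeline, originInfo,
#                                      userQuery="visit = 903334")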