# This file is part of daf_butler.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from copy import deepcopy
import hashlib
from types import MappingProxyType

from .utils import slotValuesAreEqual
from .storageClass import StorageClass, StorageClassFactory
from .dimensions import DimensionGraph, DimensionNameSet, DataId
from .configSupport import LookupKey

__all__ = ("DatasetType", "DatasetRef")


def _safeMakeMappingProxyType(data):
    """Return a read-only mapping view of ``data``, substituting an empty
    `dict` if ``data`` is `None`.
    """
    if data is None:
        data = {}
    return MappingProxyType(data)


class DatasetType:
    r"""A named category of Datasets that defines how they are organized,
    related, and stored.

    A concrete, final class whose instances represent `DatasetType`\ s.
    `DatasetType` instances may be constructed without a `Registry`,
    but they must be registered via `Registry.registerDatasetType()`
    before corresponding Datasets may be added.
    `DatasetType` instances are immutable.

    Parameters
    ----------
    name : `str`
        A string name for the Dataset; must correspond to the same
        `DatasetType` across all Registries.
    dimensions : `DimensionGraph` or iterable of `str`
        Dimensions used to label and relate instances of this DatasetType,
        or string names thereof.
    storageClass : `StorageClass` or `str`
        Instance of a `StorageClass` or name of `StorageClass` that defines
        how this `DatasetType` is persisted.
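
    Examples
    --------
    A minimal construction sketch; the dataset type name, dimension names,
    and storage class name below are illustrative only. The storage class
    is looked up lazily, so it need not be registered for this to work:

    >>> dt = DatasetType("deepCoadd", dimensions=["tract", "patch"],
    ...                  storageClass="ExposureF")
    >>> dt.name
    'deepCoadd'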

""" 

 

__slots__ = ("_name", "_dimensions", "_storageClass", "_storageClassName") 

 

    @staticmethod
    def nameWithComponent(datasetTypeName, componentName):
        """Form a valid DatasetTypeName from a parent and component.

        No validation is performed.

        Parameters
        ----------
        datasetTypeName : `str`
            Base type name.
        componentName : `str`
            Name of component.

        Returns
        -------
        compTypeName : `str`
            Name to use for component DatasetType.
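
        Examples
        --------
        The result is simply the two names joined by a "."; the names
        below are illustrative:

        >>> DatasetType.nameWithComponent("deepCoadd", "psf")
        'deepCoadd.psf'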

""" 

return "{}.{}".format(datasetTypeName, componentName) 

 

    def __init__(self, name, dimensions, storageClass):
        self._name = name
        if isinstance(dimensions, (DimensionGraph, DimensionNameSet)):
            self._dimensions = dimensions
        else:
            self._dimensions = DimensionNameSet(names=dimensions)
        assert isinstance(storageClass, (StorageClass, str))
        if isinstance(storageClass, StorageClass):
            self._storageClass = storageClass
            self._storageClassName = storageClass.name
        else:
            self._storageClass = None
            self._storageClassName = storageClass

    def __repr__(self):
        return "DatasetType({}, {}, {})".format(self.name, self.dimensions, self._storageClassName)

    def __eq__(self, other):
        if not isinstance(other, DatasetType):
            return NotImplemented
        if self._name != other._name:
            return False
        if self._dimensions != other._dimensions:
            return False
        if self._storageClass is not None and other._storageClass is not None:
            return self._storageClass == other._storageClass
        else:
            return self._storageClassName == other._storageClassName

    def __hash__(self):
        """Hash DatasetType instance.

        This only uses the StorageClass name, which is consistent with the
        implementation of the StorageClass hash method.
        """
        return hash((self._name, self._dimensions, self._storageClassName))

    @property
    def name(self):
        """A string name for the Dataset; must correspond to the same
        `DatasetType` across all Registries.
        """
        return self._name

    @property
    def dimensions(self):
        r"""The `Dimension`\ s that label and relate instances of this
        `DatasetType` (`DimensionGraph` or `DimensionNameSet`).

        If this `DatasetType` was not obtained from or registered with a
        `Registry`, this will typically be a `DimensionNameSet`, with much
        less functionality (just an unsorted ``.names`` and comparison
        operators) than a full `DimensionGraph`.
        """
        return self._dimensions

    @property
    def storageClass(self):
        """`StorageClass` instance that defines how this `DatasetType`
        is persisted.

        Note that if the `DatasetType` was constructed with the name of a
        `StorageClass`, the Butler has to be initialized before this
        property is used, so that the name can be resolved through the
        `StorageClassFactory`.
        """
        if self._storageClass is None:
            self._storageClass = StorageClassFactory().getStorageClass(self._storageClassName)
        return self._storageClass

    @staticmethod
    def splitDatasetTypeName(datasetTypeName):
        """Given a dataset type name, return the root name and the component
        name.

        Parameters
        ----------
        datasetTypeName : `str`
            The name of the dataset type; it can include a component using
            a "." separator.

        Returns
        -------
        rootName : `str`
            Root name without any components.
        componentName : `str`
            The component if it has been specified, else `None`.

        Notes
        -----
        If the dataset type name is ``a.b.c`` this method will return a
        root name of ``a`` and a component name of ``b.c``.
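
        Examples
        --------
        The split happens at the first "."; any remaining dots stay in
        the component name:

        >>> DatasetType.splitDatasetTypeName("a.b.c")
        ('a', 'b.c')
        >>> DatasetType.splitDatasetTypeName("calexp")
        ('calexp', None)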

""" 

comp = None 

root = datasetTypeName 

if "." in root: 

# If there is doubt, the component is after the first "." 

root, comp = root.split(".", maxsplit=1) 

return root, comp 

 

    def nameAndComponent(self):
        """Return the root name of this dataset type and the component
        name (if defined).

        Returns
        -------
        rootName : `str`
            Root name for this `DatasetType` without any components.
        componentName : `str`
            The component if it has been specified, else `None`.
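
        Examples
        --------
        A minimal sketch; the dataset type, dimension, and storage class
        names are illustrative (the storage class is never resolved here):

        >>> dt = DatasetType("calexp.wcs", dimensions=["instrument"],
        ...                  storageClass="Wcs")
        >>> dt.nameAndComponent()
        ('calexp', 'wcs')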

""" 

return self.splitDatasetTypeName(self.name) 

 

    def component(self):
        """Component name (if defined).

        Returns
        -------
        comp : `str`
            Name of component part of DatasetType name. `None` if this
            `DatasetType` is not associated with a component.
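
        Examples
        --------
        Continuing the illustrative names used above:

        >>> dt = DatasetType("calexp.wcs", dimensions=["instrument"],
        ...                  storageClass="Wcs")
        >>> dt.component()
        'wcs'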

""" 

_, comp = self.nameAndComponent() 

return comp 

 

    def componentTypeName(self, component):
        """Given a component name, derive the datasetTypeName of that
        component.

        Parameters
        ----------
        component : `str`
            Name of component.

        Returns
        -------
        derived : `str`
            Compound name of this `DatasetType` and the component.

        Raises
        ------
        KeyError
            Requested component is not supported by this `DatasetType`.
        """
        if component in self.storageClass.components:
            return self.nameWithComponent(self.name, component)
        raise KeyError("Requested component ({}) not understood by this DatasetType".format(component))

    def isComposite(self):
        """Boolean indicating whether this `DatasetType` is a composite type.

        Returns
        -------
        isComposite : `bool`
            `True` if this `DatasetType` is a composite type, `False`
            otherwise.
        """
        return self.storageClass.isComposite()

    def _lookupNames(self):
        """Name keys to use when looking up this datasetType in a
        configuration.

        The names are returned in order of priority.

        Returns
        -------
        names : `tuple` of `LookupKey`
            Tuple of the `DatasetType` name and the `StorageClass` name.
            If the name includes a component the name with the component
            is first, then the name without the component, then a key for
            the dimensions (if any are defined), and finally the storage
            class names.
        """
        rootName, componentName = self.nameAndComponent()
        lookups = (LookupKey(name=self.name),)
        if componentName is not None:
            lookups = lookups + (LookupKey(name=rootName),)

        if self.dimensions:
            # Dimensions are a lower priority than dataset type name
            lookups = lookups + (LookupKey(dimensions=self.dimensions),)

        return lookups + self.storageClass._lookupNames()

    def __reduce__(self):
        """Support pickling.

        StorageClass instances cannot normally be pickled, so we pickle the
        StorageClass name instead of the instance.
        """
        return (DatasetType, (self.name, self.dimensions, self._storageClassName))

    def __deepcopy__(self, memo):
        """Support for deep copy method.

        Normally ``deepcopy`` will use the pickle mechanism to make copies.
        We want to avoid that to support the (possibly degenerate) use case
        in which a DatasetType is constructed with a StorageClass instance
        that is not registered with the StorageClassFactory (this happens
        in unit tests). Instead we re-implement the ``__deepcopy__`` method.
        """
        return DatasetType(name=deepcopy(self.name, memo),
                           dimensions=deepcopy(self.dimensions, memo),
                           storageClass=deepcopy(self._storageClass or self._storageClassName, memo))

    def normalize(self, universe):
        """Ensure the dimensions and storage class name are valid, and make
        ``self.dimensions`` a true `DimensionGraph` instance if it isn't
        already.

        Parameters
        ----------
        universe : `DimensionGraph`
            The set of all known dimensions.

        Raises
        ------
        ValueError
            Raised if the DatasetType is invalid, either because one or more
            dimensions in ``self.dimensions`` is not in ``universe``, or the
            storage class name is not recognized.
        """
        if not isinstance(self._dimensions, DimensionGraph):
            self._dimensions = universe.extract(self._dimensions)
        try:
            # Trigger lookup of the StorageClass instance from the
            # StorageClass name.  KeyError (sort of) makes sense in that
            # context, but it doesn't make as much sense in the context in
            # which normalize() is called, so we translate it to ValueError.
            self.storageClass
        except KeyError as err:
            raise ValueError(f"Storage class '{self._storageClassName}' not recognized.") from err


class DatasetRef:
    """Reference to a Dataset in a `Registry`.

    A `DatasetRef` may point to a Dataset that does not yet exist
    (e.g., because it is a predicted input for provenance).

    Parameters
    ----------
    datasetType : `DatasetType`
        The `DatasetType` for this Dataset.
    dataId : `dict` or `DataId`
        A `dict` of `Dimension` link fields that labels the Dataset within a
        Collection.
    id : `int`, optional
        A unique identifier.
        Normally set to `None` and assigned by `Registry`.
    run : `Run`, optional
        The `~lsst.daf.butler.Run` instance that produced (or will produce)
        the Dataset.
    hash : `bytes`, optional
        Secure hash of the `DatasetType` name and `DataId`; if not
        provided, it is computed on first access of the ``hash`` property.
    components : `dict`, optional
        Named `DatasetRef` components to associate with this Dataset.
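
    Examples
    --------
    A minimal sketch; the dataset type, dimension, storage class, and
    instrument names below are all illustrative:

    >>> dt = DatasetType("metric", dimensions=["instrument"],
    ...                  storageClass="StructuredData")
    >>> ref = DatasetRef(dt, {"instrument": "DummyCam"})
    >>> ref.id is None
    True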

""" 

 

__slots__ = ("_id", "_datasetType", "_dataId", "_producer", "_run", "_hash", 

"_predictedConsumers", "_actualConsumers", "_components") 

 

    def __init__(self, datasetType, dataId, id=None, run=None, hash=None, components=None):
        assert isinstance(datasetType, DatasetType)

        # Check the dimensions match if a DataId is provided
        if isinstance(dataId, DataId) and isinstance(datasetType.dimensions, DimensionGraph):
            if dataId.dimensions() != datasetType.dimensions:
                raise ValueError(f"Dimensions mismatch for {dataId} and {datasetType}")

        self._id = id
        self._datasetType = datasetType
        self._dataId = dataId
        self._producer = None
        self._predictedConsumers = dict()
        self._actualConsumers = dict()
        self._components = dict()
        if components is not None:
            self._components.update(components)
        self._run = run
        self._hash = hash

    __eq__ = slotValuesAreEqual

    def __repr__(self):
        return f"DatasetRef({self.datasetType}, {self.dataId}, id={self.id}, run={self.run})"

    @property
    def id(self):
        """Primary key of the dataset (`int`).

        Typically assigned by `Registry`.
        """
        return self._id

    @property
    def hash(self):
        """Secure hash of the `DatasetType` name and `DataId` (`bytes`).
        """
        if self._hash is None:
            message = hashlib.blake2b(digest_size=32)
            message.update(self.datasetType.name.encode("utf8"))
            self.dataId.updateHash(message)
            self._hash = message.digest()
        return self._hash

    @property
    def datasetType(self):
        """The `DatasetType` associated with the Dataset the `DatasetRef`
        points to.
        """
        return self._datasetType

    @property
    def dataId(self):
        """A `dict` of `Dimension` link fields that labels the Dataset
        within a Collection (`dict` or `DataId`).
        """
        return self._dataId

    @property
    def producer(self):
        """The `~lsst.daf.butler.Quantum` instance that produced (or will
        produce) the Dataset.

        Read-only; update via `~lsst.daf.butler.Registry.addDataset()`,
        `~lsst.daf.butler.Quantum.addOutput()`, or
        `~lsst.daf.butler.Butler.put()`.
        May be `None` if no provenance information is available.
        """
        return self._producer

    @property
    def run(self):
        """The `~lsst.daf.butler.Run` instance that produced (or will
        produce) the Dataset.

        Read-only; update via `~lsst.daf.butler.Registry.addDataset()` or
        `~lsst.daf.butler.Butler.put()`.
        """
        return self._run

    @property
    def predictedConsumers(self):
        """A mapping of the `Quantum` instances that list this Dataset in
        their `predictedInputs` attributes.

        Read-only; update via `Quantum.addPredictedInput()`.
        May be empty if no provenance information is available.
        """
        return _safeMakeMappingProxyType(self._predictedConsumers)

    @property
    def actualConsumers(self):
        """A mapping of the `Quantum` instances that list this Dataset in
        their `actualInputs` attributes.

        Read-only; update via `Registry.markInputUsed()`.
        May be empty if no provenance information is available.
        """
        return _safeMakeMappingProxyType(self._actualConsumers)

    @property
    def components(self):
        """Named `DatasetRef` components.

        Read-only; update via `Registry.attachComponent()`.
        """
        return _safeMakeMappingProxyType(self._components)

    @property
    def dimensions(self):
        """The dimensions associated with the underlying `DatasetType`.
        """
        return self.datasetType.dimensions

    def __str__(self):
        components = ""
        if self.components:
            components = ", components=[" + ", ".join(self.components) + "]"
        return "DatasetRef({}, id={}, dataId={}{})".format(self.datasetType.name,
                                                           self.id, self.dataId, components)

    def detach(self):
        """Obtain a new DatasetRef that is detached from the registry.

        Its ``id`` property will be `None`. This can be used for transfers
        and similar operations.
        """
        ref = deepcopy(self)
        ref._id = None
        return ref

    def isComposite(self):
        """Boolean indicating whether this `DatasetRef` is a composite type.

        Returns
        -------
        isComposite : `bool`
            `True` if this `DatasetRef` is a composite type, `False`
            otherwise.
        """
        return self.datasetType.isComposite()

    def _lookupNames(self):
        """Name keys to use when looking up this DatasetRef in a
        configuration.

        The names are returned in order of priority.

        Returns
        -------
        names : `tuple` of `LookupKey`
            Tuple of the `DatasetType` name and the `StorageClass` name.
            If ``instrument`` is defined in the dataId, each of those names
            is added to the start of the tuple with a key derived from the
            value of ``instrument``.
        """
        # Special case the instrument Dimension since we allow configs
        # to include the instrument name in the hierarchy.
        names = self.datasetType._lookupNames()

        if "instrument" in self.dataId:
            names = tuple(n.clone(dataId={"instrument": self.dataId["instrument"]})
                          for n in names) + names

        return names