Coverage for python/lsst/daf/butler/transfers/_yaml.py: 11%


# This file is part of daf_butler.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

__all__ = ["YamlRepoExportBackend", "YamlRepoImportBackend"]

import uuid
import warnings
from collections import defaultdict
from datetime import datetime
from typing import IO, TYPE_CHECKING, Any, Dict, Iterable, List, Mapping, Optional, Set, Tuple, Type

import astropy.time
import yaml
from lsst.resources import ResourcePath
from lsst.utils import doImportType
from lsst.utils.iteration import ensure_iterable

from ..core import (
    DatasetAssociation,
    DatasetId,
    DatasetIdGenEnum,
    DatasetRef,
    DatasetType,
    Datastore,
    DimensionElement,
    DimensionRecord,
    DimensionUniverse,
    FileDataset,
    Timespan,
)
from ..core.named import NamedValueSet
from ..registry import CollectionType, Registry
from ..registry.interfaces import ChainedCollectionRecord, CollectionRecord, RunRecord, VersionTuple
from ..registry.versions import IncompatibleVersionError
from ._interfaces import RepoExportBackend, RepoImportBackend

if TYPE_CHECKING:
    from lsst.resources import ResourcePathExpression

EXPORT_FORMAT_VERSION = VersionTuple(1, 0, 2)
"""Export format version.

Files with a different major version or a newer minor version cannot be read by
this version of the code.
"""


def _uuid_representer(dumper: yaml.Dumper, data: uuid.UUID) -> yaml.Node:
    """Generate YAML representation for UUID.

    This produces a scalar node with a "!uuid" tag whose value is the regular
    string representation of the UUID.
    """
    return dumper.represent_scalar("!uuid", str(data))


def _uuid_constructor(loader: yaml.Loader, node: yaml.Node) -> Optional[uuid.UUID]:
    if node.value is not None:
        return uuid.UUID(hex=node.value)
    return None


yaml.Dumper.add_representer(uuid.UUID, _uuid_representer)
yaml.SafeLoader.add_constructor("!uuid", _uuid_constructor)
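# With these two hooks registered, uuid.UUID values round-trip through YAML:
# yaml.dump serializes them as "!uuid" scalars, e.g.
# "!uuid '6ba7b810-9dad-11d1-80b4-00c04fd430c8'", and yaml.safe_load turns
# such scalars back into equal uuid.UUID instances.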


class YamlRepoExportBackend(RepoExportBackend):
    """A repository export implementation that saves to a YAML file.

    Parameters
    ----------
    stream
        A writeable file-like object.
    universe : `DimensionUniverse`
        The dimension universe of the data repository being exported; its
        version and namespace are recorded in the output file.
    """

    def __init__(self, stream: IO, universe: DimensionUniverse):
        self.stream = stream
        self.universe = universe
        self.data: List[Dict[str, Any]] = []

    def saveDimensionData(self, element: DimensionElement, *data: DimensionRecord) -> None:
        # Docstring inherited from RepoExportBackend.saveDimensionData.
        data_dicts = [record.toDict(splitTimespan=True) for record in data]
        self.data.append(
            {
                "type": "dimension",
                "element": element.name,
                "records": data_dicts,
            }
        )

    def saveCollection(self, record: CollectionRecord, doc: Optional[str]) -> None:
        # Docstring inherited from RepoExportBackend.saveCollection.
        data: Dict[str, Any] = {
            "type": "collection",
            "collection_type": record.type.name,
            "name": record.name,
        }
        if doc is not None:
            data["doc"] = doc
        if isinstance(record, RunRecord):
            data["host"] = record.host
            data["timespan_begin"] = record.timespan.begin
            data["timespan_end"] = record.timespan.end
        elif isinstance(record, ChainedCollectionRecord):
            data["children"] = list(record.children)
        self.data.append(data)

    def saveDatasets(self, datasetType: DatasetType, run: str, *datasets: FileDataset) -> None:
        # Docstring inherited from RepoExportBackend.saveDatasets.
        self.data.append(
            {
                "type": "dataset_type",
                "name": datasetType.name,
                "dimensions": [d.name for d in datasetType.dimensions],
                "storage_class": datasetType.storageClass_name,
                "is_calibration": datasetType.isCalibration(),
            }
        )
        self.data.append(
            {
                "type": "dataset",
                "dataset_type": datasetType.name,
                "run": run,
                "records": [
                    {
                        "dataset_id": [ref.id for ref in sorted(dataset.refs)],
                        "data_id": [ref.dataId.byName() for ref in sorted(dataset.refs)],
                        "path": dataset.path,
                        "formatter": dataset.formatter,
                        # TODO: look up and save other collections
                    }
                    for dataset in datasets
                ],
            }
        )

    def saveDatasetAssociations(
        self, collection: str, collectionType: CollectionType, associations: Iterable[DatasetAssociation]
    ) -> None:
        # Docstring inherited from RepoExportBackend.saveDatasetAssociations.
        if collectionType is CollectionType.TAGGED:
            self.data.append(
                {
                    "type": "associations",
                    "collection": collection,
                    "collection_type": collectionType.name,
                    "dataset_ids": [assoc.ref.id for assoc in associations],
                }
            )
        elif collectionType is CollectionType.CALIBRATION:
            idsByTimespan: Dict[Timespan, List[DatasetId]] = defaultdict(list)
            for association in associations:
                assert association.timespan is not None
                assert association.ref.id is not None
                idsByTimespan[association.timespan].append(association.ref.id)
            self.data.append(
                {
                    "type": "associations",
                    "collection": collection,
                    "collection_type": collectionType.name,
                    "validity_ranges": [
                        {
                            "timespan": timespan,
                            "dataset_ids": dataset_ids,
                        }
                        for timespan, dataset_ids in idsByTimespan.items()
                    ],
                }
            )

    def finish(self) -> None:
        # Docstring inherited from RepoExportBackend.
        yaml.dump(
            {
                "description": "Butler Data Repository Export",
                "version": str(EXPORT_FORMAT_VERSION),
                "universe_version": self.universe.version,
                "universe_namespace": self.universe.namespace,
                "data": self.data,
            },
            stream=self.stream,
            sort_keys=False,
        )
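

# Illustrative usage sketch (hypothetical driver code: ``registry`` and the
# record/dataset variables are stand-ins, and in practice this backend is
# driven by the higher-level Butler export machinery):
#
#     with open("export.yaml", "w") as stream:
#         backend = YamlRepoExportBackend(stream, registry.dimensions)
#         backend.saveDimensionData(element, *records)
#         backend.saveDatasets(dataset_type, "my_run", *file_datasets)
#         backend.finish()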


class YamlRepoImportBackend(RepoImportBackend):
    """A repository import implementation that reads from a YAML file.

    Parameters
    ----------
    stream
        A readable file-like object.
    registry : `Registry`
        The registry datasets will be imported into. Only used to retrieve
        dataset types during construction; all writes happen in `register`
        and `load`.
    """

    def __init__(self, stream: IO, registry: Registry):
        # We read the file fully and convert its contents to Python objects
        # instead of loading incrementally so we can spot some problems early;
        # because `register` can't be put inside a transaction, we'd rather not
        # run that at all if there's going to be a problem later in `load`.
        wrapper = yaml.safe_load(stream)
        if wrapper["version"] == 0:
            # Grandfather-in 'version: 0' -> 1.0.0, which is what we wrote
            # before we really tried to do versioning here.
            fileVersion = VersionTuple(1, 0, 0)
        else:
            fileVersion = VersionTuple.fromString(wrapper["version"])
            if fileVersion.major != EXPORT_FORMAT_VERSION.major:
                raise IncompatibleVersionError(
                    f"Cannot read repository export file with version={fileVersion} "
                    f"({EXPORT_FORMAT_VERSION.major}.x.x required)."
                )
            if fileVersion.minor > EXPORT_FORMAT_VERSION.minor:
                raise IncompatibleVersionError(
                    f"Cannot read repository export file with version={fileVersion} "
                    f"({EXPORT_FORMAT_VERSION.major}.{EXPORT_FORMAT_VERSION.minor}.x or earlier required)."
                )
        self.runs: Dict[str, Tuple[Optional[str], Timespan]] = {}
        self.chains: Dict[str, List[str]] = {}
        self.collections: Dict[str, CollectionType] = {}
        self.collectionDocs: Dict[str, str] = {}
        self.datasetTypes: NamedValueSet[DatasetType] = NamedValueSet()
        self.dimensions: Mapping[DimensionElement, List[DimensionRecord]] = defaultdict(list)
        self.tagAssociations: Dict[str, List[DatasetId]] = defaultdict(list)
        self.calibAssociations: Dict[str, Dict[Timespan, List[DatasetId]]] = defaultdict(dict)
        self.refsByFileId: Dict[DatasetId, DatasetRef] = {}
        self.registry: Registry = registry

        universe_version = wrapper.get("universe_version", 0)
        universe_namespace = wrapper.get("universe_namespace", "daf_butler")

        # If this is data exported before the reorganization of visits
        # and visit systems, and the new schema is in use, some filtering
        # will be needed: the visit_system entry in each visit dimension
        # record is silently dropped when the visit record is created, but
        # the matching visit_system_membership records must be constructed
        # explicitly.
        migrate_visit_system = False
        if (
            universe_version < 2
            and universe_namespace == "daf_butler"
            and "visit_system_membership" in self.registry.dimensions
        ):
            migrate_visit_system = True

        datasetData = []
        for data in wrapper["data"]:
            if data["type"] == "dimension":
                # Convert all datetime values to astropy.
                for record in data["records"]:
                    for key in record:
                        # Some older YAML files were produced with native
                        # YAML support for datetime; we support reading that
                        # data back. Newer conversion uses _AstropyTimeToYAML
                        # class with special YAML tag.
                        if isinstance(record[key], datetime):
                            record[key] = astropy.time.Time(record[key], scale="utc")
                element = self.registry.dimensions[data["element"]]
                RecordClass: Type[DimensionRecord] = element.RecordClass
                self.dimensions[element].extend(RecordClass(**r) for r in data["records"])

                if data["element"] == "visit" and migrate_visit_system:
                    # Must create the visit_system_membership records.
                    element = self.registry.dimensions["visit_system_membership"]
                    RecordClass = element.RecordClass
                    self.dimensions[element].extend(
                        RecordClass(instrument=r["instrument"], visit_system=r["visit_system"], visit=r["id"])
                        for r in data["records"]
                    )

            elif data["type"] == "collection":
                collectionType = CollectionType.from_name(data["collection_type"])
                if collectionType is CollectionType.RUN:
                    self.runs[data["name"]] = (
                        data["host"],
                        Timespan(begin=data["timespan_begin"], end=data["timespan_end"]),
                    )
                elif collectionType is CollectionType.CHAINED:
                    children = []
                    for child in data["children"]:
                        if not isinstance(child, str):
                            warnings.warn(
                                f"CHAINED collection {data['name']} includes restrictions on child "
                                "collection searches, which are no longer supported and will be ignored."
                            )
                            # Old form with dataset type restrictions only,
                            # supported for backwards compatibility.
                            child, _ = child
                        children.append(child)
                    self.chains[data["name"]] = children
                else:
                    self.collections[data["name"]] = collectionType
                    doc = data.get("doc")
                    if doc is not None:
                        self.collectionDocs[data["name"]] = doc
            elif data["type"] == "run":
                # Also support old form of saving a run with no extra info.
                self.runs[data["name"]] = (None, Timespan(None, None))
            elif data["type"] == "dataset_type":
                dimensions = data["dimensions"]
                if migrate_visit_system and "visit" in dimensions and "visit_system" in dimensions:
                    dimensions.remove("visit_system")
                self.datasetTypes.add(
                    DatasetType(
                        data["name"],
                        dimensions=dimensions,
                        storageClass=data["storage_class"],
                        universe=self.registry.dimensions,
                        isCalibration=data.get("is_calibration", False),
                    )
                )
            elif data["type"] == "dataset":
                # Save raw dataset data for a second loop, so we can ensure we
                # know about all dataset types first.
                datasetData.append(data)
            elif data["type"] == "associations":
                collectionType = CollectionType.from_name(data["collection_type"])
                if collectionType is CollectionType.TAGGED:
                    self.tagAssociations[data["collection"]].extend(data["dataset_ids"])
                elif collectionType is CollectionType.CALIBRATION:
                    assocsByTimespan = self.calibAssociations[data["collection"]]
                    for d in data["validity_ranges"]:
                        if "timespan" in d:
                            assocsByTimespan[d["timespan"]] = d["dataset_ids"]
                        else:
                            # TODO: this is for backward compatibility, should
                            # be removed at some point.
                            assocsByTimespan[Timespan(begin=d["begin"], end=d["end"])] = d["dataset_ids"]
                else:
                    raise ValueError(f"Unexpected collection type in associations: {collectionType.name}.")
            else:
                raise ValueError(f"Unexpected dictionary type: {data['type']}.")
        # key is (dataset type name, run)
        self.datasets: Mapping[Tuple[str, str], List[FileDataset]] = defaultdict(list)
        for data in datasetData:
            datasetType = self.datasetTypes.get(data["dataset_type"])
            if datasetType is None:
                datasetType = self.registry.getDatasetType(data["dataset_type"])
            self.datasets[data["dataset_type"], data["run"]].extend(
                FileDataset(
                    d.get("path"),
                    [
                        DatasetRef(datasetType, dataId, run=data["run"], id=refid)
                        for dataId, refid in zip(
                            ensure_iterable(d["data_id"]), ensure_iterable(d["dataset_id"])
                        )
                    ],
                    formatter=doImportType(d.get("formatter")) if "formatter" in d else None,
                )
                for d in data["records"]
            )

    def register(self) -> None:
        # Docstring inherited from RepoImportBackend.register.
        for datasetType in self.datasetTypes:
            self.registry.registerDatasetType(datasetType)
        for run in self.runs:
            self.registry.registerRun(run, doc=self.collectionDocs.get(run))
            # No way to add extra run info to registry yet.
        for collection, collection_type in self.collections.items():
            self.registry.registerCollection(
                collection, collection_type, doc=self.collectionDocs.get(collection)
            )
        for chain, children in self.chains.items():
            self.registry.registerCollection(
                chain, CollectionType.CHAINED, doc=self.collectionDocs.get(chain)
            )
            self.registry.setCollectionChain(chain, children)

    def load(
        self,
        datastore: Optional[Datastore],
        *,
        directory: ResourcePathExpression | None = None,
        transfer: Optional[str] = None,
        skip_dimensions: Optional[Set] = None,
        idGenerationMode: DatasetIdGenEnum = DatasetIdGenEnum.UNIQUE,
        reuseIds: bool = False,
    ) -> None:
        # Docstring inherited from RepoImportBackend.load.
        for element, dimensionRecords in self.dimensions.items():
            if skip_dimensions and element in skip_dimensions:
                continue
            # Using skip_existing=True here assumes that the records in the
            # database are either equivalent or at least preferable to the ones
            # being imported. It'd be ideal to check that, but that would mean
            # using syncDimensionData, which is not vectorized and is hence
            # unacceptably slow.
            self.registry.insertDimensionData(element, *dimensionRecords, skip_existing=True)
        # FileDatasets to ingest into the datastore (in bulk):
        fileDatasets = []
        for (datasetTypeName, run), records in self.datasets.items():
            # Make a big flattened list of all data IDs and dataset_ids, while
            # remembering slices that associate them with the FileDataset
            # instances they came from.
            datasets: List[DatasetRef] = []
            dataset_ids: List[DatasetId] = []
            slices = []
            for fileDataset in records:
                start = len(datasets)
                datasets.extend(fileDataset.refs)
                dataset_ids.extend(ref.id for ref in fileDataset.refs)  # type: ignore
                stop = len(datasets)
                slices.append(slice(start, stop))
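            # For example, two FileDatasets holding 2 and 3 refs respectively
            # produce slices == [slice(0, 2), slice(2, 5)] over the flattened
            # datasets list.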

            # Insert all of those DatasetRefs at once.
            # For now, we ignore the dataset_id we pulled from the file
            # and just insert without one to get a new autoincrement value.
            # Eventually (once we have origin in IDs) we'll preserve them.
            resolvedRefs = self.registry._importDatasets(
                datasets, idGenerationMode=idGenerationMode, reuseIds=reuseIds
            )
            # Populate our dictionary that maps dataset_id values from the
            # export file to the new DatasetRefs.
            for fileId, ref in zip(dataset_ids, resolvedRefs):
                self.refsByFileId[fileId] = ref
            # Now iterate over the original records, and install the new
            # resolved DatasetRefs to replace the unresolved ones as we
            # reorganize the collection information.
            for sliceForFileDataset, fileDataset in zip(slices, records):
                fileDataset.refs = resolvedRefs[sliceForFileDataset]
                if directory is not None:
                    fileDataset.path = ResourcePath(directory, forceDirectory=True).join(fileDataset.path)
                fileDatasets.append(fileDataset)
        # Ingest everything into the datastore at once.
        if datastore is not None and fileDatasets:
            datastore.ingest(*fileDatasets, transfer=transfer)
        # Associate datasets with tagged collections.
        for collection, dataset_ids in self.tagAssociations.items():
            self.registry.associate(collection, [self.refsByFileId[i] for i in dataset_ids])
        # Associate datasets with calibration collections.
        for collection, idsByTimespan in self.calibAssociations.items():
            for timespan, dataset_ids in idsByTimespan.items():
                self.registry.certify(collection, [self.refsByFileId[i] for i in dataset_ids], timespan)
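

# Illustrative end-to-end import sketch (hypothetical driver code: ``registry``
# and ``datastore`` are stand-ins for an existing Registry and Datastore, and
# in practice these backends are driven by the higher-level Butler
# import/export machinery):
#
#     with open("export.yaml") as stream:
#         backend = YamlRepoImportBackend(stream, registry)
#     backend.register()  # create dataset types and collections first
#     backend.load(datastore, transfer="auto")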