Coverage for python/lsst/daf/butler/transfers/_yaml.py: 15%

200 statements  

coverage.py v7.3.2, created at 2023-10-27 09:44 +0000

1# This file is part of daf_butler. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (http://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# This software is dual licensed under the GNU General Public License and also 

10# under a 3-clause BSD license. Recipients may choose which of these licenses 

11# to use; please see the files gpl-3.0.txt and/or bsd_license.txt, 

12# respectively. If you choose the GPL option then the following text applies 

13# (but note that there is still no warranty even if you opt for BSD instead): 

14# 

15# This program is free software: you can redistribute it and/or modify 

16# it under the terms of the GNU General Public License as published by 

17# the Free Software Foundation, either version 3 of the License, or 

18# (at your option) any later version. 

19# 

20# This program is distributed in the hope that it will be useful, 

21# but WITHOUT ANY WARRANTY; without even the implied warranty of 

22# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

23# GNU General Public License for more details. 

24# 

25# You should have received a copy of the GNU General Public License 

26# along with this program. If not, see <http://www.gnu.org/licenses/>. 

27 

28from __future__ import annotations 

29 

30__all__ = ["YamlRepoExportBackend", "YamlRepoImportBackend"] 

31 

32import uuid 

33import warnings 

34from collections import UserDict, defaultdict 

35from collections.abc import Iterable, Mapping 

36from datetime import datetime 

37from typing import IO, TYPE_CHECKING, Any 

38 

39import astropy.time 

40import yaml 

41from lsst.resources import ResourcePath 

42from lsst.utils import doImportType 

43from lsst.utils.introspection import find_outside_stacklevel 

44from lsst.utils.iteration import ensure_iterable 

45 

46from .._dataset_association import DatasetAssociation 

47from .._dataset_ref import DatasetId, DatasetRef 

48from .._dataset_type import DatasetType 

49from .._file_dataset import FileDataset 

50from .._named import NamedValueSet 

51from .._timespan import Timespan 

52from ..datastore import Datastore 

53from ..dimensions import DimensionElement, DimensionRecord, DimensionUniverse 

54from ..registry import CollectionType 

55from ..registry.interfaces import ChainedCollectionRecord, CollectionRecord, RunRecord, VersionTuple 

56from ..registry.sql_registry import SqlRegistry 

57from ..registry.versions import IncompatibleVersionError 

58from ._interfaces import RepoExportBackend, RepoImportBackend 

59 

60if TYPE_CHECKING: 

61 from lsst.resources import ResourcePathExpression 

62 

63EXPORT_FORMAT_VERSION = VersionTuple(1, 0, 2) 

64"""Export format version. 

65 

66Files with a different major version or a newer minor version cannot be read by 

67this version of the code. 

68""" 

69 

70 

71class _RefMapper(UserDict[int, uuid.UUID]): 

72 """Create a local dict subclass which creates new deterministic UUID for 

73 missing keys. 

74 """ 

75 

76 _namespace = uuid.UUID("4d4851f4-2890-4d41-8779-5f38a3f5062b") 

77 

78 def __missing__(self, key: int) -> uuid.UUID: 

79 newUUID = uuid.uuid3(namespace=self._namespace, name=str(key)) 

80 self[key] = newUUID 

81 return newUUID 

82 

83 

84_refIntId2UUID = _RefMapper() 

85 
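A minimal sketch (editor's example, not part of the measured source) of how the mapper behaves; `legacy_id` is a hypothetical integer dataset ID taken from an old export file:

    legacy_id = 12345  # hypothetical legacy integer dataset_id
    first = _refIntId2UUID[legacy_id]   # __missing__ builds and caches a uuid3
    second = _refIntId2UUID[legacy_id]  # later lookups return the cached value
    assert first == second
    assert first == uuid.uuid3(_RefMapper._namespace, str(legacy_id))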

86 

87def _uuid_representer(dumper: yaml.Dumper, data: uuid.UUID) -> yaml.Node: 

88 """Generate YAML representation for UUID. 

89 

90 This produces a scalar node with a tag "!uuid" and value being a regular 

91 string representation of UUID. 

92 """ 

93 return dumper.represent_scalar("!uuid", str(data)) 

94 

95 

96def _uuid_constructor(loader: yaml.Loader, node: yaml.Node) -> uuid.UUID | None: 

97 if node.value is not None: 

98 return uuid.UUID(hex=node.value) 

99 return None 

100 

101 

102yaml.Dumper.add_representer(uuid.UUID, _uuid_representer) 

103yaml.SafeLoader.add_constructor("!uuid", _uuid_constructor) 

104 
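A short sketch (editor's example) of the round trip these two hooks enable; a UUID dumped with the default `yaml.Dumper` comes back intact through `yaml.safe_load`:

    original = uuid.uuid4()
    text = yaml.dump({"dataset_id": original})     # scalar carries the "!uuid" tag
    restored = yaml.safe_load(text)["dataset_id"]  # constructor rebuilds the UUID
    assert restored == original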

105 

106class YamlRepoExportBackend(RepoExportBackend): 

107 """A repository export implementation that saves to a YAML file. 

108 

109 Parameters 

110 ---------- 

111 stream 

112 A writeable file-like object. 

113 """ 

114 

115 def __init__(self, stream: IO, universe: DimensionUniverse): 

116 self.stream = stream 

117 self.universe = universe 

118 self.data: list[dict[str, Any]] = [] 

119 

120 def saveDimensionData(self, element: DimensionElement, *data: DimensionRecord) -> None: 

121 # Docstring inherited from RepoExportBackend.saveDimensionData. 

122 data_dicts = [record.toDict(splitTimespan=True) for record in data] 

123 self.data.append( 

124 { 

125 "type": "dimension", 

126 "element": element.name, 

127 "records": data_dicts, 

128 } 

129 ) 

130 

131 def saveCollection(self, record: CollectionRecord, doc: str | None) -> None: 

132 # Docstring inherited from RepoExportBackend.saveCollections. 

133 data: dict[str, Any] = { 

134 "type": "collection", 

135 "collection_type": record.type.name, 

136 "name": record.name, 

137 } 

138 if doc is not None: 

139 data["doc"] = doc 

140 if isinstance(record, RunRecord): 

141 data["host"] = record.host 

142 data["timespan_begin"] = record.timespan.begin 

143 data["timespan_end"] = record.timespan.end 

144 elif isinstance(record, ChainedCollectionRecord): 

145 data["children"] = list(record.children) 

146 self.data.append(data) 

147 

148 def saveDatasets(self, datasetType: DatasetType, run: str, *datasets: FileDataset) -> None: 

149 # Docstring inherited from RepoExportBackend.saveDatasets. 

150 self.data.append( 

151 { 

152 "type": "dataset_type", 

153 "name": datasetType.name, 

154 "dimensions": [d.name for d in datasetType.dimensions], 

155 "storage_class": datasetType.storageClass_name, 

156 "is_calibration": datasetType.isCalibration(), 

157 } 

158 ) 

159 self.data.append( 

160 { 

161 "type": "dataset", 

162 "dataset_type": datasetType.name, 

163 "run": run, 

164 "records": [ 

165 { 

166 "dataset_id": [ref.id for ref in sorted(dataset.refs)], 

167 "data_id": [ref.dataId.byName() for ref in sorted(dataset.refs)], 

168 "path": dataset.path, 

169 "formatter": dataset.formatter, 

170 # TODO: look up and save other collections 

171 } 

172 for dataset in datasets 

173 ], 

174 } 

175 ) 

176 

177 def saveDatasetAssociations( 

178 self, collection: str, collectionType: CollectionType, associations: Iterable[DatasetAssociation] 

179 ) -> None: 

180 # Docstring inherited from RepoExportBackend.saveDatasetAssociations. 

181 if collectionType is CollectionType.TAGGED: 

182 self.data.append( 

183 { 

184 "type": "associations", 

185 "collection": collection, 

186 "collection_type": collectionType.name, 

187 "dataset_ids": [assoc.ref.id for assoc in associations], 

188 } 

189 ) 

190 elif collectionType is CollectionType.CALIBRATION: 

191 idsByTimespan: dict[Timespan, list[DatasetId]] = defaultdict(list) 

192 for association in associations: 

193 assert association.timespan is not None 

194 idsByTimespan[association.timespan].append(association.ref.id) 

195 self.data.append( 

196 { 

197 "type": "associations", 

198 "collection": collection, 

199 "collection_type": collectionType.name, 

200 "validity_ranges": [ 

201 { 

202 "timespan": timespan, 

203 "dataset_ids": dataset_ids, 

204 } 

205 for timespan, dataset_ids in idsByTimespan.items() 

206 ], 

207 } 

208 ) 

209 

210 def finish(self) -> None: 

211 # Docstring inherited from RepoExportBackend. 

212 yaml.dump( 

213 { 

214 "description": "Butler Data Repository Export", 

215 "version": str(EXPORT_FORMAT_VERSION), 

216 "universe_version": self.universe.version, 

217 "universe_namespace": self.universe.namespace, 

218 "data": self.data, 

219 }, 

220 stream=self.stream, 

221 sort_keys=False, 

222 ) 

223 
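A usage sketch (editor's example, not from the original file) of the export flow; `universe`, `exposure_records`, and `run_record` are hypothetical stand-ins for objects that a real `Butler`/`SqlRegistry` session would provide:

    # `universe`, `exposure_records`, and `run_record` are hypothetical values
    # obtained elsewhere from a real registry.
    with open("export.yaml", "w") as stream:
        backend = YamlRepoExportBackend(stream, universe)
        backend.saveDimensionData(universe["exposure"], *exposure_records)
        backend.saveCollection(run_record, doc="example run")
        backend.finish()  # writes the accumulated entries as one YAML document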

224 

225class YamlRepoImportBackend(RepoImportBackend): 

226 """A repository import implementation that reads from a YAML file. 

227 

228 Parameters 

229 ---------- 

230 stream 

231 A readable file-like object. 

232 registry : `SqlRegistry` 

233 The registry that datasets will be imported into. Only used to retrieve 

234 dataset types during construction; all writes happen in `register` 

235 and `load`. 

236 """ 

237 

238 def __init__(self, stream: IO, registry: SqlRegistry): 

239 # We read the file fully and convert its contents to Python objects 

240 # instead of loading incrementally so we can spot some problems early; 

241 # because `register` can't be put inside a transaction, we'd rather not 

242 # run that at all if there's going to be a problem later in `load`. 

243 wrapper = yaml.safe_load(stream) 

244 if wrapper["version"] == 0: 

245 # Grandfather-in 'version: 0' -> 1.0.0, which is what we wrote 

246 # before we really tried to do versioning here. 

247 fileVersion = VersionTuple(1, 0, 0) 

248 else: 

249 fileVersion = VersionTuple.fromString(wrapper["version"]) 

250 if fileVersion.major != EXPORT_FORMAT_VERSION.major: 

251 raise IncompatibleVersionError( 

252 f"Cannot read repository export file with version={fileVersion} " 

253 f"({EXPORT_FORMAT_VERSION.major}.x.x required)." 

254 ) 

255 if fileVersion.minor > EXPORT_FORMAT_VERSION.minor: 

256 raise IncompatibleVersionError( 

257 f"Cannot read repository export file with version={fileVersion} " 

258 f"< {EXPORT_FORMAT_VERSION.major}.{EXPORT_FORMAT_VERSION.minor}.x required." 

259 ) 
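# Editor's note, illustrating the check above: with EXPORT_FORMAT_VERSION set
# to 1.0.2, files tagged 1.0.0 or 1.0.5 are accepted (the patch level is
# ignored), while 1.1.0 is rejected for having a newer minor version and
# 2.0.0 is rejected for having a different major version.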

260 self.runs: dict[str, tuple[str | None, Timespan]] = {} 

261 self.chains: dict[str, list[str]] = {} 

262 self.collections: dict[str, CollectionType] = {} 

263 self.collectionDocs: dict[str, str] = {} 

264 self.datasetTypes: NamedValueSet[DatasetType] = NamedValueSet() 

265 self.dimensions: Mapping[DimensionElement, list[DimensionRecord]] = defaultdict(list) 

266 self.tagAssociations: dict[str, list[DatasetId]] = defaultdict(list) 

267 self.calibAssociations: dict[str, dict[Timespan, list[DatasetId]]] = defaultdict(dict) 

268 self.refsByFileId: dict[DatasetId, DatasetRef] = {} 

269 self.registry: SqlRegistry = registry 

270 

271 universe_version = wrapper.get("universe_version", 0) 

272 universe_namespace = wrapper.get("universe_namespace", "daf_butler") 

273 

274 # If this data was exported before the reorganization of visits 

275 # and visit systems, and the new schema is in use, some filtering 

276 # will be needed. The visit_system entry in each visit dimension 

277 # record will be silently dropped when the visit is created, but the 

278 # visit_system_membership records must be constructed explicitly. 

279 migrate_visit_system = False 

280 if ( 

281 universe_version < 2 

282 and universe_namespace == "daf_butler" 

283 and "visit_system_membership" in self.registry.dimensions 

284 ): 

285 migrate_visit_system = True 

286 

287 datasetData = [] 

288 for data in wrapper["data"]: 

289 if data["type"] == "dimension": 

290 # convert all datetime values to astropy 

291 for record in data["records"]: 

292 for key in record: 

293 # Some older YAML files were produced with native 

294 # YAML support for datetime; we support reading that 

295 # data back. Newer conversions use the _AstropyTimeToYAML 

296 # class with a special YAML tag. 

297 if isinstance(record[key], datetime): 

298 record[key] = astropy.time.Time(record[key], scale="utc") 

299 element = self.registry.dimensions[data["element"]] 

300 RecordClass: type[DimensionRecord] = element.RecordClass 

301 self.dimensions[element].extend(RecordClass(**r) for r in data["records"]) 

302 

303 if data["element"] == "visit" and migrate_visit_system: 

304 # Must create the visit_system_membership records. 

305 element = self.registry.dimensions["visit_system_membership"] 

306 RecordClass = element.RecordClass 

307 self.dimensions[element].extend( 

308 RecordClass(instrument=r["instrument"], visit_system=r["visit_system"], visit=r["id"]) 

309 for r in data["records"] 

310 ) 

311 

312 elif data["type"] == "collection": 

313 collectionType = CollectionType.from_name(data["collection_type"]) 

314 if collectionType is CollectionType.RUN: 

315 self.runs[data["name"]] = ( 

316 data["host"], 

317 Timespan(begin=data["timespan_begin"], end=data["timespan_end"]), 

318 ) 

319 elif collectionType is CollectionType.CHAINED: 

320 children = [] 

321 for child in data["children"]: 

322 if not isinstance(child, str): 

323 warnings.warn( 

324 f"CHAINED collection {data['name']} includes restrictions on child " 

325 "collection searches, which are no longer suppored and will be ignored.", 

326 stacklevel=find_outside_stacklevel("lsst.daf.butler"), 

327 ) 

328 # Old form with dataset type restrictions only, 

329 # supported for backwards compatibility. 

330 child, _ = child 

331 children.append(child) 

332 self.chains[data["name"]] = children 

333 else: 

334 self.collections[data["name"]] = collectionType 

335 doc = data.get("doc") 

336 if doc is not None: 

337 self.collectionDocs[data["name"]] = doc 

338 elif data["type"] == "run": 

339 # Also support old form of saving a run with no extra info. 

340 self.runs[data["name"]] = (None, Timespan(None, None)) 

341 elif data["type"] == "dataset_type": 

342 dimensions = data["dimensions"] 

343 if migrate_visit_system and "visit" in dimensions and "visit_system" in dimensions: 

344 dimensions.remove("visit_system") 

345 self.datasetTypes.add( 

346 DatasetType( 

347 data["name"], 

348 dimensions=dimensions, 

349 storageClass=data["storage_class"], 

350 universe=self.registry.dimensions, 

351 isCalibration=data.get("is_calibration", False), 

352 ) 

353 ) 

354 elif data["type"] == "dataset": 

355 # Save raw dataset data for a second loop, so we can ensure we 

356 # know about all dataset types first. 

357 datasetData.append(data) 

358 elif data["type"] == "associations": 

359 collectionType = CollectionType.from_name(data["collection_type"]) 

360 if collectionType is CollectionType.TAGGED: 

361 self.tagAssociations[data["collection"]].extend( 

362 [x if not isinstance(x, int) else _refIntId2UUID[x] for x in data["dataset_ids"]] 

363 ) 

364 elif collectionType is CollectionType.CALIBRATION: 

365 assocsByTimespan = self.calibAssociations[data["collection"]] 

366 for d in data["validity_ranges"]: 

367 if "timespan" in d: 

368 assocsByTimespan[d["timespan"]] = [ 

369 x if not isinstance(x, int) else _refIntId2UUID[x] for x in d["dataset_ids"] 

370 ] 

371 else: 

372 # TODO: this is for backward compatibility, should 

373 # be removed at some point. 

374 assocsByTimespan[Timespan(begin=d["begin"], end=d["end"])] = [ 

375 x if not isinstance(x, int) else _refIntId2UUID[x] for x in d["dataset_ids"] 

376 ] 

377 else: 

378 raise ValueError(f"Unexpected collection type for association: {collectionType.name}.") 

379 else: 

380 raise ValueError(f"Unexpected dictionary type: {data['type']}.") 

381 # key is (dataset type name, run) 

382 self.datasets: Mapping[tuple[str, str], list[FileDataset]] = defaultdict(list) 

383 for data in datasetData: 

384 datasetType = self.datasetTypes.get(data["dataset_type"]) 

385 if datasetType is None: 

386 datasetType = self.registry.getDatasetType(data["dataset_type"]) 

387 self.datasets[data["dataset_type"], data["run"]].extend( 

388 FileDataset( 

389 d.get("path"), 

390 [ 

391 DatasetRef( 

392 datasetType, 

393 dataId, 

394 run=data["run"], 

395 id=refid if not isinstance(refid, int) else _refIntId2UUID[refid], 

396 ) 

397 for dataId, refid in zip( 

398 ensure_iterable(d["data_id"]), ensure_iterable(d["dataset_id"]), strict=True 

399 ) 

400 ], 

401 formatter=doImportType(d.get("formatter")) if "formatter" in d else None, 

402 ) 

403 for d in data["records"] 

404 ) 

405 
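# Editor's note: for orientation, each "dataset" entry consumed by the loop
# above has roughly this shape in the export file (all values hypothetical):
#
#   type: dataset
#   dataset_type: <dataset type name>
#   run: <run collection name>
#   records:
#     - dataset_id: [!uuid '<uuid>']
#       data_id: [{<dimension>: <value>, ...}]
#       path: <path relative to the export directory>
#       formatter: <fully qualified formatter class name>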

406 def register(self) -> None: 

407 # Docstring inherited from RepoImportBackend.register. 

408 for datasetType in self.datasetTypes: 

409 self.registry.registerDatasetType(datasetType) 

410 for run in self.runs: 

411 self.registry.registerRun(run, doc=self.collectionDocs.get(run)) 

412 # No way to add extra run info to registry yet. 

413 for collection, collection_type in self.collections.items(): 

414 self.registry.registerCollection( 

415 collection, collection_type, doc=self.collectionDocs.get(collection) 

416 ) 

417 for chain, children in self.chains.items(): 

418 self.registry.registerCollection( 

419 chain, CollectionType.CHAINED, doc=self.collectionDocs.get(chain) 

420 ) 

421 self.registry.setCollectionChain(chain, children) 

422 

423 def load( 

424 self, 

425 datastore: Datastore | None, 

426 *, 

427 directory: ResourcePathExpression | None = None, 

428 transfer: str | None = None, 

429 skip_dimensions: set | None = None, 

430 ) -> None: 

431 # Docstring inherited from RepoImportBackend.load. 

432 for element, dimensionRecords in self.dimensions.items(): 

433 if skip_dimensions and element in skip_dimensions: 

434 continue 

435 # Using skip_existing=True here assumes that the records in the 

436 # database are either equivalent or at least preferable to the ones 

437 # being imported. It'd be ideal to check that, but that would mean 

438 # using syncDimensionData, which is not vectorized and is hence 

439 # unacceptably slow. 

440 self.registry.insertDimensionData(element, *dimensionRecords, skip_existing=True) 

441 # FileDatasets to ingest into the datastore (in bulk): 

442 fileDatasets = [] 

443 for records in self.datasets.values(): 

444 # Make a big flattened list of all data IDs and dataset_ids, while 

445 # remembering slices that associate them with the FileDataset 

446 # instances they came from. 

447 datasets: list[DatasetRef] = [] 

448 dataset_ids: list[DatasetId] = [] 

449 slices = [] 

450 for fileDataset in records: 

451 start = len(datasets) 

452 datasets.extend(fileDataset.refs) 

453 dataset_ids.extend(ref.id for ref in fileDataset.refs) 

454 stop = len(datasets) 

455 slices.append(slice(start, stop)) 

456 # Insert all of those DatasetRefs at once. 

457 # For now, we ignore the dataset_id we pulled from the file 

458 # and just insert without one to get a new autoincrement value. 

459 # Eventually (once we have origin in IDs) we'll preserve them. 

460 resolvedRefs = self.registry._importDatasets(datasets) 

461 # Populate our dictionary that maps int dataset_id values from the 

462 # export file to the new DatasetRefs 

463 for fileId, ref in zip(dataset_ids, resolvedRefs, strict=True): 

464 self.refsByFileId[fileId] = ref 

465 # Now iterate over the original records, and install the new 

466 # resolved DatasetRefs to replace the unresolved ones as we 

467 # reorganize the collection information. 

468 for sliceForFileDataset, fileDataset in zip(slices, records, strict=True): 

469 fileDataset.refs = resolvedRefs[sliceForFileDataset] 

470 if directory is not None: 

471 fileDataset.path = ResourcePath(directory, forceDirectory=True).join(fileDataset.path) 

472 fileDatasets.append(fileDataset) 

473 # Ingest everything into the datastore at once. 

474 if datastore is not None and fileDatasets: 

475 datastore.ingest(*fileDatasets, transfer=transfer) 

476 # Associate datasets with tagged collections. 

477 for collection, dataset_ids in self.tagAssociations.items(): 

478 self.registry.associate(collection, [self.refsByFileId[i] for i in dataset_ids]) 

479 # Associate datasets with calibration collections. 

480 for collection, idsByTimespan in self.calibAssociations.items(): 

481 for timespan, dataset_ids in idsByTimespan.items(): 

482 self.registry.certify(collection, [self.refsByFileId[i] for i in dataset_ids], timespan)
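
A closing usage sketch (editor's example): the import flow mirrors the export backend above. `registry` and `datastore` are hypothetical, already-constructed objects, and the paths are placeholders:

    # `registry` and `datastore` are hypothetical objects from an existing
    # repository; the constructor reads the whole stream up front.
    with open("export.yaml") as stream:
        backend = YamlRepoImportBackend(stream, registry)
    backend.register()  # dataset types, runs, and collections (not transactional)
    backend.load(datastore, directory="/path/to/exported/files", transfer="copy")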