# This file is part of obs_base.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

__all__ = ("Instrument", "makeExposureRecordFromObsInfo", "loadCamera")

import os.path
from abc import ABCMeta, abstractmethod
from collections import defaultdict
import datetime
from typing import Any, Optional, Set, Sequence, Tuple, TYPE_CHECKING, Union
from functools import lru_cache

import astropy.time

from lsst.afw.cameraGeom import Camera
from lsst.daf.butler import (
    Butler,
    CollectionType,
    DataCoordinate,
    DataId,
    DatasetType,
    Timespan,
)
from lsst.utils import getPackageDir, doImport

if TYPE_CHECKING:
    from .gen2to3 import TranslatorFactory
    from lsst.daf.butler import Registry

# To be a standard text curated calibration means that we use a
# standard definition for the corresponding DatasetType.
StandardCuratedCalibrationDatasetTypes = {
    "defects": {"dimensions": ("instrument", "detector"), "storageClass": "Defects"},
    "qe_curve": {"dimensions": ("instrument", "detector"), "storageClass": "QECurve"},
    "crosstalk": {"dimensions": ("instrument", "detector"), "storageClass": "CrosstalkCalib"},
    "linearizer": {"dimensions": ("instrument", "detector"), "storageClass": "Linearizer"},
}
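
# A minimal sketch (not part of the original module) of how one entry above
# is expanded into a `DatasetType`; this mirrors the expansion performed in
# `Instrument.writeStandardTextCuratedCalibrations` below, and ``butler`` is
# an assumed `lsst.daf.butler.Butler` instance:
#
#     definition = StandardCuratedCalibrationDatasetTypes["defects"]
#     datasetType = DatasetType("defects", universe=butler.registry.dimensions,
#                               isCalibration=True, **definition)
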

class Instrument(metaclass=ABCMeta):
    """Base class for instrument-specific logic for the Gen3 Butler.

    Parameters
    ----------
    collection_prefix : `str`, optional
        Prefix for collection names to use instead of the instrument's own
        name. This is primarily for use in simulated-data repositories,
        where the instrument name may not be necessary and/or sufficient to
        distinguish between collections.

    Notes
    -----
    Concrete instrument subclasses must have the same construction signature
    as the base class.
    """

    configPaths: Sequence[str] = ()
    """Paths to config files to read for specific Tasks.

    The paths in this list should contain files of the form `task.py`, for
    each of the Tasks that requires special configuration.
    """

    policyName: Optional[str] = None
    """Instrument specific name to use when locating a policy or
    configuration file in the file system."""

    obsDataPackage: Optional[str] = None
    """Name of the package containing the text curated calibration files.
    Usually an obs ``_data`` package. If `None`, no curated calibration
    files will be read. (`str`)"""

    standardCuratedDatasetTypes: Set[str] = frozenset(StandardCuratedCalibrationDatasetTypes)
    """The dataset types expected to be obtained from the obsDataPackage.

    These dataset types are all required to have standard definitions and
    must be known to the base class. Clearing this list will prevent
    any of these calibrations from being stored. If a dataset type is not
    known to a specific instrument it can still be included in this list
    since the data package is the source of truth. (`set` of `str`)
    """

    additionalCuratedDatasetTypes: Set[str] = frozenset()
    """Curated dataset types specific to this particular instrument that do
    not follow the standard organization found in obs data packages.

    These are the instrument-specific dataset types written by
    `writeAdditionalCuratedCalibrations` in addition to the calibrations
    found in obs data packages that follow the standard scheme.
    (`set` of `str`)"""

    @property
    @abstractmethod
    def filterDefinitions(self):
        """`~lsst.obs.base.FilterDefinitionCollection`, defining the filters
        for this instrument.
        """
        return None

    def __init__(self, collection_prefix: Optional[str] = None):
        self.filterDefinitions.reset()
        self.filterDefinitions.defineFilters()
        if collection_prefix is None:
            collection_prefix = self.getName()
        self.collection_prefix = collection_prefix

    @classmethod
    @abstractmethod
    def getName(cls):
        """Return the short (dimension) name for this instrument.

        This is not (in general) the same as the class name - it's what is
        used as the value of the "instrument" field in data IDs, and is
        usually an abbreviation of the full name.
        """
        raise NotImplementedError()

    @classmethod
    @lru_cache()
    def getCuratedCalibrationNames(cls) -> Set[str]:
        """Return the names of all the curated calibration dataset types.

        Returns
        -------
        names : `set` of `str`
            The dataset type names of all curated calibrations. This will
            include the standard curated calibrations even if the particular
            instrument does not support them.

        Notes
        -----
        The returned list does not indicate whether a particular dataset
        is present in the Butler repository, simply that these are the
        dataset types that are handled by ``writeCuratedCalibrations``.
        """

        # Camera is a special dataset type that is also handled as a
        # curated calibration.
        curated = {"camera"}

        # Make a cursory attempt to filter out curated dataset types
        # that are not present for this instrument.
        for datasetTypeName in cls.standardCuratedDatasetTypes:
            calibPath = cls._getSpecificCuratedCalibrationPath(datasetTypeName)
            if calibPath is not None:
                curated.add(datasetTypeName)

        curated.update(cls.additionalCuratedDatasetTypes)
        return frozenset(curated)

    @abstractmethod
    def getCamera(self):
        """Retrieve the cameraGeom representation of this instrument.

        This is a temporary API that should go away once ``obs`` packages
        have a standardized approach to writing versioned cameras to a Gen3
        repo.
        """
        raise NotImplementedError()

    @abstractmethod
    def register(self, registry):
        """Insert instrument, physical_filter, and detector entries into a
        `Registry`.

        Implementations should guarantee that registration is atomic (the
        registry should not be modified if any error occurs) and idempotent
        at the level of individual dimension entries; new detectors and
        filters should be added, but changes to any existing record should
        not be. This can generally be achieved via a block like::

            with registry.transaction():
                registry.syncDimensionData("instrument", ...)
                registry.syncDimensionData("detector", ...)
                self._registerFilters(registry)

        Raises
        ------
        lsst.daf.butler.registry.ConflictingDefinitionError
            Raised if any existing record has the same key but a different
            definition as one being registered.
        """
        raise NotImplementedError()

    @classmethod
    @lru_cache()
    def getObsDataPackageDir(cls):
        """The root of the obs data package that provides specializations
        for this instrument.

        Returns
        -------
        dir : `str`
            The root of the relevant obs data package.
        """
        if cls.obsDataPackage is None:
            return None
        return getPackageDir(cls.obsDataPackage)

    @staticmethod
    def fromName(name: str, registry: Registry, collection_prefix: Optional[str] = None) -> Instrument:
        """Given an instrument name and a butler registry, retrieve a
        corresponding instantiated instrument object.

        Parameters
        ----------
        name : `str`
            Name of the instrument (must match the return value of
            `getName`).
        registry : `lsst.daf.butler.Registry`
            Butler registry to query to find the information.
        collection_prefix : `str`, optional
            Prefix for collection names to use instead of the instrument's
            own name. This is primarily for use in simulated-data
            repositories, where the instrument name may not be necessary
            and/or sufficient to distinguish between collections.

        Returns
        -------
        instrument : `Instrument`
            An instance of the relevant `Instrument`.

        Notes
        -----
        The instrument must be registered in the corresponding butler.

        Raises
        ------
        LookupError
            Raised if the instrument is not known to the supplied registry.
        ModuleNotFoundError
            Raised if the class could not be imported. This could mean
            that the relevant obs package has not been setup.
        TypeError
            Raised if the class name retrieved is not a string.
        """
        records = list(registry.queryDimensionRecords("instrument", instrument=name))
        if not records:
            raise LookupError(f"No registered instrument with name '{name}'.")
        cls = records[0].class_name
        if not isinstance(cls, str):
            raise TypeError(f"Unexpected class name retrieved from {name} instrument dimension (got {cls})")
        instrument = doImport(cls)
        return instrument(collection_prefix=collection_prefix)
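
    # A minimal usage sketch (not part of the original module), assuming a
    # repo at "/repo" in which HSC has already been registered:
    #
    #     from lsst.daf.butler import Butler
    #     butler = Butler("/repo")
    #     instrument = Instrument.fromName("HSC", butler.registry)
    #     instrument.getName()  # -> "HSC"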

    @staticmethod
    def importAll(registry: Registry) -> None:
        """Import all the instruments known to this registry.

        This will ensure that all metadata translators have been registered.

        Parameters
        ----------
        registry : `lsst.daf.butler.Registry`
            Butler registry to query to find the information.

        Notes
        -----
        It is allowed for a particular instrument class to fail on import.
        This might simply indicate that a particular obs package has
        not been setup.
        """
        records = list(registry.queryDimensionRecords("instrument"))
        for record in records:
            cls = record.class_name
            try:
                doImport(cls)
            except Exception:
                pass

    def _registerFilters(self, registry):
        """Register the physical and abstract filter Dimension
        relationships. This should be called in the `register`
        implementation, within a transaction context manager block.

        Parameters
        ----------
        registry : `lsst.daf.butler.core.Registry`
            The registry to add dimensions to.
        """
        for filter in self.filterDefinitions:
            # fix for undefined abstract filters causing trouble in the
            # registry:
            if filter.band is None:
                band = filter.physical_filter
            else:
                band = filter.band

            registry.syncDimensionData("physical_filter",
                                       {"instrument": self.getName(),
                                        "name": filter.physical_filter,
                                        "band": band
                                        })

    @abstractmethod
    def getRawFormatter(self, dataId):
        """Return the Formatter class that should be used to read a
        particular raw file.

        Parameters
        ----------
        dataId : `DataCoordinate`
            Dimension-based ID for the raw file or files being ingested.

        Returns
        -------
        formatter : `Formatter` class
            Class to be used that reads the file into an
            `lsst.afw.image.Exposure` instance.
        """
        raise NotImplementedError()

    def applyConfigOverrides(self, name, config):
        """Apply instrument-specific overrides for a task config.

        Parameters
        ----------
        name : `str`
            Name of the object being configured; typically the
            ``_DefaultName`` of a Task.
        config : `lsst.pex.config.Config`
            Config instance to which overrides should be applied.
        """
        for root in self.configPaths:
            path = os.path.join(root, f"{name}.py")
            if os.path.exists(path):
                config.load(path)
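
    # A minimal usage sketch (not part of the original module); ``SomeTask``
    # is a hypothetical Task subclass standing in for any Task whose config
    # should pick up overrides from the files in ``configPaths``:
    #
    #     config = SomeTask.ConfigClass()
    #     instrument.applyConfigOverrides(SomeTask._DefaultName, config)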

    def writeCuratedCalibrations(self, butler: Butler, collection: Optional[str] = None,
                                 labels: Sequence[str] = ()) -> None:
        """Write human-curated calibration Datasets to the given Butler with
        the appropriate validity ranges.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to use to store these calibrations.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range. If this collection already
            exists, it must be a `~CollectionType.CALIBRATION` collection,
            and it must not have any datasets that would conflict with those
            inserted by this method. If `None`, a collection name is worked
            out automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this default name
            may not work well for long-lived repositories unless ``labels``
            is also provided (and changed every time curated calibrations
            are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after
            concatenating them with the standard collection name delimiter.
            If provided, these are inserted into the names of the
            `~CollectionType.RUN` collections that datasets are inserted
            directly into, as well as the `~CollectionType.CALIBRATION`
            collection if it is generated automatically (i.e. if
            ``collection is None``). Usually this is just the name of the
            ticket on which the calibration collection is being created.

        Notes
        -----
        Expected to be called from subclasses. The base method calls
        ``writeCameraGeom``, ``writeStandardTextCuratedCalibrations``,
        and ``writeAdditionalCuratedCalibrations``.
        """
        # Delegate registration of collections (and creating names for
        # them) to other methods so they can be called independently with
        # the same preconditions. Collection registration is idempotent, so
        # this is safe, and while it adds a bit of overhead, as long as it's
        # one registration attempt per method (not per dataset or dataset
        # type), that's negligible.
        self.writeCameraGeom(butler, collection, labels=labels)
        self.writeStandardTextCuratedCalibrations(butler, collection, labels=labels)
        self.writeAdditionalCuratedCalibrations(butler, collection, labels=labels)
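
    # A minimal usage sketch (not part of the original module), assuming a
    # writeable repo at "/repo" and a registered instrument; the label is
    # typically a ticket name that keeps generated collection names unique:
    #
    #     butler = Butler("/repo", writeable=True)
    #     instrument = Instrument.fromName("HSC", butler.registry)
    #     instrument.writeCuratedCalibrations(butler, labels=("DM-12345",))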

    def writeAdditionalCuratedCalibrations(self, butler: Butler, collection: Optional[str] = None,
                                           labels: Sequence[str] = ()) -> None:
        """Write additional curated calibrations that might be instrument
        specific and are not part of the standard set.

        The default implementation does nothing.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to use to store these calibrations.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range. If this collection already
            exists, it must be a `~CollectionType.CALIBRATION` collection,
            and it must not have any datasets that would conflict with those
            inserted by this method. If `None`, a collection name is worked
            out automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this default name
            may not work well for long-lived repositories unless ``labels``
            is also provided (and changed every time curated calibrations
            are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after
            concatenating them with the standard collection name delimiter.
            If provided, these are inserted into the names of the
            `~CollectionType.RUN` collections that datasets are inserted
            directly into, as well as the `~CollectionType.CALIBRATION`
            collection if it is generated automatically (i.e. if
            ``collection is None``). Usually this is just the name of the
            ticket on which the calibration collection is being created.
        """
        return

    def writeCameraGeom(self, butler: Butler, collection: Optional[str] = None,
                        labels: Sequence[str] = ()) -> None:
        """Write the default camera geometry to the butler repository and
        associate it with the appropriate validity range in a calibration
        collection.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to use to store these calibrations.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range. If this collection already
            exists, it must be a `~CollectionType.CALIBRATION` collection,
            and it must not have any datasets that would conflict with those
            inserted by this method. If `None`, a collection name is worked
            out automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this default name
            may not work well for long-lived repositories unless ``labels``
            is also provided (and changed every time curated calibrations
            are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after
            concatenating them with the standard collection name delimiter.
            If provided, these are inserted into the names of the
            `~CollectionType.RUN` collections that datasets are inserted
            directly into, as well as the `~CollectionType.CALIBRATION`
            collection if it is generated automatically (i.e. if
            ``collection is None``). Usually this is just the name of the
            ticket on which the calibration collection is being created.
        """
        if collection is None:
            collection = self.makeCalibrationCollectionName(*labels)
        butler.registry.registerCollection(collection, type=CollectionType.CALIBRATION)
        run = self.makeUnboundedCalibrationRunName(*labels)
        butler.registry.registerRun(run)
        datasetType = DatasetType("camera", ("instrument",), "Camera", isCalibration=True,
                                  universe=butler.registry.dimensions)
        butler.registry.registerDatasetType(datasetType)
        camera = self.getCamera()
        ref = butler.put(camera, datasetType, {"instrument": self.getName()}, run=run)
        butler.registry.certify(collection, [ref], Timespan(begin=None, end=None))

    def writeStandardTextCuratedCalibrations(self, butler: Butler, collection: Optional[str] = None,
                                             labels: Sequence[str] = ()) -> None:
        """Write the set of standardized curated text calibrations to
        the repository.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to receive these calibration datasets.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range. If this collection already
            exists, it must be a `~CollectionType.CALIBRATION` collection,
            and it must not have any datasets that would conflict with those
            inserted by this method. If `None`, a collection name is worked
            out automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this default name
            may not work well for long-lived repositories unless ``labels``
            is also provided (and changed every time curated calibrations
            are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after
            concatenating them with the standard collection name delimiter.
            If provided, these are inserted into the names of the
            `~CollectionType.RUN` collections that datasets are inserted
            directly into, as well as the `~CollectionType.CALIBRATION`
            collection if it is generated automatically (i.e. if
            ``collection is None``). Usually this is just the name of the
            ticket on which the calibration collection is being created.
        """
        if collection is None:
            collection = self.makeCalibrationCollectionName(*labels)
        butler.registry.registerCollection(collection, type=CollectionType.CALIBRATION)
        runs = set()
        for datasetTypeName in self.standardCuratedDatasetTypes:
            # We need to define the dataset types.
            if datasetTypeName not in StandardCuratedCalibrationDatasetTypes:
                raise ValueError(f"DatasetType {datasetTypeName} not in understood list"
                                 f" [{', '.join(StandardCuratedCalibrationDatasetTypes)}]")
            definition = StandardCuratedCalibrationDatasetTypes[datasetTypeName]
            datasetType = DatasetType(datasetTypeName,
                                      universe=butler.registry.dimensions,
                                      isCalibration=True,
                                      **definition)
            self._writeSpecificCuratedCalibrationDatasets(butler, datasetType, collection, runs=runs,
                                                          labels=labels)

    @classmethod
    def _getSpecificCuratedCalibrationPath(cls, datasetTypeName):
        """Return the path of the curated calibration directory.

        Parameters
        ----------
        datasetTypeName : `str`
            The name of the standard dataset type to find.

        Returns
        -------
        path : `str`
            The path to the standard curated data directory. `None` if the
            dataset type is not found or the obs data package is not
            available.
        """
        if cls.getObsDataPackageDir() is None:
            # if there is no data package then there can't be datasets
            return None

        calibPath = os.path.join(cls.getObsDataPackageDir(), cls.policyName,
                                 datasetTypeName)

        if os.path.exists(calibPath):
            return calibPath

        return None

    def _writeSpecificCuratedCalibrationDatasets(self, butler: Butler, datasetType: DatasetType,
                                                 collection: str, runs: Set[str], labels: Sequence[str]):
        """Write standardized curated calibration datasets for this specific
        dataset type from an obs data package.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Gen3 butler in which to put the calibrations.
        datasetType : `lsst.daf.butler.DatasetType`
            Dataset type to be put.
        collection : `str`
            Name of the `~CollectionType.CALIBRATION` collection that
            associates all datasets with validity ranges. Must have been
            registered prior to this call.
        runs : `set` [ `str` ]
            Names of runs that have already been registered by previous
            calls and need not be registered again. Should be updated by
            this method as new runs are registered.
        labels : `Sequence` [ `str` ]
            Extra strings to include in run names when creating them from
            ``CALIBDATE`` metadata, via calls to
            `makeCuratedCalibrationRunName`. Usually this is the name of
            the ticket on which the calibration collection is being created.

        Notes
        -----
        This method scans the location returned by ``getObsDataPackageDir``
        for curated calibrations corresponding to the supplied dataset type.
        The directory name in the data package must match the name of the
        dataset type. They are assumed to use the standard layout and can be
        read by `~lsst.pipe.tasks.read_curated_calibs.read_all` and provide
        standard metadata.
        """
        calibPath = self._getSpecificCuratedCalibrationPath(datasetType.name)
        if calibPath is None:
            return

        # Register the dataset type.
        butler.registry.registerDatasetType(datasetType)

        # obs_base can't depend on pipe_tasks but concrete obs packages
        # can -- we therefore have to defer import.
        from lsst.pipe.tasks.read_curated_calibs import read_all

        # Read calibs, registering a new run for each CALIBDATE as needed.
        # We try to avoid registering runs multiple times as an optimization
        # by putting them in the ``runs`` set that was passed in.
        camera = self.getCamera()
        calibsDict = read_all(calibPath, camera)[0]  # second return is calib type
        datasetRecords = []
        for det in calibsDict:
            times = sorted(calibsDict[det])
            calibs = [calibsDict[det][time] for time in times]
            times = [astropy.time.Time(t, format="datetime", scale="utc") for t in times]
            times += [None]
            for calib, beginTime, endTime in zip(calibs, times[:-1], times[1:]):
                md = calib.getMetadata()
                run = self.makeCuratedCalibrationRunName(md["CALIBDATE"], *labels)
                if run not in runs:
                    butler.registry.registerRun(run)
                    runs.add(run)
                dataId = DataCoordinate.standardize(
                    universe=butler.registry.dimensions,
                    instrument=self.getName(),
                    detector=md["DETECTOR"],
                )
                datasetRecords.append((calib, dataId, run, Timespan(beginTime, endTime)))

        # Second loop actually does the inserts and filesystem writes. We
        # first do a butler.put on each dataset, inserting it into the run
        # for its calibDate. We remember those refs and group them by
        # timespan, so we can vectorize the certify calls as much as
        # possible.
        refsByTimespan = defaultdict(list)
        with butler.transaction():
            for calib, dataId, run, timespan in datasetRecords:
                refsByTimespan[timespan].append(butler.put(calib, datasetType, dataId, run=run))
            for timespan, refs in refsByTimespan.items():
                butler.registry.certify(collection, refs, timespan)

    @abstractmethod
    def makeDataIdTranslatorFactory(self) -> TranslatorFactory:
        """Return a factory for creating Gen2->Gen3 data ID translators,
        specialized for this instrument.

        Derived class implementations should generally call
        `TranslatorFactory.addGenericInstrumentRules` with appropriate
        arguments, but are not required to (and may not be able to if their
        Gen2 raw data IDs are sufficiently different from the HSC/DECam/CFHT
        norm).

        Returns
        -------
        factory : `TranslatorFactory`
            Factory for `Translator` objects.
        """
        raise NotImplementedError("Must be implemented by derived classes.")

    @staticmethod
    def formatCollectionTimestamp(timestamp: Union[str, datetime.datetime]) -> str:
        """Format a timestamp for use in a collection name.

        Parameters
        ----------
        timestamp : `str` or `datetime.datetime`
            Timestamp to format. May be a date or datetime string in
            extended ISO format (assumed UTC), with or without a timezone
            specifier, a datetime string in basic ISO format with a timezone
            specifier, a naive `datetime.datetime` instance (assumed UTC) or
            a timezone-aware `datetime.datetime` instance (converted to
            UTC). This is intended to cover all forms that string
            ``CALIBDATE`` metadata values have taken in the past, as well as
            the format this method itself writes out (to enable
            round-tripping).

        Returns
        -------
        formatted : `str`
            Standardized string form for the timestamp.
        """
        if isinstance(timestamp, str):
            if "-" in timestamp:
                # extended ISO format, with - and : delimiters
                timestamp = datetime.datetime.fromisoformat(timestamp)
            else:
                # basic ISO format, with no delimiters (what this method
                # returns)
                timestamp = datetime.datetime.strptime(timestamp, "%Y%m%dT%H%M%S%z")
        if not isinstance(timestamp, datetime.datetime):
            raise TypeError(f"Unexpected date/time object: {timestamp!r}.")
        if timestamp.tzinfo is not None:
            timestamp = timestamp.astimezone(datetime.timezone.utc)
        return f"{timestamp:%Y%m%dT%H%M%S}Z"
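
    # Illustrative inputs and outputs (a sketch, not part of the original
    # module), covering the accepted forms described above:
    #
    #     Instrument.formatCollectionTimestamp("2020-01-01")
    #         -> "20200101T000000Z"
    #     Instrument.formatCollectionTimestamp("2020-01-01T12:00:00+00:00")
    #         -> "20200101T120000Z"
    #     Instrument.formatCollectionTimestamp("20200101T120000Z")
    #         -> "20200101T120000Z"  (round-trips its own output)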

    @staticmethod
    def makeCollectionTimestamp() -> str:
        """Create a timestamp string for use in a collection name from the
        current time.

        Returns
        -------
        formatted : `str`
            Standardized string form of the current time.
        """
        return Instrument.formatCollectionTimestamp(datetime.datetime.now(tz=datetime.timezone.utc))

    def makeDefaultRawIngestRunName(self) -> str:
        """Make the default instrument-specific run collection string for
        raw data ingest.

        Returns
        -------
        coll : `str`
            Run collection name to be used as the default for ingestion of
            raws.
        """
        return self.makeCollectionName("raw", "all")

    def makeUnboundedCalibrationRunName(self, *labels: str) -> str:
        """Make a RUN collection name appropriate for inserting calibration
        datasets whose validity ranges are unbounded.

        Parameters
        ----------
        *labels : `str`
            Extra strings to be included in the base name, using the
            default delimiter for collection names. Usually this is the
            name of the ticket on which the calibration collection is being
            created.

        Returns
        -------
        name : `str`
            Run collection name.
        """
        return self.makeCollectionName("calib", *labels, "unbounded")

    def makeCuratedCalibrationRunName(self, calibDate: str, *labels: str) -> str:
        """Make a RUN collection name appropriate for inserting curated
        calibration datasets with the given ``CALIBDATE`` metadata value.

        Parameters
        ----------
        calibDate : `str`
            The ``CALIBDATE`` metadata value.
        *labels : `str`
            Strings to be included in the collection name (before
            ``calibDate``, but after all other terms), using the default
            delimiter for collection names. Usually this is the name of the
            ticket on which the calibration collection is being created.

        Returns
        -------
        name : `str`
            Run collection name.
        """
        return self.makeCollectionName("calib", *labels, "curated", self.formatCollectionTimestamp(calibDate))
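
    # A sketch (not part of the original module) of the names these helpers
    # produce, assuming ``collection_prefix`` is "HSC" and the label is the
    # ticket name "DM-12345":
    #
    #     makeCalibrationCollectionName("DM-12345")
    #         -> "HSC/calib/DM-12345"
    #     makeUnboundedCalibrationRunName("DM-12345")
    #         -> "HSC/calib/DM-12345/unbounded"
    #     makeCuratedCalibrationRunName("2020-01-01", "DM-12345")
    #         -> "HSC/calib/DM-12345/curated/20200101T000000Z"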

    def makeCalibrationCollectionName(self, *labels: str) -> str:
        """Make a CALIBRATION collection name appropriate for associating
        calibration datasets with validity ranges.

        Parameters
        ----------
        *labels : `str`
            Strings to be appended to the base name, using the default
            delimiter for collection names. Usually this is the name of the
            ticket on which the calibration collection is being created.

        Returns
        -------
        name : `str`
            Calibration collection name.
        """
        return self.makeCollectionName("calib", *labels)

    @staticmethod
    def makeRefCatCollectionName(*labels: str) -> str:
        """Return a global (not instrument-specific) name for a collection
        that holds reference catalogs.

        With no arguments, this returns the name of the collection that
        holds all reference catalogs (usually a ``CHAINED`` collection, at
        least in long-lived repos that may contain more than one reference
        catalog).

        Parameters
        ----------
        *labels : `str`
            Strings to be added to the global collection name, in order to
            define a collection name for one or more reference catalogs
            being ingested at the same time.

        Returns
        -------
        name : `str`
            Collection name.

        Notes
        -----
        This is a ``staticmethod``, not a ``classmethod``, because it
        should be the same for all instruments.
        """
        return "/".join(("refcats",) + labels)

    def makeUmbrellaCollectionName(self) -> str:
        """Return the name of the umbrella ``CHAINED`` collection for this
        instrument that combines all standard recommended input collections.

        This method should almost never be overridden by derived classes.

        Returns
        -------
        name : `str`
            Name for the umbrella collection.
        """
        return self.makeCollectionName("defaults")

    def makeCollectionName(self, *labels: str) -> str:
        """Get the instrument-specific collection string to use as derived
        from the supplied labels.

        Parameters
        ----------
        *labels : `str`
            Strings to be combined with the instrument name to form a
            collection name.

        Returns
        -------
        name : `str`
            Collection name to use that includes the instrument's
            recommended prefix.
        """
        return "/".join((self.collection_prefix,) + labels)


def makeExposureRecordFromObsInfo(obsInfo, universe):
    """Construct an exposure DimensionRecord from
    `astro_metadata_translator.ObservationInfo`.

    Parameters
    ----------
    obsInfo : `astro_metadata_translator.ObservationInfo`
        A `~astro_metadata_translator.ObservationInfo` object corresponding
        to the exposure.
    universe : `DimensionUniverse`
        Set of all known dimensions.

    Returns
    -------
    record : `DimensionRecord`
        A record containing exposure metadata, suitable for insertion into
        a `Registry`.
    """
    dimension = universe["exposure"]

    ra, dec, sky_angle, zenith_angle = (None, None, None, None)
    if obsInfo.tracking_radec is not None:
        icrs = obsInfo.tracking_radec.icrs
        ra = icrs.ra.degree
        dec = icrs.dec.degree
        if obsInfo.boresight_rotation_coord == "sky":
            sky_angle = obsInfo.boresight_rotation_angle.degree
    if obsInfo.altaz_begin is not None:
        zenith_angle = obsInfo.altaz_begin.zen.degree

    return dimension.RecordClass(
        instrument=obsInfo.instrument,
        id=obsInfo.exposure_id,
        obs_id=obsInfo.observation_id,
        group_name=obsInfo.exposure_group,
        group_id=obsInfo.visit_id,
        datetime_begin=obsInfo.datetime_begin,
        datetime_end=obsInfo.datetime_end,
        exposure_time=obsInfo.exposure_time.to_value("s"),
        # we are not mandating that dark_time be calculable
        dark_time=obsInfo.dark_time.to_value("s") if obsInfo.dark_time is not None else None,
        observation_type=obsInfo.observation_type,
        observation_reason=obsInfo.observation_reason,
        day_obs=obsInfo.observing_day,
        seq_num=obsInfo.observation_counter,
        physical_filter=obsInfo.physical_filter,
        science_program=obsInfo.science_program,
        target_name=obsInfo.object,
        tracking_ra=ra,
        tracking_dec=dec,
        sky_angle=sky_angle,
        zenith_angle=zenith_angle,
    )
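
# A minimal usage sketch (not part of the original module): building an
# exposure record from a raw header and inserting it into a registry, where
# ``headers`` is an assumed FITS header mapping for one raw frame:
#
#     from astro_metadata_translator import ObservationInfo
#     obsInfo = ObservationInfo(headers)
#     record = makeExposureRecordFromObsInfo(obsInfo, butler.registry.dimensions)
#     butler.registry.insertDimensionData("exposure", record)
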

def loadCamera(butler: Butler, dataId: DataId, *, collections: Any = None) -> Tuple[Camera, bool]:
    """Attempt to load versioned camera geometry from a butler, but fall
    back to obtaining a nominal camera from the `Instrument` class if that
    fails.

    Parameters
    ----------
    butler : `lsst.daf.butler.Butler`
        Butler instance to attempt to query for and load a ``camera``
        dataset from.
    dataId : `dict` or `DataCoordinate`
        Data ID that identifies at least the ``instrument`` and ``exposure``
        dimensions.
    collections : Any, optional
        Collections to be searched, overriding ``butler.collections``.
        Can be any of the types supported by the ``collections`` argument
        to butler construction.

    Returns
    -------
    camera : `lsst.afw.cameraGeom.Camera`
        Camera object.
    versioned : `bool`
        If `True`, the camera was obtained from the butler and should
        represent a versioned camera from a calibration repository. If
        `False`, no camera datasets were found, and the returned camera was
        produced by instantiating the appropriate `Instrument` class and
        calling `Instrument.getCamera`.
    """
    if collections is None:
        collections = butler.collections
    # Registry would do data ID expansion internally if we didn't do it
    # first, but we might want an expanded data ID ourselves later, so we
    # do it here to ensure it only happens once.
    # This will also catch problems with the data ID not having keys we
    # need.
    dataId = butler.registry.expandDataId(dataId, graph=butler.registry.dimensions["exposure"].graph)
    try:
        cameraRef = butler.get("camera", dataId=dataId, collections=collections)
        return cameraRef, True
    except LookupError:
        pass
    instrument = Instrument.fromName(dataId["instrument"], butler.registry)
    return instrument.getCamera(), False
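
# A minimal usage sketch (not part of the original module), assuming a repo
# at "/repo" that contains an HSC exposure with ID 12345:
#
#     butler = Butler("/repo", collections=["HSC/calib", "HSC/raw/all"])
#     camera, versioned = loadCamera(butler, {"instrument": "HSC", "exposure": 12345})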