Hide keyboard shortcuts

Hot-keys on this page

r m x p   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

1# This file is part of obs_base. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (https://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# This program is free software: you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation, either version 3 of the License, or 

12# (at your option) any later version. 

13# 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18# 

19# You should have received a copy of the GNU General Public License 

20# along with this program. If not, see <http://www.gnu.org/licenses/>. 

21 

22from __future__ import annotations 

23 

24__all__ = ("Instrument", "makeExposureRecordFromObsInfo", "loadCamera") 

25 

26import os.path 

27from abc import ABCMeta, abstractmethod 

28from collections import defaultdict 

29import datetime 

30from typing import Any, Optional, Set, Sequence, Tuple, TYPE_CHECKING, Union 

31from functools import lru_cache 

32 

33import astropy.time 

34 

35from lsst.afw.cameraGeom import Camera 

36from lsst.daf.butler import ( 

37 Butler, 

38 CollectionType, 

39 DataCoordinate, 

40 DataId, 

41 DatasetType, 

42 Timespan, 

43) 

44from lsst.utils import getPackageDir, doImport 

45 

46if TYPE_CHECKING:

47 from .gen2to3 import TranslatorFactory 

48 from lsst.daf.butler import Registry 

49 

50# To be a standard text curated calibration means that we use a 

51# standard definition for the corresponding DatasetType. 

StandardCuratedCalibrationDatasetTypes = {
    datasetTypeName: {"dimensions": ("instrument", "detector"), "storageClass": storageClass}
    for datasetTypeName, storageClass in (
        ("defects", "Defects"),
        ("qe_curve", "QECurve"),
        ("crosstalk", "CrosstalkCalib"),
        ("linearizer", "Linearizer"),
    )
}

58 

59 

class Instrument(metaclass=ABCMeta):
    """Base class for instrument-specific logic for the Gen3 Butler.

    Concrete instrument subclasses should be directly constructable with no
    arguments.
    """

    configPaths: Sequence[str] = ()
    """Paths to config files to read for specific Tasks.

    The paths in this list should contain files of the form `task.py`, for
    each of the Tasks that requires special configuration.
    """

    policyName: Optional[str] = None
    """Instrument specific name to use when locating a policy or configuration
    file in the file system."""

    obsDataPackage: Optional[str] = None
    """Name of the package containing the text curated calibration files.
    Usually an obs _data package.  If `None` no curated calibration files
    will be read. (`str`)"""

    standardCuratedDatasetTypes: Set[str] = frozenset(StandardCuratedCalibrationDatasetTypes)
    """The dataset types expected to be obtained from the obsDataPackage.

    These dataset types are all required to have standard definitions and
    must be known to the base class.  Clearing this list will prevent
    any of these calibrations from being stored.  If a dataset type is not
    known to a specific instrument it can still be included in this list
    since the data package is the source of truth. (`set` of `str`)
    """

    additionalCuratedDatasetTypes: Set[str] = frozenset()
    """Curated dataset types specific to this particular instrument that do
    not follow the standard organization found in obs data packages.

    These are the instrument-specific dataset types written by
    `writeAdditionalCuratedCalibrations` in addition to the calibrations
    found in obs data packages that follow the standard scheme.
    (`set` of `str`)"""

101 

    @property
    @abstractmethod
    def filterDefinitions(self):
        """`~lsst.obs.base.FilterDefinitionCollection`, defining the filters
        for this instrument.

        Subclasses must override; the base implementation returns `None`.
        """
        return None

109 

    def __init__(self):
        """Initialize the instrument by (re)defining its filters.

        Resets the filter definition collection before defining, so that
        definitions left over from a previously-constructed instrument do
        not persist.
        """
        self.filterDefinitions.reset()
        self.filterDefinitions.defineFilters()

113 

    @classmethod
    @abstractmethod
    def getName(cls):
        """Return the short (dimension) name for this instrument.

        This is not (in general) the same as the class name - it's what is used
        as the value of the "instrument" field in data IDs, and is usually an
        abbreviation of the full name.

        Returns
        -------
        name : `str`
            The short name of the instrument.
        """
        raise NotImplementedError()

124 

125 @classmethod 

126 @lru_cache() 

127 def getCuratedCalibrationNames(cls) -> Set[str]: 

128 """Return the names of all the curated calibration dataset types. 

129 

130 Returns 

131 ------- 

132 names : `set` of `str` 

133 The dataset type names of all curated calibrations. This will 

134 include the standard curated calibrations even if the particular 

135 instrument does not support them. 

136 

137 Notes 

138 ----- 

139 The returned list does not indicate whether a particular dataset 

140 is present in the Butler repository, simply that these are the 

141 dataset types that are handled by ``writeCuratedCalibrations``. 

142 """ 

143 

144 # Camera is a special dataset type that is also handled as a 

145 # curated calibration. 

146 curated = {"camera"} 

147 

148 # Make a cursory attempt to filter out curated dataset types 

149 # that are not present for this instrument 

150 for datasetTypeName in cls.standardCuratedDatasetTypes: 

151 calibPath = cls._getSpecificCuratedCalibrationPath(datasetTypeName) 

152 if calibPath is not None: 

153 curated.add(datasetTypeName) 

154 

155 curated.update(cls.additionalCuratedDatasetTypes) 

156 return frozenset(curated) 

157 

    @abstractmethod
    def getCamera(self):
        """Retrieve the cameraGeom representation of this instrument.

        This is a temporary API that should go away once ``obs`` packages have
        a standardized approach to writing versioned cameras to a Gen3 repo.

        Returns
        -------
        camera : `lsst.afw.cameraGeom.Camera`
            The camera geometry for this instrument.
        """
        raise NotImplementedError()

166 

    @abstractmethod
    def register(self, registry):
        """Insert instrument, physical_filter, and detector entries into a
        `Registry`.

        Parameters
        ----------
        registry : `lsst.daf.butler.Registry`
            Registry into which the dimension records are inserted.

        Implementations should guarantee that registration is atomic (the
        registry should not be modified if any error occurs) and idempotent at
        the level of individual dimension entries; new detectors and filters
        should be added, but changes to any existing record should not be.
        This can generally be achieved via a block like::

            with registry.transaction():
                registry.syncDimensionData("instrument", ...)
                registry.syncDimensionData("detector", ...)
                self.registerFilters(registry)

        Raises
        ------
        lsst.daf.butler.registry.ConflictingDefinitionError
            Raised if any existing record has the same key but a different
            definition as one being registered.
        """
        raise NotImplementedError()

190 

    @classmethod
    @lru_cache()
    def getObsDataPackageDir(cls):
        """The root of the obs data package that provides specializations for
        this instrument.

        Returns
        -------
        dir : `str` or `None`
            The root of the relevant obs data package, or `None` if this
            instrument does not declare an ``obsDataPackage``.
        """
        if cls.obsDataPackage is None:
            return None
        return getPackageDir(cls.obsDataPackage)

205 

206 @staticmethod 

207 def fromName(name: str, registry: Registry) -> Instrument: 

208 """Given an instrument name and a butler, retrieve a corresponding 

209 instantiated instrument object. 

210 

211 Parameters 

212 ---------- 

213 name : `str` 

214 Name of the instrument (must match the return value of `getName`). 

215 registry : `lsst.daf.butler.Registry` 

216 Butler registry to query to find the information. 

217 

218 Returns 

219 ------- 

220 instrument : `Instrument` 

221 An instance of the relevant `Instrument`. 

222 

223 Notes 

224 ----- 

225 The instrument must be registered in the corresponding butler. 

226 

227 Raises 

228 ------ 

229 LookupError 

230 Raised if the instrument is not known to the supplied registry. 

231 ModuleNotFoundError 

232 Raised if the class could not be imported. This could mean 

233 that the relevant obs package has not been setup. 

234 TypeError 

235 Raised if the class name retrieved is not a string. 

236 """ 

237 records = list(registry.queryDimensionRecords("instrument", instrument=name)) 

238 if not records: 

239 raise LookupError(f"No registered instrument with name '{name}'.") 

240 cls = records[0].class_name 

241 if not isinstance(cls, str): 

242 raise TypeError(f"Unexpected class name retrieved from {name} instrument dimension (got {cls})") 

243 instrument = doImport(cls) 

244 return instrument() 

245 

246 @staticmethod 

247 def importAll(registry: Registry) -> None: 

248 """Import all the instruments known to this registry. 

249 

250 This will ensure that all metadata translators have been registered. 

251 

252 Parameters 

253 ---------- 

254 registry : `lsst.daf.butler.Registry` 

255 Butler registry to query to find the information. 

256 

257 Notes 

258 ----- 

259 It is allowed for a particular instrument class to fail on import. 

260 This might simply indicate that a particular obs package has 

261 not been setup. 

262 """ 

263 records = list(registry.queryDimensionRecords("instrument")) 

264 for record in records: 

265 cls = record.class_name 

266 try: 

267 doImport(cls) 

268 except Exception: 

269 pass 

270 

271 def _registerFilters(self, registry): 

272 """Register the physical and abstract filter Dimension relationships. 

273 This should be called in the `register` implementation, within 

274 a transaction context manager block. 

275 

276 Parameters 

277 ---------- 

278 registry : `lsst.daf.butler.core.Registry` 

279 The registry to add dimensions to. 

280 """ 

281 for filter in self.filterDefinitions: 

282 # fix for undefined abstract filters causing trouble in the 

283 # registry: 

284 if filter.band is None: 

285 band = filter.physical_filter 

286 else: 

287 band = filter.band 

288 

289 registry.syncDimensionData("physical_filter", 

290 {"instrument": self.getName(), 

291 "name": filter.physical_filter, 

292 "band": band 

293 }) 

294 

    @abstractmethod
    def getRawFormatter(self, dataId):
        """Return the Formatter class that should be used to read a particular
        raw file.

        Parameters
        ----------
        dataId : `DataCoordinate`
            Dimension-based ID for the raw file or files being ingested.

        Returns
        -------
        formatter : `Formatter` class
            Class to be used that reads the file into an
            `lsst.afw.image.Exposure` instance.
        """
        raise NotImplementedError()

312 

313 def applyConfigOverrides(self, name, config): 

314 """Apply instrument-specific overrides for a task config. 

315 

316 Parameters 

317 ---------- 

318 name : `str` 

319 Name of the object being configured; typically the _DefaultName 

320 of a Task. 

321 config : `lsst.pex.config.Config` 

322 Config instance to which overrides should be applied. 

323 """ 

324 for root in self.configPaths: 

325 path = os.path.join(root, f"{name}.py") 

326 if os.path.exists(path): 

327 config.load(path) 

328 

    def writeCuratedCalibrations(self, butler: Butler, collection: Optional[str] = None,
                                 labels: Sequence[str] = ()) -> None:
        """Write human-curated calibration Datasets to the given Butler with
        the appropriate validity ranges.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to use to store these calibrations.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range.  If this collection already exists,
            it must be a `~CollectionType.CALIBRATION` collection, and it must
            not have any datasets that would conflict with those inserted by
            this method.  If `None`, a collection name is worked out
            automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter.  If provided,
            these are inserted into the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well the
            `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``).  Usually this is
            just the name of the ticket on which the calibration collection is
            being created.

        Notes
        -----
        Expected to be called from subclasses.  The base method calls
        ``writeCameraGeom``, ``writeStandardTextCuratedCalibrations``,
        and ``writeAdditionalCuratedCalibrations``.
        """
        # Delegate registration of collections (and creating names for them)
        # to other methods so they can be called independently with the same
        # preconditions.  Collection registration is idempotent, so this is
        # safe, and while it adds a bit of overhead, as long as it's one
        # registration attempt per method (not per dataset or dataset type),
        # that's negligible.
        self.writeCameraGeom(butler, collection, labels=labels)
        self.writeStandardTextCuratedCalibrations(butler, collection, labels=labels)
        self.writeAdditionalCuratedCalibrations(butler, collection, labels=labels)

374 

    def writeAdditionalCuratedCalibrations(self, butler: Butler, collection: Optional[str] = None,
                                           labels: Sequence[str] = ()) -> None:
        """Write additional curated calibrations that might be instrument
        specific and are not part of the standard set.

        Default implementation does nothing.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to use to store these calibrations.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range.  If this collection already exists,
            it must be a `~CollectionType.CALIBRATION` collection, and it must
            not have any datasets that would conflict with those inserted by
            this method.  If `None`, a collection name is worked out
            automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter.  If provided,
            these are inserted into the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well the
            `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``).  Usually this is
            just the name of the ticket on which the calibration collection is
            being created.
        """
        return

408 

409 def writeCameraGeom(self, butler: Butler, collection: Optional[str] = None, 

410 labels: Sequence[str] = ()) -> None: 

411 """Write the default camera geometry to the butler repository and 

412 associate it with the appropriate validity range in a calibration 

413 collection. 

414 

415 Parameters 

416 ---------- 

417 butler : `lsst.daf.butler.Butler` 

418 Butler to use to store these calibrations. 

419 collection : `str`, optional 

420 Name to use for the calibration collection that associates all 

421 datasets with a validity range. If this collection already exists, 

422 it must be a `~CollectionType.CALIBRATION` collection, and it must 

423 not have any datasets that would conflict with those inserted by 

424 this method. If `None`, a collection name is worked out 

425 automatically from the instrument name and other metadata by 

426 calling ``makeCalibrationCollectionName``, but this 

427 default name may not work well for long-lived repositories unless 

428 ``labels`` is also provided (and changed every time curated 

429 calibrations are ingested). 

430 labels : `Sequence` [ `str` ], optional 

431 Extra strings to include in collection names, after concatenating 

432 them with the standard collection name delimeter. If provided, 

433 these are inserted into the names of the `~CollectionType.RUN` 

434 collections that datasets are inserted directly into, as well the 

435 `~CollectionType.CALIBRATION` collection if it is generated 

436 automatically (i.e. if ``collection is None``). Usually this is 

437 just the name of the ticket on which the calibration collection is 

438 being created. 

439 """ 

440 if collection is None: 

441 collection = self.makeCalibrationCollectionName(*labels) 

442 butler.registry.registerCollection(collection, type=CollectionType.CALIBRATION) 

443 run = self.makeUnboundedCalibrationRunName(*labels) 

444 butler.registry.registerRun(run) 

445 datasetType = DatasetType("camera", ("instrument",), "Camera", isCalibration=True, 

446 universe=butler.registry.dimensions) 

447 butler.registry.registerDatasetType(datasetType) 

448 camera = self.getCamera() 

449 ref = butler.put(camera, datasetType, {"instrument": self.getName()}, run=run) 

450 butler.registry.certify(collection, [ref], Timespan(begin=None, end=None)) 

451 

452 def writeStandardTextCuratedCalibrations(self, butler: Butler, collection: Optional[str] = None, 

453 labels: Sequence[str] = ()) -> None: 

454 """Write the set of standardized curated text calibrations to 

455 the repository. 

456 

457 Parameters 

458 ---------- 

459 butler : `lsst.daf.butler.Butler` 

460 Butler to receive these calibration datasets. 

461 collection : `str`, optional 

462 Name to use for the calibration collection that associates all 

463 datasets with a validity range. If this collection already exists, 

464 it must be a `~CollectionType.CALIBRATION` collection, and it must 

465 not have any datasets that would conflict with those inserted by 

466 this method. If `None`, a collection name is worked out 

467 automatically from the instrument name and other metadata by 

468 calling ``makeCalibrationCollectionName``, but this 

469 default name may not work well for long-lived repositories unless 

470 ``labels`` is also provided (and changed every time curated 

471 calibrations are ingested). 

472 labels : `Sequence` [ `str` ], optional 

473 Extra strings to include in collection names, after concatenating 

474 them with the standard collection name delimeter. If provided, 

475 these are inserted into the names of the `~CollectionType.RUN` 

476 collections that datasets are inserted directly into, as well the 

477 `~CollectionType.CALIBRATION` collection if it is generated 

478 automatically (i.e. if ``collection is None``). Usually this is 

479 just the name of the ticket on which the calibration collection is 

480 being created. 

481 """ 

482 if collection is None: 

483 collection = self.makeCalibrationCollectionName(*labels) 

484 butler.registry.registerCollection(collection, type=CollectionType.CALIBRATION) 

485 runs = set() 

486 for datasetTypeName in self.standardCuratedDatasetTypes: 

487 # We need to define the dataset types. 

488 if datasetTypeName not in StandardCuratedCalibrationDatasetTypes: 

489 raise ValueError(f"DatasetType {datasetTypeName} not in understood list" 

490 f" [{'.'.join(StandardCuratedCalibrationDatasetTypes)}]") 

491 definition = StandardCuratedCalibrationDatasetTypes[datasetTypeName] 

492 datasetType = DatasetType(datasetTypeName, 

493 universe=butler.registry.dimensions, 

494 isCalibration=True, 

495 **definition) 

496 self._writeSpecificCuratedCalibrationDatasets(butler, datasetType, collection, runs=runs, 

497 labels=labels) 

498 

499 @classmethod 

500 def _getSpecificCuratedCalibrationPath(cls, datasetTypeName): 

501 """Return the path of the curated calibration directory. 

502 

503 Parameters 

504 ---------- 

505 datasetTypeName : `str` 

506 The name of the standard dataset type to find. 

507 

508 Returns 

509 ------- 

510 path : `str` 

511 The path to the standard curated data directory. `None` if the 

512 dataset type is not found or the obs data package is not 

513 available. 

514 """ 

515 if cls.getObsDataPackageDir() is None: 

516 # if there is no data package then there can't be datasets 

517 return None 

518 

519 calibPath = os.path.join(cls.getObsDataPackageDir(), cls.policyName, 

520 datasetTypeName) 

521 

522 if os.path.exists(calibPath): 

523 return calibPath 

524 

525 return None 

526 

    def _writeSpecificCuratedCalibrationDatasets(self, butler: Butler, datasetType: DatasetType,
                                                 collection: str, runs: Set[str], labels: Sequence[str]):
        """Write standardized curated calibration datasets for this specific
        dataset type from an obs data package.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Gen3 butler in which to put the calibrations.
        datasetType : `lsst.daf.butler.DatasetType`
            Dataset type to be put.
        collection : `str`
            Name of the `~CollectionType.CALIBRATION` collection that
            associates all datasets with validity ranges.  Must have been
            registered prior to this call.
        runs : `set` [ `str` ]
            Names of runs that have already been registered by previous calls
            and need not be registered again.  Should be updated by this
            method as new runs are registered.
        labels : `Sequence` [ `str` ]
            Extra strings to include in run names when creating them from
            ``CALIBDATE`` metadata, via calls to `makeCuratedCalibrationName`.
            Usually this is the name of the ticket on which the calibration
            collection is being created.

        Notes
        -----
        This method scans the location defined in the ``obsDataPackageDir``
        class attribute for curated calibrations corresponding to the
        supplied dataset type.  The directory name in the data package must
        match the name of the dataset type.  They are assumed to use the
        standard layout and can be read by
        `~lsst.pipe.tasks.read_curated_calibs.read_all` and provide standard
        metadata.
        """
        calibPath = self._getSpecificCuratedCalibrationPath(datasetType.name)
        if calibPath is None:
            # Nothing to write for this dataset type.
            return

        # Register the dataset type
        butler.registry.registerDatasetType(datasetType)

        # obs_base can't depend on pipe_tasks but concrete obs packages
        # can -- we therefore have to defer import
        from lsst.pipe.tasks.read_curated_calibs import read_all

        # Read calibs, registering a new run for each CALIBDATE as needed.
        # We try to avoid registering runs multiple times as an optimization
        # by putting them in the ``runs`` set that was passed in.
        camera = self.getCamera()
        calibsDict = read_all(calibPath, camera)[0]  # second return is calib type
        datasetRecords = []
        for det in calibsDict:
            times = sorted([k for k in calibsDict[det]])
            calibs = [calibsDict[det][time] for time in times]
            times = [astropy.time.Time(t, format="datetime", scale="utc") for t in times]
            # Trailing None gives the final calibration an open-ended
            # validity range below.
            times += [None]
            for calib, beginTime, endTime in zip(calibs, times[:-1], times[1:]):
                md = calib.getMetadata()
                run = self.makeCuratedCalibrationRunName(md['CALIBDATE'], *labels)
                if run not in runs:
                    butler.registry.registerRun(run)
                    runs.add(run)
                dataId = DataCoordinate.standardize(
                    universe=butler.registry.dimensions,
                    instrument=self.getName(),
                    detector=md["DETECTOR"],
                )
                datasetRecords.append((calib, dataId, run, Timespan(beginTime, endTime)))

        # Second loop actually does the inserts and filesystem writes.  We
        # first do a butler.put on each dataset, inserting it into the run for
        # its calibDate.  We remember those refs and group them by timespan, so
        # we can vectorize the certify calls as much as possible.
        refsByTimespan = defaultdict(list)
        with butler.transaction():
            for calib, dataId, run, timespan in datasetRecords:
                refsByTimespan[timespan].append(butler.put(calib, datasetType, dataId, run=run))
            for timespan, refs in refsByTimespan.items():
                butler.registry.certify(collection, refs, timespan)

607 

    @abstractmethod
    def makeDataIdTranslatorFactory(self) -> TranslatorFactory:
        """Return a factory for creating Gen2->Gen3 data ID translators,
        specialized for this instrument.

        Derived class implementations should generally call
        `TranslatorFactory.addGenericInstrumentRules` with appropriate
        arguments, but are not required to (and may not be able to if their
        Gen2 raw data IDs are sufficiently different from the HSC/DECam/CFHT
        norm).

        Returns
        -------
        factory : `TranslatorFactory`.
            Factory for `Translator` objects.
        """
        raise NotImplementedError("Must be implemented by derived classes.")

625 

626 @staticmethod 

627 def formatCollectionTimestamp(timestamp: Union[str, datetime.datetime]) -> str: 

628 """Format a timestamp for use in a collection name. 

629 

630 Parameters 

631 ---------- 

632 timestamp : `str` or `datetime.datetime` 

633 Timestamp to format. May be a date or datetime string in extended 

634 ISO format (assumed UTC), with or without a timezone specifier, a 

635 datetime string in basic ISO format with a timezone specifier, a 

636 naive `datetime.datetime` instance (assumed UTC) or a 

637 timezone-aware `datetime.datetime` instance (converted to UTC). 

638 This is intended to cover all forms that string ``CALIBDATE`` 

639 metadata values have taken in the past, as well as the format this 

640 method itself writes out (to enable round-tripping). 

641 

642 Returns 

643 ------- 

644 formatted : `str` 

645 Standardized string form for the timestamp. 

646 """ 

647 if isinstance(timestamp, str): 

648 if "-" in timestamp: 

649 # extended ISO format, with - and : delimiters 

650 timestamp = datetime.datetime.fromisoformat(timestamp) 

651 else: 

652 # basic ISO format, with no delimiters (what this method 

653 # returns) 

654 timestamp = datetime.datetime.strptime(timestamp, "%Y%m%dT%H%M%S%z") 

655 if not isinstance(timestamp, datetime.datetime): 

656 raise TypeError(f"Unexpected date/time object: {timestamp!r}.") 

657 if timestamp.tzinfo is not None: 

658 timestamp = timestamp.astimezone(datetime.timezone.utc) 

659 return f"{timestamp:%Y%m%dT%H%M%S}Z" 

660 

661 @staticmethod 

662 def makeCollectionTimestamp() -> str: 

663 """Create a timestamp string for use in a collection name from the 

664 current time. 

665 

666 Returns 

667 ------- 

668 formatted : `str` 

669 Standardized string form of the current time. 

670 """ 

671 return Instrument.formatCollectionTimestamp(datetime.datetime.now(tz=datetime.timezone.utc)) 

672 

    @classmethod
    def makeDefaultRawIngestRunName(cls) -> str:
        """Make the default instrument-specific run collection string for raw
        data ingest.

        Returns
        -------
        coll : `str`
            Run collection name to be used as the default for ingestion of
            raws.
        """
        return cls.makeCollectionName("raw", "all")

685 

    @classmethod
    def makeUnboundedCalibrationRunName(cls, *labels: str) -> str:
        """Make a RUN collection name appropriate for inserting calibration
        datasets whose validity ranges are unbounded.

        Parameters
        ----------
        *labels : `str`
            Extra strings to be included in the base name, using the default
            delimiter for collection names.  Usually this is the name of the
            ticket on which the calibration collection is being created.

        Returns
        -------
        name : `str`
            Run collection name.
        """
        return cls.makeCollectionName("calib", *labels, "unbounded")

704 

    @classmethod
    def makeCuratedCalibrationRunName(cls, calibDate: str, *labels: str) -> str:
        """Make a RUN collection name appropriate for inserting curated
        calibration datasets with the given ``CALIBDATE`` metadata value.

        Parameters
        ----------
        calibDate : `str`
            The ``CALIBDATE`` metadata value.
        *labels : `str`
            Strings to be included in the collection name (before
            ``calibDate``, but after all other terms), using the default
            delimiter for collection names.  Usually this is the name of the
            ticket on which the calibration collection is being created.

        Returns
        -------
        name : `str`
            Run collection name.
        """
        return cls.makeCollectionName("calib", *labels, "curated", cls.formatCollectionTimestamp(calibDate))

726 

    @classmethod
    def makeCalibrationCollectionName(cls, *labels: str) -> str:
        """Make a CALIBRATION collection name appropriate for associating
        calibration datasets with validity ranges.

        Parameters
        ----------
        *labels : `str`
            Strings to be appended to the base name, using the default
            delimiter for collection names.  Usually this is the name of the
            ticket on which the calibration collection is being created.

        Returns
        -------
        name : `str`
            Calibration collection name.
        """
        return cls.makeCollectionName("calib", *labels)

745 

746 @staticmethod 

747 def makeRefCatCollectionName(*labels: str) -> str: 

748 """Return a global (not instrument-specific) name for a collection that 

749 holds reference catalogs. 

750 

751 With no arguments, this returns the name of the collection that holds 

752 all reference catalogs (usually a ``CHAINED`` collection, at least in 

753 long-lived repos that may contain more than one reference catalog). 

754 

755 Parameters 

756 ---------- 

757 *labels : `str` 

758 Strings to be added to the global collection name, in order to 

759 define a collection name for one or more reference catalogs being 

760 ingested at the same time. 

761 

762 Returns 

763 ------- 

764 name : `str` 

765 Collection name. 

766 

767 Notes 

768 ----- 

769 This is a ``staticmethod``, not a ``classmethod``, because it should 

770 be the same for all instruments. 

771 """ 

772 return "/".join(("refcats",) + labels) 

773 

    @classmethod
    def makeUmbrellaCollectionName(cls) -> str:
        """Return the name of the umbrella ``CHAINED`` collection for this
        instrument that combines all standard recommended input collections.

        This method should almost never be overridden by derived classes.

        Returns
        -------
        name : `str`
            Name for the umbrella collection.
        """
        return cls.makeCollectionName("defaults")

787 

788 @classmethod 

789 def makeCollectionName(cls, *labels: str) -> str: 

790 """Get the instrument-specific collection string to use as derived 

791 from the supplied labels. 

792 

793 Parameters 

794 ---------- 

795 *labels : `str` 

796 Strings to be combined with the instrument name to form a 

797 collection name. 

798 

799 Returns 

800 ------- 

801 name : `str` 

802 Collection name to use that includes the instrument name. 

803 """ 

804 return "/".join((cls.getName(),) + labels) 

805 

806 

def makeExposureRecordFromObsInfo(obsInfo, universe):
    """Build an exposure `DimensionRecord` from an
    `astro_metadata_translator.ObservationInfo`.

    Parameters
    ----------
    obsInfo : `astro_metadata_translator.ObservationInfo`
        Translated observation metadata for the exposure.
    universe : `DimensionUniverse`
        Set of all known dimensions.

    Returns
    -------
    record : `DimensionRecord`
        Record of exposure metadata, ready to be inserted into a
        `Registry`.
    """
    dimension = universe["exposure"]

    # Pointing-related quantities default to None and are filled in only
    # when the corresponding metadata is present.
    ra = dec = sky_angle = zenith_angle = None
    if obsInfo.tracking_radec is not None:
        icrs = obsInfo.tracking_radec.icrs
        ra = icrs.ra.degree
        dec = icrs.dec.degree
    if obsInfo.boresight_rotation_coord == "sky":
        sky_angle = obsInfo.boresight_rotation_angle.degree
    if obsInfo.altaz_begin is not None:
        zenith_angle = obsInfo.altaz_begin.zen.degree

    # dark_time is not required to be calculable for every instrument.
    dark_time = None if obsInfo.dark_time is None else obsInfo.dark_time.to_value("s")

    return dimension.RecordClass(
        instrument=obsInfo.instrument,
        id=obsInfo.exposure_id,
        obs_id=obsInfo.observation_id,
        group_name=obsInfo.exposure_group,
        group_id=obsInfo.visit_id,
        datetime_begin=obsInfo.datetime_begin,
        datetime_end=obsInfo.datetime_end,
        exposure_time=obsInfo.exposure_time.to_value("s"),
        dark_time=dark_time,
        observation_type=obsInfo.observation_type,
        observation_reason=obsInfo.observation_reason,
        day_obs=obsInfo.observing_day,
        seq_num=obsInfo.observation_counter,
        physical_filter=obsInfo.physical_filter,
        science_program=obsInfo.science_program,
        target_name=obsInfo.object,
        tracking_ra=ra,
        tracking_dec=dec,
        sky_angle=sky_angle,
        zenith_angle=zenith_angle,
    )

860 

861 

def loadCamera(butler: Butler, dataId: DataId, *, collections: Any = None) -> Tuple[Camera, bool]:
    """Load versioned camera geometry from a butler if possible, falling
    back to the nominal camera from the `Instrument` class otherwise.

    Parameters
    ----------
    butler : `lsst.daf.butler.Butler`
        Butler instance used to query for and load a ``camera`` dataset.
    dataId : `dict` or `DataCoordinate`
        Data ID identifying at least the ``instrument`` and ``exposure``
        dimensions.
    collections : Any, optional
        Collections to be searched, overriding ``self.butler.collections``.
        Can be any of the types supported by the ``collections`` argument
        to butler construction.

    Returns
    -------
    camera : `lsst.afw.cameraGeom.Camera`
        Camera object.
    versioned : `bool`
        `True` if the camera came from the butler and should represent a
        versioned camera from a calibration repository; `False` if no
        camera dataset was found and the returned camera was built by
        instantiating the appropriate `Instrument` class and calling
        `Instrument.getCamera`.
    """
    if collections is None:
        collections = butler.collections
    # Expand the data ID up front: Registry would expand it internally
    # anyway, but doing it here ensures it happens exactly once (we may
    # want the expanded ID ourselves later) and validates that the data ID
    # has the keys we need.
    dataId = butler.registry.expandDataId(dataId, graph=butler.registry.dimensions["exposure"].graph)
    try:
        camera = butler.get("camera", dataId=dataId, collections=collections)
    except LookupError:
        # No versioned camera dataset in the given collections; fall back
        # to the nominal camera defined by the Instrument class.
        instrument = Instrument.fromName(dataId["instrument"], butler.registry)
        return instrument.getCamera(), False
    return camera, True