Hide keyboard shortcuts

Hot-keys on this page

r m x p   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

1# This file is part of obs_base. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (https://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# This program is free software: you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation, either version 3 of the License, or 

12# (at your option) any later version. 

13# 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18# 

19# You should have received a copy of the GNU General Public License 

20# along with this program. If not, see <http://www.gnu.org/licenses/>. 

21 

22from __future__ import annotations 

23 

24__all__ = ("Instrument", "makeExposureRecordFromObsInfo", "loadCamera") 

25 

26import os.path 

27from abc import ABCMeta, abstractmethod 

28from collections import defaultdict 

29import datetime 

30from typing import Any, Optional, Set, Sequence, Tuple, TYPE_CHECKING, Union 

31from functools import lru_cache 

32 

33import astropy.time 

34 

35from lsst.afw.cameraGeom import Camera 

36from lsst.daf.butler import ( 

37 Butler, 

38 CollectionType, 

39 DataCoordinate, 

40 DataId, 

41 DatasetType, 

42 Timespan, 

43) 

44from lsst.utils import getPackageDir, doImport 

45 

46if TYPE_CHECKING:

47 from .gen2to3 import TranslatorFactory 

48 from lsst.daf.butler import Registry 

49 

# To be a standard text curated calibration means that we use a
# standard definition for the corresponding DatasetType.
# Keys are dataset type names; values are the keyword arguments
# (other than the name and universe) used to construct the
# `~lsst.daf.butler.DatasetType` for that calibration.
StandardCuratedCalibrationDatasetTypes = {
    "defects": {"dimensions": ("instrument", "detector"), "storageClass": "Defects"},
    "qe_curve": {"dimensions": ("instrument", "detector"), "storageClass": "QECurve"},
    "crosstalk": {"dimensions": ("instrument", "detector"), "storageClass": "CrosstalkCalib"},
}

57 

58 

class Instrument(metaclass=ABCMeta):
    """Base class for instrument-specific logic for the Gen3 Butler.

    Concrete instrument subclasses should be directly constructable with no
    arguments.
    """

    configPaths: Sequence[str] = ()
    """Paths to config files to read for specific Tasks.

    The paths in this list should contain files of the form `task.py`, for
    each of the Tasks that requires special configuration.
    """

    policyName: Optional[str] = None
    """Instrument specific name to use when locating a policy or configuration
    file in the file system."""

    obsDataPackage: Optional[str] = None
    """Name of the package containing the text curated calibration files.
    Usually an obs _data package.  If `None` no curated calibration files
    will be read. (`str`)"""

    standardCuratedDatasetTypes: Set[str] = frozenset(StandardCuratedCalibrationDatasetTypes)
    """The dataset types expected to be obtained from the obsDataPackage.

    These dataset types are all required to have standard definitions and
    must be known to the base class.  Clearing this list will prevent
    any of these calibrations from being stored.  If a dataset type is not
    known to a specific instrument it can still be included in this list
    since the data package is the source of truth. (`set` of `str`)
    """

    additionalCuratedDatasetTypes: Set[str] = frozenset()
    """Curated dataset types specific to this particular instrument that do
    not follow the standard organization found in obs data packages.

    These are the instrument-specific dataset types written by
    `writeAdditionalCuratedCalibrations` in addition to the calibrations
    found in obs data packages that follow the standard scheme.
    (`set` of `str`)"""

100 

    @property
    @abstractmethod
    def filterDefinitions(self):
        """`~lsst.obs.base.FilterDefinitionCollection`, defining the filters
        for this instrument.

        Must be provided by concrete subclasses; it is consulted by
        `__init__` and `_registerFilters`.
        """
        return None

108 

    def __init__(self):
        # Reset the (subclass-provided) filter definition collection to a
        # known state before declaring the filters it contains.
        self.filterDefinitions.reset()
        self.filterDefinitions.defineFilters()

112 

    @classmethod
    @abstractmethod
    def getName(cls):
        """Return the short (dimension) name for this instrument.

        This is not (in general) the same as the class name - it's what is used
        as the value of the "instrument" field in data IDs, and is usually an
        abbreviation of the full name.

        Returns
        -------
        name : `str`
            The short name of the instrument.
        """
        raise NotImplementedError()

123 

124 @classmethod 

125 @lru_cache() 

126 def getCuratedCalibrationNames(cls) -> Set[str]: 

127 """Return the names of all the curated calibration dataset types. 

128 

129 Returns 

130 ------- 

131 names : `set` of `str` 

132 The dataset type names of all curated calibrations. This will 

133 include the standard curated calibrations even if the particular 

134 instrument does not support them. 

135 

136 Notes 

137 ----- 

138 The returned list does not indicate whether a particular dataset 

139 is present in the Butler repository, simply that these are the 

140 dataset types that are handled by ``writeCuratedCalibrations``. 

141 """ 

142 

143 # Camera is a special dataset type that is also handled as a 

144 # curated calibration. 

145 curated = {"camera"} 

146 

147 # Make a cursory attempt to filter out curated dataset types 

148 # that are not present for this instrument 

149 for datasetTypeName in cls.standardCuratedDatasetTypes: 

150 calibPath = cls._getSpecificCuratedCalibrationPath(datasetTypeName) 

151 if calibPath is not None: 

152 curated.add(datasetTypeName) 

153 

154 curated.update(cls.additionalCuratedDatasetTypes) 

155 return frozenset(curated) 

156 

    @abstractmethod
    def getCamera(self):
        """Retrieve the cameraGeom representation of this instrument.

        This is a temporary API that should go away once ``obs`` packages have
        a standardized approach to writing versioned cameras to a Gen3 repo.

        Returns
        -------
        camera : `lsst.afw.cameraGeom.Camera`
            The camera geometry for this instrument.
        """
        raise NotImplementedError()

165 

    @abstractmethod
    def register(self, registry):
        """Insert instrument, physical_filter, and detector entries into a
        `Registry`.

        Parameters
        ----------
        registry : `lsst.daf.butler.Registry`
            Registry to populate with this instrument's dimension records.

        Notes
        -----
        Implementations should guarantee that registration is atomic (the
        registry should not be modified if any error occurs) and idempotent at
        the level of individual dimension entries; new detectors and filters
        should be added, but changes to any existing record should not be.
        This can generally be achieved via a block like::

            with registry.transaction():
                registry.syncDimensionData("instrument", ...)
                registry.syncDimensionData("detector", ...)
                self._registerFilters(registry)

        Raises
        ------
        lsst.daf.butler.registry.ConflictingDefinitionError
            Raised if any existing record has the same key but a different
            definition as one being registered.
        """
        raise NotImplementedError()

189 

    @classmethod
    @lru_cache()
    def getObsDataPackageDir(cls):
        """The root of the obs data package that provides specializations for
        this instrument.

        Returns
        -------
        dir : `str` or `None`
            The root of the relevant obs data package, or `None` if this
            instrument does not declare an ``obsDataPackage``.
        """
        if cls.obsDataPackage is None:
            return None
        return getPackageDir(cls.obsDataPackage)

204 

205 @staticmethod 

206 def fromName(name: str, registry: Registry) -> Instrument: 

207 """Given an instrument name and a butler, retrieve a corresponding 

208 instantiated instrument object. 

209 

210 Parameters 

211 ---------- 

212 name : `str` 

213 Name of the instrument (must match the return value of `getName`). 

214 registry : `lsst.daf.butler.Registry` 

215 Butler registry to query to find the information. 

216 

217 Returns 

218 ------- 

219 instrument : `Instrument` 

220 An instance of the relevant `Instrument`. 

221 

222 Notes 

223 ----- 

224 The instrument must be registered in the corresponding butler. 

225 

226 Raises 

227 ------ 

228 LookupError 

229 Raised if the instrument is not known to the supplied registry. 

230 ModuleNotFoundError 

231 Raised if the class could not be imported. This could mean 

232 that the relevant obs package has not been setup. 

233 TypeError 

234 Raised if the class name retrieved is not a string. 

235 """ 

236 records = list(registry.queryDimensionRecords("instrument", instrument=name)) 

237 if not records: 

238 raise LookupError(f"No registered instrument with name '{name}'.") 

239 cls = records[0].class_name 

240 if not isinstance(cls, str): 

241 raise TypeError(f"Unexpected class name retrieved from {name} instrument dimension (got {cls})") 

242 instrument = doImport(cls) 

243 return instrument() 

244 

245 @staticmethod 

246 def importAll(registry: Registry) -> None: 

247 """Import all the instruments known to this registry. 

248 

249 This will ensure that all metadata translators have been registered. 

250 

251 Parameters 

252 ---------- 

253 registry : `lsst.daf.butler.Registry` 

254 Butler registry to query to find the information. 

255 

256 Notes 

257 ----- 

258 It is allowed for a particular instrument class to fail on import. 

259 This might simply indicate that a particular obs package has 

260 not been setup. 

261 """ 

262 records = list(registry.queryDimensionRecords("instrument")) 

263 for record in records: 

264 cls = record.class_name 

265 try: 

266 doImport(cls) 

267 except Exception: 

268 pass 

269 

270 def _registerFilters(self, registry): 

271 """Register the physical and abstract filter Dimension relationships. 

272 This should be called in the `register` implementation, within 

273 a transaction context manager block. 

274 

275 Parameters 

276 ---------- 

277 registry : `lsst.daf.butler.core.Registry` 

278 The registry to add dimensions to. 

279 """ 

280 for filter in self.filterDefinitions: 

281 # fix for undefined abstract filters causing trouble in the 

282 # registry: 

283 if filter.band is None: 

284 band = filter.physical_filter 

285 else: 

286 band = filter.band 

287 

288 registry.syncDimensionData("physical_filter", 

289 {"instrument": self.getName(), 

290 "name": filter.physical_filter, 

291 "band": band 

292 }) 

293 

    @abstractmethod
    def getRawFormatter(self, dataId):
        """Return the Formatter class that should be used to read a particular
        raw file.

        Parameters
        ----------
        dataId : `DataCoordinate`
            Dimension-based ID for the raw file or files being ingested.

        Returns
        -------
        formatter : `Formatter` class
            Class to be used that reads the file into an
            `lsst.afw.image.Exposure` instance.

        Notes
        -----
        Must be implemented by concrete subclasses.
        """
        raise NotImplementedError()

311 

312 def applyConfigOverrides(self, name, config): 

313 """Apply instrument-specific overrides for a task config. 

314 

315 Parameters 

316 ---------- 

317 name : `str` 

318 Name of the object being configured; typically the _DefaultName 

319 of a Task. 

320 config : `lsst.pex.config.Config` 

321 Config instance to which overrides should be applied. 

322 """ 

323 for root in self.configPaths: 

324 path = os.path.join(root, f"{name}.py") 

325 if os.path.exists(path): 

326 config.load(path) 

327 

    def writeCuratedCalibrations(self, butler: Butler, collection: Optional[str] = None,
                                 labels: Sequence[str] = ()) -> None:
        """Write human-curated calibration Datasets to the given Butler with
        the appropriate validity ranges.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to use to store these calibrations.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range.  If this collection already exists,
            it must be a `~CollectionType.CALIBRATION` collection, and it must
            not have any datasets that would conflict with those inserted by
            this method.  If `None`, a collection name is worked out
            automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter.  If provided,
            these are inserted into the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well the
            `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``).  Usually this is
            just the name of the ticket on which the calibration collection is
            being created.

        Notes
        -----
        Expected to be called from subclasses.  The base method calls
        ``writeCameraGeom``, ``writeStandardTextCuratedCalibrations``,
        and ``writeAdditionalCuratedCalibrations``.
        """
        # Delegate registration of collections (and creating names for them)
        # to other methods so they can be called independently with the same
        # preconditions.  Collection registration is idempotent, so this is
        # safe, and while it adds a bit of overhead, as long as it's one
        # registration attempt per method (not per dataset or dataset type),
        # that's negligible.
        self.writeCameraGeom(butler, collection, labels=labels)
        self.writeStandardTextCuratedCalibrations(butler, collection, labels=labels)
        self.writeAdditionalCuratedCalibrations(butler, collection, labels=labels)

373 

    def writeAdditionalCuratedCalibrations(self, butler: Butler, collection: Optional[str] = None,
                                           labels: Sequence[str] = ()) -> None:
        """Write additional curated calibrations that might be instrument
        specific and are not part of the standard set.

        Default implementation does nothing.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to use to store these calibrations.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range.  If this collection already exists,
            it must be a `~CollectionType.CALIBRATION` collection, and it must
            not have any datasets that would conflict with those inserted by
            this method.  If `None`, a collection name is worked out
            automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter.  If provided,
            these are inserted into the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well the
            `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``).  Usually this is
            just the name of the ticket on which the calibration collection is
            being created.
        """
        # Intentional no-op; subclasses with nonstandard curated calibrations
        # override this.
        return

407 

408 def writeCameraGeom(self, butler: Butler, collection: Optional[str] = None, 

409 labels: Sequence[str] = ()) -> None: 

410 """Write the default camera geometry to the butler repository and 

411 associate it with the appropriate validity range in a calibration 

412 collection. 

413 

414 Parameters 

415 ---------- 

416 butler : `lsst.daf.butler.Butler` 

417 Butler to use to store these calibrations. 

418 collection : `str`, optional 

419 Name to use for the calibration collection that associates all 

420 datasets with a validity range. If this collection already exists, 

421 it must be a `~CollectionType.CALIBRATION` collection, and it must 

422 not have any datasets that would conflict with those inserted by 

423 this method. If `None`, a collection name is worked out 

424 automatically from the instrument name and other metadata by 

425 calling ``makeCalibrationCollectionName``, but this 

426 default name may not work well for long-lived repositories unless 

427 ``labels`` is also provided (and changed every time curated 

428 calibrations are ingested). 

429 labels : `Sequence` [ `str` ], optional 

430 Extra strings to include in collection names, after concatenating 

431 them with the standard collection name delimeter. If provided, 

432 these are inserted into the names of the `~CollectionType.RUN` 

433 collections that datasets are inserted directly into, as well the 

434 `~CollectionType.CALIBRATION` collection if it is generated 

435 automatically (i.e. if ``collection is None``). Usually this is 

436 just the name of the ticket on which the calibration collection is 

437 being created. 

438 """ 

439 if collection is None: 

440 collection = self.makeCalibrationCollectionName(*labels) 

441 butler.registry.registerCollection(collection, type=CollectionType.CALIBRATION) 

442 run = self.makeUnboundedCalibrationRunName(*labels) 

443 butler.registry.registerRun(run) 

444 datasetType = DatasetType("camera", ("instrument",), "Camera", isCalibration=True, 

445 universe=butler.registry.dimensions) 

446 butler.registry.registerDatasetType(datasetType) 

447 camera = self.getCamera() 

448 ref = butler.put(camera, datasetType, {"instrument": self.getName()}, run=run) 

449 butler.registry.certify(collection, [ref], Timespan(begin=None, end=None)) 

450 

451 def writeStandardTextCuratedCalibrations(self, butler: Butler, collection: Optional[str] = None, 

452 labels: Sequence[str] = ()) -> None: 

453 """Write the set of standardized curated text calibrations to 

454 the repository. 

455 

456 Parameters 

457 ---------- 

458 butler : `lsst.daf.butler.Butler` 

459 Butler to receive these calibration datasets. 

460 collection : `str`, optional 

461 Name to use for the calibration collection that associates all 

462 datasets with a validity range. If this collection already exists, 

463 it must be a `~CollectionType.CALIBRATION` collection, and it must 

464 not have any datasets that would conflict with those inserted by 

465 this method. If `None`, a collection name is worked out 

466 automatically from the instrument name and other metadata by 

467 calling ``makeCalibrationCollectionName``, but this 

468 default name may not work well for long-lived repositories unless 

469 ``labels`` is also provided (and changed every time curated 

470 calibrations are ingested). 

471 labels : `Sequence` [ `str` ], optional 

472 Extra strings to include in collection names, after concatenating 

473 them with the standard collection name delimeter. If provided, 

474 these are inserted into the names of the `~CollectionType.RUN` 

475 collections that datasets are inserted directly into, as well the 

476 `~CollectionType.CALIBRATION` collection if it is generated 

477 automatically (i.e. if ``collection is None``). Usually this is 

478 just the name of the ticket on which the calibration collection is 

479 being created. 

480 """ 

481 if collection is None: 

482 collection = self.makeCalibrationCollectionName(*labels) 

483 butler.registry.registerCollection(collection, type=CollectionType.CALIBRATION) 

484 runs = set() 

485 for datasetTypeName in self.standardCuratedDatasetTypes: 

486 # We need to define the dataset types. 

487 if datasetTypeName not in StandardCuratedCalibrationDatasetTypes: 

488 raise ValueError(f"DatasetType {datasetTypeName} not in understood list" 

489 f" [{'.'.join(StandardCuratedCalibrationDatasetTypes)}]") 

490 definition = StandardCuratedCalibrationDatasetTypes[datasetTypeName] 

491 datasetType = DatasetType(datasetTypeName, 

492 universe=butler.registry.dimensions, 

493 isCalibration=True, 

494 **definition) 

495 self._writeSpecificCuratedCalibrationDatasets(butler, datasetType, collection, runs=runs, 

496 labels=labels) 

497 

498 @classmethod 

499 def _getSpecificCuratedCalibrationPath(cls, datasetTypeName): 

500 """Return the path of the curated calibration directory. 

501 

502 Parameters 

503 ---------- 

504 datasetTypeName : `str` 

505 The name of the standard dataset type to find. 

506 

507 Returns 

508 ------- 

509 path : `str` 

510 The path to the standard curated data directory. `None` if the 

511 dataset type is not found or the obs data package is not 

512 available. 

513 """ 

514 if cls.getObsDataPackageDir() is None: 

515 # if there is no data package then there can't be datasets 

516 return None 

517 

518 calibPath = os.path.join(cls.getObsDataPackageDir(), cls.policyName, 

519 datasetTypeName) 

520 

521 if os.path.exists(calibPath): 

522 return calibPath 

523 

524 return None 

525 

    def _writeSpecificCuratedCalibrationDatasets(self, butler: Butler, datasetType: DatasetType,
                                                 collection: str, runs: Set[str], labels: Sequence[str]):
        """Write standardized curated calibration datasets for this specific
        dataset type from an obs data package.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Gen3 butler in which to put the calibrations.
        datasetType : `lsst.daf.butler.DatasetType`
            Dataset type to be put.
        collection : `str`
            Name of the `~CollectionType.CALIBRATION` collection that
            associates all datasets with validity ranges.  Must have been
            registered prior to this call.
        runs : `set` [ `str` ]
            Names of runs that have already been registered by previous calls
            and need not be registered again.  Should be updated by this
            method as new runs are registered.
        labels : `Sequence` [ `str` ]
            Extra strings to include in run names when creating them from
            ``CALIBDATE`` metadata, via calls to `makeCuratedCalibrationName`.
            Usually this is the name of the ticket on which the calibration
            collection is being created.

        Notes
        -----
        This method scans the location defined in the ``obsDataPackageDir``
        class attribute for curated calibrations corresponding to the
        supplied dataset type.  The directory name in the data package must
        match the name of the dataset type.  They are assumed to use the
        standard layout and can be read by
        `~lsst.pipe.tasks.read_curated_calibs.read_all` and provide standard
        metadata.
        """
        calibPath = self._getSpecificCuratedCalibrationPath(datasetType.name)
        if calibPath is None:
            # No on-disk data for this dataset type; nothing to write.
            return

        # Register the dataset type
        butler.registry.registerDatasetType(datasetType)

        # obs_base can't depend on pipe_tasks but concrete obs packages
        # can -- we therefore have to defer import
        from lsst.pipe.tasks.read_curated_calibs import read_all

        # Read calibs, registering a new run for each CALIBDATE as needed.
        # We try to avoid registering runs multiple times as an optimization
        # by putting them in the ``runs`` set that was passed in.
        camera = self.getCamera()
        calibsDict = read_all(calibPath, camera)[0]  # second return is calib type
        datasetRecords = []
        for det in calibsDict:
            times = sorted([k for k in calibsDict[det]])
            calibs = [calibsDict[det][time] for time in times]
            times = [astropy.time.Time(t, format="datetime", scale="utc") for t in times]
            # Appending None makes the final validity range unbounded; each
            # calib's range runs from its own time to the next calib's time.
            times += [None]
            for calib, beginTime, endTime in zip(calibs, times[:-1], times[1:]):
                md = calib.getMetadata()
                run = self.makeCuratedCalibrationRunName(md['CALIBDATE'], *labels)
                if run not in runs:
                    butler.registry.registerRun(run)
                    runs.add(run)
                dataId = DataCoordinate.standardize(
                    universe=butler.registry.dimensions,
                    instrument=self.getName(),
                    detector=md["DETECTOR"],
                )
                datasetRecords.append((calib, dataId, run, Timespan(beginTime, endTime)))

        # Second loop actually does the inserts and filesystem writes.  We
        # first do a butler.put on each dataset, inserting it into the run for
        # its calibDate.  We remember those refs and group them by timespan, so
        # we can vectorize the certify calls as much as possible.
        refsByTimespan = defaultdict(list)
        with butler.transaction():
            for calib, dataId, run, timespan in datasetRecords:
                refsByTimespan[timespan].append(butler.put(calib, datasetType, dataId, run=run))
            for timespan, refs in refsByTimespan.items():
                butler.registry.certify(collection, refs, timespan)

606 

    @abstractmethod
    def makeDataIdTranslatorFactory(self) -> TranslatorFactory:
        """Return a factory for creating Gen2->Gen3 data ID translators,
        specialized for this instrument.

        Derived class implementations should generally call
        `TranslatorFactory.addGenericInstrumentRules` with appropriate
        arguments, but are not required to (and may not be able to if their
        Gen2 raw data IDs are sufficiently different from the HSC/DECam/CFHT
        norm).

        Returns
        -------
        factory : `TranslatorFactory`.
            Factory for `Translator` objects.
        """
        raise NotImplementedError("Must be implemented by derived classes.")

624 

625 @staticmethod 

626 def formatCollectionTimestamp(timestamp: Union[str, datetime.datetime]) -> str: 

627 """Format a timestamp for use in a collection name. 

628 

629 Parameters 

630 ---------- 

631 timestamp : `str` or `datetime.datetime` 

632 Timestamp to format. May be a date or datetime string in extended 

633 ISO format (assumed UTC), with or without a timezone specifier, a 

634 datetime string in basic ISO format with a timezone specifier, a 

635 naive `datetime.datetime` instance (assumed UTC) or a 

636 timezone-aware `datetime.datetime` instance (converted to UTC). 

637 This is intended to cover all forms that string ``CALIBDATE`` 

638 metadata values have taken in the past, as well as the format this 

639 method itself writes out (to enable round-tripping). 

640 

641 Returns 

642 ------- 

643 formatted : `str` 

644 Standardized string form for the timestamp. 

645 """ 

646 if isinstance(timestamp, str): 

647 if "-" in timestamp: 

648 # extended ISO format, with - and : delimiters 

649 timestamp = datetime.datetime.fromisoformat(timestamp) 

650 else: 

651 # basic ISO format, with no delimiters (what this method 

652 # returns) 

653 timestamp = datetime.datetime.strptime(timestamp, "%Y%m%dT%H%M%S%z") 

654 if not isinstance(timestamp, datetime.datetime): 

655 raise TypeError(f"Unexpected date/time object: {timestamp!r}.") 

656 if timestamp.tzinfo is not None: 

657 timestamp = timestamp.astimezone(datetime.timezone.utc) 

658 return f"{timestamp:%Y%m%dT%H%M%S}Z" 

659 

    @staticmethod
    def makeCollectionTimestamp() -> str:
        """Create a timestamp string for use in a collection name from the
        current time.

        Returns
        -------
        formatted : `str`
            Standardized string form of the current time.
        """
        # Use an aware UTC datetime so formatCollectionTimestamp does not
        # have to assume a timezone.
        return Instrument.formatCollectionTimestamp(datetime.datetime.now(tz=datetime.timezone.utc))

671 

    @classmethod
    def makeDefaultRawIngestRunName(cls) -> str:
        """Make the default instrument-specific run collection string for raw
        data ingest.

        Returns
        -------
        coll : `str`
            Run collection name to be used as the default for ingestion of
            raws.
        """
        # Raws land in "<instrument>/raw/all" by default.
        return cls.makeCollectionName("raw", "all")

684 

685 @classmethod 

686 def makeUnboundedCalibrationRunName(cls, *labels: str) -> str: 

687 """Make a RUN collection name appropriate for inserting calibration 

688 datasets whose validity ranges are unbounded. 

689 

690 Parameters 

691 ---------- 

692 *labels : `str` 

693 Extra strings to be included in the base name, using the default 

694 delimiter for collection names. Usually this is the name of the 

695 ticket on which the calibration collection is being created. 

696 

697 Returns 

698 ------- 

699 name : `str` 

700 Run collection name. 

701 """ 

702 return cls.makeCollectionName("calib", *labels, "unbounded") 

703 

704 @classmethod 

705 def makeCuratedCalibrationRunName(cls, calibDate: str, *labels: str) -> str: 

706 """Make a RUN collection name appropriate for inserting curated 

707 calibration datasets with the given ``CALIBDATE`` metadata value. 

708 

709 Parameters 

710 ---------- 

711 calibDate : `str` 

712 The ``CALIBDATE`` metadata value. 

713 *labels : `str` 

714 Strings to be included in the collection name (before 

715 ``calibDate``, but after all other terms), using the default 

716 delimiter for collection names. Usually this is the name of the 

717 ticket on which the calibration collection is being created. 

718 

719 Returns 

720 ------- 

721 name : `str` 

722 Run collection name. 

723 """ 

724 return cls.makeCollectionName("calib", *labels, "curated", cls.formatCollectionTimestamp(calibDate)) 

725 

    @classmethod
    def makeCalibrationCollectionName(cls, *labels: str) -> str:
        """Make a CALIBRATION collection name appropriate for associating
        calibration datasets with validity ranges.

        Parameters
        ----------
        *labels : `str`
            Strings to be appended to the base name, using the default
            delimiter for collection names.  Usually this is the name of the
            ticket on which the calibration collection is being created.

        Returns
        -------
        name : `str`
            Calibration collection name.
        """
        return cls.makeCollectionName("calib", *labels)

744 

745 @staticmethod 

746 def makeRefCatCollectionName(*labels: str) -> str: 

747 """Return a global (not instrument-specific) name for a collection that 

748 holds reference catalogs. 

749 

750 With no arguments, this returns the name of the collection that holds 

751 all reference catalogs (usually a ``CHAINED`` collection, at least in 

752 long-lived repos that may contain more than one reference catalog). 

753 

754 Parameters 

755 ---------- 

756 *labels : `str` 

757 Strings to be added to the global collection name, in order to 

758 define a collection name for one or more reference catalogs being 

759 ingested at the same time. 

760 

761 Returns 

762 ------- 

763 name : `str` 

764 Collection name. 

765 

766 Notes 

767 ----- 

768 This is a ``staticmethod``, not a ``classmethod``, because it should 

769 be the same for all instruments. 

770 """ 

771 return "/".join(("refcats",) + labels) 

772 

    @classmethod
    def makeUmbrellaCollectionName(cls) -> str:
        """Return the name of the umbrella ``CHAINED`` collection for this
        instrument that combines all standard recommended input collections.

        This method should almost never be overridden by derived classes.

        Returns
        -------
        name : `str`
            Name for the umbrella collection.
        """
        return cls.makeCollectionName("defaults")

786 

787 @classmethod 

788 def makeCollectionName(cls, *labels: str) -> str: 

789 """Get the instrument-specific collection string to use as derived 

790 from the supplied labels. 

791 

792 Parameters 

793 ---------- 

794 *labels : `str` 

795 Strings to be combined with the instrument name to form a 

796 collection name. 

797 

798 Returns 

799 ------- 

800 name : `str` 

801 Collection name to use that includes the instrument name. 

802 """ 

803 return "/".join((cls.getName(),) + labels) 

804 

805 

def makeExposureRecordFromObsInfo(obsInfo, universe):
    """Construct an exposure DimensionRecord from
    `astro_metadata_translator.ObservationInfo`.

    Parameters
    ----------
    obsInfo : `astro_metadata_translator.ObservationInfo`
        A `~astro_metadata_translator.ObservationInfo` object corresponding to
        the exposure.
    universe : `DimensionUniverse`
        Set of all known dimensions.

    Returns
    -------
    record : `DimensionRecord`
        A record containing exposure metadata, suitable for insertion into
        a `Registry`.
    """
    dimension = universe["exposure"]

    # Pointing-related quantities are optional; each stays None unless the
    # corresponding metadata is present.
    ra = dec = sky_angle = zenith_angle = None
    tracking = obsInfo.tracking_radec
    if tracking is not None:
        icrs = tracking.icrs
        ra, dec = icrs.ra.degree, icrs.dec.degree
    if obsInfo.boresight_rotation_coord == "sky":
        sky_angle = obsInfo.boresight_rotation_angle.degree
    altaz = obsInfo.altaz_begin
    if altaz is not None:
        zenith_angle = altaz.zen.degree

    # We are not mandating that dark_time be calculable.
    dark_time = None if obsInfo.dark_time is None else obsInfo.dark_time.to_value("s")

    return dimension.RecordClass(
        instrument=obsInfo.instrument,
        id=obsInfo.exposure_id,
        obs_id=obsInfo.observation_id,
        group_name=obsInfo.exposure_group,
        group_id=obsInfo.visit_id,
        datetime_begin=obsInfo.datetime_begin,
        datetime_end=obsInfo.datetime_end,
        exposure_time=obsInfo.exposure_time.to_value("s"),
        dark_time=dark_time,
        observation_type=obsInfo.observation_type,
        observation_reason=obsInfo.observation_reason,
        day_obs=obsInfo.observing_day,
        seq_num=obsInfo.observation_counter,
        physical_filter=obsInfo.physical_filter,
        science_program=obsInfo.science_program,
        target_name=obsInfo.object,
        tracking_ra=ra,
        tracking_dec=dec,
        sky_angle=sky_angle,
        zenith_angle=zenith_angle,
    )

859 

860 

def loadCamera(butler: Butler, dataId: DataId, *, collections: Any = None) -> Tuple[Camera, bool]:
    """Attempt to load versioned camera geometry from a butler, falling back
    to the nominal camera defined by the `Instrument` class if that fails.

    Parameters
    ----------
    butler : `lsst.daf.butler.Butler`
        Butler instance used to query for and load a ``camera`` dataset.
    dataId : `dict` or `DataCoordinate`
        Data ID that identifies at least the ``instrument`` and ``exposure``
        dimensions.
    collections : Any, optional
        Collections to be searched, overriding ``self.butler.collections``.
        Can be any of the types supported by the ``collections`` argument
        to butler construction.

    Returns
    -------
    camera : `lsst.afw.cameraGeom.Camera`
        Camera object.
    versioned : `bool`
        If `True`, the camera was obtained from the butler and should represent
        a versioned camera from a calibration repository.  If `False`, no
        camera datasets were found, and the returned camera was produced by
        instantiating the appropriate `Instrument` class and calling
        `Instrument.getCamera`.
    """
    if collections is None:
        collections = butler.collections
    # Expand the data ID up front: Registry would do this internally anyway,
    # but expanding once here lets us reuse the expanded ID below and surfaces
    # missing-key problems early.
    dataId = butler.registry.expandDataId(dataId, graph=butler.registry.dimensions["exposure"].graph)
    try:
        cameraRef = butler.get("camera", dataId=dataId, collections=collections)
    except LookupError:
        # No versioned camera dataset found; fall back to the nominal camera.
        pass
    else:
        return cameraRef, True
    instrument = Instrument.fromName(dataId["instrument"], butler.registry)
    return instrument.getCamera(), False