Coverage for python / lsst / obs / base / _instrument.py: 22%

202 statements  

« prev     ^ index     » next       coverage.py v7.13.5, created at 2026-04-23 08:28 +0000

1# This file is part of obs_base. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (https://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# This program is free software: you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation, either version 3 of the License, or 

12# (at your option) any later version. 

13# 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18# 

19# You should have received a copy of the GNU General Public License 

20# along with this program. If not, see <http://www.gnu.org/licenses/>. 

21 

22from __future__ import annotations 

23 

24__all__ = ("Instrument", "loadCamera", "makeExposureRecordFromObsInfo") 

25 

26import logging 

27import re 

28from abc import abstractmethod 

29from collections import defaultdict 

30from collections.abc import Sequence, Set 

31from functools import lru_cache 

32from typing import TYPE_CHECKING, Any, cast 

33 

34import astropy.time 

35 

36from lsst.afw.cameraGeom import Camera 

37from lsst.daf.butler import ( 

38 Butler, 

39 CollectionType, 

40 DataCoordinate, 

41 DataId, 

42 DatasetType, 

43 DimensionRecord, 

44 DimensionUniverse, 

45 Timespan, 

46) 

47from lsst.daf.butler.registry import DataIdError 

48from lsst.pipe.base import Instrument as InstrumentBase 

49from lsst.resources import ResourcePath 

50from lsst.utils import doImport, getPackageDir 

51 

52from ._read_curated_calibs import CuratedCalibration, read_all 

53 

54if TYPE_CHECKING: 

55 from astro_metadata_translator import MetadataTranslator, ObservationInfo 

56 

57 from lsst.daf.butler import Registry 

58 

59 from .filters import FilterDefinitionCollection 

60 

61_LOG = logging.getLogger(__name__) 

62 

# To be a standard text curated calibration means that we use a
# standard definition for the corresponding DatasetType.
# Each entry maps a dataset type name to the keyword arguments
# (``dimensions`` and ``storageClass``) used to construct its
# `~lsst.daf.butler.DatasetType` in writeStandardTextCuratedCalibrations.
StandardCuratedCalibrationDatasetTypes = {
    "defects": {"dimensions": ("instrument", "detector"), "storageClass": "Defects"},
    "manual_defects": {"dimensions": ("instrument", "detector"), "storageClass": "Defects"},
    "qe_curve": {"dimensions": ("instrument", "detector"), "storageClass": "QECurve"},
    "crosstalk": {"dimensions": ("instrument", "detector"), "storageClass": "CrosstalkCalib"},
    "linearizer": {"dimensions": ("instrument", "detector"), "storageClass": "Linearizer"},
    "bfk": {"dimensions": ("instrument", "detector"), "storageClass": "BrighterFatterKernel"},
    "transmission_optics": {"dimensions": ("instrument",), "storageClass": "TransmissionCurve"},
    "transmission_filter": {
        "dimensions": ("instrument", "physical_filter"),
        "storageClass": "TransmissionCurve",
    },
    "transmission_filter_detector": {
        "dimensions": ("instrument", "detector", "physical_filter"),
        "storageClass": "TransmissionCurve",
    },
    "transmission_sensor": {"dimensions": ("instrument", "detector"), "storageClass": "TransmissionCurve"},
    "transmission_atmosphere": {"dimensions": ("instrument",), "storageClass": "TransmissionCurve"},
    "transmission_system": {
        "dimensions": ("instrument", "detector", "physical_filter"),
        "storageClass": "TransmissionCurve",
    },
}

88 

89 

class Instrument(InstrumentBase):
    """Rubin-specified base for instrument-specific logic for the Gen3 Butler.

    Parameters
    ----------
    collection_prefix : `str`, optional
        Prefix for collection names to use instead of the instrument's own
        name. This is primarily for use in simulated-data repositories, where
        the instrument name may not be necessary and/or sufficient to
        distinguish between collections.

    Notes
    -----
    Concrete instrument subclasses must have the same construction signature as
    the base class.
    """

    # Class-level configuration knobs; concrete instrument subclasses
    # override these to describe where their curated calibrations live.

    policyName: str | None = None
    """Instrument specific name to use when locating a policy or configuration
    file in the file system."""

    obsDataPackage: str | None = None
    """Name of the package containing the text curated calibration files.
    Usually a obs _data package. If `None` no curated calibration files
    will be read. (`str`)"""

    standardCuratedDatasetTypes: Set[str] = frozenset(StandardCuratedCalibrationDatasetTypes)
    """The dataset types expected to be obtained from the obsDataPackage.

    These dataset types are all required to have standard definitions and
    must be known to the base class. Clearing this list will prevent
    any of these calibrations from being stored. If a dataset type is not
    known to a specific instrument it can still be included in this list
    since the data package is the source of truth. (`set` of `str`)
    """

    additionalCuratedDatasetTypes: Set[str] = frozenset()
    """Curated dataset types specific to this particular instrument that do
    not follow the standard organization found in obs data packages.

    These are the instrument-specific dataset types written by
    `writeAdditionalCuratedCalibrations` in addition to the calibrations
    found in obs data packages that follow the standard scheme.
    (`set` of `str`)"""

    translatorClass: MetadataTranslator | None = None
    """Class to use when extracting information from metadata. If `None`
    the metadata extraction system will determine the translator class itself.
    This class can also be used to calculate the observing day offset in some
    scenarios.
    """

141 

    @property
    @abstractmethod
    def filterDefinitions(self) -> FilterDefinitionCollection:
        """`~lsst.obs.base.FilterDefinitionCollection`, defining the filters
        for this instrument.

        Concrete subclasses must implement this; it is consulted both for
        filter registration and for curated-calibration ingestion.
        """
        raise NotImplementedError()

149 

    def __init__(self, collection_prefix: str | None = None):
        # All construction (including handling of ``collection_prefix``) is
        # delegated to the lsst.pipe.base.Instrument base class.
        super().__init__(collection_prefix=collection_prefix)

152 

153 @classmethod 

154 @lru_cache 

155 def getCuratedCalibrationNames(cls) -> frozenset[str]: 

156 """Return the names of all the curated calibration dataset types. 

157 

158 Returns 

159 ------- 

160 names : `frozenset` of `str` 

161 The dataset type names of all curated calibrations. This will 

162 include the standard curated calibrations even if the particular 

163 instrument does not support them. 

164 

165 Notes 

166 ----- 

167 The returned list does not indicate whether a particular dataset 

168 is present in the Butler repository, simply that these are the 

169 dataset types that are handled by `writeCuratedCalibrations`. 

170 """ 

171 # Camera is a special dataset type that is also handled as a 

172 # curated calibration. 

173 curated = {"camera"} 

174 

175 # Make a cursory attempt to filter out curated dataset types 

176 # that are not present for this instrument 

177 for datasetTypeName in cls.standardCuratedDatasetTypes: 

178 calibPath = cls._getSpecificCuratedCalibrationPath(datasetTypeName) 

179 if calibPath is not None: 

180 curated.add(datasetTypeName) 

181 

182 curated.update(cls.additionalCuratedDatasetTypes) 

183 return frozenset(curated) 

184 

    @abstractmethod
    def getCamera(self) -> Camera:
        """Retrieve the cameraGeom representation of this instrument.

        This is a temporary API that should go away once ``obs`` packages have
        a standardized approach to writing versioned cameras to a Gen3 repo.
        """
        raise NotImplementedError()

193 

194 @classmethod 

195 @lru_cache 

196 def getObsDataPackageDir(cls) -> str | None: 

197 """Return the root of the obs data package that provides 

198 specializations for this instrument. 

199 

200 Returns 

201 ------- 

202 dir : `str` or `None` 

203 The root of the relevant obs data package, or `None` if this 

204 instrument does not have one. 

205 

206 Notes 

207 ----- 

208 This is a less portable version of ``getObsDataPackageRoot``. Please 

209 use that method in new code. 

210 """ 

211 if cls.obsDataPackage is None: 

212 return None 

213 return getPackageDir(cls.obsDataPackage) 

214 

215 @classmethod 

216 @lru_cache 

217 def getObsDataPackageRoot(cls) -> ResourcePath | None: 

218 """Return the root of the obs data package that provides 

219 specializations for this instrument. 

220 

221 Returns 

222 ------- 

223 dir : `lsst.resources.ResourcePath` or `None` 

224 The root URI of the file resources of the relevant obs data 

225 package, or `None` if this instrument does not have one. 

226 """ 

227 if cls.obsDataPackage is None: 

228 return None 

229 return ResourcePath(f"eups://{cls.obsDataPackage}/", forceDirectory=True) 

230 

231 def _registerFilters(self, registry: Registry, update: bool = False) -> None: 

232 """Register the physical and abstract filter Dimension relationships. 

233 This should be called in the `register` implementation, within 

234 a transaction context manager block. 

235 

236 Parameters 

237 ---------- 

238 registry : `lsst.daf.butler.Registry` 

239 The registry to add dimensions to. 

240 update : `bool`, optional 

241 If `True` (`False` is default), update existing records if they 

242 differ from the new ones. 

243 """ 

244 for filter in self.filterDefinitions: 

245 # fix for undefined abstract filters causing trouble in the 

246 # registry: 

247 if filter.band is None: 

248 band = filter.physical_filter 

249 else: 

250 band = filter.band 

251 

252 registry.syncDimensionData( 

253 "physical_filter", 

254 {"instrument": self.getName(), "name": filter.physical_filter, "band": band}, 

255 update=update, 

256 ) 

257 

    def writeCuratedCalibrations(
        self, butler: Butler, collection: str | None = None, labels: Sequence[str] = ()
    ) -> None:
        """Write human-curated calibration Datasets to the given Butler with
        the appropriate validity ranges.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to use to store these calibrations.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range. If this collection already exists,
            it must be a `~lsst.daf.butler.CollectionType.CALIBRATION`
            collection, and it must not have any datasets that would conflict
            with those inserted by this method. If `None`, a collection name
            is worked out automatically from the instrument name and other
            metadata by calling
            `~lsst.obs.base.Instrument.makeCalibrationCollectionName`, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `collections.abc.Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter. If provided,
            these are inserted into the names of the
            `~lsst.daf.butler.CollectionType.RUN` collections that datasets are
            inserted directly into, as well the
            `~lsst.daf.butler.CollectionType.CALIBRATION` collection if it is
            generated automatically (i.e., if ``collection is None``).
            Usually this is just the name of the ticket on which the
            calibration collection is being created.

        Notes
        -----
        Expected to be called from subclasses. The base method calls
        ``writeCameraGeom``, ``writeStandardTextCuratedCalibrations``,
        and ``writeAdditionalCuratedCalibrations``.
        """
        # Delegate registration of collections (and creating names for them)
        # to other methods so they can be called independently with the same
        # preconditions. Collection registration is idempotent, so this is
        # safe, and while it adds a bit of overhead, as long as it's one
        # registration attempt per method (not per dataset or dataset type),
        # that's negligible.
        self.writeCameraGeom(butler, collection, labels=labels)
        self.writeStandardTextCuratedCalibrations(butler, collection, labels=labels)
        self.writeAdditionalCuratedCalibrations(butler, collection, labels=labels)

306 

    def writeAdditionalCuratedCalibrations(
        self, butler: Butler, collection: str | None = None, labels: Sequence[str] = ()
    ) -> None:
        """Write additional curated calibrations that might be instrument
        specific and are not part of the standard set.

        Default implementation does nothing.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to use to store these calibrations.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range. If this collection already exists,
            it must be a `~lsst.daf.butler.CollectionType.CALIBRATION`
            collection, and it must not have any datasets that would conflict
            with those inserted by this method. If `None`, a collection name
            is worked out automatically from the instrument name and other
            metadata by calling
            `~lsst.obs.base.Instrument.makeCalibrationCollectionName`, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `collections.abc.Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter. If provided,
            these are inserted into the names of the
            `~lsst.daf.butler.CollectionType.RUN` collections that datasets are
            inserted directly into, as well the
            `~lsst.daf.butler.CollectionType.CALIBRATION` collection if it is
            generated automatically (i.e. if ``collection is None``). Usually
            this is just the name of the ticket on which the calibration
            collection is being created.
        """
        # Intentionally a no-op hook; instrument subclasses override this to
        # ingest non-standard curated calibrations.
        return

343 

344 def writeCameraGeom( 

345 self, butler: Butler, collection: str | None = None, labels: Sequence[str] = () 

346 ) -> None: 

347 """Write the default camera geometry to the butler repository and 

348 associate it with the appropriate validity range in a calibration 

349 collection. 

350 

351 Parameters 

352 ---------- 

353 butler : `lsst.daf.butler.Butler` 

354 Butler to use to store these calibrations. 

355 collection : `str`, optional 

356 Name to use for the calibration collection that associates all 

357 datasets with a validity range. If this collection already exists, 

358 it must be a `~lsst.daf.butler.CollectionType.CALIBRATION` 

359 collection, and it must not have any datasets that would conflict 

360 with those inserted by this method. If `None`, a collection name 

361 is worked out automatically from the instrument name and other 

362 metadata by calling 

363 `~lsst.obs.base.Instrument.makeCalibrationCollectionName`, but this 

364 default name may not work well for long-lived repositories unless 

365 ``labels`` is also provided (and changed every time curated 

366 calibrations are ingested). 

367 labels : `collections.abc.Sequence` [ `str` ], optional 

368 Extra strings to include in collection names, after concatenating 

369 them with the standard collection name delimiter. If provided, 

370 these are inserted into the names of the 

371 `~lsst.daf.butler.CollectionType.RUN` collections that datasets are 

372 inserted directly into, as well the 

373 `~lsst.daf.butler.CollectionType.CALIBRATION` collection if it is 

374 generated automatically (i.e. if ``collection is None``). Usually 

375 this is just the name of the ticket on which the calibration 

376 collection is being created. 

377 """ 

378 if collection is None: 

379 collection = self.makeCalibrationCollectionName(*labels) 

380 butler.registry.registerCollection(collection, type=CollectionType.CALIBRATION) 

381 run = self.makeUnboundedCalibrationRunName(*labels) 

382 butler.registry.registerRun(run) 

383 datasetType = DatasetType( 

384 "camera", ("instrument",), "Camera", isCalibration=True, universe=butler.dimensions 

385 ) 

386 butler.registry.registerDatasetType(datasetType) 

387 camera = self.getCamera() 

388 ref = butler.put(camera, datasetType, {"instrument": self.getName()}, run=run) 

389 butler.registry.certify(collection, [ref], Timespan(begin=None, end=None)) 

390 

391 def writeStandardTextCuratedCalibrations( 

392 self, butler: Butler, collection: str | None = None, labels: Sequence[str] = () 

393 ) -> None: 

394 """Write the set of standardized curated text calibrations to 

395 the repository. 

396 

397 Parameters 

398 ---------- 

399 butler : `lsst.daf.butler.Butler` 

400 Butler to receive these calibration datasets. 

401 collection : `str`, optional 

402 Name to use for the calibration collection that associates all 

403 datasets with a validity range. If this collection already exists, 

404 it must be a `~lsst.daf.butler.CollectionType.CALIBRATION` 

405 collection, and it must not have any datasets that would conflict 

406 with those inserted by this method. If `None`, a collection name 

407 is worked out automatically from the instrument name and other 

408 metadata by calling 

409 `~lsst.obs.base.Instrument.makeCalibrationCollectionName`, but this 

410 default name may not work well for long-lived repositories unless 

411 ``labels`` is also provided (and changed every time curated 

412 calibrations are ingested). 

413 labels : `collections.abc.Sequence` [ `str` ], optional 

414 Extra strings to include in collection names, after concatenating 

415 them with the standard collection name delimiter. If provided, 

416 these are inserted into the names of the 

417 `~lsst.daf.butler.CollectionType.RUN` collections that datasets are 

418 inserted directly into, as well the 

419 `~lsst.daf.butler.CollectionType.CALIBRATION` collection if it is 

420 generated automatically (i.e., if ``collection is None``). Usually 

421 this is just the name of the ticket on which the calibration 

422 collection is being created. 

423 """ 

424 if collection is None: 

425 collection = self.makeCalibrationCollectionName(*labels) 

426 butler.registry.registerCollection(collection, type=CollectionType.CALIBRATION) 

427 runs: set[str] = set() 

428 for datasetTypeName in self.standardCuratedDatasetTypes: 

429 # We need to define the dataset types. 

430 if datasetTypeName not in StandardCuratedCalibrationDatasetTypes: 

431 raise ValueError( 

432 f"DatasetType {datasetTypeName} not in understood list" 

433 f" [{'.'.join(StandardCuratedCalibrationDatasetTypes)}]" 

434 ) 

435 definition = StandardCuratedCalibrationDatasetTypes[datasetTypeName] 

436 datasetType = DatasetType( 

437 datasetTypeName, 

438 universe=butler.dimensions, 

439 isCalibration=True, 

440 # MyPy should be able to figure out that the kwargs here have 

441 # the right types, but it can't. 

442 **definition, # type: ignore 

443 ) 

444 self._writeSpecificCuratedCalibrationDatasets( 

445 butler, datasetType, collection, runs=runs, labels=labels 

446 ) 

447 

448 @classmethod 

449 def _getSpecificCuratedCalibrationPath(cls, datasetTypeName: str) -> ResourcePath | None: 

450 """Return the path of the curated calibration directory. 

451 

452 Parameters 

453 ---------- 

454 datasetTypeName : `str` 

455 The name of the standard dataset type to find. 

456 

457 Returns 

458 ------- 

459 path : `str` or `None` 

460 The path to the standard curated data directory. `None` if the 

461 dataset type is not found or the obs data package is not 

462 available. 

463 """ 

464 data_package_dir = cls.getObsDataPackageRoot() 

465 if data_package_dir is None: 

466 # if there is no data package then there can't be datasets 

467 return None 

468 

469 if cls.policyName is None: 

470 raise TypeError(f"Instrument {cls.getName()} has an obs data package but no policy name.") 

471 

472 calibPath = data_package_dir.join(cls.policyName).join(datasetTypeName, forceDirectory=True) 

473 

474 if calibPath.exists(): 

475 return calibPath 

476 

477 return None 

478 

    def _writeSpecificCuratedCalibrationDatasets(
        self, butler: Butler, datasetType: DatasetType, collection: str, runs: set[str], labels: Sequence[str]
    ) -> None:
        """Write standardized curated calibration datasets for this specific
        dataset type from an obs data package.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Gen3 butler in which to put the calibrations.
        datasetType : `lsst.daf.butler.DatasetType`
            Dataset type to be put.
        collection : `str`
            Name of the `~lsst.daf.butler.CollectionType.CALIBRATION`
            collection that associates all datasets with validity ranges.
            Must have been registered prior to this call.
        runs : `set` [ `str` ]
            Names of runs that have already been registered by previous calls
            and need not be registered again. Should be updated by this
            method as new runs are registered.
        labels : `collections.abc.Sequence` [ `str` ]
            Extra strings to include in run names when creating them from
            ``CALIBDATE`` metadata, via calls to `makeCuratedCalibrationName`.
            Usually this is the name of the ticket on which the calibration
            collection is being created.

        Notes
        -----
        This method scans the location defined in the ``obsDataPackageDir``
        class attribute for curated calibrations corresponding to the
        supplied dataset type. The directory name in the data package must
        match the name of the dataset type. They are assumed to use the
        standard layout and can be read by
        `~lsst.obs.base._read_curated_calibs.read_all` and provide standard
        metadata.
        """
        calibPath = self._getSpecificCuratedCalibrationPath(datasetType.name)
        if calibPath is None:
            # This instrument's data package has no files for this dataset
            # type; nothing to do.
            return

        # Register the dataset type
        butler.registry.registerDatasetType(datasetType)
        _LOG.info("Processing %r curated calibration", datasetType.name)

        # The class to use to read these calibrations comes from the storage
        # class.
        calib_class: Any
        calib_class = datasetType.storageClass.pytype
        if not hasattr(calib_class, "readText"):
            # Let's try the default calib class. All curated
            # calibrations should be subclasses of that, and the
            # parent can identify the correct one to use.
            calib_class = doImport("lsst.ip.isr.IsrCalib")

        calib_class = cast(type[CuratedCalibration], calib_class)

        # Read calibs, registering a new run for each CALIBDATE as needed.
        # We try to avoid registering runs multiple times as an optimization
        # by putting them in the ``runs`` set that was passed in.
        camera = self.getCamera()
        filters = set(self.filterDefinitions.physical_to_band.keys())
        calib_dimensions: list[str]
        if datasetType.name in StandardCuratedCalibrationDatasetTypes:
            calib_dimensions = list(StandardCuratedCalibrationDatasetTypes[datasetType.name]["dimensions"])
        else:
            # This should never trigger with real data, but will
            # trigger on the unit tests.
            _LOG.warning(
                "Unknown curated calibration type %s. Attempting to use supplied definition.",
                datasetType.name,
            )
            calib_dimensions = list(datasetType.dimensions.names)

        # NOTE(review): ``calib_type`` is returned by read_all but unused
        # here — confirm it can be dropped.
        calibsDict, calib_type = read_all(calibPath, camera, calib_class, calib_dimensions, filters)

        datasetRecords = []
        for path in calibsDict:
            times = sorted(calibsDict[path])
            calibs = [calibsDict[path][time] for time in times]
            # Each calibration is valid from its own timestamp up to the next
            # one; the trailing None gives the final calibration an
            # open-ended validity range.
            atimes: list[astropy.time.Time | None] = [
                astropy.time.Time(t, format="datetime", scale="utc") for t in times
            ]
            atimes += [None]
            for calib, beginTime, endTime in zip(calibs, atimes[:-1], atimes[1:], strict=True):
                md = calib.getMetadata()
                run = self.makeCuratedCalibrationRunName(md["CALIBDATE"], *labels)
                if run not in runs:
                    butler.registry.registerRun(run)
                    runs.add(run)

                # DETECTOR and FILTER keywords in the calibration
                # metadata must exist if the calibration depends on
                # those dimensions.
                dimension_arguments = {}
                if "DETECTOR" in md:
                    dimension_arguments["detector"] = md["DETECTOR"]
                if "FILTER" in md:
                    dimension_arguments["physical_filter"] = md["FILTER"]

                dataId = DataCoordinate.standardize(
                    universe=butler.dimensions,
                    instrument=self.getName(),
                    **dimension_arguments,
                )
                datasetRecords.append((calib, dataId, run, Timespan(beginTime, endTime)))

        # Second loop actually does the inserts and filesystem writes. We
        # first do a butler.put on each dataset, inserting it into the run for
        # its calibDate. We remember those refs and group them by timespan, so
        # we can vectorize the certify calls as much as possible.
        refsByTimespan = defaultdict(list)
        with butler.transaction():
            for calib, dataId, run, timespan in datasetRecords:
                refsByTimespan[timespan].append(butler.put(calib, datasetType, dataId, run=run))
            for timespan, refs in refsByTimespan.items():
                butler.registry.certify(collection, refs, timespan)

595 

596 @classmethod 

597 def group_name_to_group_id(cls, group_name: str) -> int: 

598 """Translate the exposure group name to an integer. 

599 

600 Parameters 

601 ---------- 

602 group_name : `str` 

603 The name of the exposure group. 

604 

605 Returns 

606 ------- 

607 id : `int` 

608 The exposure group name in integer form. This integer might be 

609 used as an ID to uniquely identify the group in contexts where 

610 a string can not be used. 

611 

612 Notes 

613 ----- 

614 The default implementation removes all non numeric characters and casts 

615 to an integer. 

616 """ 

617 cleaned = re.sub(r"\D", "", group_name) 

618 return int(cleaned) 

619 

    def get_curated_calibration_labels(self) -> list[str]:
        """Return appropriate labels (pieces of a collection name) for a
        collection populated by `writeCuratedCalibrations`.

        If this returns an empty list (as the default implementation does),
        the user will be required to provide a label.
        """
        # No default labels; instrument subclasses may override.
        return []

628 

629 

def makeExposureRecordFromObsInfo(
    obsInfo: ObservationInfo, universe: DimensionUniverse, **kwargs: Any
) -> DimensionRecord:
    """Construct an exposure DimensionRecord from
    `astro_metadata_translator.ObservationInfo`.

    Parameters
    ----------
    obsInfo : `astro_metadata_translator.ObservationInfo`
        A `~astro_metadata_translator.ObservationInfo` object corresponding to
        the exposure.
    universe : `~lsst.daf.butler.DimensionUniverse`
        Set of all known dimensions.
    **kwargs
        Additional field values for this record.

    Returns
    -------
    record : `~lsst.daf.butler.DimensionRecord`
        A record containing exposure metadata, suitable for insertion into
        a `~lsst.daf.butler.Registry`.

    Raises
    ------
    RuntimeError
        Raised if the universe supports neither an implied ``group``
        dimension nor a ``group_name`` metadata field.
    """
    dimension = universe["exposure"]

    # Some registries support additional items.
    supported = {meta.name for meta in dimension.metadata}

    # Pointing-related values default to None and are only filled in when
    # the corresponding ObservationInfo attributes are populated.
    ra, dec, sky_angle, azimuth, zenith_angle = (None, None, None, None, None)
    if obsInfo.tracking_radec is not None:
        icrs = obsInfo.tracking_radec.icrs
        ra = float(icrs.ra.degree)
        dec = float(icrs.dec.degree)
        if obsInfo.boresight_rotation_coord == "sky":
            assert obsInfo.boresight_rotation_angle is not None
            sky_angle = float(obsInfo.boresight_rotation_angle.degree)
    if obsInfo.altaz_begin is not None:
        zenith_angle = float(obsInfo.altaz_begin.zen.degree)
        azimuth = float(obsInfo.altaz_begin.az.degree)

    # Optional fields: include each only if this universe's exposure
    # dimension declares the corresponding metadata field.
    extras: dict[str, Any] = {}
    for meta_key, info_key in (
        ("has_simulated", "has_simulated_content"),
        ("seq_start", "group_counter_start"),
        ("seq_end", "group_counter_end"),
        ("can_see_sky", "can_see_sky"),
    ):
        if meta_key in supported:
            extras[meta_key] = getattr(obsInfo, info_key)

    if (k := "azimuth") in supported:
        extras[k] = float(azimuth) if azimuth is not None else None

    # Store the exposure group wherever this universe accepts it: an implied
    # "group" dimension, or legacy group_name/group_id metadata fields.
    if "group" in dimension.implied:
        extras["group"] = obsInfo.exposure_group
    elif "group_name" in supported:
        extras["group_name"] = obsInfo.exposure_group
        extras["group_id"] = obsInfo.visit_id
    else:
        raise RuntimeError(f"Unable to determine where to put group metadata in exposure record: {supported}")

    # In some bad observations, the end time is before the begin time. We
    # can not let that be ingested as-is because it becomes an unbounded
    # timespan that will not work correctly with calibration lookups. Instead
    # force the end time to be the begin time.
    datetime_end = obsInfo.datetime_end
    assert obsInfo.datetime_begin is not None
    if datetime_end < obsInfo.datetime_begin:
        datetime_end = obsInfo.datetime_begin
        _LOG.warning(
            "Exposure %s:%s has end time before begin time. Forcing it to use the begin time.",
            obsInfo.instrument,
            obsInfo.observation_id,
        )

    assert obsInfo.exposure_time_requested is not None
    return dimension.RecordClass(
        instrument=obsInfo.instrument,
        id=obsInfo.exposure_id,
        obs_id=obsInfo.observation_id,
        datetime_begin=obsInfo.datetime_begin,
        datetime_end=datetime_end,
        # NOTE(review): exposure_time uses the *requested* exposure time,
        # not a measured one — confirm this is intentional.
        exposure_time=float(obsInfo.exposure_time_requested.to_value("s")),
        # we are not mandating that dark_time be calculable
        dark_time=float(obsInfo.dark_time.to_value("s")) if obsInfo.dark_time is not None else None,
        observation_type=obsInfo.observation_type,
        observation_reason=obsInfo.observation_reason,
        day_obs=obsInfo.observing_day,
        seq_num=obsInfo.observation_counter,
        physical_filter=obsInfo.physical_filter,
        science_program=obsInfo.science_program,
        target_name=obsInfo.object,
        tracking_ra=ra,
        tracking_dec=dec,
        sky_angle=sky_angle,
        zenith_angle=zenith_angle,
        **extras,
        **kwargs,
    )

728 

729 

def loadCamera(butler: Butler, dataId: DataId, *, collections: Any = None) -> tuple[Camera, bool]:
    """Attempt to load versioned camera geometry from a butler, but fall back
    to obtaining a nominal camera from the `Instrument` class if that fails.

    Parameters
    ----------
    butler : `lsst.daf.butler.Butler`
        Butler instance to attempt to query for and load a ``camera`` dataset
        from.
    dataId : `dict` or `~lsst.daf.butler.DataCoordinate`
        Data ID that identifies at least the ``instrument`` and ``exposure``
        dimensions.
    collections : `typing.Any`, optional
        Collections to be searched, overriding
        ``self.butler.collections.defaults``. Can be any of the types
        supported by the ``collections`` argument to butler construction.

    Returns
    -------
    camera : `lsst.afw.cameraGeom.Camera`
        Camera object.
    versioned : `bool`
        If `True`, the camera was obtained from the butler and should
        represent a versioned camera from a calibration repository. If
        `False`, no camera datasets were found, and the returned camera was
        produced by instantiating the appropriate `Instrument` class and
        calling `Instrument.getCamera`.

    Raises
    ------
    LookupError
        Raised when ``dataId`` does not specify a valid data ID.
    """
    if collections is None:
        collections = list(butler.collections.defaults)

    # Expand the data ID up front: the registry would do this internally
    # anyway, but we want the expanded form ourselves later, and doing it
    # here ensures it happens only once. It also catches problems with the
    # data ID not having the keys we need.
    try:
        dataId = butler.registry.expandDataId(dataId, dimensions=butler.dimensions["exposure"].minimal_group)
    except DataIdError as exc:
        raise LookupError(str(exc)) from exc

    try:
        versioned_camera = butler.get("camera", dataId=dataId, collections=collections)
    except LookupError:
        pass
    else:
        return versioned_camera, True

    # No versioned camera found: fall back to the nominal camera from the
    # Instrument class. We know an instrument data ID is a value, but MyPy
    # doesn't.
    instrument = Instrument.fromName(dataId["instrument"], butler.registry)  # type: ignore
    assert isinstance(instrument, Instrument)  # for mypy
    return instrument.getCamera(), False