Coverage for python/lsst/obs/base/_instrument.py: 27%

210 statements  

« prev     ^ index     » next       coverage.py v7.2.1, created at 2023-03-12 01:53 -0800

1# This file is part of obs_base. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (https://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# This program is free software: you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation, either version 3 of the License, or 

12# (at your option) any later version. 

13# 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18# 

19# You should have received a copy of the GNU General Public License 

20# along with this program. If not, see <http://www.gnu.org/licenses/>. 

21 

22from __future__ import annotations 

23 

24__all__ = ("Instrument", "makeExposureRecordFromObsInfo", "loadCamera") 

25 

26import os.path 

27from abc import ABCMeta, abstractmethod 

28from collections import defaultdict 

29import datetime 

30from typing import Any, Optional, Set, Sequence, Tuple, TYPE_CHECKING, Union 

31from functools import lru_cache 

32 

33import astropy.time 

34 

35from lsst.afw.cameraGeom import Camera 

36from lsst.daf.butler import ( 

37 Butler, 

38 CollectionType, 

39 DataCoordinate, 

40 DataId, 

41 DatasetType, 

42 Timespan, 

43) 

44from lsst.utils import getPackageDir, doImport 

45 

46if TYPE_CHECKING: 46 ↛ 47line 46 didn't jump to line 47, because the condition on line 46 was never true

47 from .gen2to3 import TranslatorFactory 

48 from lsst.daf.butler import Registry 

49 

50# To be a standard text curated calibration means that we use a 

51# standard definition for the corresponding DatasetType. 

# Every standard curated calibration is a per-detector dataset; only the
# storage class differs between dataset types.
StandardCuratedCalibrationDatasetTypes = {
    name: {"dimensions": ("instrument", "detector"), "storageClass": storageClass}
    for name, storageClass in (
        ("defects", "Defects"),
        ("qe_curve", "QECurve"),
        ("crosstalk", "CrosstalkCalib"),
        ("linearizer", "Linearizer"),
        ("bfk", "BrighterFatterKernel"),
    )
}

59 

60 

61class Instrument(metaclass=ABCMeta): 

62 """Base class for instrument-specific logic for the Gen3 Butler. 

63 

64 Parameters 

65 ---------- 

66 collection_prefix : `str`, optional 

67 Prefix for collection names to use instead of the instrument's own name.

68 This is primarily for use in simulated-data repositories, where the 

69 instrument name may not be necessary and/or sufficient to distinguish 

70 between collections. 

71 

72 Notes 

73 ----- 

74 Concrete instrument subclasses must have the same construction signature as 

75 the base class. 

76 """ 

77 

    configPaths: Sequence[str] = ()
    """Paths to config files to read for specific Tasks.

    The paths in this list should contain files of the form `task.py`, for
    each of the Tasks that requires special configuration.
    """

    policyName: Optional[str] = None
    """Instrument specific name to use when locating a policy or configuration
    file in the file system."""

    obsDataPackage: Optional[str] = None
    """Name of the package containing the text curated calibration files.
    Usually an obs _data package.  If `None` no curated calibration files
    will be read. (`str`)"""

    standardCuratedDatasetTypes: Set[str] = frozenset(StandardCuratedCalibrationDatasetTypes)
    """The dataset types expected to be obtained from the obsDataPackage.

    These dataset types are all required to have standard definitions and
    must be known to the base class.  Clearing this list will prevent
    any of these calibrations from being stored.  If a dataset type is not
    known to a specific instrument it can still be included in this list
    since the data package is the source of truth. (`set` of `str`)
    """

    additionalCuratedDatasetTypes: Set[str] = frozenset()
    """Curated dataset types specific to this particular instrument that do
    not follow the standard organization found in obs data packages.

    These are the instrument-specific dataset types written by
    `writeAdditionalCuratedCalibrations` in addition to the calibrations
    found in obs data packages that follow the standard scheme.
    (`set` of `str`)"""

112 

    @property
    @abstractmethod
    def filterDefinitions(self):
        """`~lsst.obs.base.FilterDefinitionCollection`, defining the filters
        for this instrument.
        """
        # Abstract property: concrete instruments must override.  The
        # return is only reached if a subclass delegates to super().
        return None

121 def __init__(self, collection_prefix: Optional[str] = None): 

122 self.filterDefinitions.reset() 

123 self.filterDefinitions.defineFilters() 

124 if collection_prefix is None: 124 ↛ 126line 124 didn't jump to line 126, because the condition on line 124 was never false

125 collection_prefix = self.getName() 

126 self.collection_prefix = collection_prefix 

127 

    @classmethod
    @abstractmethod
    def getName(cls) -> str:
        """Return the short (dimension) name for this instrument.

        This is not (in general) the same as the class name - it's what is used
        as the value of the "instrument" field in data IDs, and is usually an
        abbreviation of the full name.
        """
        raise NotImplementedError()

138 

139 @classmethod 

140 @lru_cache() 

141 def getCuratedCalibrationNames(cls) -> Set[str]: 

142 """Return the names of all the curated calibration dataset types. 

143 

144 Returns 

145 ------- 

146 names : `set` of `str` 

147 The dataset type names of all curated calibrations. This will 

148 include the standard curated calibrations even if the particular 

149 instrument does not support them. 

150 

151 Notes 

152 ----- 

153 The returned list does not indicate whether a particular dataset 

154 is present in the Butler repository, simply that these are the 

155 dataset types that are handled by ``writeCuratedCalibrations``. 

156 """ 

157 

158 # Camera is a special dataset type that is also handled as a 

159 # curated calibration. 

160 curated = {"camera"} 

161 

162 # Make a cursory attempt to filter out curated dataset types 

163 # that are not present for this instrument 

164 for datasetTypeName in cls.standardCuratedDatasetTypes: 

165 calibPath = cls._getSpecificCuratedCalibrationPath(datasetTypeName) 

166 if calibPath is not None: 

167 curated.add(datasetTypeName) 

168 

169 curated.update(cls.additionalCuratedDatasetTypes) 

170 return frozenset(curated) 

171 

    @abstractmethod
    def getCamera(self) -> Camera:
        """Retrieve the cameraGeom representation of this instrument.

        This is a temporary API that should go away once ``obs`` packages have
        a standardized approach to writing versioned cameras to a Gen3 repo.
        """
        raise NotImplementedError()

180 

    @abstractmethod
    def register(self, registry: Registry, *, update: bool = False) -> None:
        """Insert instrument, physical_filter, and detector entries into a
        `Registry`.

        Parameters
        ----------
        registry : `lsst.daf.butler.Registry`
            Registry client for the data repository to modify.
        update : `bool`, optional
            If `True` (`False` is default), update existing records if they
            differ from the new ones.

        Raises
        ------
        lsst.daf.butler.registry.ConflictingDefinitionError
            Raised if any existing record has the same key but a different
            definition as one being registered.

        Notes
        -----
        New detectors and physical filters can always be added by calling this
        method multiple times, as long as no existing records have changed (if
        existing records have changed, ``update=True`` must be used).  Old
        records can never be removed by this method.

        Implementations should guarantee that registration is atomic (the
        registry should not be modified if any error occurs) and idempotent at
        the level of individual dimension entries; new detectors and filters
        should be added, but changes to any existing record should not be.
        This can generally be achieved via a block like::

            with registry.transaction():
                registry.syncDimensionData("instrument", ...)
                registry.syncDimensionData("detector", ...)
                self.registerFilters(registry)

        """
        raise NotImplementedError()

220 

    @classmethod
    @lru_cache()
    def getObsDataPackageDir(cls) -> Optional[str]:
        """The root of the obs data package that provides specializations for
        this instrument.

        Returns
        -------
        dir : `str` or `None`
            The root of the relevant obs data package, or `None` if
            ``obsDataPackage`` is not set for this instrument.
        """
        if cls.obsDataPackage is None:
            return None
        return getPackageDir(cls.obsDataPackage)

235 

236 @staticmethod 

237 def fromName(name: str, registry: Registry, collection_prefix: Optional[str] = None) -> Instrument: 

238 """Given an instrument name and a butler, retrieve a corresponding 

239 instantiated instrument object. 

240 

241 Parameters 

242 ---------- 

243 name : `str` 

244 Name of the instrument (must match the return value of `getName`). 

245 registry : `lsst.daf.butler.Registry` 

246 Butler registry to query to find the information. 

247 collection_prefix : `str`, optional 

248 Prefix for collection names to use instead of the intrument's own 

249 name. This is primarily for use in simulated-data repositories, 

250 where the instrument name may not be necessary and/or sufficient to 

251 distinguish between collections. 

252 

253 Returns 

254 ------- 

255 instrument : `Instrument` 

256 An instance of the relevant `Instrument`. 

257 

258 Notes 

259 ----- 

260 The instrument must be registered in the corresponding butler. 

261 

262 Raises 

263 ------ 

264 LookupError 

265 Raised if the instrument is not known to the supplied registry. 

266 ModuleNotFoundError 

267 Raised if the class could not be imported. This could mean 

268 that the relevant obs package has not been setup. 

269 TypeError 

270 Raised if the class name retrieved is not a string. 

271 """ 

272 records = list(registry.queryDimensionRecords("instrument", instrument=name)) 

273 if not records: 

274 raise LookupError(f"No registered instrument with name '{name}'.") 

275 cls = records[0].class_name 

276 if not isinstance(cls, str): 

277 raise TypeError(f"Unexpected class name retrieved from {name} instrument dimension (got {cls})") 

278 instrument = doImport(cls) 

279 return instrument(collection_prefix=collection_prefix) 

280 

281 @staticmethod 

282 def importAll(registry: Registry) -> None: 

283 """Import all the instruments known to this registry. 

284 

285 This will ensure that all metadata translators have been registered. 

286 

287 Parameters 

288 ---------- 

289 registry : `lsst.daf.butler.Registry` 

290 Butler registry to query to find the information. 

291 

292 Notes 

293 ----- 

294 It is allowed for a particular instrument class to fail on import. 

295 This might simply indicate that a particular obs package has 

296 not been setup. 

297 """ 

298 records = list(registry.queryDimensionRecords("instrument")) 

299 for record in records: 

300 cls = record.class_name 

301 try: 

302 doImport(cls) 

303 except Exception: 

304 pass 

305 

306 def _registerFilters(self, registry, update=False): 

307 """Register the physical and abstract filter Dimension relationships. 

308 This should be called in the `register` implementation, within 

309 a transaction context manager block. 

310 

311 Parameters 

312 ---------- 

313 registry : `lsst.daf.butler.core.Registry` 

314 The registry to add dimensions to. 

315 update : `bool`, optional 

316 If `True` (`False` is default), update existing records if they 

317 differ from the new ones. 

318 """ 

319 for filter in self.filterDefinitions: 

320 # fix for undefined abstract filters causing trouble in the 

321 # registry: 

322 if filter.band is None: 

323 band = filter.physical_filter 

324 else: 

325 band = filter.band 

326 

327 registry.syncDimensionData("physical_filter", 

328 {"instrument": self.getName(), 

329 "name": filter.physical_filter, 

330 "band": band 

331 }, 

332 update=update) 

333 

    @abstractmethod
    def getRawFormatter(self, dataId: DataCoordinate):
        """Return the Formatter class that should be used to read a particular
        raw file.

        Parameters
        ----------
        dataId : `DataCoordinate`
            Dimension-based ID for the raw file or files being ingested.

        Returns
        -------
        formatter : `Formatter` class
            Class to be used that reads the file into an
            `lsst.afw.image.Exposure` instance.
        """
        raise NotImplementedError()

351 

352 def applyConfigOverrides(self, name, config): 

353 """Apply instrument-specific overrides for a task config. 

354 

355 Parameters 

356 ---------- 

357 name : `str` 

358 Name of the object being configured; typically the _DefaultName 

359 of a Task. 

360 config : `lsst.pex.config.Config` 

361 Config instance to which overrides should be applied. 

362 """ 

363 for root in self.configPaths: 

364 path = os.path.join(root, f"{name}.py") 

365 if os.path.exists(path): 

366 config.load(path) 

367 

368 def writeCuratedCalibrations(self, butler: Butler, collection: Optional[str] = None, 

369 labels: Sequence[str] = ()) -> None: 

370 """Write human-curated calibration Datasets to the given Butler with 

371 the appropriate validity ranges. 

372 

373 Parameters 

374 ---------- 

375 butler : `lsst.daf.butler.Butler` 

376 Butler to use to store these calibrations. 

377 collection : `str`, optional 

378 Name to use for the calibration collection that associates all 

379 datasets with a validity range. If this collection already exists, 

380 it must be a `~CollectionType.CALIBRATION` collection, and it must 

381 not have any datasets that would conflict with those inserted by 

382 this method. If `None`, a collection name is worked out 

383 automatically from the instrument name and other metadata by 

384 calling ``makeCalibrationCollectionName``, but this 

385 default name may not work well for long-lived repositories unless 

386 ``labels`` is also provided (and changed every time curated 

387 calibrations are ingested). 

388 labels : `Sequence` [ `str` ], optional 

389 Extra strings to include in collection names, after concatenating 

390 them with the standard collection name delimeter. If provided, 

391 these are inserted into the names of the `~CollectionType.RUN` 

392 collections that datasets are inserted directly into, as well the 

393 `~CollectionType.CALIBRATION` collection if it is generated 

394 automatically (i.e. if ``collection is None``). Usually this is 

395 just the name of the ticket on which the calibration collection is 

396 being created. 

397 

398 Notes 

399 ----- 

400 Expected to be called from subclasses. The base method calls 

401 ``writeCameraGeom``, ``writeStandardTextCuratedCalibrations``, 

402 and ``writeAdditionalCuratdCalibrations``. 

403 """ 

404 # Delegate registration of collections (and creating names for them) 

405 # to other methods so they can be called independently with the same 

406 # preconditions. Collection registration is idempotent, so this is 

407 # safe, and while it adds a bit of overhead, as long as it's one 

408 # registration attempt per method (not per dataset or dataset type), 

409 # that's negligible. 

410 self.writeCameraGeom(butler, collection, labels=labels) 

411 self.writeStandardTextCuratedCalibrations(butler, collection, labels=labels) 

412 self.writeAdditionalCuratedCalibrations(butler, collection, labels=labels) 

413 

414 def writeAdditionalCuratedCalibrations(self, butler: Butler, collection: Optional[str] = None, 

415 labels: Sequence[str] = ()) -> None: 

416 """Write additional curated calibrations that might be instrument 

417 specific and are not part of the standard set. 

418 

419 Default implementation does nothing. 

420 

421 Parameters 

422 ---------- 

423 butler : `lsst.daf.butler.Butler` 

424 Butler to use to store these calibrations. 

425 collection : `str`, optional 

426 Name to use for the calibration collection that associates all 

427 datasets with a validity range. If this collection already exists, 

428 it must be a `~CollectionType.CALIBRATION` collection, and it must 

429 not have any datasets that would conflict with those inserted by 

430 this method. If `None`, a collection name is worked out 

431 automatically from the instrument name and other metadata by 

432 calling ``makeCalibrationCollectionName``, but this 

433 default name may not work well for long-lived repositories unless 

434 ``labels`` is also provided (and changed every time curated 

435 calibrations are ingested). 

436 labels : `Sequence` [ `str` ], optional 

437 Extra strings to include in collection names, after concatenating 

438 them with the standard collection name delimeter. If provided, 

439 these are inserted into the names of the `~CollectionType.RUN` 

440 collections that datasets are inserted directly into, as well the 

441 `~CollectionType.CALIBRATION` collection if it is generated 

442 automatically (i.e. if ``collection is None``). Usually this is 

443 just the name of the ticket on which the calibration collection is 

444 being created. 

445 """ 

446 return 

447 

448 def writeCameraGeom(self, butler: Butler, collection: Optional[str] = None, 

449 labels: Sequence[str] = ()) -> None: 

450 """Write the default camera geometry to the butler repository and 

451 associate it with the appropriate validity range in a calibration 

452 collection. 

453 

454 Parameters 

455 ---------- 

456 butler : `lsst.daf.butler.Butler` 

457 Butler to use to store these calibrations. 

458 collection : `str`, optional 

459 Name to use for the calibration collection that associates all 

460 datasets with a validity range. If this collection already exists, 

461 it must be a `~CollectionType.CALIBRATION` collection, and it must 

462 not have any datasets that would conflict with those inserted by 

463 this method. If `None`, a collection name is worked out 

464 automatically from the instrument name and other metadata by 

465 calling ``makeCalibrationCollectionName``, but this 

466 default name may not work well for long-lived repositories unless 

467 ``labels`` is also provided (and changed every time curated 

468 calibrations are ingested). 

469 labels : `Sequence` [ `str` ], optional 

470 Extra strings to include in collection names, after concatenating 

471 them with the standard collection name delimeter. If provided, 

472 these are inserted into the names of the `~CollectionType.RUN` 

473 collections that datasets are inserted directly into, as well the 

474 `~CollectionType.CALIBRATION` collection if it is generated 

475 automatically (i.e. if ``collection is None``). Usually this is 

476 just the name of the ticket on which the calibration collection is 

477 being created. 

478 """ 

479 if collection is None: 

480 collection = self.makeCalibrationCollectionName(*labels) 

481 butler.registry.registerCollection(collection, type=CollectionType.CALIBRATION) 

482 run = self.makeUnboundedCalibrationRunName(*labels) 

483 butler.registry.registerRun(run) 

484 datasetType = DatasetType("camera", ("instrument",), "Camera", isCalibration=True, 

485 universe=butler.registry.dimensions) 

486 butler.registry.registerDatasetType(datasetType) 

487 camera = self.getCamera() 

488 ref = butler.put(camera, datasetType, {"instrument": self.getName()}, run=run) 

489 butler.registry.certify(collection, [ref], Timespan(begin=None, end=None)) 

490 

491 def writeStandardTextCuratedCalibrations(self, butler: Butler, collection: Optional[str] = None, 

492 labels: Sequence[str] = ()) -> None: 

493 """Write the set of standardized curated text calibrations to 

494 the repository. 

495 

496 Parameters 

497 ---------- 

498 butler : `lsst.daf.butler.Butler` 

499 Butler to receive these calibration datasets. 

500 collection : `str`, optional 

501 Name to use for the calibration collection that associates all 

502 datasets with a validity range. If this collection already exists, 

503 it must be a `~CollectionType.CALIBRATION` collection, and it must 

504 not have any datasets that would conflict with those inserted by 

505 this method. If `None`, a collection name is worked out 

506 automatically from the instrument name and other metadata by 

507 calling ``makeCalibrationCollectionName``, but this 

508 default name may not work well for long-lived repositories unless 

509 ``labels`` is also provided (and changed every time curated 

510 calibrations are ingested). 

511 labels : `Sequence` [ `str` ], optional 

512 Extra strings to include in collection names, after concatenating 

513 them with the standard collection name delimeter. If provided, 

514 these are inserted into the names of the `~CollectionType.RUN` 

515 collections that datasets are inserted directly into, as well the 

516 `~CollectionType.CALIBRATION` collection if it is generated 

517 automatically (i.e. if ``collection is None``). Usually this is 

518 just the name of the ticket on which the calibration collection is 

519 being created. 

520 """ 

521 if collection is None: 

522 collection = self.makeCalibrationCollectionName(*labels) 

523 butler.registry.registerCollection(collection, type=CollectionType.CALIBRATION) 

524 runs = set() 

525 for datasetTypeName in self.standardCuratedDatasetTypes: 

526 # We need to define the dataset types. 

527 if datasetTypeName not in StandardCuratedCalibrationDatasetTypes: 

528 raise ValueError(f"DatasetType {datasetTypeName} not in understood list" 

529 f" [{'.'.join(StandardCuratedCalibrationDatasetTypes)}]") 

530 definition = StandardCuratedCalibrationDatasetTypes[datasetTypeName] 

531 datasetType = DatasetType(datasetTypeName, 

532 universe=butler.registry.dimensions, 

533 isCalibration=True, 

534 **definition) 

535 self._writeSpecificCuratedCalibrationDatasets(butler, datasetType, collection, runs=runs, 

536 labels=labels) 

537 

538 @classmethod 

539 def _getSpecificCuratedCalibrationPath(cls, datasetTypeName): 

540 """Return the path of the curated calibration directory. 

541 

542 Parameters 

543 ---------- 

544 datasetTypeName : `str` 

545 The name of the standard dataset type to find. 

546 

547 Returns 

548 ------- 

549 path : `str` 

550 The path to the standard curated data directory. `None` if the 

551 dataset type is not found or the obs data package is not 

552 available. 

553 """ 

554 if cls.getObsDataPackageDir() is None: 

555 # if there is no data package then there can't be datasets 

556 return None 

557 

558 calibPath = os.path.join(cls.getObsDataPackageDir(), cls.policyName, 

559 datasetTypeName) 

560 

561 if os.path.exists(calibPath): 

562 return calibPath 

563 

564 return None 

565 

    def _writeSpecificCuratedCalibrationDatasets(self, butler: Butler, datasetType: DatasetType,
                                                 collection: str, runs: Set[str], labels: Sequence[str]):
        """Write standardized curated calibration datasets for this specific
        dataset type from an obs data package.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Gen3 butler in which to put the calibrations.
        datasetType : `lsst.daf.butler.DatasetType`
            Dataset type to be put.
        collection : `str`
            Name of the `~CollectionType.CALIBRATION` collection that
            associates all datasets with validity ranges.  Must have been
            registered prior to this call.
        runs : `set` [ `str` ]
            Names of runs that have already been registered by previous calls
            and need not be registered again.  Should be updated by this
            method as new runs are registered.
        labels : `Sequence` [ `str` ]
            Extra strings to include in run names when creating them from
            ``CALIBDATE`` metadata, via calls to `makeCuratedCalibrationName`.
            Usually this is the name of the ticket on which the calibration
            collection is being created.

        Notes
        -----
        This method scans the location defined in the ``obsDataPackageDir``
        class attribute for curated calibrations corresponding to the
        supplied dataset type.  The directory name in the data package must
        match the name of the dataset type.  They are assumed to use the
        standard layout and can be read by
        `~lsst.pipe.tasks.read_curated_calibs.read_all` and provide standard
        metadata.
        """
        calibPath = self._getSpecificCuratedCalibrationPath(datasetType.name)
        if calibPath is None:
            # Nothing to do: this instrument's data package has no files
            # for this dataset type.
            return

        # Register the dataset type
        butler.registry.registerDatasetType(datasetType)

        # obs_base can't depend on pipe_tasks but concrete obs packages
        # can -- we therefore have to defer import
        from lsst.pipe.tasks.read_curated_calibs import read_all

        # Read calibs, registering a new run for each CALIBDATE as needed.
        # We try to avoid registering runs multiple times as an optimization
        # by putting them in the ``runs`` set that was passed in.
        camera = self.getCamera()
        calibsDict = read_all(calibPath, camera)[0]  # second return is calib type
        datasetRecords = []
        for det in calibsDict:
            # Each calibration's validity range runs from its own timestamp
            # to the next calibration's timestamp; the newest is open-ended
            # (None sentinel appended below).
            times = sorted([k for k in calibsDict[det]])
            calibs = [calibsDict[det][time] for time in times]
            times = [astropy.time.Time(t, format="datetime", scale="utc") for t in times]
            times += [None]
            for calib, beginTime, endTime in zip(calibs, times[:-1], times[1:]):
                md = calib.getMetadata()
                run = self.makeCuratedCalibrationRunName(md['CALIBDATE'], *labels)
                if run not in runs:
                    butler.registry.registerRun(run)
                    runs.add(run)
                dataId = DataCoordinate.standardize(
                    universe=butler.registry.dimensions,
                    instrument=self.getName(),
                    detector=md["DETECTOR"],
                )
                datasetRecords.append((calib, dataId, run, Timespan(beginTime, endTime)))

        # Second loop actually does the inserts and filesystem writes.  We
        # first do a butler.put on each dataset, inserting it into the run for
        # its calibDate.  We remember those refs and group them by timespan, so
        # we can vectorize the certify calls as much as possible.
        refsByTimespan = defaultdict(list)
        with butler.transaction():
            for calib, dataId, run, timespan in datasetRecords:
                refsByTimespan[timespan].append(butler.put(calib, datasetType, dataId, run=run))
            for timespan, refs in refsByTimespan.items():
                butler.registry.certify(collection, refs, timespan)

646 

    @abstractmethod
    def makeDataIdTranslatorFactory(self) -> TranslatorFactory:
        """Return a factory for creating Gen2->Gen3 data ID translators,
        specialized for this instrument.

        Derived class implementations should generally call
        `TranslatorFactory.addGenericInstrumentRules` with appropriate
        arguments, but are not required to (and may not be able to if their
        Gen2 raw data IDs are sufficiently different from the HSC/DECam/CFHT
        norm).

        Returns
        -------
        factory : `TranslatorFactory`.
            Factory for `Translator` objects.
        """
        raise NotImplementedError("Must be implemented by derived classes.")

664 

665 @staticmethod 

666 def formatCollectionTimestamp(timestamp: Union[str, datetime.datetime]) -> str: 

667 """Format a timestamp for use in a collection name. 

668 

669 Parameters 

670 ---------- 

671 timestamp : `str` or `datetime.datetime` 

672 Timestamp to format. May be a date or datetime string in extended 

673 ISO format (assumed UTC), with or without a timezone specifier, a 

674 datetime string in basic ISO format with a timezone specifier, a 

675 naive `datetime.datetime` instance (assumed UTC) or a 

676 timezone-aware `datetime.datetime` instance (converted to UTC). 

677 This is intended to cover all forms that string ``CALIBDATE`` 

678 metadata values have taken in the past, as well as the format this 

679 method itself writes out (to enable round-tripping). 

680 

681 Returns 

682 ------- 

683 formatted : `str` 

684 Standardized string form for the timestamp. 

685 """ 

686 if isinstance(timestamp, str): 

687 if "-" in timestamp: 

688 # extended ISO format, with - and : delimiters 

689 timestamp = datetime.datetime.fromisoformat(timestamp) 

690 else: 

691 # basic ISO format, with no delimiters (what this method 

692 # returns) 

693 timestamp = datetime.datetime.strptime(timestamp, "%Y%m%dT%H%M%S%z") 

694 if not isinstance(timestamp, datetime.datetime): 

695 raise TypeError(f"Unexpected date/time object: {timestamp!r}.") 

696 if timestamp.tzinfo is not None: 

697 timestamp = timestamp.astimezone(datetime.timezone.utc) 

698 return f"{timestamp:%Y%m%dT%H%M%S}Z" 

699 

700 @staticmethod 

701 def makeCollectionTimestamp() -> str: 

702 """Create a timestamp string for use in a collection name from the 

703 current time. 

704 

705 Returns 

706 ------- 

707 formatted : `str` 

708 Standardized string form of the current time. 

709 """ 

710 return Instrument.formatCollectionTimestamp(datetime.datetime.now(tz=datetime.timezone.utc)) 

711 

712 def makeDefaultRawIngestRunName(self) -> str: 

713 """Make the default instrument-specific run collection string for raw 

714 data ingest. 

715 

716 Returns 

717 ------- 

718 coll : `str` 

719 Run collection name to be used as the default for ingestion of 

720 raws. 

721 """ 

722 return self.makeCollectionName("raw", "all") 

723 

724 def makeUnboundedCalibrationRunName(self, *labels: str) -> str: 

725 """Make a RUN collection name appropriate for inserting calibration 

726 datasets whose validity ranges are unbounded. 

727 

728 Parameters 

729 ---------- 

730 *labels : `str` 

731 Extra strings to be included in the base name, using the default 

732 delimiter for collection names. Usually this is the name of the 

733 ticket on which the calibration collection is being created. 

734 

735 Returns 

736 ------- 

737 name : `str` 

738 Run collection name. 

739 """ 

740 return self.makeCollectionName("calib", *labels, "unbounded") 

741 

742 def makeCuratedCalibrationRunName(self, calibDate: str, *labels: str) -> str: 

743 """Make a RUN collection name appropriate for inserting curated 

744 calibration datasets with the given ``CALIBDATE`` metadata value. 

745 

746 Parameters 

747 ---------- 

748 calibDate : `str` 

749 The ``CALIBDATE`` metadata value. 

750 *labels : `str` 

751 Strings to be included in the collection name (before 

752 ``calibDate``, but after all other terms), using the default 

753 delimiter for collection names. Usually this is the name of the 

754 ticket on which the calibration collection is being created. 

755 

756 Returns 

757 ------- 

758 name : `str` 

759 Run collection name. 

760 """ 

761 return self.makeCollectionName("calib", *labels, "curated", self.formatCollectionTimestamp(calibDate)) 

762 

763 def makeCalibrationCollectionName(self, *labels: str) -> str: 

764 """Make a CALIBRATION collection name appropriate for associating 

765 calibration datasets with validity ranges. 

766 

767 Parameters 

768 ---------- 

769 *labels : `str` 

770 Strings to be appended to the base name, using the default 

771 delimiter for collection names. Usually this is the name of the 

772 ticket on which the calibration collection is being created. 

773 

774 Returns 

775 ------- 

776 name : `str` 

777 Calibration collection name. 

778 """ 

779 return self.makeCollectionName("calib", *labels) 

780 

781 @staticmethod 

782 def makeRefCatCollectionName(*labels: str) -> str: 

783 """Return a global (not instrument-specific) name for a collection that 

784 holds reference catalogs. 

785 

786 With no arguments, this returns the name of the collection that holds 

787 all reference catalogs (usually a ``CHAINED`` collection, at least in 

788 long-lived repos that may contain more than one reference catalog). 

789 

790 Parameters 

791 ---------- 

792 *labels : `str` 

793 Strings to be added to the global collection name, in order to 

794 define a collection name for one or more reference catalogs being 

795 ingested at the same time. 

796 

797 Returns 

798 ------- 

799 name : `str` 

800 Collection name. 

801 

802 Notes 

803 ----- 

804 This is a ``staticmethod``, not a ``classmethod``, because it should 

805 be the same for all instruments. 

806 """ 

807 return "/".join(("refcats",) + labels) 

808 

809 def makeUmbrellaCollectionName(self) -> str: 

810 """Return the name of the umbrella ``CHAINED`` collection for this 

811 instrument that combines all standard recommended input collections. 

812 

813 This method should almost never be overridden by derived classes. 

814 

815 Returns 

816 ------- 

817 name : `str` 

818 Name for the umbrella collection. 

819 """ 

820 return self.makeCollectionName("defaults") 

821 

822 def makeCollectionName(self, *labels: str) -> str: 

823 """Get the instrument-specific collection string to use as derived 

824 from the supplied labels. 

825 

826 Parameters 

827 ---------- 

828 *labels : `str` 

829 Strings to be combined with the instrument name to form a 

830 collection name. 

831 

832 Returns 

833 ------- 

834 name : `str` 

835 Collection name to use that includes the instrument's recommended 

836 prefix. 

837 """ 

838 return "/".join((self.collection_prefix,) + labels) 

839 

840 

def makeExposureRecordFromObsInfo(obsInfo, universe):
    """Construct an exposure DimensionRecord from
    `astro_metadata_translator.ObservationInfo`.

    Parameters
    ----------
    obsInfo : `astro_metadata_translator.ObservationInfo`
        A `~astro_metadata_translator.ObservationInfo` object corresponding to
        the exposure.
    universe : `DimensionUniverse`
        Set of all known dimensions.

    Returns
    -------
    record : `DimensionRecord`
        A record containing exposure metadata, suitable for insertion into
        a `Registry`.
    """
    dimension = universe["exposure"]

    # Pointing-related fields are optional; they stay None whenever the
    # corresponding metadata is absent.
    ra = dec = sky_angle = zenith_angle = None
    tracking = obsInfo.tracking_radec
    if tracking is not None:
        icrs = tracking.icrs
        ra = icrs.ra.degree
        dec = icrs.dec.degree
    if obsInfo.boresight_rotation_coord == "sky":
        sky_angle = obsInfo.boresight_rotation_angle.degree
    if obsInfo.altaz_begin is not None:
        zenith_angle = obsInfo.altaz_begin.zen.degree

    # dark_time is not guaranteed to be calculable, so it is mapped to None
    # rather than forcing a unit conversion on a missing value.
    dark = obsInfo.dark_time
    return dimension.RecordClass(
        instrument=obsInfo.instrument,
        id=obsInfo.exposure_id,
        obs_id=obsInfo.observation_id,
        group_name=obsInfo.exposure_group,
        group_id=obsInfo.visit_id,
        datetime_begin=obsInfo.datetime_begin,
        datetime_end=obsInfo.datetime_end,
        exposure_time=obsInfo.exposure_time.to_value("s"),
        dark_time=None if dark is None else dark.to_value("s"),
        observation_type=obsInfo.observation_type,
        observation_reason=obsInfo.observation_reason,
        day_obs=obsInfo.observing_day,
        seq_num=obsInfo.observation_counter,
        physical_filter=obsInfo.physical_filter,
        science_program=obsInfo.science_program,
        target_name=obsInfo.object,
        tracking_ra=ra,
        tracking_dec=dec,
        sky_angle=sky_angle,
        zenith_angle=zenith_angle,
    )

894 

895 

def loadCamera(butler: Butler, dataId: DataId, *, collections: Any = None) -> Tuple[Camera, bool]:
    """Attempt to load versioned camera geometry from a butler, but fall back
    to obtaining a nominal camera from the `Instrument` class if that fails.

    Parameters
    ----------
    butler : `lsst.daf.butler.Butler`
        Butler instance to attempt to query for and load a ``camera`` dataset
        from.
    dataId : `dict` or `DataCoordinate`
        Data ID that identifies at least the ``instrument`` and ``exposure``
        dimensions.
    collections : Any, optional
        Collections to be searched, overriding ``self.butler.collections``.
        Can be any of the types supported by the ``collections`` argument
        to butler construction.

    Returns
    -------
    camera : `lsst.afw.cameraGeom.Camera`
        Camera object.
    versioned : `bool`
        If `True`, the camera was obtained from the butler and should represent
        a versioned camera from a calibration repository.  If `False`, no
        camera datasets were found, and the returned camera was produced by
        instantiating the appropriate `Instrument` class and calling
        `Instrument.getCamera`.
    """
    if collections is None:
        collections = butler.collections
    # Expand the data ID up front: Registry would do it internally anyway,
    # but doing it here means it happens exactly once and also validates
    # that the required dimension keys are present.
    dataId = butler.registry.expandDataId(dataId, graph=butler.registry.dimensions["exposure"].graph)
    try:
        return butler.get("camera", dataId=dataId, collections=collections), True
    except LookupError:
        # No versioned camera dataset in the searched collections; fall
        # through to the nominal camera below.
        pass
    instrument = Instrument.fromName(dataId["instrument"], butler.registry)
    return instrument.getCamera(), False