Coverage for python/lsst/obs/base/_instrument.py: 27%

Shortcuts on this page

r m x p   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

210 statements  

1# This file is part of obs_base. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (https://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# This program is free software: you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation, either version 3 of the License, or 

12# (at your option) any later version. 

13# 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18# 

19# You should have received a copy of the GNU General Public License 

20# along with this program. If not, see <http://www.gnu.org/licenses/>. 

21 

22from __future__ import annotations 

23 

24__all__ = ("Instrument", "makeExposureRecordFromObsInfo", "loadCamera") 

25 

26import datetime 

27import os.path 

28from abc import ABCMeta, abstractmethod 

29from collections import defaultdict 

30from functools import lru_cache 

31from typing import TYPE_CHECKING, Any, Optional, Sequence, Set, Tuple, Union 

32 

33import astropy.time 

34from lsst.afw.cameraGeom import Camera 

35from lsst.daf.butler import Butler, CollectionType, DataCoordinate, DataId, DatasetType, Timespan 

36from lsst.utils import doImport, getPackageDir 

37 

38if TYPE_CHECKING: 38 ↛ 39line 38 didn't jump to line 39, because the condition on line 38 was never true

39 from lsst.daf.butler import Registry 

40 

41 from .gen2to3 import TranslatorFactory 

42 

# To be a standard text curated calibration means that we use a
# standard definition for the corresponding DatasetType.
# Keys are dataset type names; values are keyword arguments forwarded to
# the `lsst.daf.butler.DatasetType` constructor (all calibrations here are
# per-instrument, per-detector).
StandardCuratedCalibrationDatasetTypes = {
    "defects": {"dimensions": ("instrument", "detector"), "storageClass": "Defects"},
    "qe_curve": {"dimensions": ("instrument", "detector"), "storageClass": "QECurve"},
    "crosstalk": {"dimensions": ("instrument", "detector"), "storageClass": "CrosstalkCalib"},
    "linearizer": {"dimensions": ("instrument", "detector"), "storageClass": "Linearizer"},
    "bfk": {"dimensions": ("instrument", "detector"), "storageClass": "BrighterFatterKernel"},
}

52 

53 

class Instrument(metaclass=ABCMeta):
    """Base class for instrument-specific logic for the Gen3 Butler.

    Parameters
    ----------
    collection_prefix : `str`, optional
        Prefix for collection names to use instead of the instrument's own
        name. This is primarily for use in simulated-data repositories, where
        the instrument name may not be necessary and/or sufficient to
        distinguish between collections.

    Notes
    -----
    Concrete instrument subclasses must have the same construction signature as
    the base class.
    """

    configPaths: Sequence[str] = ()
    """Paths to config files to read for specific Tasks.

    The paths in this list should contain files of the form `task.py`, for
    each of the Tasks that requires special configuration.
    """

    policyName: Optional[str] = None
    """Instrument specific name to use when locating a policy or configuration
    file in the file system."""

    obsDataPackage: Optional[str] = None
    """Name of the package containing the text curated calibration files.
    Usually an obs _data package. If `None` no curated calibration files
    will be read. (`str`)"""

    standardCuratedDatasetTypes: Set[str] = frozenset(StandardCuratedCalibrationDatasetTypes)
    """The dataset types expected to be obtained from the obsDataPackage.

    These dataset types are all required to have standard definitions and
    must be known to the base class. Clearing this list will prevent
    any of these calibrations from being stored. If a dataset type is not
    known to a specific instrument it can still be included in this list
    since the data package is the source of truth. (`set` of `str`)
    """

    additionalCuratedDatasetTypes: Set[str] = frozenset()
    """Curated dataset types specific to this particular instrument that do
    not follow the standard organization found in obs data packages.

    These are the instrument-specific dataset types written by
    `writeAdditionalCuratedCalibrations` in addition to the calibrations
    found in obs data packages that follow the standard scheme.
    (`set` of `str`)"""

105 

    @property
    @abstractmethod
    def filterDefinitions(self):
        """`~lsst.obs.base.FilterDefinitionCollection`, defining the filters
        for this instrument.
        """
        # Abstract: concrete instruments must override this property; the
        # base implementation deliberately provides no filters.
        return None

113 

114 def __init__(self, collection_prefix: Optional[str] = None): 

115 self.filterDefinitions.reset() 

116 self.filterDefinitions.defineFilters() 

117 if collection_prefix is None: 117 ↛ 119line 117 didn't jump to line 119, because the condition on line 117 was never false

118 collection_prefix = self.getName() 

119 self.collection_prefix = collection_prefix 

120 

121 @classmethod 

122 @abstractmethod 

123 def getName(cls): 

124 """Return the short (dimension) name for this instrument. 

125 

126 This is not (in general) the same as the class name - it's what is used 

127 as the value of the "instrument" field in data IDs, and is usually an 

128 abbreviation of the full name. 

129 """ 

130 raise NotImplementedError() 

131 

132 @classmethod 

133 @lru_cache() 

134 def getCuratedCalibrationNames(cls) -> Set[str]: 

135 """Return the names of all the curated calibration dataset types. 

136 

137 Returns 

138 ------- 

139 names : `set` of `str` 

140 The dataset type names of all curated calibrations. This will 

141 include the standard curated calibrations even if the particular 

142 instrument does not support them. 

143 

144 Notes 

145 ----- 

146 The returned list does not indicate whether a particular dataset 

147 is present in the Butler repository, simply that these are the 

148 dataset types that are handled by ``writeCuratedCalibrations``. 

149 """ 

150 

151 # Camera is a special dataset type that is also handled as a 

152 # curated calibration. 

153 curated = {"camera"} 

154 

155 # Make a cursory attempt to filter out curated dataset types 

156 # that are not present for this instrument 

157 for datasetTypeName in cls.standardCuratedDatasetTypes: 

158 calibPath = cls._getSpecificCuratedCalibrationPath(datasetTypeName) 

159 if calibPath is not None: 

160 curated.add(datasetTypeName) 

161 

162 curated.update(cls.additionalCuratedDatasetTypes) 

163 return frozenset(curated) 

164 

    @abstractmethod
    def getCamera(self):
        """Retrieve the cameraGeom representation of this instrument.

        This is a temporary API that should go away once ``obs`` packages have
        a standardized approach to writing versioned cameras to a Gen3 repo.
        """
        # Abstract: concrete instruments must build and return their
        # `lsst.afw.cameraGeom.Camera`.
        raise NotImplementedError()

173 

    @abstractmethod
    def register(self, registry, *, update=False):
        """Insert instrument, physical_filter, and detector entries into a
        `Registry`.

        Parameters
        ----------
        registry : `lsst.daf.butler.Registry`
            Registry client for the data repository to modify.
        update : `bool`, optional
            If `True` (`False` is default), update existing records if they
            differ from the new ones.

        Raises
        ------
        lsst.daf.butler.registry.ConflictingDefinitionError
            Raised if any existing record has the same key but a different
            definition as one being registered.

        Notes
        -----
        New detectors and physical filters can always be added by calling this
        method multiple times, as long as no existing records have changed (if
        existing records have changed, ``update=True`` must be used). Old
        records can never be removed by this method.

        Implementations should guarantee that registration is atomic (the
        registry should not be modified if any error occurs) and idempotent at
        the level of individual dimension entries; new detectors and filters
        should be added, but changes to any existing record should not be.
        This can generally be achieved via a block like::

            with registry.transaction():
                registry.syncDimensionData("instrument", ...)
                registry.syncDimensionData("detector", ...)
                self.registerFilters(registry)

        """
        raise NotImplementedError()

213 

    @classmethod
    @lru_cache()
    def getObsDataPackageDir(cls):
        """The root of the obs data package that provides specializations for
        this instrument.

        Returns
        -------
        dir : `str` or `None`
            The root of the relevant obs data package, or `None` if this
            instrument does not declare one (``obsDataPackage`` is `None`).
        """
        if cls.obsDataPackage is None:
            return None
        return getPackageDir(cls.obsDataPackage)

228 

229 @staticmethod 

230 def fromName(name: str, registry: Registry, collection_prefix: Optional[str] = None) -> Instrument: 

231 """Given an instrument name and a butler, retrieve a corresponding 

232 instantiated instrument object. 

233 

234 Parameters 

235 ---------- 

236 name : `str` 

237 Name of the instrument (must match the return value of `getName`). 

238 registry : `lsst.daf.butler.Registry` 

239 Butler registry to query to find the information. 

240 collection_prefix : `str`, optional 

241 Prefix for collection names to use instead of the intrument's own 

242 name. This is primarily for use in simulated-data repositories, 

243 where the instrument name may not be necessary and/or sufficient to 

244 distinguish between collections. 

245 

246 Returns 

247 ------- 

248 instrument : `Instrument` 

249 An instance of the relevant `Instrument`. 

250 

251 Notes 

252 ----- 

253 The instrument must be registered in the corresponding butler. 

254 

255 Raises 

256 ------ 

257 LookupError 

258 Raised if the instrument is not known to the supplied registry. 

259 ModuleNotFoundError 

260 Raised if the class could not be imported. This could mean 

261 that the relevant obs package has not been setup. 

262 TypeError 

263 Raised if the class name retrieved is not a string. 

264 """ 

265 records = list(registry.queryDimensionRecords("instrument", instrument=name)) 

266 if not records: 

267 raise LookupError(f"No registered instrument with name '{name}'.") 

268 cls = records[0].class_name 

269 if not isinstance(cls, str): 

270 raise TypeError(f"Unexpected class name retrieved from {name} instrument dimension (got {cls})") 

271 instrument = doImport(cls) 

272 return instrument(collection_prefix=collection_prefix) 

273 

274 @staticmethod 

275 def importAll(registry: Registry) -> None: 

276 """Import all the instruments known to this registry. 

277 

278 This will ensure that all metadata translators have been registered. 

279 

280 Parameters 

281 ---------- 

282 registry : `lsst.daf.butler.Registry` 

283 Butler registry to query to find the information. 

284 

285 Notes 

286 ----- 

287 It is allowed for a particular instrument class to fail on import. 

288 This might simply indicate that a particular obs package has 

289 not been setup. 

290 """ 

291 records = list(registry.queryDimensionRecords("instrument")) 

292 for record in records: 

293 cls = record.class_name 

294 try: 

295 doImport(cls) 

296 except Exception: 

297 pass 

298 

299 def _registerFilters(self, registry, update=False): 

300 """Register the physical and abstract filter Dimension relationships. 

301 This should be called in the `register` implementation, within 

302 a transaction context manager block. 

303 

304 Parameters 

305 ---------- 

306 registry : `lsst.daf.butler.core.Registry` 

307 The registry to add dimensions to. 

308 update : `bool`, optional 

309 If `True` (`False` is default), update existing records if they 

310 differ from the new ones. 

311 """ 

312 for filter in self.filterDefinitions: 

313 # fix for undefined abstract filters causing trouble in the 

314 # registry: 

315 if filter.band is None: 

316 band = filter.physical_filter 

317 else: 

318 band = filter.band 

319 

320 registry.syncDimensionData( 

321 "physical_filter", 

322 {"instrument": self.getName(), "name": filter.physical_filter, "band": band}, 

323 update=update, 

324 ) 

325 

    @abstractmethod
    def getRawFormatter(self, dataId):
        """Return the Formatter class that should be used to read a particular
        raw file.

        Parameters
        ----------
        dataId : `DataCoordinate`
            Dimension-based ID for the raw file or files being ingested.

        Returns
        -------
        formatter : `Formatter` class
            Class to be used that reads the file into an
            `lsst.afw.image.Exposure` instance.
        """
        raise NotImplementedError()

343 

344 def applyConfigOverrides(self, name, config): 

345 """Apply instrument-specific overrides for a task config. 

346 

347 Parameters 

348 ---------- 

349 name : `str` 

350 Name of the object being configured; typically the _DefaultName 

351 of a Task. 

352 config : `lsst.pex.config.Config` 

353 Config instance to which overrides should be applied. 

354 """ 

355 for root in self.configPaths: 

356 path = os.path.join(root, f"{name}.py") 

357 if os.path.exists(path): 

358 config.load(path) 

359 

    def writeCuratedCalibrations(
        self, butler: Butler, collection: Optional[str] = None, labels: Sequence[str] = ()
    ) -> None:
        """Write human-curated calibration Datasets to the given Butler with
        the appropriate validity ranges.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to use to store these calibrations.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range. If this collection already exists,
            it must be a `~CollectionType.CALIBRATION` collection, and it must
            not have any datasets that would conflict with those inserted by
            this method. If `None`, a collection name is worked out
            automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter. If provided,
            these are inserted into the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well the
            `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``). Usually this is
            just the name of the ticket on which the calibration collection is
            being created.

        Notes
        -----
        Expected to be called from subclasses. The base method calls
        ``writeCameraGeom``, ``writeStandardTextCuratedCalibrations``,
        and ``writeAdditionalCuratedCalibrations``.
        """
        # Delegate registration of collections (and creating names for them)
        # to other methods so they can be called independently with the same
        # preconditions. Collection registration is idempotent, so this is
        # safe, and while it adds a bit of overhead, as long as it's one
        # registration attempt per method (not per dataset or dataset type),
        # that's negligible.
        self.writeCameraGeom(butler, collection, labels=labels)
        self.writeStandardTextCuratedCalibrations(butler, collection, labels=labels)
        self.writeAdditionalCuratedCalibrations(butler, collection, labels=labels)

406 

407 def writeAdditionalCuratedCalibrations( 

408 self, butler: Butler, collection: Optional[str] = None, labels: Sequence[str] = () 

409 ) -> None: 

410 """Write additional curated calibrations that might be instrument 

411 specific and are not part of the standard set. 

412 

413 Default implementation does nothing. 

414 

415 Parameters 

416 ---------- 

417 butler : `lsst.daf.butler.Butler` 

418 Butler to use to store these calibrations. 

419 collection : `str`, optional 

420 Name to use for the calibration collection that associates all 

421 datasets with a validity range. If this collection already exists, 

422 it must be a `~CollectionType.CALIBRATION` collection, and it must 

423 not have any datasets that would conflict with those inserted by 

424 this method. If `None`, a collection name is worked out 

425 automatically from the instrument name and other metadata by 

426 calling ``makeCalibrationCollectionName``, but this 

427 default name may not work well for long-lived repositories unless 

428 ``labels`` is also provided (and changed every time curated 

429 calibrations are ingested). 

430 labels : `Sequence` [ `str` ], optional 

431 Extra strings to include in collection names, after concatenating 

432 them with the standard collection name delimeter. If provided, 

433 these are inserted into the names of the `~CollectionType.RUN` 

434 collections that datasets are inserted directly into, as well the 

435 `~CollectionType.CALIBRATION` collection if it is generated 

436 automatically (i.e. if ``collection is None``). Usually this is 

437 just the name of the ticket on which the calibration collection is 

438 being created. 

439 """ 

440 return 

441 

    def writeCameraGeom(
        self, butler: Butler, collection: Optional[str] = None, labels: Sequence[str] = ()
    ) -> None:
        """Write the default camera geometry to the butler repository and
        associate it with the appropriate validity range in a calibration
        collection.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to use to store these calibrations.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range. If this collection already exists,
            it must be a `~CollectionType.CALIBRATION` collection, and it must
            not have any datasets that would conflict with those inserted by
            this method. If `None`, a collection name is worked out
            automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this default name
            may not work well for long-lived repositories unless ``labels``
            is also provided (and changed every time curated calibrations are
            ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter. If provided,
            these are inserted into the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well the
            `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``). Usually this is
            just the name of the ticket on which the calibration collection is
            being created.
        """
        if collection is None:
            collection = self.makeCalibrationCollectionName(*labels)
        butler.registry.registerCollection(collection, type=CollectionType.CALIBRATION)
        # The camera goes into the "unbounded" run: it is not tied to any
        # particular calibration date.
        run = self.makeUnboundedCalibrationRunName(*labels)
        butler.registry.registerRun(run)
        datasetType = DatasetType(
            "camera", ("instrument",), "Camera", isCalibration=True, universe=butler.registry.dimensions
        )
        butler.registry.registerDatasetType(datasetType)
        camera = self.getCamera()
        ref = butler.put(camera, datasetType, {"instrument": self.getName()}, run=run)
        # Certify with a fully open validity range: the default camera is
        # considered valid for all time.
        butler.registry.certify(collection, [ref], Timespan(begin=None, end=None))

486 

487 def writeStandardTextCuratedCalibrations( 

488 self, butler: Butler, collection: Optional[str] = None, labels: Sequence[str] = () 

489 ) -> None: 

490 """Write the set of standardized curated text calibrations to 

491 the repository. 

492 

493 Parameters 

494 ---------- 

495 butler : `lsst.daf.butler.Butler` 

496 Butler to receive these calibration datasets. 

497 collection : `str`, optional 

498 Name to use for the calibration collection that associates all 

499 datasets with a validity range. If this collection already exists, 

500 it must be a `~CollectionType.CALIBRATION` collection, and it must 

501 not have any datasets that would conflict with those inserted by 

502 this method. If `None`, a collection name is worked out 

503 automatically from the instrument name and other metadata by 

504 calling ``makeCalibrationCollectionName``, but this 

505 default name may not work well for long-lived repositories unless 

506 ``labels`` is also provided (and changed every time curated 

507 calibrations are ingested). 

508 labels : `Sequence` [ `str` ], optional 

509 Extra strings to include in collection names, after concatenating 

510 them with the standard collection name delimeter. If provided, 

511 these are inserted into the names of the `~CollectionType.RUN` 

512 collections that datasets are inserted directly into, as well the 

513 `~CollectionType.CALIBRATION` collection if it is generated 

514 automatically (i.e. if ``collection is None``). Usually this is 

515 just the name of the ticket on which the calibration collection is 

516 being created. 

517 """ 

518 if collection is None: 

519 collection = self.makeCalibrationCollectionName(*labels) 

520 butler.registry.registerCollection(collection, type=CollectionType.CALIBRATION) 

521 runs = set() 

522 for datasetTypeName in self.standardCuratedDatasetTypes: 

523 # We need to define the dataset types. 

524 if datasetTypeName not in StandardCuratedCalibrationDatasetTypes: 

525 raise ValueError( 

526 f"DatasetType {datasetTypeName} not in understood list" 

527 f" [{'.'.join(StandardCuratedCalibrationDatasetTypes)}]" 

528 ) 

529 definition = StandardCuratedCalibrationDatasetTypes[datasetTypeName] 

530 datasetType = DatasetType( 

531 datasetTypeName, universe=butler.registry.dimensions, isCalibration=True, **definition 

532 ) 

533 self._writeSpecificCuratedCalibrationDatasets( 

534 butler, datasetType, collection, runs=runs, labels=labels 

535 ) 

536 

537 @classmethod 

538 def _getSpecificCuratedCalibrationPath(cls, datasetTypeName): 

539 """Return the path of the curated calibration directory. 

540 

541 Parameters 

542 ---------- 

543 datasetTypeName : `str` 

544 The name of the standard dataset type to find. 

545 

546 Returns 

547 ------- 

548 path : `str` 

549 The path to the standard curated data directory. `None` if the 

550 dataset type is not found or the obs data package is not 

551 available. 

552 """ 

553 if cls.getObsDataPackageDir() is None: 

554 # if there is no data package then there can't be datasets 

555 return None 

556 

557 calibPath = os.path.join(cls.getObsDataPackageDir(), cls.policyName, datasetTypeName) 

558 

559 if os.path.exists(calibPath): 

560 return calibPath 

561 

562 return None 

563 

    def _writeSpecificCuratedCalibrationDatasets(
        self, butler: Butler, datasetType: DatasetType, collection: str, runs: Set[str], labels: Sequence[str]
    ):
        """Write standardized curated calibration datasets for this specific
        dataset type from an obs data package.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Gen3 butler in which to put the calibrations.
        datasetType : `lsst.daf.butler.DatasetType`
            Dataset type to be put.
        collection : `str`
            Name of the `~CollectionType.CALIBRATION` collection that
            associates all datasets with validity ranges. Must have been
            registered prior to this call.
        runs : `set` [ `str` ]
            Names of runs that have already been registered by previous calls
            and need not be registered again. Should be updated by this
            method as new runs are registered.
        labels : `Sequence` [ `str` ]
            Extra strings to include in run names when creating them from
            ``CALIBDATE`` metadata, via calls to `makeCuratedCalibrationName`.
            Usually this is the name of the ticket on which the calibration
            collection is being created.

        Notes
        -----
        This method scans the location defined in the ``obsDataPackageDir``
        class attribute for curated calibrations corresponding to the
        supplied dataset type. The directory name in the data package must
        match the name of the dataset type. They are assumed to use the
        standard layout and can be read by
        `~lsst.pipe.tasks.read_curated_calibs.read_all` and provide standard
        metadata.
        """
        calibPath = self._getSpecificCuratedCalibrationPath(datasetType.name)
        if calibPath is None:
            # This instrument does not provide this calibration; nothing
            # to write.
            return

        # Register the dataset type
        butler.registry.registerDatasetType(datasetType)

        # obs_base can't depend on pipe_tasks but concrete obs packages
        # can -- we therefore have to defer import
        from lsst.pipe.tasks.read_curated_calibs import read_all

        # Read calibs, registering a new run for each CALIBDATE as needed.
        # We try to avoid registering runs multiple times as an optimization
        # by putting them in the ``runs`` set that was passed in.
        camera = self.getCamera()
        calibsDict = read_all(calibPath, camera)[0]  # second return is calib type
        datasetRecords = []
        for det in calibsDict:
            # Sort by calibration date so consecutive pairs define the
            # validity range of each calib.
            times = sorted([k for k in calibsDict[det]])
            calibs = [calibsDict[det][time] for time in times]
            times = [astropy.time.Time(t, format="datetime", scale="utc") for t in times]
            # A trailing None leaves the newest calib's range open-ended.
            times += [None]
            for calib, beginTime, endTime in zip(calibs, times[:-1], times[1:]):
                md = calib.getMetadata()
                run = self.makeCuratedCalibrationRunName(md["CALIBDATE"], *labels)
                if run not in runs:
                    butler.registry.registerRun(run)
                    runs.add(run)
                dataId = DataCoordinate.standardize(
                    universe=butler.registry.dimensions,
                    instrument=self.getName(),
                    detector=md["DETECTOR"],
                )
                datasetRecords.append((calib, dataId, run, Timespan(beginTime, endTime)))

        # Second loop actually does the inserts and filesystem writes. We
        # first do a butler.put on each dataset, inserting it into the run for
        # its calibDate. We remember those refs and group them by timespan, so
        # we can vectorize the certify calls as much as possible.
        refsByTimespan = defaultdict(list)
        with butler.transaction():
            for calib, dataId, run, timespan in datasetRecords:
                refsByTimespan[timespan].append(butler.put(calib, datasetType, dataId, run=run))
            for timespan, refs in refsByTimespan.items():
                butler.registry.certify(collection, refs, timespan)

645 

    @abstractmethod
    def makeDataIdTranslatorFactory(self) -> TranslatorFactory:
        """Return a factory for creating Gen2->Gen3 data ID translators,
        specialized for this instrument.

        Derived class implementations should generally call
        `TranslatorFactory.addGenericInstrumentRules` with appropriate
        arguments, but are not required to (and may not be able to if their
        Gen2 raw data IDs are sufficiently different from the HSC/DECam/CFHT
        norm).

        Returns
        -------
        factory : `TranslatorFactory`.
            Factory for `Translator` objects.
        """
        raise NotImplementedError("Must be implemented by derived classes.")

663 

664 @staticmethod 

665 def formatCollectionTimestamp(timestamp: Union[str, datetime.datetime]) -> str: 

666 """Format a timestamp for use in a collection name. 

667 

668 Parameters 

669 ---------- 

670 timestamp : `str` or `datetime.datetime` 

671 Timestamp to format. May be a date or datetime string in extended 

672 ISO format (assumed UTC), with or without a timezone specifier, a 

673 datetime string in basic ISO format with a timezone specifier, a 

674 naive `datetime.datetime` instance (assumed UTC) or a 

675 timezone-aware `datetime.datetime` instance (converted to UTC). 

676 This is intended to cover all forms that string ``CALIBDATE`` 

677 metadata values have taken in the past, as well as the format this 

678 method itself writes out (to enable round-tripping). 

679 

680 Returns 

681 ------- 

682 formatted : `str` 

683 Standardized string form for the timestamp. 

684 """ 

685 if isinstance(timestamp, str): 

686 if "-" in timestamp: 

687 # extended ISO format, with - and : delimiters 

688 timestamp = datetime.datetime.fromisoformat(timestamp) 

689 else: 

690 # basic ISO format, with no delimiters (what this method 

691 # returns) 

692 timestamp = datetime.datetime.strptime(timestamp, "%Y%m%dT%H%M%S%z") 

693 if not isinstance(timestamp, datetime.datetime): 

694 raise TypeError(f"Unexpected date/time object: {timestamp!r}.") 

695 if timestamp.tzinfo is not None: 

696 timestamp = timestamp.astimezone(datetime.timezone.utc) 

697 return f"{timestamp:%Y%m%dT%H%M%S}Z" 

698 

699 @staticmethod 

700 def makeCollectionTimestamp() -> str: 

701 """Create a timestamp string for use in a collection name from the 

702 current time. 

703 

704 Returns 

705 ------- 

706 formatted : `str` 

707 Standardized string form of the current time. 

708 """ 

709 return Instrument.formatCollectionTimestamp(datetime.datetime.now(tz=datetime.timezone.utc)) 

710 

711 def makeDefaultRawIngestRunName(self) -> str: 

712 """Make the default instrument-specific run collection string for raw 

713 data ingest. 

714 

715 Returns 

716 ------- 

717 coll : `str` 

718 Run collection name to be used as the default for ingestion of 

719 raws. 

720 """ 

721 return self.makeCollectionName("raw", "all") 

722 

723 def makeUnboundedCalibrationRunName(self, *labels: str) -> str: 

724 """Make a RUN collection name appropriate for inserting calibration 

725 datasets whose validity ranges are unbounded. 

726 

727 Parameters 

728 ---------- 

729 *labels : `str` 

730 Extra strings to be included in the base name, using the default 

731 delimiter for collection names. Usually this is the name of the 

732 ticket on which the calibration collection is being created. 

733 

734 Returns 

735 ------- 

736 name : `str` 

737 Run collection name. 

738 """ 

739 return self.makeCollectionName("calib", *labels, "unbounded") 

740 

741 def makeCuratedCalibrationRunName(self, calibDate: str, *labels: str) -> str: 

742 """Make a RUN collection name appropriate for inserting curated 

743 calibration datasets with the given ``CALIBDATE`` metadata value. 

744 

745 Parameters 

746 ---------- 

747 calibDate : `str` 

748 The ``CALIBDATE`` metadata value. 

749 *labels : `str` 

750 Strings to be included in the collection name (before 

751 ``calibDate``, but after all other terms), using the default 

752 delimiter for collection names. Usually this is the name of the 

753 ticket on which the calibration collection is being created. 

754 

755 Returns 

756 ------- 

757 name : `str` 

758 Run collection name. 

759 """ 

760 return self.makeCollectionName("calib", *labels, "curated", self.formatCollectionTimestamp(calibDate)) 

761 

def makeCalibrationCollectionName(self, *labels: str) -> str:
    """Return a CALIBRATION collection name used to associate calibration
    datasets with their validity ranges.

    Parameters
    ----------
    *labels : `str`
        Strings to be appended to the base name, using the default
        delimiter for collection names.  Usually this is the name of the
        ticket on which the calibration collection is being created.

    Returns
    -------
    name : `str`
        Calibration collection name.
    """
    parts = ["calib", *labels]
    return self.makeCollectionName(*parts)

779 

780 @staticmethod 

781 def makeRefCatCollectionName(*labels: str) -> str: 

782 """Return a global (not instrument-specific) name for a collection that 

783 holds reference catalogs. 

784 

785 With no arguments, this returns the name of the collection that holds 

786 all reference catalogs (usually a ``CHAINED`` collection, at least in 

787 long-lived repos that may contain more than one reference catalog). 

788 

789 Parameters 

790 ---------- 

791 *labels : `str` 

792 Strings to be added to the global collection name, in order to 

793 define a collection name for one or more reference catalogs being 

794 ingested at the same time. 

795 

796 Returns 

797 ------- 

798 name : `str` 

799 Collection name. 

800 

801 Notes 

802 ----- 

803 This is a ``staticmethod``, not a ``classmethod``, because it should 

804 be the same for all instruments. 

805 """ 

806 return "/".join(("refcats",) + labels) 

807 

def makeUmbrellaCollectionName(self) -> str:
    """Return the name of the umbrella ``CHAINED`` collection for this
    instrument that combines all standard recommended input collections.

    This method should almost never be overridden by derived classes.

    Returns
    -------
    name : `str`
        Name for the umbrella collection.
    """
    umbrellaLabel = "defaults"
    return self.makeCollectionName(umbrellaLabel)

820 

def makeCollectionName(self, *labels: str) -> str:
    """Return the instrument-specific collection string derived from the
    supplied labels.

    Parameters
    ----------
    *labels : `str`
        Strings to be combined with the instrument name to form a
        collection name.

    Returns
    -------
    name : `str`
        Collection name to use that includes the instrument's recommended
        prefix.
    """
    # All instrument collections share the instrument's prefix as the
    # first path component.
    return "/".join((self.collection_prefix, *labels))

838 

839 

def makeExposureRecordFromObsInfo(obsInfo, universe):
    """Construct an exposure DimensionRecord from
    `astro_metadata_translator.ObservationInfo`.

    Parameters
    ----------
    obsInfo : `astro_metadata_translator.ObservationInfo`
        A `~astro_metadata_translator.ObservationInfo` object corresponding to
        the exposure.
    universe : `DimensionUniverse`
        Set of all known dimensions.

    Returns
    -------
    record : `DimensionRecord`
        A record containing exposure metadata, suitable for insertion into
        a `Registry`.
    """
    exposureDimension = universe["exposure"]

    # Optional pointing/orientation quantities; each stays `None` when the
    # corresponding metadata is unavailable.
    tracking_ra = tracking_dec = None
    if obsInfo.tracking_radec is not None:
        icrsCoord = obsInfo.tracking_radec.icrs
        tracking_ra = icrsCoord.ra.degree
        tracking_dec = icrsCoord.dec.degree
    skyAngle = (
        obsInfo.boresight_rotation_angle.degree
        if obsInfo.boresight_rotation_coord == "sky"
        else None
    )
    zenithAngle = obsInfo.altaz_begin.zen.degree if obsInfo.altaz_begin is not None else None
    # We are not mandating that dark_time be calculable.
    darkTime = None if obsInfo.dark_time is None else obsInfo.dark_time.to_value("s")

    return exposureDimension.RecordClass(
        instrument=obsInfo.instrument,
        id=obsInfo.exposure_id,
        obs_id=obsInfo.observation_id,
        group_name=obsInfo.exposure_group,
        group_id=obsInfo.visit_id,
        datetime_begin=obsInfo.datetime_begin,
        datetime_end=obsInfo.datetime_end,
        exposure_time=obsInfo.exposure_time.to_value("s"),
        dark_time=darkTime,
        observation_type=obsInfo.observation_type,
        observation_reason=obsInfo.observation_reason,
        day_obs=obsInfo.observing_day,
        seq_num=obsInfo.observation_counter,
        physical_filter=obsInfo.physical_filter,
        science_program=obsInfo.science_program,
        target_name=obsInfo.object,
        tracking_ra=tracking_ra,
        tracking_dec=tracking_dec,
        sky_angle=skyAngle,
        zenith_angle=zenithAngle,
    )

893 

894 

def loadCamera(butler: Butler, dataId: DataId, *, collections: Any = None) -> Tuple[Camera, bool]:
    """Load versioned camera geometry from a butler if available, falling
    back to the nominal camera from the `Instrument` class otherwise.

    Parameters
    ----------
    butler : `lsst.daf.butler.Butler`
        Butler instance to attempt to query for and load a ``camera`` dataset
        from.
    dataId : `dict` or `DataCoordinate`
        Data ID that identifies at least the ``instrument`` and ``exposure``
        dimensions.
    collections : Any, optional
        Collections to be searched, overriding ``self.butler.collections``.
        Can be any of the types supported by the ``collections`` argument
        to butler construction.

    Returns
    -------
    camera : `lsst.afw.cameraGeom.Camera`
        Camera object.
    versioned : `bool`
        If `True`, the camera was obtained from the butler and should represent
        a versioned camera from a calibration repository.  If `False`, no
        camera datasets were found, and the returned camera was produced by
        instantiating the appropriate `Instrument` class and calling
        `Instrument.getCamera`.
    """
    searchCollections = butler.collections if collections is None else collections
    # Expand the data ID up front: Registry would expand it internally anyway,
    # but doing it here validates the required keys and guarantees the
    # expansion happens only once, even if we need it again later.
    expandedId = butler.registry.expandDataId(
        dataId, graph=butler.registry.dimensions["exposure"].graph
    )
    try:
        versionedCamera = butler.get("camera", dataId=expandedId, collections=searchCollections)
    except LookupError:
        # No versioned camera dataset available; fall through to the
        # nominal-camera path below.
        pass
    else:
        return versionedCamera, True
    nominalInstrument = Instrument.fromName(expandedId["instrument"], butler.registry)
    return nominalInstrument.getCamera(), False