Coverage for python/lsst/obs/base/_instrument.py : 22%

Hot-keys on this page
r m x p toggle line displays
j k next/prev highlighted chunk
0 (zero) top of page
1 (one) first highlighted chunk
1# This file is part of obs_base.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <http://www.gnu.org/licenses/>.
22from __future__ import annotations
24__all__ = ("Instrument", "makeExposureRecordFromObsInfo", "loadCamera")
26import os.path
27from abc import ABCMeta, abstractmethod
28from collections import defaultdict
29from typing import Any, Optional, Set, Sequence, Tuple, TYPE_CHECKING
30from functools import lru_cache
32import astropy.time
34from lsst.afw.cameraGeom import Camera
35from lsst.daf.butler import (
36 Butler,
37 CollectionType,
38 DataCoordinate,
39 DataId,
40 DatasetType,
41 Timespan,
42)
43from lsst.utils import getPackageDir, doImport
45if TYPE_CHECKING: 45 ↛ 46line 45 didn't jump to line 46, because the condition on line 45 was never true
46 from .gen2to3 import TranslatorFactory
47 from lsst.daf.butler import Registry
# To be a standard text curated calibration means that we use a
# standard definition for the corresponding DatasetType.
# Maps each standard curated calibration dataset type name to the keyword
# arguments (dimensions, storage class) used to construct its DatasetType.
StandardCuratedCalibrationDatasetTypes = {
    "defects": {"dimensions": ("instrument", "detector"), "storageClass": "Defects"},
    "qe_curve": {"dimensions": ("instrument", "detector"), "storageClass": "QECurve"},
    "crosstalk": {"dimensions": ("instrument", "detector"), "storageClass": "CrosstalkCalib"},
}
class Instrument(metaclass=ABCMeta):
    """Base class for instrument-specific logic for the Gen3 Butler.

    Concrete instrument subclasses should be directly constructable with no
    arguments.
    """

    configPaths: Sequence[str] = ()
    """Paths to config files to read for specific Tasks.

    The paths in this list should contain files of the form `task.py`, for
    each of the Tasks that requires special configuration.
    """

    policyName: Optional[str] = None
    """Instrument specific name to use when locating a policy or configuration
    file in the file system."""

    obsDataPackage: Optional[str] = None
    """Name of the package containing the text curated calibration files.
    Usually an obs _data package.  If `None` no curated calibration files
    will be read. (`str`)"""

    standardCuratedDatasetTypes: Set[str] = frozenset(StandardCuratedCalibrationDatasetTypes)
    """The dataset types expected to be obtained from the obsDataPackage.

    These dataset types are all required to have standard definitions and
    must be known to the base class.  Clearing this list will prevent
    any of these calibrations from being stored.  If a dataset type is not
    known to a specific instrument it can still be included in this list
    since the data package is the source of truth. (`set` of `str`)
    """

    additionalCuratedDatasetTypes: Set[str] = frozenset()
    """Curated dataset types specific to this particular instrument that do
    not follow the standard organization found in obs data packages.

    These are the instrument-specific dataset types written by
    `writeAdditionalCuratedCalibrations` in addition to the calibrations
    found in obs data packages that follow the standard scheme.
    (`set` of `str`)"""
    @property
    @abstractmethod
    def filterDefinitions(self):
        """`~lsst.obs.base.FilterDefinitionCollection`, defining the filters
        for this instrument.
        """
        return None
    def __init__(self):
        # Reset and (re)define this instrument's filters so that the filter
        # definitions are in a known state for this instance.
        self.filterDefinitions.reset()
        self.filterDefinitions.defineFilters()
    @classmethod
    @abstractmethod
    def getName(cls) -> str:
        """Return the short (dimension) name for this instrument.

        This is not (in general) the same as the class name - it's what is
        used as the value of the "instrument" field in data IDs, and is
        usually an abbreviation of the full name.
        """
        raise NotImplementedError()
123 @classmethod
124 @lru_cache()
125 def getCuratedCalibrationNames(cls) -> Set[str]:
126 """Return the names of all the curated calibration dataset types.
128 Returns
129 -------
130 names : `set` of `str`
131 The dataset type names of all curated calibrations. This will
132 include the standard curated calibrations even if the particular
133 instrument does not support them.
135 Notes
136 -----
137 The returned list does not indicate whether a particular dataset
138 is present in the Butler repository, simply that these are the
139 dataset types that are handled by ``writeCuratedCalibrations``.
140 """
142 # Camera is a special dataset type that is also handled as a
143 # curated calibration.
144 curated = {"camera"}
146 # Make a cursory attempt to filter out curated dataset types
147 # that are not present for this instrument
148 for datasetTypeName in cls.standardCuratedDatasetTypes:
149 calibPath = cls._getSpecificCuratedCalibrationPath(datasetTypeName)
150 if calibPath is not None:
151 curated.add(datasetTypeName)
153 curated.update(cls.additionalCuratedDatasetTypes)
154 return frozenset(curated)
    @abstractmethod
    def getCamera(self):
        """Retrieve the cameraGeom representation of this instrument.

        This is a temporary API that should go away once ``obs_`` packages
        have a standardized approach to writing versioned cameras to a Gen3
        repo.
        """
        raise NotImplementedError()
    @abstractmethod
    def register(self, registry):
        """Insert instrument, physical_filter, and detector entries into a
        `Registry`.

        Parameters
        ----------
        registry : `lsst.daf.butler.Registry`
            Registry in which to record the instrument metadata.
        """
        raise NotImplementedError()
    @classmethod
    @lru_cache()
    def getObsDataPackageDir(cls) -> Optional[str]:
        """Return the root of the obs data package that provides
        specializations for this instrument.

        Returns
        -------
        dir : `str` or `None`
            The root of the relevant obs data package, or `None` if this
            instrument does not have one (``obsDataPackage`` is `None`).
        """
        if cls.obsDataPackage is None:
            return None
        return getPackageDir(cls.obsDataPackage)
187 @staticmethod
188 def fromName(name: str, registry: Registry) -> Instrument:
189 """Given an instrument name and a butler, retrieve a corresponding
190 instantiated instrument object.
192 Parameters
193 ----------
194 name : `str`
195 Name of the instrument (must match the return value of `getName`).
196 registry : `lsst.daf.butler.Registry`
197 Butler registry to query to find the information.
199 Returns
200 -------
201 instrument : `Instrument`
202 An instance of the relevant `Instrument`.
204 Notes
205 -----
206 The instrument must be registered in the corresponding butler.
208 Raises
209 ------
210 LookupError
211 Raised if the instrument is not known to the supplied registry.
212 ModuleNotFoundError
213 Raised if the class could not be imported. This could mean
214 that the relevant obs package has not been setup.
215 TypeError
216 Raised if the class name retrieved is not a string.
217 """
218 records = list(registry.queryDimensionRecords("instrument", instrument=name))
219 if not records:
220 raise LookupError(f"No registered instrument with name '{name}'.")
221 cls = records[0].class_name
222 if not isinstance(cls, str):
223 raise TypeError(f"Unexpected class name retrieved from {name} instrument dimension (got {cls})")
224 instrument = doImport(cls)
225 return instrument()
227 @staticmethod
228 def importAll(registry: Registry) -> None:
229 """Import all the instruments known to this registry.
231 This will ensure that all metadata translators have been registered.
233 Parameters
234 ----------
235 registry : `lsst.daf.butler.Registry`
236 Butler registry to query to find the information.
238 Notes
239 -----
240 It is allowed for a particular instrument class to fail on import.
241 This might simply indicate that a particular obs package has
242 not been setup.
243 """
244 records = list(registry.queryDimensionRecords("instrument"))
245 for record in records:
246 cls = record.class_name
247 try:
248 doImport(cls)
249 except Exception:
250 pass
252 def _registerFilters(self, registry):
253 """Register the physical and abstract filter Dimension relationships.
254 This should be called in the ``register`` implementation.
256 Parameters
257 ----------
258 registry : `lsst.daf.butler.core.Registry`
259 The registry to add dimensions to.
260 """
261 for filter in self.filterDefinitions:
262 # fix for undefined abstract filters causing trouble in the
263 # registry:
264 if filter.band is None:
265 band = filter.physical_filter
266 else:
267 band = filter.band
269 registry.insertDimensionData("physical_filter",
270 {"instrument": self.getName(),
271 "name": filter.physical_filter,
272 "band": band
273 })
    @abstractmethod
    def getRawFormatter(self, dataId):
        """Return the Formatter class that should be used to read a
        particular raw file.

        Parameters
        ----------
        dataId : `DataCoordinate`
            Dimension-based ID for the raw file or files being ingested.

        Returns
        -------
        formatter : `Formatter` class
            Class to be used that reads the file into an
            `lsst.afw.image.Exposure` instance.
        """
        raise NotImplementedError()
293 def applyConfigOverrides(self, name, config):
294 """Apply instrument-specific overrides for a task config.
296 Parameters
297 ----------
298 name : `str`
299 Name of the object being configured; typically the _DefaultName
300 of a Task.
301 config : `lsst.pex.config.Config`
302 Config instance to which overrides should be applied.
303 """
304 for root in self.configPaths:
305 path = os.path.join(root, f"{name}.py")
306 if os.path.exists(path):
307 config.load(path)
309 def writeCuratedCalibrations(self, butler: Butler, collection: Optional[str] = None,
310 suffixes: Sequence[str] = ()) -> None:
311 """Write human-curated calibration Datasets to the given Butler with
312 the appropriate validity ranges.
314 Parameters
315 ----------
316 butler : `lsst.daf.butler.Butler`
317 Butler to use to store these calibrations.
318 collection : `str`, optional
319 Name to use for the calibration collection that associates all
320 datasets with a validity range. If this collection already exists,
321 it must be a `~CollectionType.CALIBRATION` collection, and it must
322 not have any datasets that would conflict with those inserted by
323 this method. If `None`, a collection name is worked out
324 automatically from the instrument name and other metadata by
325 calling ``makeCalibrationCollectionName``, but this
326 default name may not work well for long-lived repositories unless
327 one or more ``suffixes`` are also provided (and changed every time
328 curated calibrations are ingested).
329 suffixes : `Sequence` [ `str` ], optional
330 Name suffixes to append to collection names, after concatenating
331 them with the standard collection name delimeter. If provided,
332 these are appended to the names of the `~CollectionType.RUN`
333 collections that datasets are inserted directly into, as well the
334 `~CollectionType.CALIBRATION` collection if it is generated
335 automatically (i.e. if ``collection is None``).
337 Notes
338 -----
339 Expected to be called from subclasses. The base method calls
340 ``writeCameraGeom``, ``writeStandardTextCuratedCalibrations``,
341 and ``writeAdditionalCuratdCalibrations``.
342 """
343 # Delegate registration of collections (and creating names for them)
344 # to other methods so they can be called independently with the same
345 # preconditions. Collection registration is idempotent, so this is
346 # safe, and while it adds a bit of overhead, as long as it's one
347 # registration attempt per method (not per dataset or dataset type),
348 # that's negligible.
349 self.writeCameraGeom(butler, collection, *suffixes)
350 self.writeStandardTextCuratedCalibrations(butler, collection, suffixes=suffixes)
351 self.writeAdditionalCuratedCalibrations(butler, collection, suffixes=suffixes)
    def writeAdditionalCuratedCalibrations(self, butler: Butler, collection: Optional[str] = None,
                                           suffixes: Sequence[str] = ()) -> None:
        """Write additional curated calibrations that might be instrument
        specific and are not part of the standard set.

        Default implementation does nothing.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to use to store these calibrations.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range.  If this collection already
            exists, it must be a `~CollectionType.CALIBRATION` collection,
            and it must not have any datasets that would conflict with those
            inserted by this method.  If `None`, a collection name is worked
            out automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this default name
            may not work well for long-lived repositories unless one or more
            ``suffixes`` are also provided (and changed every time curated
            calibrations are ingested).
        suffixes : `Sequence` [ `str` ], optional
            Name suffixes to append to collection names, after concatenating
            them with the standard collection name delimiter.  If provided,
            these are appended to the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well the
            `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``).
        """
        # Intentionally a no-op in the base class; subclasses override when
        # they have non-standard curated calibrations to write.
        return
385 def writeCameraGeom(self, butler: Butler, collection: Optional[str] = None,
386 suffixes: Sequence[str] = ()) -> None:
387 """Write the default camera geometry to the butler repository and
388 associate it with the appropriate validity range in a calibration
389 collection.
391 Parameters
392 ----------
393 butler : `lsst.daf.butler.Butler`
394 Butler to use to store these calibrations.
395 collection : `str`, optional
396 Name to use for the calibration collection that associates all
397 datasets with a validity range. If this collection already exists,
398 it must be a `~CollectionType.CALIBRATION` collection, and it must
399 not have any datasets that would conflict with those inserted by
400 this method. If `None`, a collection name is worked out
401 automatically from the instrument name and other metadata by
402 calling ``makeCalibrationCollectionName``, but this
403 default name may not work well for long-lived repositories unless
404 one or more ``suffixes`` are also provided (and changed every time
405 curated calibrations are ingested).
406 suffixes : `Sequence` [ `str` ], optional
407 Name suffixes to append to collection names, after concatenating
408 them with the standard collection name delimeter. If provided,
409 these are appended to the names of the `~CollectionType.RUN`
410 collections that datasets are inserted directly into, as well the
411 `~CollectionType.CALIBRATION` collection if it is generated
412 automatically (i.e. if ``collection is None``).
413 """
414 if collection is None:
415 collection = self.makeCalibrationCollectionName(*suffixes)
416 butler.registry.registerCollection(collection, type=CollectionType.CALIBRATION)
417 run = self.makeUnboundedCalibrationRunName(*suffixes)
418 butler.registry.registerRun(run)
419 datasetType = DatasetType("camera", ("instrument",), "Camera", isCalibration=True,
420 universe=butler.registry.dimensions)
421 butler.registry.registerDatasetType(datasetType)
422 camera = self.getCamera()
423 ref = butler.put(camera, datasetType, {"instrument": self.getName()}, run=run)
424 butler.registry.certify(collection, [ref], Timespan(begin=None, end=None))
426 def writeStandardTextCuratedCalibrations(self, butler: Butler, collection: Optional[str] = None,
427 suffixes: Sequence[str] = ()) -> None:
428 """Write the set of standardized curated text calibrations to
429 the repository.
431 Parameters
432 ----------
433 butler : `lsst.daf.butler.Butler`
434 Butler to receive these calibration datasets.
435 collection : `str`, optional
436 Name to use for the calibration collection that associates all
437 datasets with a validity range. If this collection already exists,
438 it must be a `~CollectionType.CALIBRATION` collection, and it must
439 not have any datasets that would conflict with those inserted by
440 this method. If `None`, a collection name is worked out
441 automatically from the instrument name and other metadata by
442 calling ``makeCalibrationCollectionName``, but this
443 default name may not work well for long-lived repositories unless
444 one or more ``suffixes`` are also provided (and changed every time
445 curated calibrations are ingested).
446 suffixes : `Sequence` [ `str` ], optional
447 Name suffixes to append to collection names, after concatenating
448 them with the standard collection name delimeter. If provided,
449 these are appended to the names of the `~CollectionType.RUN`
450 collections that datasets are inserted directly into, as well the
451 `~CollectionType.CALIBRATION` collection if it is generated
452 automatically (i.e. if ``collection is None``).
453 """
454 if collection is None:
455 collection = self.makeCalibrationCollectionName(*suffixes)
456 butler.registry.registerCollection(collection, type=CollectionType.CALIBRATION)
457 runs = set()
458 for datasetTypeName in self.standardCuratedDatasetTypes:
459 # We need to define the dataset types.
460 if datasetTypeName not in StandardCuratedCalibrationDatasetTypes:
461 raise ValueError(f"DatasetType {datasetTypeName} not in understood list"
462 f" [{'.'.join(StandardCuratedCalibrationDatasetTypes)}]")
463 definition = StandardCuratedCalibrationDatasetTypes[datasetTypeName]
464 datasetType = DatasetType(datasetTypeName,
465 universe=butler.registry.dimensions,
466 isCalibration=True,
467 **definition)
468 self._writeSpecificCuratedCalibrationDatasets(butler, datasetType, collection, runs=runs,
469 suffixes=suffixes)
471 @classmethod
472 def _getSpecificCuratedCalibrationPath(cls, datasetTypeName):
473 """Return the path of the curated calibration directory.
475 Parameters
476 ----------
477 datasetTypeName : `str`
478 The name of the standard dataset type to find.
480 Returns
481 -------
482 path : `str`
483 The path to the standard curated data directory. `None` if the
484 dataset type is not found or the obs data package is not
485 available.
486 """
487 if cls.getObsDataPackageDir() is None:
488 # if there is no data package then there can't be datasets
489 return None
491 calibPath = os.path.join(cls.getObsDataPackageDir(), cls.policyName,
492 datasetTypeName)
494 if os.path.exists(calibPath):
495 return calibPath
497 return None
    def _writeSpecificCuratedCalibrationDatasets(self, butler: Butler, datasetType: DatasetType,
                                                 collection: str, runs: Set[str], suffixes: Sequence[str]):
        """Write standardized curated calibration datasets for this specific
        dataset type from an obs data package.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Gen3 butler in which to put the calibrations.
        datasetType : `lsst.daf.butler.DatasetType`
            Dataset type to be put.
        collection : `str`
            Name of the `~CollectionType.CALIBRATION` collection that
            associates all datasets with validity ranges.  Must have been
            registered prior to this call.
        runs : `set` [ `str` ]
            Names of runs that have already been registered by previous calls
            and need not be registered again.  Should be updated by this
            method as new runs are registered.
        suffixes : `Sequence` [ `str` ]
            Suffixes to append to run names when creating them from
            ``CALIBDATE`` metadata, via calls to
            `makeCuratedCalibrationRunName`.

        Notes
        -----
        This method scans the location defined in the ``obsDataPackageDir``
        class attribute for curated calibrations corresponding to the
        supplied dataset type.  The directory name in the data package must
        match the name of the dataset type.  They are assumed to use the
        standard layout and can be read by
        `~lsst.pipe.tasks.read_curated_calibs.read_all` and provide standard
        metadata.
        """
        calibPath = self._getSpecificCuratedCalibrationPath(datasetType.name)
        if calibPath is None:
            # Nothing to ingest for this dataset type.
            return

        # Register the dataset type
        butler.registry.registerDatasetType(datasetType)

        # obs_base can't depend on pipe_tasks but concrete obs packages
        # can -- we therefore have to defer import
        from lsst.pipe.tasks.read_curated_calibs import read_all

        # Read calibs, registering a new run for each CALIBDATE as needed.
        # We try to avoid registering runs multiple times as an optimization
        # by putting them in the ``runs`` set that was passed in.
        camera = self.getCamera()
        calibsDict = read_all(calibPath, camera)[0]  # second return is calib type
        datasetRecords = []
        for det in calibsDict:
            # Each calibration is valid from its own timestamp until the
            # next one for the same detector; the final range is open-ended
            # (appending None gives an unbounded end for the last entry).
            times = sorted([k for k in calibsDict[det]])
            calibs = [calibsDict[det][time] for time in times]
            times = [astropy.time.Time(t, format="datetime", scale="utc") for t in times]
            times += [None]
            for calib, beginTime, endTime in zip(calibs, times[:-1], times[1:]):
                md = calib.getMetadata()
                run = self.makeCuratedCalibrationRunName(md['CALIBDATE'], *suffixes)
                if run not in runs:
                    butler.registry.registerRun(run)
                    runs.add(run)
                dataId = DataCoordinate.standardize(
                    universe=butler.registry.dimensions,
                    instrument=self.getName(),
                    detector=md["DETECTOR"],
                )
                datasetRecords.append((calib, dataId, run, Timespan(beginTime, endTime)))

        # Second loop actually does the inserts and filesystem writes.  We
        # first do a butler.put on each dataset, inserting it into the run
        # for its calibDate.  We remember those refs and group them by
        # timespan, so we can vectorize the certify calls as much as
        # possible.
        refsByTimespan = defaultdict(list)
        with butler.transaction():
            for calib, dataId, run, timespan in datasetRecords:
                refsByTimespan[timespan].append(butler.put(calib, datasetType, dataId, run=run))
            for timespan, refs in refsByTimespan.items():
                butler.registry.certify(collection, refs, timespan)
    @abstractmethod
    def makeDataIdTranslatorFactory(self) -> TranslatorFactory:
        """Return a factory for creating Gen2->Gen3 data ID translators,
        specialized for this instrument.

        Derived class implementations should generally call
        `TranslatorFactory.addGenericInstrumentRules` with appropriate
        arguments, but are not required to (and may not be able to if their
        Gen2 raw data IDs are sufficiently different from the HSC/DECam/CFHT
        norm).

        Returns
        -------
        factory : `TranslatorFactory`.
            Factory for `Translator` objects.
        """
        raise NotImplementedError("Must be implemented by derived classes.")
    @classmethod
    def makeDefaultRawIngestRunName(cls) -> str:
        """Make the default instrument-specific run collection string for
        raw data ingest.

        Returns
        -------
        coll : `str`
            Run collection name to be used as the default for ingestion of
            raws.
        """
        return cls.makeCollectionName("raw", "all")
    @classmethod
    def makeUnboundedCalibrationRunName(cls, *suffixes: str) -> str:
        """Make a RUN collection name appropriate for inserting calibration
        datasets whose validity ranges are unbounded.

        Parameters
        ----------
        *suffixes : `str`
            Strings to be appended to the base name, using the default
            delimiter for collection names.

        Returns
        -------
        name : `str`
            Run collection name.
        """
        return cls.makeCollectionName("calib", "unbounded", *suffixes)
    @classmethod
    def makeCuratedCalibrationRunName(cls, calibDate: str, *suffixes: str) -> str:
        """Make a RUN collection name appropriate for inserting curated
        calibration datasets with the given ``CALIBDATE`` metadata value.

        Parameters
        ----------
        calibDate : `str`
            The ``CALIBDATE`` metadata value.
        *suffixes : `str`
            Strings to be appended to the base name, using the default
            delimiter for collection names.

        Returns
        -------
        name : `str`
            Run collection name.
        """
        return cls.makeCollectionName("calib", "curated", calibDate, *suffixes)
    @classmethod
    def makeCalibrationCollectionName(cls, *suffixes: str) -> str:
        """Make a CALIBRATION collection name appropriate for associating
        calibration datasets with validity ranges.

        Parameters
        ----------
        *suffixes : `str`
            Strings to be appended to the base name, using the default
            delimiter for collection names.

        Returns
        -------
        name : `str`
            Calibration collection name.
        """
        return cls.makeCollectionName("calib", *suffixes)
665 @classmethod
666 def makeCollectionName(cls, *labels: str) -> str:
667 """Get the instrument-specific collection string to use as derived
668 from the supplied labels.
670 Parameters
671 ----------
672 *labels : `str`
673 Strings to be combined with the instrument name to form a
674 collection name.
676 Returns
677 -------
678 name : `str`
679 Collection name to use that includes the instrument name.
680 """
681 return "/".join((cls.getName(),) + labels)
def makeExposureRecordFromObsInfo(obsInfo, universe):
    """Construct an exposure DimensionRecord from
    `astro_metadata_translator.ObservationInfo`.

    Parameters
    ----------
    obsInfo : `astro_metadata_translator.ObservationInfo`
        A `~astro_metadata_translator.ObservationInfo` object corresponding
        to the exposure.
    universe : `DimensionUniverse`
        Set of all known dimensions.

    Returns
    -------
    record : `DimensionRecord`
        A record containing exposure metadata, suitable for insertion into
        a `Registry`.
    """
    # Pointing and orientation values are optional; each stays `None` when
    # the corresponding ObservationInfo field is absent.
    ra = dec = sky_angle = zenith_angle = None
    if obsInfo.tracking_radec is not None:
        icrs = obsInfo.tracking_radec.icrs
        ra = icrs.ra.degree
        dec = icrs.dec.degree
        if obsInfo.boresight_rotation_coord == "sky":
            sky_angle = obsInfo.boresight_rotation_angle.degree
    if obsInfo.altaz_begin is not None:
        zenith_angle = obsInfo.altaz_begin.zen.degree

    recordClass = universe["exposure"].RecordClass
    return recordClass(
        instrument=obsInfo.instrument,
        id=obsInfo.exposure_id,
        name=obsInfo.observation_id,
        group_name=obsInfo.exposure_group,
        group_id=obsInfo.visit_id,
        datetime_begin=obsInfo.datetime_begin,
        datetime_end=obsInfo.datetime_end,
        exposure_time=obsInfo.exposure_time.to_value("s"),
        dark_time=obsInfo.dark_time.to_value("s"),
        observation_type=obsInfo.observation_type,
        observation_reason=obsInfo.observation_reason,
        physical_filter=obsInfo.physical_filter,
        science_program=obsInfo.science_program,
        target_name=obsInfo.object,
        tracking_ra=ra,
        tracking_dec=dec,
        sky_angle=sky_angle,
        zenith_angle=zenith_angle,
    )
def loadCamera(butler: Butler, dataId: DataId, *, collections: Any = None) -> Tuple[Camera, bool]:
    """Attempt to load versioned camera geometry from a butler, but fall back
    to obtaining a nominal camera from the `Instrument` class if that fails.

    Parameters
    ----------
    butler : `lsst.daf.butler.Butler`
        Butler instance to attempt to query for and load a ``camera``
        dataset from.
    dataId : `dict` or `DataCoordinate`
        Data ID that identifies at least the ``instrument`` and ``exposure``
        dimensions.
    collections : Any, optional
        Collections to be searched, overriding ``self.butler.collections``.
        Can be any of the types supported by the ``collections`` argument
        to butler construction.

    Returns
    -------
    camera : `lsst.afw.cameraGeom.Camera`
        Camera object.
    versioned : `bool`
        If `True`, the camera was obtained from the butler and should
        represent a versioned camera from a calibration repository.  If
        `False`, no camera datasets were found, and the returned camera was
        produced by instantiating the appropriate `Instrument` class and
        calling `Instrument.getCamera`.
    """
    if collections is None:
        collections = butler.collections
    # Expand the data ID up front: Registry would do it internally anyway,
    # doing it once here lets us reuse the expansion below and validates
    # that the required keys are present.
    dataId = butler.registry.expandDataId(dataId, graph=butler.registry.dimensions["exposure"].graph)
    try:
        versionedCamera = butler.get("camera", dataId=dataId, collections=collections)
        return versionedCamera, True
    except LookupError:
        # No versioned camera found; fall through to the nominal camera.
        pass
    instrument = Instrument.fromName(dataId["instrument"], butler.registry)
    return instrument.getCamera(), False