Coverage for python/lsst/obs/base/_instrument.py: 28%
195 statements
# This file is part of obs_base.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

__all__ = ("Instrument", "makeExposureRecordFromObsInfo", "loadCamera")

import logging
import os.path
import re
from abc import abstractmethod
from collections import defaultdict
from collections.abc import Sequence, Set
from functools import lru_cache
from typing import TYPE_CHECKING, Any, cast

import astropy.time
from lsst.afw.cameraGeom import Camera
from lsst.daf.butler import (
    Butler,
    CollectionType,
    DataCoordinate,
    DataId,
    DatasetType,
    DimensionRecord,
    DimensionUniverse,
    Timespan,
)
from lsst.daf.butler.registry import DataIdError
from lsst.pipe.base import Instrument as InstrumentBase
from lsst.utils import doImport, getPackageDir

from ._read_curated_calibs import CuratedCalibration, read_all

if TYPE_CHECKING:
    from astro_metadata_translator import MetadataTranslator, ObservationInfo
    from lsst.daf.butler import Registry

    from .filters import FilterDefinitionCollection

_LOG = logging.getLogger(__name__)

# To be a standard text curated calibration means that we use a
# standard definition for the corresponding DatasetType.
StandardCuratedCalibrationDatasetTypes = {
    "defects": {"dimensions": ("instrument", "detector"), "storageClass": "Defects"},
    "qe_curve": {"dimensions": ("instrument", "detector"), "storageClass": "QECurve"},
    "crosstalk": {"dimensions": ("instrument", "detector"), "storageClass": "CrosstalkCalib"},
    "linearizer": {"dimensions": ("instrument", "detector"), "storageClass": "Linearizer"},
    "bfk": {"dimensions": ("instrument", "detector"), "storageClass": "BrighterFatterKernel"},
    "transmission_optics": {"dimensions": ("instrument",), "storageClass": "TransmissionCurve"},
    "transmission_filter": {
        "dimensions": ("instrument", "physical_filter"),
        "storageClass": "TransmissionCurve",
    },
    "transmission_sensor": {"dimensions": ("instrument", "detector"), "storageClass": "TransmissionCurve"},
    "transmission_atmosphere": {"dimensions": ("instrument",), "storageClass": "TransmissionCurve"},
    "transmission_system": {
        "dimensions": ("instrument", "detector", "physical_filter"),
        "storageClass": "TransmissionCurve",
    },
}
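
# Illustrative note (not part of the original module): each entry above is
# expanded into DatasetType constructor keywords by
# writeStandardTextCuratedCalibrations below, so the "defects" row is
# equivalent to
#
#     DatasetType(
#         "defects",
#         dimensions=("instrument", "detector"),
#         storageClass="Defects",
#         isCalibration=True,
#         universe=butler.dimensions,
#     )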


class Instrument(InstrumentBase):
    """Rubin-specified base for instrument-specific logic for the Gen3 Butler.

    Parameters
    ----------
    collection_prefix : `str`, optional
        Prefix for collection names to use instead of the instrument's own
        name. This is primarily for use in simulated-data repositories, where
        the instrument name may not be necessary and/or sufficient to
        distinguish between collections.

    Notes
    -----
    Concrete instrument subclasses must have the same construction signature
    as the base class.
    """

    policyName: str | None = None
    """Instrument specific name to use when locating a policy or configuration
    file in the file system."""

    obsDataPackage: str | None = None
    """Name of the package containing the text curated calibration files.
    Usually an obs ``_data`` package. If `None` no curated calibration files
    will be read. (`str`)"""

    standardCuratedDatasetTypes: Set[str] = frozenset(StandardCuratedCalibrationDatasetTypes)
    """The dataset types expected to be obtained from the obsDataPackage.

    These dataset types are all required to have standard definitions and
    must be known to the base class. Clearing this list will prevent
    any of these calibrations from being stored. If a dataset type is not
    known to a specific instrument it can still be included in this list
    since the data package is the source of truth. (`set` of `str`)
    """

    additionalCuratedDatasetTypes: Set[str] = frozenset()
    """Curated dataset types specific to this particular instrument that do
    not follow the standard organization found in obs data packages.

    These are the instrument-specific dataset types written by
    `writeAdditionalCuratedCalibrations` in addition to the calibrations
    found in obs data packages that follow the standard scheme.
    (`set` of `str`)"""

    translatorClass: MetadataTranslator | None = None
    """Class to use when extracting information from metadata. If `None`
    the metadata extraction system will determine the translator class itself.
    This class can also be used to calculate the observing day offset in some
    scenarios.
    """

    @property
    @abstractmethod
    def filterDefinitions(self) -> FilterDefinitionCollection:
        """`~lsst.obs.base.FilterDefinitionCollection`, defining the filters
        for this instrument.
        """
        raise NotImplementedError()

    def __init__(self, collection_prefix: str | None = None):
        super().__init__(collection_prefix=collection_prefix)

    @classmethod
    @lru_cache
    def getCuratedCalibrationNames(cls) -> frozenset[str]:
        """Return the names of all the curated calibration dataset types.

        Returns
        -------
        names : `frozenset` of `str`
            The dataset type names of all curated calibrations. This will
            include the standard curated calibrations even if the particular
            instrument does not support them.

        Notes
        -----
        The returned list does not indicate whether a particular dataset
        is present in the Butler repository, simply that these are the
        dataset types that are handled by ``writeCuratedCalibrations``.
        """
        # Camera is a special dataset type that is also handled as a
        # curated calibration.
        curated = {"camera"}

        # Make a cursory attempt to filter out curated dataset types
        # that are not present for this instrument.
        for datasetTypeName in cls.standardCuratedDatasetTypes:
            calibPath = cls._getSpecificCuratedCalibrationPath(datasetTypeName)
            if calibPath is not None:
                curated.add(datasetTypeName)

        curated.update(cls.additionalCuratedDatasetTypes)
        return frozenset(curated)
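
    # Illustrative note (not part of the original module): for an instrument
    # whose obs data package provides ``defects`` and ``qe_curve`` directories
    # plus one extra instrument-specific type, this would return something
    # like frozenset({"camera", "defects", "qe_curve", <additional type>}).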

    @abstractmethod
    def getCamera(self) -> Camera:
        """Retrieve the cameraGeom representation of this instrument.

        This is a temporary API that should go away once ``obs`` packages have
        a standardized approach to writing versioned cameras to a Gen3 repo.
        """
        raise NotImplementedError()

    @classmethod
    @lru_cache
    def getObsDataPackageDir(cls) -> str | None:
        """Return the root of the obs data package that provides
        specializations for this instrument.

        Returns
        -------
        dir : `str` or `None`
            The root of the relevant obs data package, or `None` if this
            instrument does not have one.
        """
        if cls.obsDataPackage is None:
            return None
        return getPackageDir(cls.obsDataPackage)

    def _registerFilters(self, registry: Registry, update: bool = False) -> None:
        """Register the physical and abstract filter Dimension relationships.

        This should be called in the `register` implementation, within
        a transaction context manager block.

        Parameters
        ----------
        registry : `lsst.daf.butler.Registry`
            The registry to add dimensions to.
        update : `bool`, optional
            If `True` (`False` is default), update existing records if they
            differ from the new ones.
        """
        for filter in self.filterDefinitions:
            # Fix for undefined abstract filters causing trouble in the
            # registry:
            if filter.band is None:
                band = filter.physical_filter
            else:
                band = filter.band

            registry.syncDimensionData(
                "physical_filter",
                {"instrument": self.getName(), "name": filter.physical_filter, "band": band},
                update=update,
            )

    def writeCuratedCalibrations(
        self, butler: Butler, collection: str | None = None, labels: Sequence[str] = ()
    ) -> None:
        """Write human-curated calibration Datasets to the given Butler with
        the appropriate validity ranges.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to use to store these calibrations.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range. If this collection already exists,
            it must be a `~CollectionType.CALIBRATION` collection, and it must
            not have any datasets that would conflict with those inserted by
            this method. If `None`, a collection name is worked out
            automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter. If provided,
            these are inserted into the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well as
            the `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``). Usually this is
            just the name of the ticket on which the calibration collection is
            being created.

        Notes
        -----
        Expected to be called from subclasses. The base method calls
        ``writeCameraGeom``, ``writeStandardTextCuratedCalibrations``,
        and ``writeAdditionalCuratedCalibrations``.
        """
        # Delegate registration of collections (and creating names for them)
        # to other methods so they can be called independently with the same
        # preconditions. Collection registration is idempotent, so this is
        # safe, and while it adds a bit of overhead, as long as it's one
        # registration attempt per method (not per dataset or dataset type),
        # that's negligible.
        self.writeCameraGeom(butler, collection, labels=labels)
        self.writeStandardTextCuratedCalibrations(butler, collection, labels=labels)
        self.writeAdditionalCuratedCalibrations(butler, collection, labels=labels)
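
    # Illustrative usage sketch (not part of the original module; the
    # repository path and ticket label are hypothetical):
    #
    #     butler = Butler("/path/to/repo", writeable=True)
    #     instrument = MyInstrument()  # a concrete Instrument subclass
    #     instrument.writeCuratedCalibrations(butler, labels=("DM-12345",))
    #
    # With no explicit ``collection``, the CALIBRATION collection name is
    # derived via ``makeCalibrationCollectionName(*labels)``.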

    def writeAdditionalCuratedCalibrations(
        self, butler: Butler, collection: str | None = None, labels: Sequence[str] = ()
    ) -> None:
        """Write additional curated calibrations that might be instrument
        specific and are not part of the standard set.

        The default implementation does nothing.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to use to store these calibrations.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range. If this collection already exists,
            it must be a `~CollectionType.CALIBRATION` collection, and it must
            not have any datasets that would conflict with those inserted by
            this method. If `None`, a collection name is worked out
            automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter. If provided,
            these are inserted into the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well as
            the `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``). Usually this is
            just the name of the ticket on which the calibration collection is
            being created.
        """
        return

    def writeCameraGeom(
        self, butler: Butler, collection: str | None = None, labels: Sequence[str] = ()
    ) -> None:
        """Write the default camera geometry to the butler repository and
        associate it with the appropriate validity range in a calibration
        collection.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to use to store these calibrations.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range. If this collection already exists,
            it must be a `~CollectionType.CALIBRATION` collection, and it must
            not have any datasets that would conflict with those inserted by
            this method. If `None`, a collection name is worked out
            automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter. If provided,
            these are inserted into the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well as
            the `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``). Usually this is
            just the name of the ticket on which the calibration collection is
            being created.
        """
        if collection is None:
            collection = self.makeCalibrationCollectionName(*labels)
        butler.registry.registerCollection(collection, type=CollectionType.CALIBRATION)
        run = self.makeUnboundedCalibrationRunName(*labels)
        butler.registry.registerRun(run)
        datasetType = DatasetType(
            "camera", ("instrument",), "Camera", isCalibration=True, universe=butler.dimensions
        )
        butler.registry.registerDatasetType(datasetType)
        camera = self.getCamera()
        ref = butler.put(camera, datasetType, {"instrument": self.getName()}, run=run)
        butler.registry.certify(collection, [ref], Timespan(begin=None, end=None))
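
    # Illustrative note (not part of the original module): the
    # ``Timespan(begin=None, end=None)`` above is an unbounded validity range,
    # so this nominal camera is certified as valid for any observation time
    # unless a versioned camera supersedes it in another collection.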

    def writeStandardTextCuratedCalibrations(
        self, butler: Butler, collection: str | None = None, labels: Sequence[str] = ()
    ) -> None:
        """Write the set of standardized curated text calibrations to
        the repository.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to receive these calibration datasets.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range. If this collection already exists,
            it must be a `~CollectionType.CALIBRATION` collection, and it must
            not have any datasets that would conflict with those inserted by
            this method. If `None`, a collection name is worked out
            automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter. If provided,
            these are inserted into the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well as
            the `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``). Usually this is
            just the name of the ticket on which the calibration collection is
            being created.
        """
        if collection is None:
            collection = self.makeCalibrationCollectionName(*labels)
        butler.registry.registerCollection(collection, type=CollectionType.CALIBRATION)
        runs: set[str] = set()
        for datasetTypeName in self.standardCuratedDatasetTypes:
            # We need to define the dataset types.
            if datasetTypeName not in StandardCuratedCalibrationDatasetTypes:
                raise ValueError(
                    f"DatasetType {datasetTypeName} not in understood list"
                    f" [{', '.join(StandardCuratedCalibrationDatasetTypes)}]"
                )
            definition = StandardCuratedCalibrationDatasetTypes[datasetTypeName]
            datasetType = DatasetType(
                datasetTypeName,
                universe=butler.dimensions,
                isCalibration=True,
                # MyPy should be able to figure out that the kwargs here have
                # the right types, but it can't.
                **definition,  # type: ignore
            )
            self._writeSpecificCuratedCalibrationDatasets(
                butler, datasetType, collection, runs=runs, labels=labels
            )

    @classmethod
    def _getSpecificCuratedCalibrationPath(cls, datasetTypeName: str) -> str | None:
        """Return the path of the curated calibration directory.

        Parameters
        ----------
        datasetTypeName : `str`
            The name of the standard dataset type to find.

        Returns
        -------
        path : `str` or `None`
            The path to the standard curated data directory. `None` if the
            dataset type is not found or the obs data package is not
            available.
        """
        data_package_dir = cls.getObsDataPackageDir()
        if data_package_dir is None:
            # If there is no data package then there can't be datasets.
            return None

        if cls.policyName is None:
            raise TypeError(f"Instrument {cls.getName()} has an obs data package but no policy name.")

        calibPath = os.path.join(data_package_dir, cls.policyName, datasetTypeName)

        if os.path.exists(calibPath):
            return calibPath

        return None

    def _writeSpecificCuratedCalibrationDatasets(
        self, butler: Butler, datasetType: DatasetType, collection: str, runs: set[str], labels: Sequence[str]
    ) -> None:
        """Write standardized curated calibration datasets for this specific
        dataset type from an obs data package.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Gen3 butler in which to put the calibrations.
        datasetType : `lsst.daf.butler.DatasetType`
            Dataset type to be put.
        collection : `str`
            Name of the `~CollectionType.CALIBRATION` collection that
            associates all datasets with validity ranges. Must have been
            registered prior to this call.
        runs : `set` [ `str` ]
            Names of runs that have already been registered by previous calls
            and need not be registered again. Should be updated by this
            method as new runs are registered.
        labels : `Sequence` [ `str` ]
            Extra strings to include in run names when creating them from
            ``CALIBDATE`` metadata, via calls to
            `makeCuratedCalibrationRunName`. Usually this is the name of the
            ticket on which the calibration collection is being created.

        Notes
        -----
        This method scans the location returned by ``getObsDataPackageDir``
        for curated calibrations corresponding to the supplied dataset type.
        The directory name in the data package must match the name of the
        dataset type. The files are assumed to use the standard layout and
        can be read by `~lsst.obs.base._read_curated_calibs.read_all` and
        provide standard metadata.
        """
        calibPath = self._getSpecificCuratedCalibrationPath(datasetType.name)
        if calibPath is None:
            return

        # Register the dataset type.
        butler.registry.registerDatasetType(datasetType)

        _LOG.info("Processing %r curated calibration", datasetType.name)

        # The class to use to read these calibrations comes from the storage
        # class.
        calib_class: Any
        calib_class = datasetType.storageClass.pytype
        if not hasattr(calib_class, "readText"):
            # Let's try the default calib class. All curated
            # calibrations should be subclasses of that, and the
            # parent can identify the correct one to use.
            calib_class = doImport("lsst.ip.isr.IsrCalib")

        calib_class = cast(type[CuratedCalibration], calib_class)

        # Read calibs, registering a new run for each CALIBDATE as needed.
        # We try to avoid registering runs multiple times as an optimization
        # by putting them in the ``runs`` set that was passed in.
        camera = self.getCamera()
        filters = set(self.filterDefinitions.physical_to_band.keys())
        calib_dimensions: list[str]
        if datasetType.name in StandardCuratedCalibrationDatasetTypes:
            calib_dimensions = list(StandardCuratedCalibrationDatasetTypes[datasetType.name]["dimensions"])
        else:
            # This should never trigger with real data, but will
            # trigger on the unit tests.
            _LOG.warning(
                "Unknown curated calibration type %s. Attempting to use supplied definition.",
                datasetType.name,
            )
            calib_dimensions = list(datasetType.dimensions.names)

        calibsDict, calib_type = read_all(calibPath, camera, calib_class, calib_dimensions, filters)

        datasetRecords = []
        for path in calibsDict:
            times = sorted(calibsDict[path])
            calibs = [calibsDict[path][time] for time in times]
            atimes: list[astropy.time.Time | None] = [
                astropy.time.Time(t, format="datetime", scale="utc") for t in times
            ]
            atimes += [None]
            for calib, beginTime, endTime in zip(calibs, atimes[:-1], atimes[1:], strict=True):
                md = calib.getMetadata()
                run = self.makeCuratedCalibrationRunName(md["CALIBDATE"], *labels)
                if run not in runs:
                    butler.registry.registerRun(run)
                    runs.add(run)

                # DETECTOR and FILTER keywords in the calibration
                # metadata must exist if the calibration depends on
                # those dimensions.
                dimension_arguments = {}
                if "DETECTOR" in md:
                    dimension_arguments["detector"] = md["DETECTOR"]
                if "FILTER" in md:
                    dimension_arguments["physical_filter"] = md["FILTER"]

                dataId = DataCoordinate.standardize(
                    universe=butler.dimensions,
                    instrument=self.getName(),
                    **dimension_arguments,
                )
                datasetRecords.append((calib, dataId, run, Timespan(beginTime, endTime)))

        # Second loop actually does the inserts and filesystem writes. We
        # first do a butler.put on each dataset, inserting it into the run for
        # its calibDate. We remember those refs and group them by timespan, so
        # we can vectorize the certify calls as much as possible.
        refsByTimespan = defaultdict(list)
        with butler.transaction():
            for calib, dataId, run, timespan in datasetRecords:
                refsByTimespan[timespan].append(butler.put(calib, datasetType, dataId, run=run))
            for timespan, refs in refsByTimespan.items():
                butler.registry.certify(collection, refs, timespan)
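
    # Illustrative note (not part of the original module): appending ``None``
    # to the sorted time list and zipping it against itself chains validity
    # ranges. Three calibs dated 2020-01-01, 2021-01-01 and 2022-01-01 yield
    # Timespans [2020-01-01, 2021-01-01), [2021-01-01, 2022-01-01) and
    # [2022-01-01, unbounded), so each calibration is valid until superseded.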

    @classmethod
    def group_name_to_group_id(cls, group_name: str) -> int:
        """Translate the exposure group name to an integer.

        Parameters
        ----------
        group_name : `str`
            The name of the exposure group.

        Returns
        -------
        id : `int`
            The exposure group name in integer form. This integer might be
            used as an ID to uniquely identify the group in contexts where
            a string cannot be used.

        Notes
        -----
        The default implementation removes all non-numeric characters and
        casts to an integer.
        """
        cleaned = re.sub(r"\D", "", group_name)
        return int(cleaned)
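
    # Illustrative example (not part of the original module): a group name
    # like "2022-08-15T06:12:00.123" is stripped of non-digits by
    # re.sub(r"\D", "", ...) to "20220815061200123", which then casts to the
    # integer 20220815061200123.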


def makeExposureRecordFromObsInfo(
    obsInfo: ObservationInfo, universe: DimensionUniverse, **kwargs: Any
) -> DimensionRecord:
    """Construct an exposure DimensionRecord from
    `astro_metadata_translator.ObservationInfo`.

    Parameters
    ----------
    obsInfo : `astro_metadata_translator.ObservationInfo`
        A `~astro_metadata_translator.ObservationInfo` object corresponding to
        the exposure.
    universe : `DimensionUniverse`
        Set of all known dimensions.
    **kwargs
        Additional field values for this record.

    Returns
    -------
    record : `DimensionRecord`
        A record containing exposure metadata, suitable for insertion into
        a `Registry`.
    """
    dimension = universe["exposure"]

    # Some registries support additional items.
    supported = {meta.name for meta in dimension.metadata}

    ra, dec, sky_angle, azimuth, zenith_angle = (None, None, None, None, None)
    if obsInfo.tracking_radec is not None:
        icrs = obsInfo.tracking_radec.icrs
        ra = icrs.ra.degree
        dec = icrs.dec.degree
        if obsInfo.boresight_rotation_coord == "sky":
            sky_angle = obsInfo.boresight_rotation_angle.degree
    if obsInfo.altaz_begin is not None:
        zenith_angle = obsInfo.altaz_begin.zen.degree
        azimuth = obsInfo.altaz_begin.az.degree

    extras: dict[str, Any] = {}
    for meta_key, info_key in (
        ("has_simulated", "has_simulated_content"),
        ("seq_start", "group_counter_start"),
        ("seq_end", "group_counter_end"),
        ("can_see_sky", "can_see_sky"),
    ):
        if meta_key in supported:
            extras[meta_key] = getattr(obsInfo, info_key)

    if (k := "azimuth") in supported:
        extras[k] = azimuth

    if "group" in dimension.implied:
        extras["group"] = obsInfo.exposure_group
    elif "group_name" in supported:
        extras["group_name"] = obsInfo.exposure_group
        extras["group_id"] = obsInfo.visit_id
    else:
        raise RuntimeError(f"Unable to determine where to put group metadata in exposure record: {supported}")

    # In some bad observations, the end time is before the begin time. We
    # cannot let that be ingested as-is because it becomes an unbounded
    # timespan that will not work correctly with calibration lookups. Instead
    # force the end time to be the begin time.
    datetime_end = obsInfo.datetime_end
    if datetime_end < obsInfo.datetime_begin:
        datetime_end = obsInfo.datetime_begin
        _LOG.warning(
            "Exposure %s:%s has end time before begin time. Forcing it to use the begin time.",
            obsInfo.instrument,
            obsInfo.observation_id,
        )

    return dimension.RecordClass(
        instrument=obsInfo.instrument,
        id=obsInfo.exposure_id,
        obs_id=obsInfo.observation_id,
        datetime_begin=obsInfo.datetime_begin,
        datetime_end=datetime_end,
        exposure_time=obsInfo.exposure_time.to_value("s"),
        # We are not mandating that dark_time be calculable.
        dark_time=obsInfo.dark_time.to_value("s") if obsInfo.dark_time is not None else None,
        observation_type=obsInfo.observation_type,
        observation_reason=obsInfo.observation_reason,
        day_obs=obsInfo.observing_day,
        seq_num=obsInfo.observation_counter,
        physical_filter=obsInfo.physical_filter,
        science_program=obsInfo.science_program,
        target_name=obsInfo.object,
        tracking_ra=ra,
        tracking_dec=dec,
        sky_angle=sky_angle,
        zenith_angle=zenith_angle,
        **extras,
        **kwargs,
    )
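

# Illustrative usage sketch (not part of the original module; "REPO" and
# ``header`` are placeholders):
#
#     from astro_metadata_translator import ObservationInfo
#     from lsst.daf.butler import Butler
#
#     butler = Butler("REPO")
#     obs_info = ObservationInfo(header)  # header: raw FITS metadata mapping
#     record = makeExposureRecordFromObsInfo(obs_info, butler.dimensions)
#     butler.registry.insertDimensionData("exposure", record)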


def loadCamera(butler: Butler, dataId: DataId, *, collections: Any = None) -> tuple[Camera, bool]:
    """Attempt to load versioned camera geometry from a butler, but fall back
    to obtaining a nominal camera from the `Instrument` class if that fails.

    Parameters
    ----------
    butler : `lsst.daf.butler.Butler`
        Butler instance to attempt to query for and load a ``camera`` dataset
        from.
    dataId : `dict` or `~lsst.daf.butler.DataCoordinate`
        Data ID that identifies at least the ``instrument`` and ``exposure``
        dimensions.
    collections : Any, optional
        Collections to be searched, overriding ``butler.collections``.
        Can be any of the types supported by the ``collections`` argument
        to butler construction.

    Returns
    -------
    camera : `lsst.afw.cameraGeom.Camera`
        Camera object.
    versioned : `bool`
        If `True`, the camera was obtained from the butler and should
        represent a versioned camera from a calibration repository. If
        `False`, no camera datasets were found, and the returned camera was
        produced by instantiating the appropriate `Instrument` class and
        calling `Instrument.getCamera`.

    Raises
    ------
    LookupError
        Raised when ``dataId`` does not specify a valid data ID.
    """
    if collections is None:
        collections = butler.collections
    # Registry would do data ID expansion internally if we didn't do it first,
    # but we might want an expanded data ID ourselves later, so we do it here
    # to ensure it only happens once.
    # This will also catch problems with the data ID not having keys we need.
    try:
        dataId = butler.registry.expandDataId(dataId, dimensions=butler.dimensions["exposure"].minimal_group)
    except DataIdError as exc:
        raise LookupError(str(exc)) from exc
    try:
        cameraRef = butler.get("camera", dataId=dataId, collections=collections)
        return cameraRef, True
    except LookupError:
        pass
    # We know the data ID contains an instrument value, but MyPy doesn't.
    instrument = Instrument.fromName(dataId["instrument"], butler.registry)  # type: ignore
    assert isinstance(instrument, Instrument)  # for mypy
    return instrument.getCamera(), False
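

# Illustrative usage sketch (not part of the original module; the instrument
# name and exposure ID are hypothetical):
#
#     camera, versioned = loadCamera(butler, {"instrument": "HSC", "exposure": 903334})
#     if not versioned:
#         # Fell back to the nominal camera from the Instrument class.
#         ...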