Coverage for python/lsst/obs/base/_instrument.py: 28%
178 statements
« prev ^ index » next coverage.py v7.4.2, created at 2024-02-22 11:09 +0000
1# This file is part of obs_base.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <http://www.gnu.org/licenses/>.
22from __future__ import annotations
24__all__ = ("Instrument", "makeExposureRecordFromObsInfo", "loadCamera")
26import logging
27import os.path
28from abc import abstractmethod
29from collections import defaultdict
30from collections.abc import Sequence, Set
31from functools import lru_cache
32from typing import TYPE_CHECKING, Any, cast
34import astropy.time
35from lsst.afw.cameraGeom import Camera
36from lsst.daf.butler import (
37 Butler,
38 CollectionType,
39 DataCoordinate,
40 DataId,
41 DatasetType,
42 DimensionRecord,
43 DimensionUniverse,
44 Timespan,
45)
46from lsst.daf.butler.registry import DataIdError
47from lsst.pipe.base import Instrument as InstrumentBase
48from lsst.utils import doImport, getPackageDir
50from ._read_curated_calibs import CuratedCalibration, read_all
52if TYPE_CHECKING: 52 ↛ 53line 52 didn't jump to line 53, because the condition on line 52 was never true
53 from astro_metadata_translator import ObservationInfo
54 from lsst.daf.butler import Registry
56 from .filters import FilterDefinitionCollection
58_LOG = logging.getLogger(__name__)
# To be a standard text curated calibration means that we use a
# standard definition for the corresponding DatasetType.  The table maps
# each standard dataset type name to the butler dimensions it depends on
# and the storage class used to persist it.  Insertion order is preserved
# and is therefore visible to any code that iterates over this mapping.
StandardCuratedCalibrationDatasetTypes = {
    name: {"dimensions": dimensions, "storageClass": storageClass}
    for name, dimensions, storageClass in (
        ("defects", ("instrument", "detector"), "Defects"),
        ("qe_curve", ("instrument", "detector"), "QECurve"),
        ("crosstalk", ("instrument", "detector"), "CrosstalkCalib"),
        ("linearizer", ("instrument", "detector"), "Linearizer"),
        ("bfk", ("instrument", "detector"), "BrighterFatterKernel"),
        ("transmission_optics", ("instrument",), "TransmissionCurve"),
        ("transmission_filter", ("instrument", "physical_filter"), "TransmissionCurve"),
        ("transmission_sensor", ("instrument", "detector"), "TransmissionCurve"),
        ("transmission_atmosphere", ("instrument",), "TransmissionCurve"),
        ("transmission_system", ("instrument", "detector", "physical_filter"), "TransmissionCurve"),
    )
}
class Instrument(InstrumentBase):
    """Rubin-specified base for instrument-specific logic for the Gen3 Butler.

    Parameters
    ----------
    collection_prefix : `str`, optional
        Prefix for collection names to use instead of the instrument's own
        name. This is primarily for use in simulated-data repositories, where
        the instrument name may not be necessary and/or sufficient to
        distinguish between collections.

    Notes
    -----
    Concrete instrument subclasses must have the same construction signature as
    the base class.
    """

    policyName: str | None = None
    """Instrument specific name to use when locating a policy or configuration
    file in the file system."""

    obsDataPackage: str | None = None
    """Name of the package containing the text curated calibration files.
    Usually a obs _data package. If `None` no curated calibration files
    will be read. (`str`)"""

    standardCuratedDatasetTypes: Set[str] = frozenset(StandardCuratedCalibrationDatasetTypes)
    """The dataset types expected to be obtained from the obsDataPackage.

    These dataset types are all required to have standard definitions and
    must be known to the base class. Clearing this list will prevent
    any of these calibrations from being stored. If a dataset type is not
    known to a specific instrument it can still be included in this list
    since the data package is the source of truth. (`set` of `str`)
    """

    additionalCuratedDatasetTypes: Set[str] = frozenset()
    """Curated dataset types specific to this particular instrument that do
    not follow the standard organization found in obs data packages.

    These are the instrument-specific dataset types written by
    `writeAdditionalCuratedCalibrations` in addition to the calibrations
    found in obs data packages that follow the standard scheme.
    (`set` of `str`)"""

    @property
    @abstractmethod
    def filterDefinitions(self) -> FilterDefinitionCollection:
        """`~lsst.obs.base.FilterDefinitionCollection`, defining the filters
        for this instrument.
        """
        raise NotImplementedError()

    def __init__(self, collection_prefix: str | None = None):
        super().__init__(collection_prefix=collection_prefix)

    @classmethod
    @lru_cache
    def getCuratedCalibrationNames(cls) -> frozenset[str]:
        """Return the names of all the curated calibration dataset types.

        Returns
        -------
        names : `frozenset` of `str`
            The dataset type names of all curated calibrations. This will
            include the standard curated calibrations even if the particular
            instrument does not support them.

        Notes
        -----
        The returned list does not indicate whether a particular dataset
        is present in the Butler repository, simply that these are the
        dataset types that are handled by ``writeCuratedCalibrations``.
        """
        # Camera is a special dataset type that is also handled as a
        # curated calibration.
        curated = {"camera"}

        # Make a cursory attempt to filter out curated dataset types
        # that are not present for this instrument.
        for datasetTypeName in cls.standardCuratedDatasetTypes:
            calibPath = cls._getSpecificCuratedCalibrationPath(datasetTypeName)
            if calibPath is not None:
                curated.add(datasetTypeName)

        curated.update(cls.additionalCuratedDatasetTypes)
        return frozenset(curated)

    @abstractmethod
    def getCamera(self) -> Camera:
        """Retrieve the cameraGeom representation of this instrument.

        This is a temporary API that should go away once ``obs`` packages have
        a standardized approach to writing versioned cameras to a Gen3 repo.
        """
        raise NotImplementedError()

    @classmethod
    @lru_cache
    def getObsDataPackageDir(cls) -> str | None:
        """Return the root of the obs data package that provides
        specializations for this instrument.

        Returns
        -------
        dir : `str` or `None`
            The root of the relevant obs data package, or `None` if this
            instrument does not have one.
        """
        if cls.obsDataPackage is None:
            return None
        return getPackageDir(cls.obsDataPackage)

    def _registerFilters(self, registry: Registry, update: bool = False) -> None:
        """Register the physical and abstract filter Dimension relationships.
        This should be called in the `register` implementation, within
        a transaction context manager block.

        Parameters
        ----------
        registry : `lsst.daf.butler.Registry`
            The registry to add dimensions to.
        update : `bool`, optional
            If `True` (`False` is default), update existing records if they
            differ from the new ones.
        """
        for filter_def in self.filterDefinitions:
            # Fix for undefined abstract filters causing trouble in the
            # registry: fall back to the physical filter name as the band.
            if filter_def.band is None:
                band = filter_def.physical_filter
            else:
                band = filter_def.band

            registry.syncDimensionData(
                "physical_filter",
                {"instrument": self.getName(), "name": filter_def.physical_filter, "band": band},
                update=update,
            )

    def writeCuratedCalibrations(
        self, butler: Butler, collection: str | None = None, labels: Sequence[str] = ()
    ) -> None:
        """Write human-curated calibration Datasets to the given Butler with
        the appropriate validity ranges.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to use to store these calibrations.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range. If this collection already exists,
            it must be a `~CollectionType.CALIBRATION` collection, and it must
            not have any datasets that would conflict with those inserted by
            this method. If `None`, a collection name is worked out
            automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter. If provided,
            these are inserted into the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well the
            `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``). Usually this is
            just the name of the ticket on which the calibration collection is
            being created.

        Notes
        -----
        Expected to be called from subclasses. The base method calls
        ``writeCameraGeom``, ``writeStandardTextCuratedCalibrations``,
        and ``writeAdditionalCuratedCalibrations``.
        """
        # Delegate registration of collections (and creating names for them)
        # to other methods so they can be called independently with the same
        # preconditions. Collection registration is idempotent, so this is
        # safe, and while it adds a bit of overhead, as long as it's one
        # registration attempt per method (not per dataset or dataset type),
        # that's negligible.
        self.writeCameraGeom(butler, collection, labels=labels)
        self.writeStandardTextCuratedCalibrations(butler, collection, labels=labels)
        self.writeAdditionalCuratedCalibrations(butler, collection, labels=labels)

    def writeAdditionalCuratedCalibrations(
        self, butler: Butler, collection: str | None = None, labels: Sequence[str] = ()
    ) -> None:
        """Write additional curated calibrations that might be instrument
        specific and are not part of the standard set.

        Default implementation does nothing.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to use to store these calibrations.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range. If this collection already exists,
            it must be a `~CollectionType.CALIBRATION` collection, and it must
            not have any datasets that would conflict with those inserted by
            this method. If `None`, a collection name is worked out
            automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter. If provided,
            these are inserted into the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well the
            `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``). Usually this is
            just the name of the ticket on which the calibration collection is
            being created.
        """
        return

    def writeCameraGeom(
        self, butler: Butler, collection: str | None = None, labels: Sequence[str] = ()
    ) -> None:
        """Write the default camera geometry to the butler repository and
        associate it with the appropriate validity range in a calibration
        collection.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to use to store these calibrations.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range. If this collection already exists,
            it must be a `~CollectionType.CALIBRATION` collection, and it must
            not have any datasets that would conflict with those inserted by
            this method. If `None`, a collection name is worked out
            automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter. If provided,
            these are inserted into the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well the
            `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``). Usually this is
            just the name of the ticket on which the calibration collection is
            being created.
        """
        if collection is None:
            collection = self.makeCalibrationCollectionName(*labels)
        butler.registry.registerCollection(collection, type=CollectionType.CALIBRATION)
        run = self.makeUnboundedCalibrationRunName(*labels)
        butler.registry.registerRun(run)
        datasetType = DatasetType(
            "camera", ("instrument",), "Camera", isCalibration=True, universe=butler.dimensions
        )
        butler.registry.registerDatasetType(datasetType)
        camera = self.getCamera()
        ref = butler.put(camera, datasetType, {"instrument": self.getName()}, run=run)
        # The nominal camera is valid forever (unbounded timespan).
        butler.registry.certify(collection, [ref], Timespan(begin=None, end=None))

    def writeStandardTextCuratedCalibrations(
        self, butler: Butler, collection: str | None = None, labels: Sequence[str] = ()
    ) -> None:
        """Write the set of standardized curated text calibrations to
        the repository.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to receive these calibration datasets.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range. If this collection already exists,
            it must be a `~CollectionType.CALIBRATION` collection, and it must
            not have any datasets that would conflict with those inserted by
            this method. If `None`, a collection name is worked out
            automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter. If provided,
            these are inserted into the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well the
            `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``). Usually this is
            just the name of the ticket on which the calibration collection is
            being created.

        Raises
        ------
        ValueError
            Raised if a dataset type in ``standardCuratedDatasetTypes`` is not
            one of the known standard definitions.
        """
        if collection is None:
            collection = self.makeCalibrationCollectionName(*labels)
        butler.registry.registerCollection(collection, type=CollectionType.CALIBRATION)
        runs: set[str] = set()
        for datasetTypeName in self.standardCuratedDatasetTypes:
            # We need to define the dataset types.
            if datasetTypeName not in StandardCuratedCalibrationDatasetTypes:
                # Join with ", " so the list of known names is readable.
                raise ValueError(
                    f"DatasetType {datasetTypeName} not in understood list"
                    f" [{', '.join(StandardCuratedCalibrationDatasetTypes)}]"
                )
            definition = StandardCuratedCalibrationDatasetTypes[datasetTypeName]
            datasetType = DatasetType(
                datasetTypeName,
                universe=butler.dimensions,
                isCalibration=True,
                # MyPy should be able to figure out that the kwargs here have
                # the right types, but it can't.
                **definition,  # type: ignore
            )
            self._writeSpecificCuratedCalibrationDatasets(
                butler, datasetType, collection, runs=runs, labels=labels
            )

    @classmethod
    def _getSpecificCuratedCalibrationPath(cls, datasetTypeName: str) -> str | None:
        """Return the path of the curated calibration directory.

        Parameters
        ----------
        datasetTypeName : `str`
            The name of the standard dataset type to find.

        Returns
        -------
        path : `str` or `None`
            The path to the standard curated data directory. `None` if the
            dataset type is not found or the obs data package is not
            available.

        Raises
        ------
        TypeError
            Raised if the instrument declares an obs data package but no
            ``policyName``.
        """
        data_package_dir = cls.getObsDataPackageDir()
        if data_package_dir is None:
            # If there is no data package then there can't be datasets.
            return None

        if cls.policyName is None:
            raise TypeError(f"Instrument {cls.getName()} has an obs data package but no policy name.")

        calibPath = os.path.join(data_package_dir, cls.policyName, datasetTypeName)

        if os.path.exists(calibPath):
            return calibPath

        return None

    def _writeSpecificCuratedCalibrationDatasets(
        self, butler: Butler, datasetType: DatasetType, collection: str, runs: set[str], labels: Sequence[str]
    ) -> None:
        """Write standardized curated calibration datasets for this specific
        dataset type from an obs data package.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Gen3 butler in which to put the calibrations.
        datasetType : `lsst.daf.butler.DatasetType`
            Dataset type to be put.
        collection : `str`
            Name of the `~CollectionType.CALIBRATION` collection that
            associates all datasets with validity ranges. Must have been
            registered prior to this call.
        runs : `set` [ `str` ]
            Names of runs that have already been registered by previous calls
            and need not be registered again. Should be updated by this
            method as new runs are registered.
        labels : `Sequence` [ `str` ]
            Extra strings to include in run names when creating them from
            ``CALIBDATE`` metadata, via calls to `makeCuratedCalibrationName`.
            Usually this is the name of the ticket on which the calibration
            collection is being created.

        Notes
        -----
        This method scans the location defined in the ``obsDataPackageDir``
        class attribute for curated calibrations corresponding to the
        supplied dataset type. The directory name in the data package must
        match the name of the dataset type. They are assumed to use the
        standard layout and can be read by
        `~lsst.obs.base._read_curated_calibs.read_all` and provide standard
        metadata.
        """
        calibPath = self._getSpecificCuratedCalibrationPath(datasetType.name)
        if calibPath is None:
            return

        # Register the dataset type.
        butler.registry.registerDatasetType(datasetType)

        _LOG.info("Processing %r curated calibration", datasetType.name)

        # The class to use to read these calibrations comes from the storage
        # class.
        calib_class: Any
        calib_class = datasetType.storageClass.pytype
        if not hasattr(calib_class, "readText"):
            # Let's try the default calib class. All curated
            # calibrations should be subclasses of that, and the
            # parent can identify the correct one to use.
            calib_class = doImport("lsst.ip.isr.IsrCalib")
        calib_class = cast(type[CuratedCalibration], calib_class)

        # Read calibs, registering a new run for each CALIBDATE as needed.
        # We try to avoid registering runs multiple times as an optimization
        # by putting them in the ``runs`` set that was passed in.
        camera = self.getCamera()
        filters = set(self.filterDefinitions.physical_to_band.keys())
        calib_dimensions: list[str]
        if datasetType.name in StandardCuratedCalibrationDatasetTypes:
            calib_dimensions = list(StandardCuratedCalibrationDatasetTypes[datasetType.name]["dimensions"])
        else:
            # This should never trigger with real data, but will
            # trigger on the unit tests.
            _LOG.warning(
                "Unknown curated calibration type %s. Attempting to use supplied definition.",
                datasetType.name,
            )
            calib_dimensions = list(datasetType.dimensions.names)

        # The second returned value (the concrete calib type) is not needed
        # here.
        calibsDict, _ = read_all(calibPath, camera, calib_class, calib_dimensions, filters)

        datasetRecords = []
        for path in calibsDict:
            times = sorted(calibsDict[path])
            calibs = [calibsDict[path][time] for time in times]
            # Validity ranges run from each CALIBDATE to the next; the final
            # range is open-ended (None).
            atimes: list[astropy.time.Time | None] = [
                astropy.time.Time(t, format="datetime", scale="utc") for t in times
            ]
            atimes += [None]
            for calib, beginTime, endTime in zip(calibs, atimes[:-1], atimes[1:], strict=True):
                md = calib.getMetadata()
                run = self.makeCuratedCalibrationRunName(md["CALIBDATE"], *labels)
                if run not in runs:
                    butler.registry.registerRun(run)
                    runs.add(run)

                # DETECTOR and FILTER keywords in the calibration
                # metadata must exist if the calibration depends on
                # those dimensions.
                dimension_arguments = {}
                if "DETECTOR" in md:
                    dimension_arguments["detector"] = md["DETECTOR"]
                if "FILTER" in md:
                    dimension_arguments["physical_filter"] = md["FILTER"]

                dataId = DataCoordinate.standardize(
                    universe=butler.dimensions,
                    instrument=self.getName(),
                    **dimension_arguments,
                )
                datasetRecords.append((calib, dataId, run, Timespan(beginTime, endTime)))

        # Second loop actually does the inserts and filesystem writes. We
        # first do a butler.put on each dataset, inserting it into the run for
        # its calibDate. We remember those refs and group them by timespan, so
        # we can vectorize the certify calls as much as possible.
        refsByTimespan = defaultdict(list)
        with butler.transaction():
            for calib, dataId, run, timespan in datasetRecords:
                refsByTimespan[timespan].append(butler.put(calib, datasetType, dataId, run=run))
            for timespan, refs in refsByTimespan.items():
                butler.registry.certify(collection, refs, timespan)
def makeExposureRecordFromObsInfo(
    obsInfo: ObservationInfo, universe: DimensionUniverse, **kwargs: Any
) -> DimensionRecord:
    """Construct an exposure DimensionRecord from
    `astro_metadata_translator.ObservationInfo`.

    Parameters
    ----------
    obsInfo : `astro_metadata_translator.ObservationInfo`
        A `~astro_metadata_translator.ObservationInfo` object corresponding to
        the exposure.
    universe : `DimensionUniverse`
        Set of all known dimensions.
    **kwargs
        Additional field values for this record.

    Returns
    -------
    record : `DimensionRecord`
        A record containing exposure metadata, suitable for insertion into
        a `Registry`.
    """
    exposure_dimension = universe["exposure"]

    # Not every registry schema defines every optional metadata column, so
    # collect the field names this universe actually supports.
    known_fields = {meta.name for meta in exposure_dimension.metadata}

    ra = dec = sky_angle = azimuth = zenith_angle = None
    if obsInfo.tracking_radec is not None:
        icrs = obsInfo.tracking_radec.icrs
        ra = icrs.ra.degree
        dec = icrs.dec.degree
        if obsInfo.boresight_rotation_coord == "sky":
            sky_angle = obsInfo.boresight_rotation_angle.degree
    if obsInfo.altaz_begin is not None:
        zenith_angle = obsInfo.altaz_begin.zen.degree
        azimuth = obsInfo.altaz_begin.az.degree

    # Only fill in optional columns that this registry supports.
    extras: dict[str, Any] = {
        record_field: getattr(obsInfo, obs_attr)
        for record_field, obs_attr in (
            ("has_simulated", "has_simulated_content"),
            ("seq_start", "group_counter_start"),
            ("seq_end", "group_counter_end"),
        )
        if record_field in known_fields
    }

    if "azimuth" in known_fields:
        extras["azimuth"] = azimuth

    return exposure_dimension.RecordClass(
        instrument=obsInfo.instrument,
        id=obsInfo.exposure_id,
        obs_id=obsInfo.observation_id,
        group_name=obsInfo.exposure_group,
        group_id=obsInfo.visit_id,
        datetime_begin=obsInfo.datetime_begin,
        datetime_end=obsInfo.datetime_end,
        exposure_time=obsInfo.exposure_time.to_value("s"),
        # We are not mandating that dark_time be calculable.
        dark_time=obsInfo.dark_time.to_value("s") if obsInfo.dark_time is not None else None,
        observation_type=obsInfo.observation_type,
        observation_reason=obsInfo.observation_reason,
        day_obs=obsInfo.observing_day,
        seq_num=obsInfo.observation_counter,
        physical_filter=obsInfo.physical_filter,
        science_program=obsInfo.science_program,
        target_name=obsInfo.object,
        tracking_ra=ra,
        tracking_dec=dec,
        sky_angle=sky_angle,
        zenith_angle=zenith_angle,
        **extras,
        **kwargs,
    )
def loadCamera(butler: Butler, dataId: DataId, *, collections: Any = None) -> tuple[Camera, bool]:
    """Attempt to load versioned camera geometry from a butler, but fall back
    to obtaining a nominal camera from the `Instrument` class if that fails.

    Parameters
    ----------
    butler : `lsst.daf.butler.Butler`
        Butler instance to attempt to query for and load a ``camera`` dataset
        from.
    dataId : `dict` or `~lsst.daf.butler.DataCoordinate`
        Data ID that identifies at least the ``instrument`` and ``exposure``
        dimensions.
    collections : Any, optional
        Collections to be searched, overriding ``self.butler.collections``.
        Can be any of the types supported by the ``collections`` argument
        to butler construction.

    Returns
    -------
    camera : `lsst.afw.cameraGeom.Camera`
        Camera object.
    versioned : `bool`
        If `True`, the camera was obtained from the butler and should represent
        a versioned camera from a calibration repository. If `False`, no
        camera datasets were found, and the returned camera was produced by
        instantiating the appropriate `Instrument` class and calling
        `Instrument.getCamera`.

    Raises
    ------
    LookupError
        Raised when ``dataId`` does not specify a valid data ID.
    """
    if collections is None:
        collections = butler.collections
    # Expand the data ID up front.  Registry would do this internally anyway,
    # but doing it once here means an expanded data ID is available for the
    # fallback path below, and it validates the supplied keys early.
    try:
        dataId = butler.registry.expandDataId(dataId, dimensions=butler.dimensions["exposure"].minimal_group)
    except DataIdError as exc:
        raise LookupError(str(exc)) from exc
    try:
        versioned_camera = butler.get("camera", dataId=dataId, collections=collections)
    except LookupError:
        pass
    else:
        return versioned_camera, True
    # No versioned camera dataset exists in the searched collections; fall
    # back to the nominal camera provided by the Instrument class.
    # We know an instrument data ID is a value, but MyPy doesn't.
    instrument = Instrument.fromName(dataId["instrument"], butler.registry)  # type: ignore
    assert isinstance(instrument, Instrument)  # for mypy
    return instrument.getCamera(), False