Coverage for python/lsst/obs/base/_instrument.py: 21%
177 statements
« prev ^ index » next coverage.py v7.2.5, created at 2023-05-13 02:17 -0700
1# This file is part of obs_base.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations

import logging
import os.path
from abc import abstractmethod
from collections import defaultdict
from functools import lru_cache
from typing import (
    TYPE_CHECKING,
    AbstractSet,
    Any,
    Dict,
    FrozenSet,
    Optional,
    Sequence,
    Set,
    Tuple,
    Type,
    cast,
)

import astropy.time
from lsst.afw.cameraGeom import Camera
from lsst.daf.butler import (
    Butler,
    CollectionType,
    DataCoordinate,
    DataId,
    DatasetType,
    DimensionRecord,
    DimensionUniverse,
    Timespan,
)
from lsst.daf.butler.registry import DataIdError
from lsst.pipe.base import Instrument as InstrumentBase
from lsst.utils import doImport, getPackageDir

from ._read_curated_calibs import CuratedCalibration, read_all

# Imports needed only for type annotations; guarded to avoid import cycles
# and unnecessary runtime dependencies.
if TYPE_CHECKING:
    from astro_metadata_translator import ObservationInfo
    from lsst.daf.butler import Registry

    from .filters import FilterDefinitionCollection

__all__ = ("Instrument", "makeExposureRecordFromObsInfo", "loadCamera")

_LOG = logging.getLogger(__name__)
# A curated calibration is "standard text" when its DatasetType uses one of
# these canonical definitions; the key is the dataset type name and the value
# supplies the dimensions and storage class used to construct it.
StandardCuratedCalibrationDatasetTypes = {
    "defects": {"dimensions": ("instrument", "detector"), "storageClass": "Defects"},
    "qe_curve": {"dimensions": ("instrument", "detector"), "storageClass": "QECurve"},
    "crosstalk": {"dimensions": ("instrument", "detector"), "storageClass": "CrosstalkCalib"},
    "linearizer": {"dimensions": ("instrument", "detector"), "storageClass": "Linearizer"},
    "bfk": {"dimensions": ("instrument", "detector"), "storageClass": "BrighterFatterKernel"},
    "transmission_optics": {"dimensions": ("instrument",), "storageClass": "TransmissionCurve"},
    "transmission_filter": {
        "dimensions": ("instrument", "physical_filter"),
        "storageClass": "TransmissionCurve",
    },
    "transmission_sensor": {"dimensions": ("instrument", "detector"), "storageClass": "TransmissionCurve"},
    "transmission_atmosphere": {"dimensions": ("instrument",), "storageClass": "TransmissionCurve"},
    "transmission_system": {
        "dimensions": ("instrument", "detector", "physical_filter"),
        "storageClass": "TransmissionCurve",
    },
}
class Instrument(InstrumentBase):
    """Rubin-specified base for instrument-specific logic for the Gen3 Butler.

    Parameters
    ----------
    collection_prefix : `str`, optional
        Prefix for collection names to use instead of the instrument's own
        name.  This is primarily for use in simulated-data repositories, where
        the instrument name may not be necessary and/or sufficient to
        distinguish between collections.

    Notes
    -----
    Concrete instrument subclasses must have the same construction signature as
    the base class.
    """

    policyName: Optional[str] = None
    """Instrument specific name to use when locating a policy or configuration
    file in the file system."""

    obsDataPackage: Optional[str] = None
    """Name of the package containing the text curated calibration files.
    Usually a obs _data package.  If `None` no curated calibration files
    will be read. (`str`)"""

    standardCuratedDatasetTypes: AbstractSet[str] = frozenset(StandardCuratedCalibrationDatasetTypes)
    """The dataset types expected to be obtained from the obsDataPackage.

    These dataset types are all required to have standard definitions and
    must be known to the base class.  Clearing this list will prevent
    any of these calibrations from being stored.  If a dataset type is not
    known to a specific instrument it can still be included in this list
    since the data package is the source of truth.  (`set` of `str`)
    """

    additionalCuratedDatasetTypes: AbstractSet[str] = frozenset()
    """Curated dataset types specific to this particular instrument that do
    not follow the standard organization found in obs data packages.

    These are the instrument-specific dataset types written by
    `writeAdditionalCuratedCalibrations` in addition to the calibrations
    found in obs data packages that follow the standard scheme.
    (`set` of `str`)"""

    @property
    @abstractmethod
    def filterDefinitions(self) -> FilterDefinitionCollection:
        """`~lsst.obs.base.FilterDefinitionCollection`, defining the filters
        for this instrument.
        """
        raise NotImplementedError()

    def __init__(self, collection_prefix: Optional[str] = None):
        super().__init__(collection_prefix=collection_prefix)

    @classmethod
    @lru_cache()
    def getCuratedCalibrationNames(cls) -> FrozenSet[str]:
        """Return the names of all the curated calibration dataset types.

        Returns
        -------
        names : `frozenset` of `str`
            The dataset type names of all curated calibrations.  This will
            include the standard curated calibrations even if the particular
            instrument does not support them.

        Notes
        -----
        The returned list does not indicate whether a particular dataset
        is present in the Butler repository, simply that these are the
        dataset types that are handled by ``writeCuratedCalibrations``.
        """
        # Camera is a special dataset type that is also handled as a
        # curated calibration.
        curated = {"camera"}

        # Make a cursory attempt to filter out curated dataset types
        # that are not present for this instrument.
        for datasetTypeName in cls.standardCuratedDatasetTypes:
            calibPath = cls._getSpecificCuratedCalibrationPath(datasetTypeName)
            if calibPath is not None:
                curated.add(datasetTypeName)

        curated.update(cls.additionalCuratedDatasetTypes)
        return frozenset(curated)

    @abstractmethod
    def getCamera(self) -> Camera:
        """Retrieve the cameraGeom representation of this instrument.

        This is a temporary API that should go away once ``obs`` packages have
        a standardized approach to writing versioned cameras to a Gen3 repo.
        """
        raise NotImplementedError()

    @classmethod
    @lru_cache()
    def getObsDataPackageDir(cls) -> Optional[str]:
        """The root of the obs data package that provides specializations for
        this instrument.

        Returns
        -------
        dir : `str` or `None`
            The root of the relevant obs data package, or `None` if this
            instrument does not have one.
        """
        if cls.obsDataPackage is None:
            return None
        return getPackageDir(cls.obsDataPackage)

    def _registerFilters(self, registry: Registry, update: bool = False) -> None:
        """Register the physical and abstract filter Dimension relationships.

        This should be called in the `register` implementation, within
        a transaction context manager block.

        Parameters
        ----------
        registry : `lsst.daf.butler.core.Registry`
            The registry to add dimensions to.
        update : `bool`, optional
            If `True` (`False` is default), update existing records if they
            differ from the new ones.
        """
        # Renamed from ``filter`` to avoid shadowing the builtin.
        for filter_def in self.filterDefinitions:
            # fix for undefined abstract filters causing trouble in the
            # registry:
            if filter_def.band is None:
                band = filter_def.physical_filter
            else:
                band = filter_def.band

            registry.syncDimensionData(
                "physical_filter",
                {"instrument": self.getName(), "name": filter_def.physical_filter, "band": band},
                update=update,
            )

    def writeCuratedCalibrations(
        self, butler: Butler, collection: Optional[str] = None, labels: Sequence[str] = ()
    ) -> None:
        """Write human-curated calibration Datasets to the given Butler with
        the appropriate validity ranges.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to use to store these calibrations.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range.  If this collection already exists,
            it must be a `~CollectionType.CALIBRATION` collection, and it must
            not have any datasets that would conflict with those inserted by
            this method.  If `None`, a collection name is worked out
            automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter.  If provided,
            these are inserted into the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well as
            the `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``).  Usually this is
            just the name of the ticket on which the calibration collection is
            being created.

        Notes
        -----
        Expected to be called from subclasses.  The base method calls
        ``writeCameraGeom``, ``writeStandardTextCuratedCalibrations``,
        and ``writeAdditionalCuratedCalibrations``.
        """
        # Delegate registration of collections (and creating names for them)
        # to other methods so they can be called independently with the same
        # preconditions.  Collection registration is idempotent, so this is
        # safe, and while it adds a bit of overhead, as long as it's one
        # registration attempt per method (not per dataset or dataset type),
        # that's negligible.
        self.writeCameraGeom(butler, collection, labels=labels)
        self.writeStandardTextCuratedCalibrations(butler, collection, labels=labels)
        self.writeAdditionalCuratedCalibrations(butler, collection, labels=labels)

    def writeAdditionalCuratedCalibrations(
        self, butler: Butler, collection: Optional[str] = None, labels: Sequence[str] = ()
    ) -> None:
        """Write additional curated calibrations that might be instrument
        specific and are not part of the standard set.

        Default implementation does nothing.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to use to store these calibrations.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range.  If this collection already exists,
            it must be a `~CollectionType.CALIBRATION` collection, and it must
            not have any datasets that would conflict with those inserted by
            this method.  If `None`, a collection name is worked out
            automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter.  If provided,
            these are inserted into the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well as
            the `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``).  Usually this is
            just the name of the ticket on which the calibration collection is
            being created.
        """
        return

    def writeCameraGeom(
        self, butler: Butler, collection: Optional[str] = None, labels: Sequence[str] = ()
    ) -> None:
        """Write the default camera geometry to the butler repository and
        associate it with the appropriate validity range in a calibration
        collection.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to use to store these calibrations.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range.  If this collection already exists,
            it must be a `~CollectionType.CALIBRATION` collection, and it must
            not have any datasets that would conflict with those inserted by
            this method.  If `None`, a collection name is worked out
            automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter.  If provided,
            these are inserted into the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well as
            the `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``).  Usually this is
            just the name of the ticket on which the calibration collection is
            being created.
        """
        if collection is None:
            collection = self.makeCalibrationCollectionName(*labels)
        butler.registry.registerCollection(collection, type=CollectionType.CALIBRATION)
        # The camera is timeless, so it lives in the special unbounded run.
        run = self.makeUnboundedCalibrationRunName(*labels)
        butler.registry.registerRun(run)
        datasetType = DatasetType(
            "camera", ("instrument",), "Camera", isCalibration=True, universe=butler.registry.dimensions
        )
        butler.registry.registerDatasetType(datasetType)
        camera = self.getCamera()
        ref = butler.put(camera, datasetType, {"instrument": self.getName()}, run=run)
        # Certify with an unbounded timespan: valid for all time.
        butler.registry.certify(collection, [ref], Timespan(begin=None, end=None))

    def writeStandardTextCuratedCalibrations(
        self, butler: Butler, collection: Optional[str] = None, labels: Sequence[str] = ()
    ) -> None:
        """Write the set of standardized curated text calibrations to
        the repository.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to receive these calibration datasets.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range.  If this collection already exists,
            it must be a `~CollectionType.CALIBRATION` collection, and it must
            not have any datasets that would conflict with those inserted by
            this method.  If `None`, a collection name is worked out
            automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter.  If provided,
            these are inserted into the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well as
            the `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``).  Usually this is
            just the name of the ticket on which the calibration collection is
            being created.
        """
        if collection is None:
            collection = self.makeCalibrationCollectionName(*labels)
        butler.registry.registerCollection(collection, type=CollectionType.CALIBRATION)
        runs: Set[str] = set()
        for datasetTypeName in self.standardCuratedDatasetTypes:
            # We need to define the dataset types.
            if datasetTypeName not in StandardCuratedCalibrationDatasetTypes:
                # Bug fix: join the known names with ", " (was ".") so the
                # error message reads as a list.
                raise ValueError(
                    f"DatasetType {datasetTypeName} not in understood list"
                    f" [{', '.join(StandardCuratedCalibrationDatasetTypes)}]"
                )
            definition = StandardCuratedCalibrationDatasetTypes[datasetTypeName]
            datasetType = DatasetType(
                datasetTypeName,
                universe=butler.registry.dimensions,
                isCalibration=True,
                # MyPy should be able to figure out that the kwargs here have
                # the right types, but it can't.
                **definition,  # type: ignore
            )
            self._writeSpecificCuratedCalibrationDatasets(
                butler, datasetType, collection, runs=runs, labels=labels
            )

    @classmethod
    def _getSpecificCuratedCalibrationPath(cls, datasetTypeName: str) -> Optional[str]:
        """Return the path of the curated calibration directory.

        Parameters
        ----------
        datasetTypeName : `str`
            The name of the standard dataset type to find.

        Returns
        -------
        path : `str` or `None`
            The path to the standard curated data directory.  `None` if the
            dataset type is not found or the obs data package is not
            available.
        """
        data_package_dir = cls.getObsDataPackageDir()
        if data_package_dir is None:
            # if there is no data package then there can't be datasets
            return None

        if cls.policyName is None:
            raise TypeError(f"Instrument {cls.getName()} has an obs data package but no policy name.")

        calibPath = os.path.join(data_package_dir, cls.policyName, datasetTypeName)

        if os.path.exists(calibPath):
            return calibPath

        return None

    def _writeSpecificCuratedCalibrationDatasets(
        self, butler: Butler, datasetType: DatasetType, collection: str, runs: Set[str], labels: Sequence[str]
    ) -> None:
        """Write standardized curated calibration datasets for this specific
        dataset type from an obs data package.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Gen3 butler in which to put the calibrations.
        datasetType : `lsst.daf.butler.DatasetType`
            Dataset type to be put.
        collection : `str`
            Name of the `~CollectionType.CALIBRATION` collection that
            associates all datasets with validity ranges.  Must have been
            registered prior to this call.
        runs : `set` [ `str` ]
            Names of runs that have already been registered by previous calls
            and need not be registered again.  Should be updated by this
            method as new runs are registered.
        labels : `Sequence` [ `str` ]
            Extra strings to include in run names when creating them from
            ``CALIBDATE`` metadata, via calls to `makeCuratedCalibrationName`.
            Usually this is the name of the ticket on which the calibration
            collection is being created.

        Notes
        -----
        This method scans the location defined in the ``obsDataPackageDir``
        class attribute for curated calibrations corresponding to the
        supplied dataset type.  The directory name in the data package must
        match the name of the dataset type.  They are assumed to use the
        standard layout and can be read by
        `~lsst.obs.base._read_curated_calibs.read_all` and provide standard
        metadata.
        """
        calibPath = self._getSpecificCuratedCalibrationPath(datasetType.name)
        if calibPath is None:
            return

        # Register the dataset type
        butler.registry.registerDatasetType(datasetType)
        _LOG.info("Processing %r curated calibration", datasetType.name)

        # The class to use to read these calibrations comes from the storage
        # class.
        calib_class: Any
        calib_class = datasetType.storageClass.pytype
        if not hasattr(calib_class, "readText"):
            # Let's try the default calib class.  All curated
            # calibrations should be subclasses of that, and the
            # parent can identify the correct one to use.
            calib_class = doImport("lsst.ip.isr.IsrCalib")

        calib_class = cast(Type[CuratedCalibration], calib_class)

        # Read calibs, registering a new run for each CALIBDATE as needed.
        # We try to avoid registering runs multiple times as an optimization
        # by putting them in the ``runs`` set that was passed in.
        camera = self.getCamera()
        filters = set(self.filterDefinitions.physical_to_band.keys())
        calib_dimensions: list[Any]
        if datasetType.name in StandardCuratedCalibrationDatasetTypes:
            calib_dimensions = list(StandardCuratedCalibrationDatasetTypes[datasetType.name]["dimensions"])
        else:
            # This should never trigger with real data, but will
            # trigger on the unit tests.
            _LOG.warning(
                "Unknown curated calibration type %s. Attempting to use supplied definition.",
                datasetType.name,
            )
            calib_dimensions = list(datasetType.dimensions)

        calibsDict, calib_type = read_all(calibPath, camera, calib_class, calib_dimensions, filters)

        datasetRecords = []
        for path in calibsDict:
            # Idiom fix: sort the mapping's keys directly rather than via a
            # redundant list comprehension.
            times = sorted(calibsDict[path])
            calibs = [calibsDict[path][time] for time in times]
            # Append None so zip below pairs the last calib with an
            # open-ended validity range.
            atimes: list[Optional[astropy.time.Time]] = [
                astropy.time.Time(t, format="datetime", scale="utc") for t in times
            ]
            atimes += [None]
            for calib, beginTime, endTime in zip(calibs, atimes[:-1], atimes[1:]):
                md = calib.getMetadata()
                run = self.makeCuratedCalibrationRunName(md["CALIBDATE"], *labels)
                if run not in runs:
                    butler.registry.registerRun(run)
                    runs.add(run)

                # DETECTOR and FILTER keywords in the calibration
                # metadata must exist if the calibration depends on
                # those dimensions.
                dimension_arguments = {}
                if "DETECTOR" in md:
                    dimension_arguments["detector"] = md["DETECTOR"]
                if "FILTER" in md:
                    dimension_arguments["physical_filter"] = md["FILTER"]

                dataId = DataCoordinate.standardize(
                    universe=butler.registry.dimensions,
                    instrument=self.getName(),
                    **dimension_arguments,
                )
                datasetRecords.append((calib, dataId, run, Timespan(beginTime, endTime)))

        # Second loop actually does the inserts and filesystem writes.  We
        # first do a butler.put on each dataset, inserting it into the run for
        # its calibDate.  We remember those refs and group them by timespan, so
        # we can vectorize the certify calls as much as possible.
        refsByTimespan = defaultdict(list)
        with butler.transaction():
            for calib, dataId, run, timespan in datasetRecords:
                refsByTimespan[timespan].append(butler.put(calib, datasetType, dataId, run=run))
            for timespan, refs in refsByTimespan.items():
                butler.registry.certify(collection, refs, timespan)
def makeExposureRecordFromObsInfo(
    obsInfo: ObservationInfo, universe: DimensionUniverse, **kwargs: Any
) -> DimensionRecord:
    """Construct an exposure DimensionRecord from
    `astro_metadata_translator.ObservationInfo`.

    Parameters
    ----------
    obsInfo : `astro_metadata_translator.ObservationInfo`
        A `~astro_metadata_translator.ObservationInfo` object corresponding to
        the exposure.
    universe : `DimensionUniverse`
        Set of all known dimensions.
    **kwargs
        Additional field values for this record.

    Returns
    -------
    record : `DimensionRecord`
        A record containing exposure metadata, suitable for insertion into
        a `Registry`.
    """
    dimension = universe["exposure"]

    # Some registries support additional items.
    supported = {meta.name for meta in dimension.metadata}

    ra, dec, sky_angle, azimuth, zenith_angle = (None, None, None, None, None)
    if obsInfo.tracking_radec is not None:
        icrs = obsInfo.tracking_radec.icrs
        ra = icrs.ra.degree
        dec = icrs.dec.degree
        if obsInfo.boresight_rotation_coord == "sky":
            sky_angle = obsInfo.boresight_rotation_angle.degree
    if obsInfo.altaz_begin is not None:
        zenith_angle = obsInfo.altaz_begin.zen.degree
        azimuth = obsInfo.altaz_begin.az.degree

    # Only populate optional fields that this universe's exposure dimension
    # actually declares.  Builtin generics (dict[...]) for consistency with
    # the rest of this module.
    extras: dict[str, Any] = {}
    for meta_key, info_key in (
        ("has_simulated", "has_simulated_content"),
        ("seq_start", "group_counter_start"),
        ("seq_end", "group_counter_end"),
    ):
        if meta_key in supported:
            extras[meta_key] = getattr(obsInfo, info_key)

    if (k := "azimuth") in supported:
        extras[k] = azimuth

    return dimension.RecordClass(
        instrument=obsInfo.instrument,
        id=obsInfo.exposure_id,
        obs_id=obsInfo.observation_id,
        group_name=obsInfo.exposure_group,
        group_id=obsInfo.visit_id,
        datetime_begin=obsInfo.datetime_begin,
        datetime_end=obsInfo.datetime_end,
        exposure_time=obsInfo.exposure_time.to_value("s"),
        # we are not mandating that dark_time be calculable
        dark_time=obsInfo.dark_time.to_value("s") if obsInfo.dark_time is not None else None,
        observation_type=obsInfo.observation_type,
        observation_reason=obsInfo.observation_reason,
        day_obs=obsInfo.observing_day,
        seq_num=obsInfo.observation_counter,
        physical_filter=obsInfo.physical_filter,
        science_program=obsInfo.science_program,
        target_name=obsInfo.object,
        tracking_ra=ra,
        tracking_dec=dec,
        sky_angle=sky_angle,
        zenith_angle=zenith_angle,
        **extras,
        **kwargs,
    )
def loadCamera(butler: Butler, dataId: DataId, *, collections: Any = None) -> Tuple[Camera, bool]:
    """Load camera geometry for a data ID, preferring a versioned camera.

    A versioned ``camera`` dataset is looked up in the butler first; if none
    is found, a nominal camera is built by instantiating the appropriate
    `Instrument` class.

    Parameters
    ----------
    butler : `lsst.daf.butler.Butler`
        Butler instance to attempt to query for and load a ``camera`` dataset
        from.
    dataId : `dict` or `DataCoordinate`
        Data ID that identifies at least the ``instrument`` and ``exposure``
        dimensions.
    collections : Any, optional
        Collections to be searched, overriding ``self.butler.collections``.
        Can be any of the types supported by the ``collections`` argument
        to butler construction.

    Returns
    -------
    camera : `lsst.afw.cameraGeom.Camera`
        Camera object.
    versioned : `bool`
        If `True`, the camera was obtained from the butler and should represent
        a versioned camera from a calibration repository.  If `False`, no
        camera datasets were found, and the returned camera was produced by
        instantiating the appropriate `Instrument` class and calling
        `Instrument.getCamera`.

    Raises
    ------
    LookupError
        Raised when ``dataId`` does not specify a valid data ID.
    """
    search_collections = butler.collections if collections is None else collections
    # Expand the data ID once, up front: the registry would expand it
    # internally anyway, and an expanded ID may be wanted later.  Expansion
    # also validates that the required keys are present.
    exposure_graph = butler.registry.dimensions["exposure"].graph
    try:
        dataId = butler.registry.expandDataId(dataId, graph=exposure_graph)
    except DataIdError as exc:
        raise LookupError(str(exc)) from exc
    try:
        versioned_camera = butler.get("camera", dataId=dataId, collections=search_collections)
    except LookupError:
        # No versioned camera available; fall back to the nominal one below.
        pass
    else:
        return versioned_camera, True
    # We know an instrument data ID is a value, but MyPy doesn't.
    instrument = Instrument.fromName(dataId["instrument"], butler.registry)  # type: ignore
    assert isinstance(instrument, Instrument)  # for mypy
    return instrument.getCamera(), False