Coverage for python/lsst/obs/base/_instrument.py: 23% of 168 statements.
Report generated by coverage.py v6.4.4 at 2022-09-14 02:36 -0700.
# This file is part of obs_base.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations

__all__ = ("Instrument", "makeExposureRecordFromObsInfo", "loadCamera")

import logging
import os.path
from abc import abstractmethod
from collections import defaultdict
from functools import lru_cache
from typing import (
    TYPE_CHECKING,
    AbstractSet,
    Any,
    Dict,
    FrozenSet,
    Optional,
    Sequence,
    Set,
    Tuple,
    Type,
    cast,
)

import astropy.time
from lsst.afw.cameraGeom import Camera
from lsst.daf.butler import (
    Butler,
    CollectionType,
    DataCoordinate,
    DataId,
    DatasetType,
    DimensionRecord,
    DimensionUniverse,
    Timespan,
)
from lsst.daf.butler.registry import DataIdError
from lsst.pipe.base import Instrument as InstrumentBase
from lsst.utils import getPackageDir
from lsst.utils.introspection import get_full_type_name

from ._read_curated_calibs import CuratedCalibration, read_all

# Imports needed only for type annotations; guarded to avoid import cycles
# and unnecessary runtime dependencies.
if TYPE_CHECKING:
    from astro_metadata_translator import ObservationInfo
    from lsst.daf.butler import Registry

    from .filters import FilterDefinitionCollection

_LOG = logging.getLogger(__name__)
# To be a standard text curated calibration means that we use a
# standard definition for the corresponding DatasetType: every entry maps a
# dataset type name to the dimensions and storage class used to register it.
StandardCuratedCalibrationDatasetTypes = {
    "defects": {"dimensions": ("instrument", "detector"), "storageClass": "Defects"},
    "qe_curve": {"dimensions": ("instrument", "detector"), "storageClass": "QECurve"},
    "crosstalk": {"dimensions": ("instrument", "detector"), "storageClass": "CrosstalkCalib"},
    "linearizer": {"dimensions": ("instrument", "detector"), "storageClass": "Linearizer"},
    "bfk": {"dimensions": ("instrument", "detector"), "storageClass": "BrighterFatterKernel"},
}
class Instrument(InstrumentBase):
    """Rubin-specified base for instrument-specific logic for the Gen3 Butler.

    Parameters
    ----------
    collection_prefix : `str`, optional
        Prefix for collection names to use instead of the instrument's own
        name.  This is primarily for use in simulated-data repositories, where
        the instrument name may not be necessary and/or sufficient to
        distinguish between collections.

    Notes
    -----
    Concrete instrument subclasses must have the same construction signature as
    the base class.
    """

    policyName: Optional[str] = None
    """Instrument specific name to use when locating a policy or configuration
    file in the file system."""

    obsDataPackage: Optional[str] = None
    """Name of the package containing the text curated calibration files.
    Usually a obs _data package.  If `None` no curated calibration files
    will be read. (`str`)"""

    standardCuratedDatasetTypes: AbstractSet[str] = frozenset(StandardCuratedCalibrationDatasetTypes)
    """The dataset types expected to be obtained from the obsDataPackage.

    These dataset types are all required to have standard definitions and
    must be known to the base class.  Clearing this list will prevent
    any of these calibrations from being stored.  If a dataset type is not
    known to a specific instrument it can still be included in this list
    since the data package is the source of truth. (`set` of `str`)
    """

    additionalCuratedDatasetTypes: AbstractSet[str] = frozenset()
    """Curated dataset types specific to this particular instrument that do
    not follow the standard organization found in obs data packages.

    These are the instrument-specific dataset types written by
    `writeAdditionalCuratedCalibrations` in addition to the calibrations
    found in obs data packages that follow the standard scheme.
    (`set` of `str`)"""

    @property
    @abstractmethod
    def filterDefinitions(self) -> FilterDefinitionCollection:
        """`~lsst.obs.base.FilterDefinitionCollection`, defining the filters
        for this instrument.
        """
        raise NotImplementedError()

    def __init__(self, collection_prefix: Optional[str] = None):
        super().__init__(collection_prefix=collection_prefix)

    @classmethod
    @lru_cache()
    def getCuratedCalibrationNames(cls) -> FrozenSet[str]:
        """Return the names of all the curated calibration dataset types.

        Returns
        -------
        names : `frozenset` of `str`
            The dataset type names of all curated calibrations.  This will
            include the standard curated calibrations even if the particular
            instrument does not support them.

        Notes
        -----
        The returned list does not indicate whether a particular dataset
        is present in the Butler repository, simply that these are the
        dataset types that are handled by ``writeCuratedCalibrations``.
        """
        # Camera is a special dataset type that is also handled as a
        # curated calibration.
        curated = {"camera"}

        # Make a cursory attempt to filter out curated dataset types
        # that are not present for this instrument.
        for datasetTypeName in cls.standardCuratedDatasetTypes:
            calibPath = cls._getSpecificCuratedCalibrationPath(datasetTypeName)
            if calibPath is not None:
                curated.add(datasetTypeName)

        curated.update(cls.additionalCuratedDatasetTypes)
        return frozenset(curated)

    @abstractmethod
    def getCamera(self) -> Camera:
        """Retrieve the cameraGeom representation of this instrument.

        This is a temporary API that should go away once ``obs`` packages have
        a standardized approach to writing versioned cameras to a Gen3 repo.
        """
        raise NotImplementedError()

    @classmethod
    @lru_cache()
    def getObsDataPackageDir(cls) -> Optional[str]:
        """The root of the obs data package that provides specializations for
        this instrument.

        Returns
        -------
        dir : `str` or `None`
            The root of the relevant obs data package, or `None` if this
            instrument does not have one.
        """
        if cls.obsDataPackage is None:
            return None
        return getPackageDir(cls.obsDataPackage)

    def _registerFilters(self, registry: Registry, update: bool = False) -> None:
        """Register the physical and abstract filter Dimension relationships.

        This should be called in the `register` implementation, within
        a transaction context manager block.

        Parameters
        ----------
        registry : `lsst.daf.butler.core.Registry`
            The registry to add dimensions to.
        update : `bool`, optional
            If `True` (`False` is default), update existing records if they
            differ from the new ones.
        """
        for filter_def in self.filterDefinitions:
            # Fix for undefined abstract filters causing trouble in the
            # registry: fall back to the physical filter name for the band.
            if filter_def.band is None:
                band = filter_def.physical_filter
            else:
                band = filter_def.band

            registry.syncDimensionData(
                "physical_filter",
                {"instrument": self.getName(), "name": filter_def.physical_filter, "band": band},
                update=update,
            )

    def writeCuratedCalibrations(
        self, butler: Butler, collection: Optional[str] = None, labels: Sequence[str] = ()
    ) -> None:
        """Write human-curated calibration Datasets to the given Butler with
        the appropriate validity ranges.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to use to store these calibrations.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range.  If this collection already exists,
            it must be a `~CollectionType.CALIBRATION` collection, and it must
            not have any datasets that would conflict with those inserted by
            this method.  If `None`, a collection name is worked out
            automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter.  If provided,
            these are inserted into the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well the
            `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``).  Usually this is
            just the name of the ticket on which the calibration collection is
            being created.

        Notes
        -----
        Expected to be called from subclasses.  The base method calls
        ``writeCameraGeom``, ``writeStandardTextCuratedCalibrations``,
        and ``writeAdditionalCuratedCalibrations``.
        """
        # Delegate registration of collections (and creating names for them)
        # to other methods so they can be called independently with the same
        # preconditions.  Collection registration is idempotent, so this is
        # safe, and while it adds a bit of overhead, as long as it's one
        # registration attempt per method (not per dataset or dataset type),
        # that's negligible.
        self.writeCameraGeom(butler, collection, labels=labels)
        self.writeStandardTextCuratedCalibrations(butler, collection, labels=labels)
        self.writeAdditionalCuratedCalibrations(butler, collection, labels=labels)

    def writeAdditionalCuratedCalibrations(
        self, butler: Butler, collection: Optional[str] = None, labels: Sequence[str] = ()
    ) -> None:
        """Write additional curated calibrations that might be instrument
        specific and are not part of the standard set.

        Default implementation does nothing.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to use to store these calibrations.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range.  If this collection already exists,
            it must be a `~CollectionType.CALIBRATION` collection, and it must
            not have any datasets that would conflict with those inserted by
            this method.  If `None`, a collection name is worked out
            automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter.  If provided,
            these are inserted into the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well the
            `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``).  Usually this is
            just the name of the ticket on which the calibration collection is
            being created.
        """
        return

    def writeCameraGeom(
        self, butler: Butler, collection: Optional[str] = None, labels: Sequence[str] = ()
    ) -> None:
        """Write the default camera geometry to the butler repository and
        associate it with the appropriate validity range in a calibration
        collection.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to use to store these calibrations.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range.  If this collection already exists,
            it must be a `~CollectionType.CALIBRATION` collection, and it must
            not have any datasets that would conflict with those inserted by
            this method.  If `None`, a collection name is worked out
            automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter.  If provided,
            these are inserted into the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well the
            `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``).  Usually this is
            just the name of the ticket on which the calibration collection is
            being created.
        """
        if collection is None:
            collection = self.makeCalibrationCollectionName(*labels)
        butler.registry.registerCollection(collection, type=CollectionType.CALIBRATION)
        run = self.makeUnboundedCalibrationRunName(*labels)
        butler.registry.registerRun(run)
        datasetType = DatasetType(
            "camera", ("instrument",), "Camera", isCalibration=True, universe=butler.registry.dimensions
        )
        butler.registry.registerDatasetType(datasetType)
        camera = self.getCamera()
        # The camera is valid for all time, so certify with an unbounded
        # timespan.
        ref = butler.put(camera, datasetType, {"instrument": self.getName()}, run=run)
        butler.registry.certify(collection, [ref], Timespan(begin=None, end=None))

    def writeStandardTextCuratedCalibrations(
        self, butler: Butler, collection: Optional[str] = None, labels: Sequence[str] = ()
    ) -> None:
        """Write the set of standardized curated text calibrations to
        the repository.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to receive these calibration datasets.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range.  If this collection already exists,
            it must be a `~CollectionType.CALIBRATION` collection, and it must
            not have any datasets that would conflict with those inserted by
            this method.  If `None`, a collection name is worked out
            automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter.  If provided,
            these are inserted into the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well the
            `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``).  Usually this is
            just the name of the ticket on which the calibration collection is
            being created.
        """
        if collection is None:
            collection = self.makeCalibrationCollectionName(*labels)
        butler.registry.registerCollection(collection, type=CollectionType.CALIBRATION)
        runs: Set[str] = set()
        for datasetTypeName in self.standardCuratedDatasetTypes:
            # We need to define the dataset types.
            if datasetTypeName not in StandardCuratedCalibrationDatasetTypes:
                raise ValueError(
                    f"DatasetType {datasetTypeName} not in understood list"
                    f" [{'.'.join(StandardCuratedCalibrationDatasetTypes)}]"
                )
            definition = StandardCuratedCalibrationDatasetTypes[datasetTypeName]
            datasetType = DatasetType(
                datasetTypeName,
                universe=butler.registry.dimensions,
                isCalibration=True,
                # MyPy should be able to figure out that the kwargs here have
                # the right types, but it can't.
                **definition,  # type: ignore
            )
            self._writeSpecificCuratedCalibrationDatasets(
                butler, datasetType, collection, runs=runs, labels=labels
            )

    @classmethod
    def _getSpecificCuratedCalibrationPath(cls, datasetTypeName: str) -> Optional[str]:
        """Return the path of the curated calibration directory.

        Parameters
        ----------
        datasetTypeName : `str`
            The name of the standard dataset type to find.

        Returns
        -------
        path : `str` or `None`
            The path to the standard curated data directory.  `None` if the
            dataset type is not found or the obs data package is not
            available.
        """
        data_package_dir = cls.getObsDataPackageDir()
        if data_package_dir is None:
            # If there is no data package then there can't be datasets.
            return None

        if cls.policyName is None:
            raise TypeError(f"Instrument {cls.getName()} has an obs data package but no policy name.")

        calibPath = os.path.join(data_package_dir, cls.policyName, datasetTypeName)

        if os.path.exists(calibPath):
            return calibPath

        return None

    def _writeSpecificCuratedCalibrationDatasets(
        self, butler: Butler, datasetType: DatasetType, collection: str, runs: Set[str], labels: Sequence[str]
    ) -> None:
        """Write standardized curated calibration datasets for this specific
        dataset type from an obs data package.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Gen3 butler in which to put the calibrations.
        datasetType : `lsst.daf.butler.DatasetType`
            Dataset type to be put.
        collection : `str`
            Name of the `~CollectionType.CALIBRATION` collection that
            associates all datasets with validity ranges.  Must have been
            registered prior to this call.
        runs : `set` [ `str` ]
            Names of runs that have already been registered by previous calls
            and need not be registered again.  Should be updated by this
            method as new runs are registered.
        labels : `Sequence` [ `str` ]
            Extra strings to include in run names when creating them from
            ``CALIBDATE`` metadata, via calls to `makeCuratedCalibrationName`.
            Usually this is the name of the ticket on which the calibration
            collection is being created.

        Notes
        -----
        This method scans the location defined in the ``obsDataPackageDir``
        class attribute for curated calibrations corresponding to the
        supplied dataset type.  The directory name in the data package must
        match the name of the dataset type.  They are assumed to use the
        standard layout and can be read by
        `~lsst.obs.base._read_curated_calibs.read_all` and provide standard
        metadata.
        """
        calibPath = self._getSpecificCuratedCalibrationPath(datasetType.name)
        if calibPath is None:
            return

        # Register the dataset type.
        butler.registry.registerDatasetType(datasetType)
        _LOG.info("Processing %r curated calibration", datasetType.name)

        # The class to use to read these calibrations comes from the storage
        # class.
        calib_class = datasetType.storageClass.pytype
        if not hasattr(calib_class, "readText"):
            raise ValueError(
                f"Curated calibration {datasetType.name} is using a class "
                f"{get_full_type_name(calib_class)} that lacks a readText class method"
            )
        calib_class = cast(Type[CuratedCalibration], calib_class)

        # Read calibs, registering a new run for each CALIBDATE as needed.
        # We try to avoid registering runs multiple times as an optimization
        # by putting them in the ``runs`` set that was passed in.
        camera = self.getCamera()
        calibsDict = read_all(calibPath, camera, calib_class)[0]  # second return is calib type
        datasetRecords = []
        for det in calibsDict:
            times = sorted(calibsDict[det])
            calibs = [calibsDict[det][time] for time in times]
            # Each calibration is valid from its own date until the next one;
            # the final entry is open-ended (None upper bound).
            atimes: list[Optional[astropy.time.Time]] = [
                astropy.time.Time(t, format="datetime", scale="utc") for t in times
            ]
            atimes += [None]
            for calib, beginTime, endTime in zip(calibs, atimes[:-1], atimes[1:]):
                md = calib.getMetadata()
                run = self.makeCuratedCalibrationRunName(md["CALIBDATE"], *labels)
                if run not in runs:
                    butler.registry.registerRun(run)
                    runs.add(run)
                dataId = DataCoordinate.standardize(
                    universe=butler.registry.dimensions,
                    instrument=self.getName(),
                    detector=md["DETECTOR"],
                )
                datasetRecords.append((calib, dataId, run, Timespan(beginTime, endTime)))

        # Second loop actually does the inserts and filesystem writes.  We
        # first do a butler.put on each dataset, inserting it into the run for
        # its calibDate.  We remember those refs and group them by timespan, so
        # we can vectorize the certify calls as much as possible.
        refsByTimespan = defaultdict(list)
        with butler.transaction():
            for calib, dataId, run, timespan in datasetRecords:
                refsByTimespan[timespan].append(butler.put(calib, datasetType, dataId, run=run))
            for timespan, refs in refsByTimespan.items():
                butler.registry.certify(collection, refs, timespan)
def makeExposureRecordFromObsInfo(
    obsInfo: ObservationInfo, universe: DimensionUniverse, **kwargs: Any
) -> DimensionRecord:
    """Construct an exposure DimensionRecord from
    `astro_metadata_translator.ObservationInfo`.

    Parameters
    ----------
    obsInfo : `astro_metadata_translator.ObservationInfo`
        A `~astro_metadata_translator.ObservationInfo` object corresponding to
        the exposure.
    universe : `DimensionUniverse`
        Set of all known dimensions.
    **kwargs
        Additional field values for this record.

    Returns
    -------
    record : `DimensionRecord`
        A record containing exposure metadata, suitable for insertion into
        a `Registry`.
    """
    dimension = universe["exposure"]

    # Some registries support additional items.
    supported = {meta.name for meta in dimension.metadata}

    ra, dec, sky_angle, azimuth, zenith_angle = (None, None, None, None, None)
    if obsInfo.tracking_radec is not None:
        icrs = obsInfo.tracking_radec.icrs
        ra = icrs.ra.degree
        dec = icrs.dec.degree
        # Only a sky-based rotation angle maps onto sky_angle.
        if obsInfo.boresight_rotation_coord == "sky":
            sky_angle = obsInfo.boresight_rotation_angle.degree
    if obsInfo.altaz_begin is not None:
        zenith_angle = obsInfo.altaz_begin.zen.degree
        azimuth = obsInfo.altaz_begin.az.degree

    # Optional fields that only newer dimension universes define.
    extras: Dict[str, Any] = {}
    for meta_key, info_key in (
        ("has_simulated", "has_simulated_content"),
        ("seq_start", "group_counter_start"),
        ("seq_end", "group_counter_end"),
    ):
        if meta_key in supported:
            extras[meta_key] = getattr(obsInfo, info_key)

    if "azimuth" in supported:
        extras["azimuth"] = azimuth

    return dimension.RecordClass(
        instrument=obsInfo.instrument,
        id=obsInfo.exposure_id,
        obs_id=obsInfo.observation_id,
        group_name=obsInfo.exposure_group,
        group_id=obsInfo.visit_id,
        datetime_begin=obsInfo.datetime_begin,
        datetime_end=obsInfo.datetime_end,
        exposure_time=obsInfo.exposure_time.to_value("s"),
        # We are not mandating that dark_time be calculable.
        dark_time=obsInfo.dark_time.to_value("s") if obsInfo.dark_time is not None else None,
        observation_type=obsInfo.observation_type,
        observation_reason=obsInfo.observation_reason,
        day_obs=obsInfo.observing_day,
        seq_num=obsInfo.observation_counter,
        physical_filter=obsInfo.physical_filter,
        science_program=obsInfo.science_program,
        target_name=obsInfo.object,
        tracking_ra=ra,
        tracking_dec=dec,
        sky_angle=sky_angle,
        zenith_angle=zenith_angle,
        **extras,
        **kwargs,
    )
def loadCamera(butler: Butler, dataId: DataId, *, collections: Any = None) -> Tuple[Camera, bool]:
    """Attempt to load versioned camera geometry from a butler, but fall back
    to obtaining a nominal camera from the `Instrument` class if that fails.

    Parameters
    ----------
    butler : `lsst.daf.butler.Butler`
        Butler instance to attempt to query for and load a ``camera`` dataset
        from.
    dataId : `dict` or `DataCoordinate`
        Data ID that identifies at least the ``instrument`` and ``exposure``
        dimensions.
    collections : Any, optional
        Collections to be searched, overriding ``self.butler.collections``.
        Can be any of the types supported by the ``collections`` argument
        to butler construction.

    Returns
    -------
    camera : `lsst.afw.cameraGeom.Camera`
        Camera object.
    versioned : `bool`
        If `True`, the camera was obtained from the butler and should represent
        a versioned camera from a calibration repository.  If `False`, no
        camera datasets were found, and the returned camera was produced by
        instantiating the appropriate `Instrument` class and calling
        `Instrument.getCamera`.

    Raises
    ------
    LookupError
        Raised when ``dataId`` does not specify a valid data ID.
    """
    if collections is None:
        collections = butler.collections
    # Registry would do data ID expansion internally if we didn't do it first,
    # but we might want an expanded data ID ourselves later, so we do it here
    # to ensure it only happens once.
    # This will also catch problems with the data ID not having keys we need.
    try:
        dataId = butler.registry.expandDataId(dataId, graph=butler.registry.dimensions["exposure"].graph)
    except DataIdError as exc:
        raise LookupError(str(exc)) from exc
    try:
        cameraRef = butler.get("camera", dataId=dataId, collections=collections)
        return cameraRef, True
    except LookupError:
        # No versioned camera in the repository; fall back to the nominal one.
        pass
    # We know an instrument data ID is a value, but MyPy doesn't.
    instrument = Instrument.fromName(dataId["instrument"], butler.registry)  # type: ignore
    assert isinstance(instrument, Instrument)  # for mypy
    return instrument.getCamera(), False