Coverage for python/lsst/obs/base/_instrument.py: 22%
164 statements
« prev ^ index » next — coverage.py v6.5.0, created at 2022-11-06 13:04 -0800
1# This file is part of obs_base.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <http://www.gnu.org/licenses/>.
22from __future__ import annotations
24__all__ = ("Instrument", "makeExposureRecordFromObsInfo", "loadCamera")
26import os.path
27from abc import abstractmethod
28from collections import defaultdict
29from functools import lru_cache
30from typing import TYPE_CHECKING, AbstractSet, Any, Dict, FrozenSet, Optional, Sequence, Set, Tuple
32import astropy.time
33from lsst.afw.cameraGeom import Camera
34from lsst.daf.butler import (
35 Butler,
36 CollectionType,
37 DataCoordinate,
38 DataId,
39 DatasetType,
40 DimensionRecord,
41 DimensionUniverse,
42 Timespan,
43)
44from lsst.daf.butler.registry import DataIdError
45from lsst.pipe.base import Instrument as InstrumentBase
46from lsst.utils import getPackageDir
48if TYPE_CHECKING: 48 ↛ 49line 48 didn't jump to line 49, because the condition on line 48 was never true
49 from astro_metadata_translator import ObservationInfo
50 from lsst.daf.butler import Registry
52 from .filters import FilterDefinitionCollection
53 from .gen2to3 import TranslatorFactory
# To be a standard text curated calibration means that we use a
# standard definition for the corresponding DatasetType.  All standard
# curated calibrations share the same dimensions; only the storage class
# differs per dataset type.
StandardCuratedCalibrationDatasetTypes = {
    name: {"dimensions": ("instrument", "detector"), "storageClass": storageClass}
    for name, storageClass in (
        ("defects", "Defects"),
        ("qe_curve", "QECurve"),
        ("crosstalk", "CrosstalkCalib"),
        ("linearizer", "Linearizer"),
        ("bfk", "BrighterFatterKernel"),
    )
}
class Instrument(InstrumentBase):
    """Rubin-specified base for instrument-specific logic for the Gen3 Butler.

    Parameters
    ----------
    collection_prefix : `str`, optional
        Prefix for collection names to use instead of the instrument's own
        name.  This is primarily for use in simulated-data repositories, where
        the instrument name may not be necessary and/or sufficient to
        distinguish between collections.

    Notes
    -----
    Concrete instrument subclasses must have the same construction signature as
    the base class.
    """

    policyName: Optional[str] = None
    """Instrument specific name to use when locating a policy or configuration
    file in the file system."""

    obsDataPackage: Optional[str] = None
    """Name of the package containing the text curated calibration files.
    Usually a obs _data package. If `None` no curated calibration files
    will be read. (`str`)"""

    standardCuratedDatasetTypes: AbstractSet[str] = frozenset(StandardCuratedCalibrationDatasetTypes)
    """The dataset types expected to be obtained from the obsDataPackage.

    These dataset types are all required to have standard definitions and
    must be known to the base class. Clearing this list will prevent
    any of these calibrations from being stored. If a dataset type is not
    known to a specific instrument it can still be included in this list
    since the data package is the source of truth. (`set` of `str`)
    """

    additionalCuratedDatasetTypes: AbstractSet[str] = frozenset()
    """Curated dataset types specific to this particular instrument that do
    not follow the standard organization found in obs data packages.

    These are the instrument-specific dataset types written by
    `writeAdditionalCuratedCalibrations` in addition to the calibrations
    found in obs data packages that follow the standard scheme.
    (`set` of `str`)"""

    @property
    @abstractmethod
    def filterDefinitions(self) -> FilterDefinitionCollection:
        """`~lsst.obs.base.FilterDefinitionCollection`, defining the filters
        for this instrument.
        """
        raise NotImplementedError()

    def __init__(self, collection_prefix: Optional[str] = None):
        super().__init__(collection_prefix=collection_prefix)

    @classmethod
    @lru_cache()
    def getCuratedCalibrationNames(cls) -> FrozenSet[str]:
        """Return the names of all the curated calibration dataset types.

        Returns
        -------
        names : `frozenset` of `str`
            The dataset type names of all curated calibrations. This will
            include the standard curated calibrations even if the particular
            instrument does not support them.

        Notes
        -----
        The returned list does not indicate whether a particular dataset
        is present in the Butler repository, simply that these are the
        dataset types that are handled by ``writeCuratedCalibrations``.
        """
        # Camera is a special dataset type that is also handled as a
        # curated calibration.
        curated = {"camera"}

        # Make a cursory attempt to filter out curated dataset types
        # that are not present for this instrument: only keep those whose
        # directory actually exists in the obs data package.
        for datasetTypeName in cls.standardCuratedDatasetTypes:
            calibPath = cls._getSpecificCuratedCalibrationPath(datasetTypeName)
            if calibPath is not None:
                curated.add(datasetTypeName)

        curated.update(cls.additionalCuratedDatasetTypes)
        return frozenset(curated)

    @abstractmethod
    def getCamera(self) -> Camera:
        """Retrieve the cameraGeom representation of this instrument.

        This is a temporary API that should go away once ``obs`` packages have
        a standardized approach to writing versioned cameras to a Gen3 repo.
        """
        raise NotImplementedError()

    @classmethod
    @lru_cache()
    def getObsDataPackageDir(cls) -> Optional[str]:
        """The root of the obs data package that provides specializations for
        this instrument.

        Returns
        -------
        dir : `str` or `None`
            The root of the relevant obs data package, or `None` if this
            instrument does not have one.
        """
        if cls.obsDataPackage is None:
            return None
        return getPackageDir(cls.obsDataPackage)

    def _registerFilters(self, registry: Registry, update: bool = False) -> None:
        """Register the physical and abstract filter Dimension relationships.
        This should be called in the `register` implementation, within
        a transaction context manager block.

        Parameters
        ----------
        registry : `lsst.daf.butler.core.Registry`
            The registry to add dimensions to.
        update : `bool`, optional
            If `True` (`False` is default), update existing records if they
            differ from the new ones.
        """
        for filter_def in self.filterDefinitions:
            # Fix for undefined abstract filters causing trouble in the
            # registry: fall back to the physical filter name as the band.
            if filter_def.band is None:
                band = filter_def.physical_filter
            else:
                band = filter_def.band

            registry.syncDimensionData(
                "physical_filter",
                {"instrument": self.getName(), "name": filter_def.physical_filter, "band": band},
                update=update,
            )

    def writeCuratedCalibrations(
        self, butler: Butler, collection: Optional[str] = None, labels: Sequence[str] = ()
    ) -> None:
        """Write human-curated calibration Datasets to the given Butler with
        the appropriate validity ranges.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to use to store these calibrations.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range. If this collection already exists,
            it must be a `~CollectionType.CALIBRATION` collection, and it must
            not have any datasets that would conflict with those inserted by
            this method. If `None`, a collection name is worked out
            automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter. If provided,
            these are inserted into the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well the
            `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``). Usually this is
            just the name of the ticket on which the calibration collection is
            being created.

        Notes
        -----
        Expected to be called from subclasses. The base method calls
        ``writeCameraGeom``, ``writeStandardTextCuratedCalibrations``,
        and ``writeAdditionalCuratedCalibrations``.
        """
        # Delegate registration of collections (and creating names for them)
        # to other methods so they can be called independently with the same
        # preconditions. Collection registration is idempotent, so this is
        # safe, and while it adds a bit of overhead, as long as it's one
        # registration attempt per method (not per dataset or dataset type),
        # that's negligible.
        self.writeCameraGeom(butler, collection, labels=labels)
        self.writeStandardTextCuratedCalibrations(butler, collection, labels=labels)
        self.writeAdditionalCuratedCalibrations(butler, collection, labels=labels)

    def writeAdditionalCuratedCalibrations(
        self, butler: Butler, collection: Optional[str] = None, labels: Sequence[str] = ()
    ) -> None:
        """Write additional curated calibrations that might be instrument
        specific and are not part of the standard set.

        Default implementation does nothing.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to use to store these calibrations.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range. If this collection already exists,
            it must be a `~CollectionType.CALIBRATION` collection, and it must
            not have any datasets that would conflict with those inserted by
            this method. If `None`, a collection name is worked out
            automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter. If provided,
            these are inserted into the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well the
            `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``). Usually this is
            just the name of the ticket on which the calibration collection is
            being created.
        """
        return

    def writeCameraGeom(
        self, butler: Butler, collection: Optional[str] = None, labels: Sequence[str] = ()
    ) -> None:
        """Write the default camera geometry to the butler repository and
        associate it with the appropriate validity range in a calibration
        collection.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to use to store these calibrations.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range. If this collection already exists,
            it must be a `~CollectionType.CALIBRATION` collection, and it must
            not have any datasets that would conflict with those inserted by
            this method. If `None`, a collection name is worked out
            automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter. If provided,
            these are inserted into the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well the
            `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``). Usually this is
            just the name of the ticket on which the calibration collection is
            being created.
        """
        if collection is None:
            collection = self.makeCalibrationCollectionName(*labels)
        butler.registry.registerCollection(collection, type=CollectionType.CALIBRATION)
        run = self.makeUnboundedCalibrationRunName(*labels)
        butler.registry.registerRun(run)
        datasetType = DatasetType(
            "camera", ("instrument",), "Camera", isCalibration=True, universe=butler.registry.dimensions
        )
        butler.registry.registerDatasetType(datasetType)
        camera = self.getCamera()
        ref = butler.put(camera, datasetType, {"instrument": self.getName()}, run=run)
        # An unbounded timespan: the nominal camera is valid forever.
        butler.registry.certify(collection, [ref], Timespan(begin=None, end=None))

    def writeStandardTextCuratedCalibrations(
        self, butler: Butler, collection: Optional[str] = None, labels: Sequence[str] = ()
    ) -> None:
        """Write the set of standardized curated text calibrations to
        the repository.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to receive these calibration datasets.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range. If this collection already exists,
            it must be a `~CollectionType.CALIBRATION` collection, and it must
            not have any datasets that would conflict with those inserted by
            this method. If `None`, a collection name is worked out
            automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter. If provided,
            these are inserted into the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well the
            `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``). Usually this is
            just the name of the ticket on which the calibration collection is
            being created.
        """
        if collection is None:
            collection = self.makeCalibrationCollectionName(*labels)
        butler.registry.registerCollection(collection, type=CollectionType.CALIBRATION)
        runs: Set[str] = set()
        for datasetTypeName in self.standardCuratedDatasetTypes:
            # We need to define the dataset types.
            if datasetTypeName not in StandardCuratedCalibrationDatasetTypes:
                raise ValueError(
                    f"DatasetType {datasetTypeName} not in understood list"
                    f" [{', '.join(StandardCuratedCalibrationDatasetTypes)}]"
                )
            definition = StandardCuratedCalibrationDatasetTypes[datasetTypeName]
            datasetType = DatasetType(
                datasetTypeName,
                universe=butler.registry.dimensions,
                isCalibration=True,
                # MyPy should be able to figure out that the kwargs here have
                # the right types, but it can't.
                **definition,  # type: ignore
            )
            self._writeSpecificCuratedCalibrationDatasets(
                butler, datasetType, collection, runs=runs, labels=labels
            )

    @classmethod
    def _getSpecificCuratedCalibrationPath(cls, datasetTypeName: str) -> Optional[str]:
        """Return the path of the curated calibration directory.

        Parameters
        ----------
        datasetTypeName : `str`
            The name of the standard dataset type to find.

        Returns
        -------
        path : `str` or `None`
            The path to the standard curated data directory. `None` if the
            dataset type is not found or the obs data package is not
            available.
        """
        data_package_dir = cls.getObsDataPackageDir()
        if data_package_dir is None:
            # If there is no data package then there can't be datasets.
            return None

        if cls.policyName is None:
            raise TypeError(f"Instrument {cls.getName()} has an obs data package but no policy name.")

        calibPath = os.path.join(data_package_dir, cls.policyName, datasetTypeName)

        if os.path.exists(calibPath):
            return calibPath

        return None

    def _writeSpecificCuratedCalibrationDatasets(
        self, butler: Butler, datasetType: DatasetType, collection: str, runs: Set[str], labels: Sequence[str]
    ) -> None:
        """Write standardized curated calibration datasets for this specific
        dataset type from an obs data package.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Gen3 butler in which to put the calibrations.
        datasetType : `lsst.daf.butler.DatasetType`
            Dataset type to be put.
        collection : `str`
            Name of the `~CollectionType.CALIBRATION` collection that
            associates all datasets with validity ranges. Must have been
            registered prior to this call.
        runs : `set` [ `str` ]
            Names of runs that have already been registered by previous calls
            and need not be registered again. Should be updated by this
            method as new runs are registered.
        labels : `Sequence` [ `str` ]
            Extra strings to include in run names when creating them from
            ``CALIBDATE`` metadata, via calls to `makeCuratedCalibrationName`.
            Usually this is the name of the ticket on which the calibration
            collection is being created.

        Notes
        -----
        This method scans the location defined in the ``obsDataPackageDir``
        class attribute for curated calibrations corresponding to the
        supplied dataset type. The directory name in the data package must
        match the name of the dataset type. They are assumed to use the
        standard layout and can be read by
        `~lsst.pipe.tasks.read_curated_calibs.read_all` and provide standard
        metadata.
        """
        calibPath = self._getSpecificCuratedCalibrationPath(datasetType.name)
        if calibPath is None:
            return

        # Register the dataset type.
        butler.registry.registerDatasetType(datasetType)

        # obs_base can't depend on pipe_tasks but concrete obs packages
        # can -- we therefore have to defer import.
        from lsst.pipe.tasks.read_curated_calibs import read_all

        # Read calibs, registering a new run for each CALIBDATE as needed.
        # We try to avoid registering runs multiple times as an optimization
        # by putting them in the ``runs`` set that was passed in.
        camera = self.getCamera()
        calibsDict = read_all(calibPath, camera)[0]  # second return is calib type
        datasetRecords = []
        for det in calibsDict:
            times = sorted(calibsDict[det])
            calibs = [calibsDict[det][time] for time in times]
            times = [astropy.time.Time(t, format="datetime", scale="utc") for t in times]
            # Each calib is valid from its own date until the next one's
            # date; the final calib has an open-ended validity range.
            times += [None]
            for calib, beginTime, endTime in zip(calibs, times[:-1], times[1:]):
                md = calib.getMetadata()
                run = self.makeCuratedCalibrationRunName(md["CALIBDATE"], *labels)
                if run not in runs:
                    butler.registry.registerRun(run)
                    runs.add(run)
                dataId = DataCoordinate.standardize(
                    universe=butler.registry.dimensions,
                    instrument=self.getName(),
                    detector=md["DETECTOR"],
                )
                datasetRecords.append((calib, dataId, run, Timespan(beginTime, endTime)))

        # Second loop actually does the inserts and filesystem writes. We
        # first do a butler.put on each dataset, inserting it into the run for
        # its calibDate. We remember those refs and group them by timespan, so
        # we can vectorize the certify calls as much as possible.
        refsByTimespan = defaultdict(list)
        with butler.transaction():
            for calib, dataId, run, timespan in datasetRecords:
                refsByTimespan[timespan].append(butler.put(calib, datasetType, dataId, run=run))
            for timespan, refs in refsByTimespan.items():
                butler.registry.certify(collection, refs, timespan)

    @abstractmethod
    def makeDataIdTranslatorFactory(self) -> TranslatorFactory:
        """Return a factory for creating Gen2->Gen3 data ID translators,
        specialized for this instrument.

        Derived class implementations should generally call
        `TranslatorFactory.addGenericInstrumentRules` with appropriate
        arguments, but are not required to (and may not be able to if their
        Gen2 raw data IDs are sufficiently different from the HSC/DECam/CFHT
        norm).

        Returns
        -------
        factory : `TranslatorFactory`.
            Factory for `Translator` objects.
        """
        raise NotImplementedError("Must be implemented by derived classes.")
def makeExposureRecordFromObsInfo(
    obsInfo: ObservationInfo, universe: DimensionUniverse, **kwargs: Any
) -> DimensionRecord:
    """Construct an exposure DimensionRecord from
    `astro_metadata_translator.ObservationInfo`.

    Parameters
    ----------
    obsInfo : `astro_metadata_translator.ObservationInfo`
        A `~astro_metadata_translator.ObservationInfo` object corresponding to
        the exposure.
    universe : `DimensionUniverse`
        Set of all known dimensions.
    **kwargs
        Additional field values for this record.

    Returns
    -------
    record : `DimensionRecord`
        A record containing exposure metadata, suitable for insertion into
        a `Registry`.
    """
    dimension = universe["exposure"]

    # Some registries support additional items; collect the metadata field
    # names this one actually understands so we only set supported fields.
    supported = {meta.name for meta in dimension.metadata}

    # Pointing-related quantities default to None and are filled in only
    # when the corresponding ObservationInfo attributes are present.
    ra = dec = sky_angle = azimuth = zenith_angle = None
    if obsInfo.tracking_radec is not None:
        icrs = obsInfo.tracking_radec.icrs
        ra = icrs.ra.degree
        dec = icrs.dec.degree
        if obsInfo.boresight_rotation_coord == "sky":
            sky_angle = obsInfo.boresight_rotation_angle.degree
    if obsInfo.altaz_begin is not None:
        zenith_angle = obsInfo.altaz_begin.zen.degree
        azimuth = obsInfo.altaz_begin.az.degree

    # Optional record fields, included only when this registry supports them.
    extras: Dict[str, Any] = {
        record_field: getattr(obsInfo, obs_attr)
        for record_field, obs_attr in (
            ("has_simulated", "has_simulated_content"),
            ("seq_start", "group_counter_start"),
            ("seq_end", "group_counter_end"),
        )
        if record_field in supported
    }
    if "azimuth" in supported:
        extras["azimuth"] = azimuth

    return dimension.RecordClass(
        instrument=obsInfo.instrument,
        id=obsInfo.exposure_id,
        obs_id=obsInfo.observation_id,
        group_name=obsInfo.exposure_group,
        group_id=obsInfo.visit_id,
        datetime_begin=obsInfo.datetime_begin,
        datetime_end=obsInfo.datetime_end,
        exposure_time=obsInfo.exposure_time.to_value("s"),
        # We are not mandating that dark_time be calculable.
        dark_time=obsInfo.dark_time.to_value("s") if obsInfo.dark_time is not None else None,
        observation_type=obsInfo.observation_type,
        observation_reason=obsInfo.observation_reason,
        day_obs=obsInfo.observing_day,
        seq_num=obsInfo.observation_counter,
        physical_filter=obsInfo.physical_filter,
        science_program=obsInfo.science_program,
        target_name=obsInfo.object,
        tracking_ra=ra,
        tracking_dec=dec,
        sky_angle=sky_angle,
        zenith_angle=zenith_angle,
        **extras,
        **kwargs,
    )
def loadCamera(butler: Butler, dataId: DataId, *, collections: Any = None) -> Tuple[Camera, bool]:
    """Attempt to load versioned camera geometry from a butler, but fall back
    to obtaining a nominal camera from the `Instrument` class if that fails.

    Parameters
    ----------
    butler : `lsst.daf.butler.Butler`
        Butler instance to attempt to query for and load a ``camera`` dataset
        from.
    dataId : `dict` or `DataCoordinate`
        Data ID that identifies at least the ``instrument`` and ``exposure``
        dimensions.
    collections : Any, optional
        Collections to be searched, overriding ``self.butler.collections``.
        Can be any of the types supported by the ``collections`` argument
        to butler construction.

    Returns
    -------
    camera : `lsst.afw.cameraGeom.Camera`
        Camera object.
    versioned : `bool`
        If `True`, the camera was obtained from the butler and should represent
        a versioned camera from a calibration repository. If `False`, no
        camera datasets were found, and the returned camera was produced by
        instantiating the appropriate `Instrument` class and calling
        `Instrument.getCamera`.

    Raises
    ------
    LookupError
        Raised when ``dataId`` does not specify a valid data ID.
    """
    if collections is None:
        collections = butler.collections
    # Registry would do data ID expansion internally if we didn't do it first,
    # but we might want an expanded data ID ourselves later, so we do it here
    # to ensure it only happens once.
    # This will also catch problems with the data ID not having keys we need.
    try:
        dataId = butler.registry.expandDataId(dataId, graph=butler.registry.dimensions["exposure"].graph)
    except DataIdError as exc:
        raise LookupError(str(exc)) from exc
    try:
        versionedCamera = butler.get("camera", dataId=dataId, collections=collections)
    except LookupError:
        # No versioned camera in the repository; fall back to the nominal
        # camera provided by the Instrument class itself.
        # We know an instrument data ID is a value, but MyPy doesn't.
        instrument = Instrument.fromName(dataId["instrument"], butler.registry)  # type: ignore
        assert isinstance(instrument, Instrument)  # for mypy
        return instrument.getCamera(), False
    return versionedCamera, True