Coverage for python/lsst/obs/base/_instrument.py: 26%
Shortcuts on this page:
r m x p toggle line displays
j k next/prev highlighted chunk
0 (zero) top of page
1 (one) first highlighted chunk
1# This file is part of obs_base.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <http://www.gnu.org/licenses/>.
22from __future__ import annotations
24__all__ = ("Instrument", "makeExposureRecordFromObsInfo", "loadCamera")
26import datetime
27import os.path
28from abc import ABCMeta, abstractmethod
29from collections import defaultdict
30from functools import lru_cache
31from typing import TYPE_CHECKING, AbstractSet, Any, FrozenSet, Optional, Sequence, Set, Tuple, Type, Union
33import astropy.time
34from lsst.afw.cameraGeom import Camera
35from lsst.daf.butler import (
36 Butler,
37 CollectionType,
38 DataCoordinate,
39 DataId,
40 DatasetType,
41 DimensionRecord,
42 DimensionUniverse,
43 Timespan,
44)
45from lsst.daf.butler.registry import DataIdError
46from lsst.utils import doImportType, getPackageDir
if TYPE_CHECKING:
49 from astro_metadata_translator import ObservationInfo
50 from lsst.daf.butler import Registry
51 from lsst.pex.config import Config
53 from ._fitsRawFormatterBase import FitsRawFormatterBase
54 from .filters import FilterDefinitionCollection
55 from .gen2to3 import TranslatorFactory
# To be a standard text curated calibration means that we use a
# standard definition for the corresponding DatasetType.
StandardCuratedCalibrationDatasetTypes = {
    "defects": {"dimensions": ("instrument", "detector"), "storageClass": "Defects"},
    "qe_curve": {"dimensions": ("instrument", "detector"), "storageClass": "QECurve"},
    "crosstalk": {"dimensions": ("instrument", "detector"), "storageClass": "CrosstalkCalib"},
    "linearizer": {"dimensions": ("instrument", "detector"), "storageClass": "Linearizer"},
    "bfk": {"dimensions": ("instrument", "detector"), "storageClass": "BrighterFatterKernel"},
}


class Instrument(metaclass=ABCMeta):
    """Base class for instrument-specific logic for the Gen3 Butler.

    Parameters
    ----------
    collection_prefix : `str`, optional
        Prefix for collection names to use instead of the instrument's own
        name.  This is primarily for use in simulated-data repositories, where
        the instrument name may not be necessary and/or sufficient to
        distinguish between collections.

    Notes
    -----
    Concrete instrument subclasses must have the same construction signature as
    the base class.
    """

    configPaths: Sequence[str] = ()
    """Paths to config files to read for specific Tasks.

    The paths in this list should contain files of the form `task.py`, for
    each of the Tasks that requires special configuration.
    """

    policyName: Optional[str] = None
    """Instrument specific name to use when locating a policy or configuration
    file in the file system."""

    obsDataPackage: Optional[str] = None
    """Name of the package containing the text curated calibration files.
    Usually an obs _data package.  If `None` no curated calibration files
    will be read. (`str`)"""

    standardCuratedDatasetTypes: AbstractSet[str] = frozenset(StandardCuratedCalibrationDatasetTypes)
    """The dataset types expected to be obtained from the obsDataPackage.

    These dataset types are all required to have standard definitions and
    must be known to the base class.  Clearing this list will prevent
    any of these calibrations from being stored.  If a dataset type is not
    known to a specific instrument it can still be included in this list
    since the data package is the source of truth. (`set` of `str`)
    """

    additionalCuratedDatasetTypes: AbstractSet[str] = frozenset()
    """Curated dataset types specific to this particular instrument that do
    not follow the standard organization found in obs data packages.

    These are the instrument-specific dataset types written by
    `writeAdditionalCuratedCalibrations` in addition to the calibrations
    found in obs data packages that follow the standard scheme.
    (`set` of `str`)"""

    @property
    @abstractmethod
    def filterDefinitions(self) -> FilterDefinitionCollection:
        """`~lsst.obs.base.FilterDefinitionCollection`, defining the filters
        for this instrument.
        """
        raise NotImplementedError()

    def __init__(self, collection_prefix: Optional[str] = None):
        self.filterDefinitions.reset()
        self.filterDefinitions.defineFilters()
        # Default the collection prefix to the instrument's own (dimension)
        # name unless the caller explicitly wants something else.
        if collection_prefix is None:
            collection_prefix = self.getName()
        self.collection_prefix = collection_prefix

    @classmethod
    @abstractmethod
    def getName(cls) -> str:
        """Return the short (dimension) name for this instrument.

        This is not (in general) the same as the class name - it's what is used
        as the value of the "instrument" field in data IDs, and is usually an
        abbreviation of the full name.
        """
        raise NotImplementedError()

    @classmethod
    @lru_cache()
    def getCuratedCalibrationNames(cls) -> FrozenSet[str]:
        """Return the names of all the curated calibration dataset types.

        Returns
        -------
        names : `frozenset` of `str`
            The dataset type names of all curated calibrations.  This will
            include the standard curated calibrations even if the particular
            instrument does not support them.

        Notes
        -----
        The returned list does not indicate whether a particular dataset
        is present in the Butler repository, simply that these are the
        dataset types that are handled by ``writeCuratedCalibrations``.
        """
        # Camera is a special dataset type that is also handled as a
        # curated calibration.
        curated = {"camera"}

        # Make a cursory attempt to filter out curated dataset types
        # that are not present for this instrument.
        for datasetTypeName in cls.standardCuratedDatasetTypes:
            calibPath = cls._getSpecificCuratedCalibrationPath(datasetTypeName)
            if calibPath is not None:
                curated.add(datasetTypeName)

        curated.update(cls.additionalCuratedDatasetTypes)
        return frozenset(curated)

    @abstractmethod
    def getCamera(self) -> Camera:
        """Retrieve the cameraGeom representation of this instrument.

        This is a temporary API that should go away once ``obs`` packages have
        a standardized approach to writing versioned cameras to a Gen3 repo.
        """
        raise NotImplementedError()

    @abstractmethod
    def register(self, registry: Registry, *, update: bool = False) -> None:
        """Insert instrument, physical_filter, and detector entries into a
        `Registry`.

        Parameters
        ----------
        registry : `lsst.daf.butler.Registry`
            Registry client for the data repository to modify.
        update : `bool`, optional
            If `True` (`False` is default), update existing records if they
            differ from the new ones.

        Raises
        ------
        lsst.daf.butler.registry.ConflictingDefinitionError
            Raised if any existing record has the same key but a different
            definition as one being registered.

        Notes
        -----
        New detectors and physical filters can always be added by calling this
        method multiple times, as long as no existing records have changed (if
        existing records have changed, ``update=True`` must be used).  Old
        records can never be removed by this method.

        Implementations should guarantee that registration is atomic (the
        registry should not be modified if any error occurs) and idempotent at
        the level of individual dimension entries; new detectors and filters
        should be added, but changes to any existing record should not be.
        This can generally be achieved via a block like::

            with registry.transaction():
                registry.syncDimensionData("instrument", ...)
                registry.syncDimensionData("detector", ...)
                self.registerFilters(registry)

        """
        raise NotImplementedError()

    @classmethod
    @lru_cache()
    def getObsDataPackageDir(cls) -> Optional[str]:
        """The root of the obs data package that provides specializations for
        this instrument.

        Returns
        -------
        dir : `str` or `None`
            The root of the relevant obs data package, or `None` if this
            instrument does not have one.
        """
        if cls.obsDataPackage is None:
            return None
        return getPackageDir(cls.obsDataPackage)

    @staticmethod
    def fromName(name: str, registry: Registry, collection_prefix: Optional[str] = None) -> Instrument:
        """Given an instrument name and a butler, retrieve a corresponding
        instantiated instrument object.

        Parameters
        ----------
        name : `str`
            Name of the instrument (must match the return value of `getName`).
        registry : `lsst.daf.butler.Registry`
            Butler registry to query to find the information.
        collection_prefix : `str`, optional
            Prefix for collection names to use instead of the instrument's own
            name.  This is primarily for use in simulated-data repositories,
            where the instrument name may not be necessary and/or sufficient to
            distinguish between collections.

        Returns
        -------
        instrument : `Instrument`
            An instance of the relevant `Instrument`.

        Notes
        -----
        The instrument must be registered in the corresponding butler.

        Raises
        ------
        LookupError
            Raised if the instrument is not known to the supplied registry.
        ModuleNotFoundError
            Raised if the class could not be imported.  This could mean
            that the relevant obs package has not been setup.
        TypeError
            Raised if the class name retrieved is not a string or the imported
            symbol is not an `Instrument` subclass.
        """
        try:
            records = list(registry.queryDimensionRecords("instrument", instrument=name))
        except DataIdError:
            # An unknown instrument in the data ID is reported the same way
            # as an instrument with no records.
            records = None
        if not records:
            raise LookupError(f"No registered instrument with name '{name}'.")
        cls_name = records[0].class_name
        if not isinstance(cls_name, str):
            raise TypeError(
                f"Unexpected class name retrieved from {name} instrument dimension (got {cls_name})"
            )
        instrument_cls: type = doImportType(cls_name)
        if not issubclass(instrument_cls, Instrument):
            raise TypeError(
                f"{instrument_cls!r}, obtained from importing {cls_name}, is not an Instrument subclass."
            )
        return instrument_cls(collection_prefix=collection_prefix)

    @staticmethod
    def importAll(registry: Registry) -> None:
        """Import all the instruments known to this registry.

        This will ensure that all metadata translators have been registered.

        Parameters
        ----------
        registry : `lsst.daf.butler.Registry`
            Butler registry to query to find the information.

        Notes
        -----
        It is allowed for a particular instrument class to fail on import.
        This might simply indicate that a particular obs package has
        not been setup.
        """
        records = list(registry.queryDimensionRecords("instrument"))
        for record in records:
            cls = record.class_name
            try:
                doImportType(cls)
            except Exception:
                # Deliberate best-effort: a missing obs package is not an
                # error here (see Notes above).
                pass

    def _registerFilters(self, registry: Registry, update: bool = False) -> None:
        """Register the physical and abstract filter Dimension relationships.
        This should be called in the `register` implementation, within
        a transaction context manager block.

        Parameters
        ----------
        registry : `lsst.daf.butler.core.Registry`
            The registry to add dimensions to.
        update : `bool`, optional
            If `True` (`False` is default), update existing records if they
            differ from the new ones.
        """
        for filter_def in self.filterDefinitions:
            # Fix for undefined abstract filters causing trouble in the
            # registry: fall back to the physical filter name as the band.
            if filter_def.band is None:
                band = filter_def.physical_filter
            else:
                band = filter_def.band

            registry.syncDimensionData(
                "physical_filter",
                {"instrument": self.getName(), "name": filter_def.physical_filter, "band": band},
                update=update,
            )

    @abstractmethod
    def getRawFormatter(self, dataId: DataId) -> Type[FitsRawFormatterBase]:
        """Return the Formatter class that should be used to read a particular
        raw file.

        Parameters
        ----------
        dataId : `DataId`
            Dimension-based ID for the raw file or files being ingested.

        Returns
        -------
        formatter : `FitsRawFormatterBase` class
            Class to be used that reads the file into an
            `lsst.afw.image.Exposure` instance.
        """
        raise NotImplementedError()

    def applyConfigOverrides(self, name: str, config: Config) -> None:
        """Apply instrument-specific overrides for a task config.

        Parameters
        ----------
        name : `str`
            Name of the object being configured; typically the _DefaultName
            of a Task.
        config : `lsst.pex.config.Config`
            Config instance to which overrides should be applied.
        """
        for root in self.configPaths:
            path = os.path.join(root, f"{name}.py")
            if os.path.exists(path):
                config.load(path)

    def writeCuratedCalibrations(
        self, butler: Butler, collection: Optional[str] = None, labels: Sequence[str] = ()
    ) -> None:
        """Write human-curated calibration Datasets to the given Butler with
        the appropriate validity ranges.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to use to store these calibrations.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range.  If this collection already exists,
            it must be a `~CollectionType.CALIBRATION` collection, and it must
            not have any datasets that would conflict with those inserted by
            this method.  If `None`, a collection name is worked out
            automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter.  If provided,
            these are inserted into the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well the
            `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``).  Usually this is
            just the name of the ticket on which the calibration collection is
            being created.

        Notes
        -----
        Expected to be called from subclasses.  The base method calls
        ``writeCameraGeom``, ``writeStandardTextCuratedCalibrations``,
        and ``writeAdditionalCuratedCalibrations``.
        """
        # Delegate registration of collections (and creating names for them)
        # to other methods so they can be called independently with the same
        # preconditions.  Collection registration is idempotent, so this is
        # safe, and while it adds a bit of overhead, as long as it's one
        # registration attempt per method (not per dataset or dataset type),
        # that's negligible.
        self.writeCameraGeom(butler, collection, labels=labels)
        self.writeStandardTextCuratedCalibrations(butler, collection, labels=labels)
        self.writeAdditionalCuratedCalibrations(butler, collection, labels=labels)

    def writeAdditionalCuratedCalibrations(
        self, butler: Butler, collection: Optional[str] = None, labels: Sequence[str] = ()
    ) -> None:
        """Write additional curated calibrations that might be instrument
        specific and are not part of the standard set.

        Default implementation does nothing.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to use to store these calibrations.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range.  If this collection already exists,
            it must be a `~CollectionType.CALIBRATION` collection, and it must
            not have any datasets that would conflict with those inserted by
            this method.  If `None`, a collection name is worked out
            automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter.  If provided,
            these are inserted into the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well the
            `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``).  Usually this is
            just the name of the ticket on which the calibration collection is
            being created.
        """
        return

    def writeCameraGeom(
        self, butler: Butler, collection: Optional[str] = None, labels: Sequence[str] = ()
    ) -> None:
        """Write the default camera geometry to the butler repository and
        associate it with the appropriate validity range in a calibration
        collection.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to use to store these calibrations.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range.  If this collection already exists,
            it must be a `~CollectionType.CALIBRATION` collection, and it must
            not have any datasets that would conflict with those inserted by
            this method.  If `None`, a collection name is worked out
            automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter.  If provided,
            these are inserted into the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well the
            `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``).  Usually this is
            just the name of the ticket on which the calibration collection is
            being created.
        """
        if collection is None:
            collection = self.makeCalibrationCollectionName(*labels)
        butler.registry.registerCollection(collection, type=CollectionType.CALIBRATION)
        run = self.makeUnboundedCalibrationRunName(*labels)
        butler.registry.registerRun(run)
        datasetType = DatasetType(
            "camera", ("instrument",), "Camera", isCalibration=True, universe=butler.registry.dimensions
        )
        butler.registry.registerDatasetType(datasetType)
        camera = self.getCamera()
        ref = butler.put(camera, datasetType, {"instrument": self.getName()}, run=run)
        # The camera geometry is valid for all time.
        butler.registry.certify(collection, [ref], Timespan(begin=None, end=None))

    def writeStandardTextCuratedCalibrations(
        self, butler: Butler, collection: Optional[str] = None, labels: Sequence[str] = ()
    ) -> None:
        """Write the set of standardized curated text calibrations to
        the repository.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Butler to receive these calibration datasets.
        collection : `str`, optional
            Name to use for the calibration collection that associates all
            datasets with a validity range.  If this collection already exists,
            it must be a `~CollectionType.CALIBRATION` collection, and it must
            not have any datasets that would conflict with those inserted by
            this method.  If `None`, a collection name is worked out
            automatically from the instrument name and other metadata by
            calling ``makeCalibrationCollectionName``, but this
            default name may not work well for long-lived repositories unless
            ``labels`` is also provided (and changed every time curated
            calibrations are ingested).
        labels : `Sequence` [ `str` ], optional
            Extra strings to include in collection names, after concatenating
            them with the standard collection name delimiter.  If provided,
            these are inserted into the names of the `~CollectionType.RUN`
            collections that datasets are inserted directly into, as well the
            `~CollectionType.CALIBRATION` collection if it is generated
            automatically (i.e. if ``collection is None``).  Usually this is
            just the name of the ticket on which the calibration collection is
            being created.
        """
        if collection is None:
            collection = self.makeCalibrationCollectionName(*labels)
        butler.registry.registerCollection(collection, type=CollectionType.CALIBRATION)
        runs: Set[str] = set()
        for datasetTypeName in self.standardCuratedDatasetTypes:
            # We need to define the dataset types.
            if datasetTypeName not in StandardCuratedCalibrationDatasetTypes:
                raise ValueError(
                    f"DatasetType {datasetTypeName} not in understood list"
                    f" [{'.'.join(StandardCuratedCalibrationDatasetTypes)}]"
                )
            definition = StandardCuratedCalibrationDatasetTypes[datasetTypeName]
            datasetType = DatasetType(
                datasetTypeName,
                universe=butler.registry.dimensions,
                isCalibration=True,
                # MyPy should be able to figure out that the kwargs here have
                # the right types, but it can't.
                **definition,  # type: ignore
            )
            self._writeSpecificCuratedCalibrationDatasets(
                butler, datasetType, collection, runs=runs, labels=labels
            )

    @classmethod
    def _getSpecificCuratedCalibrationPath(cls, datasetTypeName: str) -> Optional[str]:
        """Return the path of the curated calibration directory.

        Parameters
        ----------
        datasetTypeName : `str`
            The name of the standard dataset type to find.

        Returns
        -------
        path : `str` or `None`
            The path to the standard curated data directory.  `None` if the
            dataset type is not found or the obs data package is not
            available.
        """
        data_package_dir = cls.getObsDataPackageDir()
        if data_package_dir is None:
            # If there is no data package then there can't be datasets.
            return None

        if cls.policyName is None:
            raise TypeError(f"Instrument {cls.getName()} has an obs data package but no policy name.")

        calibPath = os.path.join(data_package_dir, cls.policyName, datasetTypeName)

        if os.path.exists(calibPath):
            return calibPath

        return None

    def _writeSpecificCuratedCalibrationDatasets(
        self, butler: Butler, datasetType: DatasetType, collection: str, runs: Set[str], labels: Sequence[str]
    ) -> None:
        """Write standardized curated calibration datasets for this specific
        dataset type from an obs data package.

        Parameters
        ----------
        butler : `lsst.daf.butler.Butler`
            Gen3 butler in which to put the calibrations.
        datasetType : `lsst.daf.butler.DatasetType`
            Dataset type to be put.
        collection : `str`
            Name of the `~CollectionType.CALIBRATION` collection that
            associates all datasets with validity ranges.  Must have been
            registered prior to this call.
        runs : `set` [ `str` ]
            Names of runs that have already been registered by previous calls
            and need not be registered again.  Should be updated by this
            method as new runs are registered.
        labels : `Sequence` [ `str` ]
            Extra strings to include in run names when creating them from
            ``CALIBDATE`` metadata, via calls to `makeCuratedCalibrationName`.
            Usually this is the name of the ticket on which the calibration
            collection is being created.

        Notes
        -----
        This method scans the location defined in the ``obsDataPackageDir``
        class attribute for curated calibrations corresponding to the
        supplied dataset type.  The directory name in the data package must
        match the name of the dataset type.  They are assumed to use the
        standard layout and can be read by
        `~lsst.pipe.tasks.read_curated_calibs.read_all` and provide standard
        metadata.
        """
        calibPath = self._getSpecificCuratedCalibrationPath(datasetType.name)
        if calibPath is None:
            return

        # Register the dataset type.
        butler.registry.registerDatasetType(datasetType)

        # obs_base can't depend on pipe_tasks but concrete obs packages
        # can -- we therefore have to defer import.
        from lsst.pipe.tasks.read_curated_calibs import read_all

        # Read calibs, registering a new run for each CALIBDATE as needed.
        # We try to avoid registering runs multiple times as an optimization
        # by putting them in the ``runs`` set that was passed in.
        camera = self.getCamera()
        calibsDict = read_all(calibPath, camera)[0]  # second return is calib type
        datasetRecords = []
        for det in calibsDict:
            times = sorted(calibsDict[det])
            calibs = [calibsDict[det][time] for time in times]
            times = [astropy.time.Time(t, format="datetime", scale="utc") for t in times]
            # Each calibration is valid from its own time up to the next one;
            # the final calibration's validity is open-ended.
            times += [None]
            for calib, beginTime, endTime in zip(calibs, times[:-1], times[1:]):
                md = calib.getMetadata()
                run = self.makeCuratedCalibrationRunName(md["CALIBDATE"], *labels)
                if run not in runs:
                    butler.registry.registerRun(run)
                    runs.add(run)
                dataId = DataCoordinate.standardize(
                    universe=butler.registry.dimensions,
                    instrument=self.getName(),
                    detector=md["DETECTOR"],
                )
                datasetRecords.append((calib, dataId, run, Timespan(beginTime, endTime)))

        # Second loop actually does the inserts and filesystem writes.  We
        # first do a butler.put on each dataset, inserting it into the run for
        # its calibDate.  We remember those refs and group them by timespan, so
        # we can vectorize the certify calls as much as possible.
        refsByTimespan = defaultdict(list)
        with butler.transaction():
            for calib, dataId, run, timespan in datasetRecords:
                refsByTimespan[timespan].append(butler.put(calib, datasetType, dataId, run=run))
            for timespan, refs in refsByTimespan.items():
                butler.registry.certify(collection, refs, timespan)

    @abstractmethod
    def makeDataIdTranslatorFactory(self) -> TranslatorFactory:
        """Return a factory for creating Gen2->Gen3 data ID translators,
        specialized for this instrument.

        Derived class implementations should generally call
        `TranslatorFactory.addGenericInstrumentRules` with appropriate
        arguments, but are not required to (and may not be able to if their
        Gen2 raw data IDs are sufficiently different from the HSC/DECam/CFHT
        norm).

        Returns
        -------
        factory : `TranslatorFactory`.
            Factory for `Translator` objects.
        """
        raise NotImplementedError("Must be implemented by derived classes.")

    @staticmethod
    def formatCollectionTimestamp(timestamp: Union[str, datetime.datetime]) -> str:
        """Format a timestamp for use in a collection name.

        Parameters
        ----------
        timestamp : `str` or `datetime.datetime`
            Timestamp to format.  May be a date or datetime string in extended
            ISO format (assumed UTC), with or without a timezone specifier, a
            datetime string in basic ISO format with a timezone specifier, a
            naive `datetime.datetime` instance (assumed UTC) or a
            timezone-aware `datetime.datetime` instance (converted to UTC).
            This is intended to cover all forms that string ``CALIBDATE``
            metadata values have taken in the past, as well as the format this
            method itself writes out (to enable round-tripping).

        Returns
        -------
        formatted : `str`
            Standardized string form for the timestamp.
        """
        if isinstance(timestamp, str):
            if "-" in timestamp:
                # Extended ISO format, with - and : delimiters.
                timestamp = datetime.datetime.fromisoformat(timestamp)
            else:
                # Basic ISO format, with no delimiters (what this method
                # returns).
                timestamp = datetime.datetime.strptime(timestamp, "%Y%m%dT%H%M%S%z")
        if not isinstance(timestamp, datetime.datetime):
            raise TypeError(f"Unexpected date/time object: {timestamp!r}.")
        if timestamp.tzinfo is not None:
            timestamp = timestamp.astimezone(datetime.timezone.utc)
        return f"{timestamp:%Y%m%dT%H%M%S}Z"

    @staticmethod
    def makeCollectionTimestamp() -> str:
        """Create a timestamp string for use in a collection name from the
        current time.

        Returns
        -------
        formatted : `str`
            Standardized string form of the current time.
        """
        return Instrument.formatCollectionTimestamp(datetime.datetime.now(tz=datetime.timezone.utc))

    def makeDefaultRawIngestRunName(self) -> str:
        """Make the default instrument-specific run collection string for raw
        data ingest.

        Returns
        -------
        coll : `str`
            Run collection name to be used as the default for ingestion of
            raws.
        """
        return self.makeCollectionName("raw", "all")

    def makeUnboundedCalibrationRunName(self, *labels: str) -> str:
        """Make a RUN collection name appropriate for inserting calibration
        datasets whose validity ranges are unbounded.

        Parameters
        ----------
        *labels : `str`
            Extra strings to be included in the base name, using the default
            delimiter for collection names.  Usually this is the name of the
            ticket on which the calibration collection is being created.

        Returns
        -------
        name : `str`
            Run collection name.
        """
        return self.makeCollectionName("calib", *labels, "unbounded")

    def makeCuratedCalibrationRunName(self, calibDate: str, *labels: str) -> str:
        """Make a RUN collection name appropriate for inserting curated
        calibration datasets with the given ``CALIBDATE`` metadata value.

        Parameters
        ----------
        calibDate : `str`
            The ``CALIBDATE`` metadata value.
        *labels : `str`
            Strings to be included in the collection name (before
            ``calibDate``, but after all other terms), using the default
            delimiter for collection names.  Usually this is the name of the
            ticket on which the calibration collection is being created.

        Returns
        -------
        name : `str`
            Run collection name.
        """
        return self.makeCollectionName("calib", *labels, "curated", self.formatCollectionTimestamp(calibDate))

    def makeCalibrationCollectionName(self, *labels: str) -> str:
        """Make a CALIBRATION collection name appropriate for associating
        calibration datasets with validity ranges.

        Parameters
        ----------
        *labels : `str`
            Strings to be appended to the base name, using the default
            delimiter for collection names.  Usually this is the name of the
            ticket on which the calibration collection is being created.

        Returns
        -------
        name : `str`
            Calibration collection name.
        """
        return self.makeCollectionName("calib", *labels)

    @staticmethod
    def makeRefCatCollectionName(*labels: str) -> str:
        """Return a global (not instrument-specific) name for a collection that
        holds reference catalogs.

        With no arguments, this returns the name of the collection that holds
        all reference catalogs (usually a ``CHAINED`` collection, at least in
        long-lived repos that may contain more than one reference catalog).

        Parameters
        ----------
        *labels : `str`
            Strings to be added to the global collection name, in order to
            define a collection name for one or more reference catalogs being
            ingested at the same time.

        Returns
        -------
        name : `str`
            Collection name.

        Notes
        -----
        This is a ``staticmethod``, not a ``classmethod``, because it should
        be the same for all instruments.
        """
        return "/".join(("refcats",) + labels)

    def makeUmbrellaCollectionName(self) -> str:
        """Return the name of the umbrella ``CHAINED`` collection for this
        instrument that combines all standard recommended input collections.

        This method should almost never be overridden by derived classes.

        Returns
        -------
        name : `str`
            Name for the umbrella collection.
        """
        return self.makeCollectionName("defaults")

    def makeCollectionName(self, *labels: str) -> str:
        """Get the instrument-specific collection string to use as derived
        from the supplied labels.

        Parameters
        ----------
        *labels : `str`
            Strings to be combined with the instrument name to form a
            collection name.

        Returns
        -------
        name : `str`
            Collection name to use that includes the instrument's recommended
            prefix.
        """
        return "/".join((self.collection_prefix,) + labels)
def makeExposureRecordFromObsInfo(obsInfo: ObservationInfo, universe: DimensionUniverse) -> DimensionRecord:
    """Construct an exposure `DimensionRecord` from an
    `astro_metadata_translator.ObservationInfo`.

    Parameters
    ----------
    obsInfo : `astro_metadata_translator.ObservationInfo`
        A `~astro_metadata_translator.ObservationInfo` object corresponding
        to the exposure.
    universe : `DimensionUniverse`
        Set of all known dimensions.

    Returns
    -------
    record : `DimensionRecord`
        A record containing exposure metadata, suitable for insertion into
        a `Registry`.
    """
    dimension = universe["exposure"]

    # Pointing-related quantities default to None; each is filled in only
    # when the corresponding ObservationInfo attribute is available.
    ra = dec = sky_angle = zenith_angle = None
    radec = obsInfo.tracking_radec
    if radec is not None:
        icrs = radec.icrs
        ra = icrs.ra.degree
        dec = icrs.dec.degree
    if obsInfo.boresight_rotation_coord == "sky":
        sky_angle = obsInfo.boresight_rotation_angle.degree
    if obsInfo.altaz_begin is not None:
        zenith_angle = obsInfo.altaz_begin.zen.degree

    # dark_time is not required to be calculable, so it may be absent.
    dark_time = None if obsInfo.dark_time is None else obsInfo.dark_time.to_value("s")

    return dimension.RecordClass(
        instrument=obsInfo.instrument,
        id=obsInfo.exposure_id,
        obs_id=obsInfo.observation_id,
        group_name=obsInfo.exposure_group,
        # NOTE(review): group_id is filled from visit_id — presumably the
        # translator guarantees these agree; confirm against callers.
        group_id=obsInfo.visit_id,
        datetime_begin=obsInfo.datetime_begin,
        datetime_end=obsInfo.datetime_end,
        exposure_time=obsInfo.exposure_time.to_value("s"),
        dark_time=dark_time,
        observation_type=obsInfo.observation_type,
        observation_reason=obsInfo.observation_reason,
        day_obs=obsInfo.observing_day,
        seq_num=obsInfo.observation_counter,
        physical_filter=obsInfo.physical_filter,
        science_program=obsInfo.science_program,
        target_name=obsInfo.object,
        tracking_ra=ra,
        tracking_dec=dec,
        sky_angle=sky_angle,
        zenith_angle=zenith_angle,
    )
def loadCamera(butler: Butler, dataId: DataId, *, collections: Any = None) -> Tuple[Camera, bool]:
    """Attempt to load versioned camera geometry from a butler, falling
    back to the nominal camera from the `Instrument` class if that fails.

    Parameters
    ----------
    butler : `lsst.daf.butler.Butler`
        Butler instance to attempt to query for and load a ``camera``
        dataset from.
    dataId : `dict` or `DataCoordinate`
        Data ID that identifies at least the ``instrument`` and
        ``exposure`` dimensions.
    collections : Any, optional
        Collections to be searched, overriding ``self.butler.collections``.
        Can be any of the types supported by the ``collections`` argument
        to butler construction.

    Returns
    -------
    camera : `lsst.afw.cameraGeom.Camera`
        Camera object.
    versioned : `bool`
        If `True`, the camera was obtained from the butler and should
        represent a versioned camera from a calibration repository.  If
        `False`, no camera datasets were found, and the returned camera
        was produced by instantiating the appropriate `Instrument` class
        and calling `Instrument.getCamera`.

    Raises
    ------
    LookupError
        Raised when ``dataId`` does not specify a valid data ID.
    """
    if collections is None:
        collections = butler.collections
    # Expand the data ID up front: Registry would do this internally
    # anyway, but doing it here ensures it happens only once and also
    # surfaces any problems with missing keys immediately.
    try:
        dataId = butler.registry.expandDataId(dataId, graph=butler.registry.dimensions["exposure"].graph)
    except DataIdError as exc:
        raise LookupError(str(exc)) from exc
    try:
        camera = butler.get("camera", dataId=dataId, collections=collections)
    except LookupError:
        # No versioned camera in the repo: fall back to the nominal
        # camera from the Instrument class.
        # We know an instrument data ID is a value, but MyPy doesn't.
        instrument = Instrument.fromName(dataId["instrument"], butler.registry)  # type: ignore
        return instrument.getCamera(), False
    else:
        return camera, True