compute_summary_stats = ConfigurableField(
doc="Subtask that computes summary statistics from Exposure components.",
target=ComputeExposureSummaryStatsTask,
)
wcs_provider = ChoiceField(
doc="Which connection and behavior to use when applying WCS overrides.",
dtype=str,
allowed={
"input_summary": (
"Propagate the WCS from the input visit summary catalog "
"and do not recompute WCS-based summary statistics."
),
"tract": {
"Use the 'wcs_overrides_tract' connection to load an "
"`ExposureCatalog` with {visit, tract} dimensions and per-"
"detector rows, and recommpute WCS-based summary statistics."
},
"global": {
"Use the 'wcs_overrides_global' connection to load an "
"`ExposureCatalog` with {visit} dimensions and per-"
"detector rows, and recommpute WCS-based summary statistics."
},
# If needed, we could add options here to propagate the WCS from
# the input exposures and/or transfer WCS-based summary statistics
# from them as well. Right now there's no use case for that, since
# the input visit summary is always produced after the last time we
# write a new Exposure.
},
default="input_summary",
optional=False,
)
photo_calib_provider = ChoiceField(
doc="Which connection and behavior to use when applying photometric calibration overrides.",
dtype=str,
allowed={
"input_summary": (
"Propagate the PhotoCalib from the input visit summary catalog "
"and do not recompute photometric calibration summary "
"statistics."
),
"tract": {
"Use the 'photo_calib_overrides_tract' connection to load an "
"`ExposureCatalog` with {visit, tract} dimensions and per-"
"detector rows, and recommpute photometric calibration summary "
"statistics."
},
"global": {
"Use the 'photo_calib_overrides_global' connection to load an "
"`ExposureCatalog` with {visit} dimensions and per-"
"detector rows, and recommpute photometric calibration summary "
"statistics."
},
# If needed, we could add options here to propagate the PhotoCalib
# from the input exposures and/or transfer photometric calibration
        # summary statistics from them as well. Right now there's no use case
# for that, since the input visit summary is always produced after
# the last time we write a new Exposure.
},
default="input_summary",
optional=False,
)
background_provider = ChoiceField(
doc="Which connection(s) and behavior to use when applying background overrides.",
dtype=str,
allowed={
"input_summary": (
"The input visit summary catalog already includes summary "
"statistics for the final backgrounds that can be used as-is."
),
"replacement": {
"The 'background_originals' connection refers to a background "
"model that has been superseded by the model referred to by "
"the 'background_overrides' connection."
},
# Could also imagine an option in which there is no original
# background and the new one stands alone; can add later if needed.
},
default="input_summary",
optional=False,
)
# Could imagine an option here to say that the original background has not
# been subtracted from the input exposures, allowing postISRCCD to be used
# as input exposures. Can add later if needed.
class UpdateVisitSummaryTask(PipelineTask):
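    """Update an existing visit summary catalog with recomputed or overridden
    PSF, aperture correction, WCS, photometric calibration, and background
    summary information.
    """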
# The `run` method of this task can conditionally apply overrides for PSFs
# and aperture corrections, but its `PipelineTask` interface always applies
# them. We can always add the config options to make them optional later,
# if that turns out to be useful.
_DefaultName = "updateVisitSummary"
ConfigClass = UpdateVisitSummaryConfig
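    # Annotated for type checkers; the actual instance is created in
    # __init__ via makeSubtask("compute_summary_stats").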
compute_summary_stats: ComputeExposureSummaryStatsTask
def __init__(self, *, initInputs: dict[str, Any] | None = None, **kwargs: Any):
super().__init__(initInputs=initInputs, **kwargs)
self.makeSubtask("compute_summary_stats")
if initInputs is None or "input_summary_schema" not in initInputs:
raise RuntimeError("Task requires 'input_summary_schema' in initInputs.")
input_summary_schema = initInputs["input_summary_schema"].schema
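        # Start the output schema as a copy of the input summary schema;
        # tract ID fields are appended below only when the corresponding
        # provider is configured as "tract".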
self.schema_mapper = SchemaMapper(input_summary_schema)
self.schema_mapper.addMinimalSchema(input_summary_schema)
self.schema = self.schema_mapper.getOutputSchema()
if self.config.wcs_provider == "tract":
self.schema.addField(
"wcsTractId", type="L", doc="ID of the tract that provided the WCS."
)
if self.config.photo_calib_provider == "tract":
self.schema.addField(
"photoCalibTractId",
type="L",
doc="ID of the tract that provided the PhotoCalib.",
)
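        # An empty catalog carrying the output schema, for use as the
        # 'output_summary_schema' init-output.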
self.output_summary_schema = ExposureCatalog(self.schema)
def runQuantum(
self,
butlerQC: ButlerQuantumContext,
inputRefs: InputQuantizedConnection,
outputRefs: OutputQuantizedConnection,
) -> None:
# Docstring inherited.
sky_map = butlerQC.get(inputRefs.sky_map)
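        # Drop the sky map ref so the bulk butlerQC.get(inputRefs) call below
        # does not load it a second time.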
del inputRefs.sky_map
inputs = {}
# Collapse the wcs_override_ and photo_calib_override_ connection pairs
# into individual inputs (either ExposureCatalog or PerTractInput
# objects).
match self.config.wcs_provider:
case "tract":
inputs["wcs_overrides"] = PerTractInput.load(
butlerQC, sky_map, inputRefs.wcs_overrides_tract
)
del inputRefs.wcs_overrides_tract
case "global":
inputs["wcs_overrides"] = GlobalInput(
butlerQC.get(inputRefs.wcs_overrides_global)
)
del inputRefs.wcs_overrides_global
case "input_summary":
inputs["wcs_overrides"] = None
match self.config.photo_calib_provider:
case "tract":
inputs["photo_calib_overrides"] = PerTractInput.load(
butlerQC, sky_map, inputRefs.photo_calib_overrides_tract
)
del inputRefs.photo_calib_overrides_tract
case "global":
inputs["photo_calib_overrides"] = GlobalInput(
butlerQC.get(inputRefs.photo_calib_overrides_global)
)
del inputRefs.photo_calib_overrides_global
case "input_summary":
inputs["photo_calib_overrides"] = None
# Load or make DeferredDatasetHandles for everything else.
inputs.update(butlerQC.get(inputRefs))
deferred_dataset_types = ["input_exposures"]
# Handle whether to look for background originals and overrides at all.
match self.config.background_provider:
case "replacement":
deferred_dataset_types.append("background_originals")
deferred_dataset_types.append("background_overrides")
# Transform the lists of DeferredDatasetHandles for the multiple=True,
# deferLoad=True connections into mappings keyed by detector ID.
for name in deferred_dataset_types:
handles_list = inputs[name]
inputs[name] = {
handle.dataId["detector"]: handle for handle in handles_list
}
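            # Every detector present in the input summary catalog must have a
            # corresponding per-detector dataset handle.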
for record in inputs["input_summary_catalog"]:
detector_id = record.getId()
if detector_id not in inputs[name]:
raise InvalidQuantumError(
f"No {name!r} with detector {detector_id} for visit "
f"{butlerQC.quantum.dataId['visit']} even though this detector is present "
"in the input visit summary catalog. "
"This is most likely to occur when the QuantumGraph that includes this task "
"was incorrectly generated with an explicit or implicit (from datasets) tract "
"constraint."
)
# Convert the psf_star_catalog datasets from DataFrame to Astropy so
# they can be handled by ComputeExposureSummaryStatsTask (which was
# actually written to work with afw.table, but Astropy is similar
# enough that it works, too). Ideally this would be handled by just
# using ArrowAstropy as the storage class in the connection, but QG
# generation apparently doesn't fully support those yet, as it leads to
# problems in ci_hsc.
inputs["psf_star_catalog"] = astropy.table.Table.from_pandas(inputs["psf_star_catalog"], index=True)
# Actually run the task and write the results.
outputs = self.run(**inputs)
butlerQC.put(outputs, outputRefs)
def run(
self,
input_summary_catalog: ExposureCatalog,
input_exposures: Mapping[int, DeferredDatasetHandle],
psf_overrides: ExposureCatalog | None = None,
psf_star_catalog: astropy.table.Table | None = None,
ap_corr_overrides: ExposureCatalog | None = None,
photo_calib_overrides: PossiblyMultipleInput | None = None,
wcs_overrides: PossiblyMultipleInput | None = None,
background_originals: Mapping[int, DeferredDatasetHandle] | None = None,
background_overrides: Mapping[int, DeferredDatasetHandle] | None = None,
):
        """Build an updated version of the input visit summary catalog."""
        # Definition at line 626 of file update_visit_summary.py.
        ...
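

# A minimal configuration sketch (hypothetical values, assuming the standard
# lsst.pex.config override pattern): select per-tract WCS overrides while
# keeping the photometric calibration and backgrounds from the input summary.
#
#     config = UpdateVisitSummaryTask.ConfigClass()
#     config.wcs_provider = "tract"
#     config.photo_calib_provider = "input_summary"
#     config.background_provider = "input_summary"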