__all__ = [
    "DetectCoaddSourcesConfig",
    "DetectCoaddSourcesTask"]

from lsst.pipe.base import (Struct, PipelineTask, PipelineTaskConfig, PipelineTaskConnections)
import lsst.pipe.base.connectionTypes as cT
from lsst.pex.config import Config, Field, ConfigurableField, ChoiceField
from lsst.daf.base import PropertyList  # NOTE(review): re-added; used for algorithm metadata below
from lsst.meas.base import SingleFrameMeasurementTask, ApplyApCorrTask, CatalogCalculationTask
from lsst.meas.extensions.scarlet import ScarletDeblendTask
from lsst.obs.base import ExposureIdInfo

from .mergeDetections import MergeDetectionsConfig, MergeDetectionsTask
from .mergeMeasurements import MergeMeasurementsConfig, MergeMeasurementsTask
from .multiBandUtils import CullPeaksConfig, _makeGetSchemaCatalogs
from .deblendCoaddSourcesPipeline import (DeblendCoaddSourcesSingleConfig,
                                          DeblendCoaddSourcesSingleTask,
                                          DeblendCoaddSourcesMultiConfig,
                                          DeblendCoaddSourcesMultiTask)
# Dataset types used by the multi-band coadd processing tasks:
# * deepCoadd_det: detections from what used to be processCoadd (tract, patch, filter)
# * deepCoadd_mergeDet: merged detections (tract, patch)
# * deepCoadd_meas: measurements of merged detections (tract, patch, filter)
# * deepCoadd_ref: reference sources (tract, patch)
# All of these have associated *_schema catalogs that require no data ID and hold no records.
#
# In addition, we have a schema-only dataset, which saves the schema for the PeakRecords in
# the mergeDet, meas, and ref dataset Footprints:
# * deepCoadd_peak_schema
69 dimensions=(
"tract",
"patch",
"band",
"skymap"),
70 defaultTemplates={
"inputCoaddName":
"deep",
"outputCoaddName":
"deep"}):
71 detectionSchema = cT.InitOutput(
72 doc=
"Schema of the detection catalog",
73 name=
"{outputCoaddName}Coadd_det_schema",
74 storageClass=
"SourceCatalog",
77 doc=
"Exposure on which detections are to be performed",
78 name=
"{inputCoaddName}Coadd",
79 storageClass=
"ExposureF",
80 dimensions=(
"tract",
"patch",
"band",
"skymap")
82 outputBackgrounds = cT.Output(
83 doc=
"Output Backgrounds used in detection",
84 name=
"{outputCoaddName}Coadd_calexp_background",
85 storageClass=
"Background",
86 dimensions=(
"tract",
"patch",
"band",
"skymap")
88 outputSources = cT.Output(
89 doc=
"Detected sources catalog",
90 name=
"{outputCoaddName}Coadd_det",
91 storageClass=
"SourceCatalog",
92 dimensions=(
"tract",
"patch",
"band",
"skymap")
94 outputExposure = cT.Output(
95 doc=
"Exposure post detection",
96 name=
"{outputCoaddName}Coadd_calexp",
97 storageClass=
"ExposureF",
98 dimensions=(
"tract",
"patch",
"band",
"skymap")
102class DetectCoaddSourcesConfig(PipelineTaskConfig, pipelineConnections=DetectCoaddSourcesConnections):
103 """Configuration parameters for the DetectCoaddSourcesTask
106 doScaleVariance = Field(dtype=bool, default=True, doc=
"Scale variance plane using empirical noise?")
107 scaleVariance = ConfigurableField(target=ScaleVarianceTask, doc=
"Variance rescaling")
108 detection = ConfigurableField(target=DynamicDetectionTask, doc=
"Source detection")
109 coaddName = Field(dtype=str, default=
"deep", doc=
"Name of coadd")
110 doInsertFakes = Field(dtype=bool, default=
False,
111 doc=
"Run fake sources injection task",
112 deprecated=(
"doInsertFakes is no longer supported. This config will be removed "
114 insertFakes = ConfigurableField(target=BaseFakeSourcesTask,
115 doc=
"Injection of fake sources for testing "
116 "purposes (must be retargeted)",
117 deprecated=(
"insertFakes is no longer supported. This config will "
118 "be removed after v24."))
122 doc=
"Should be set to True if fake sources have been inserted into the input data.",
125 def setDefaults(self):
126 super().setDefaults()
127 self.detection.thresholdType =
"pixel_stdev"
128 self.detection.isotropicGrow =
True
130 self.detection.reEstimateBackground =
False
131 self.detection.background.useApprox =
False
132 self.detection.background.binSize = 4096
133 self.detection.background.undersampleStyle =
'REDUCE_INTERP_ORDER'
134 self.detection.doTempWideBackground =
True
137class DetectCoaddSourcesTask(PipelineTask):
138 """Detect sources on a single filter coadd.
140 Coadding individual visits requires each exposure to be warped. This
141 introduces covariance in the noise properties across pixels. Before
142 detection, we correct the coadd variance by scaling the variance plane
in
143 the coadd to match the observed variance. This
is an approximate
144 approach -- strictly, we should propagate the full covariance matrix --
145 but it
is simple
and works well
in practice.
147 After scaling the variance plane, we detect sources
and generate footprints
148 by delegating to the
@ref SourceDetectionTask_
"detection" subtask.
150 DetectCoaddSourcesTask
is meant to be run after assembling a coadded image
151 in a given band. The purpose of the task
is to update the background,
152 detect all sources
in a single band
and generate a set of parent
153 footprints. Subsequent tasks
in the multi-band processing procedure will
154 merge sources across bands
and, eventually, perform forced photometry.
159 Initial schema
for the output catalog, modified-
in place to include all
160 fields set by this task. If
None, the source minimal schema will be used.
162 Additional keyword arguments.
165 _DefaultName = "detectCoaddSources"
166 ConfigClass = DetectCoaddSourcesConfig
167 getSchemaCatalogs = _makeGetSchemaCatalogs(
"det")
169 def __init__(self, schema=None, **kwargs):
172 super().__init__(**kwargs)
174 schema = afwTable.SourceTable.makeMinimalSchema()
176 self.makeSubtask(
"detection", schema=self.schema)
177 if self.config.doScaleVariance:
178 self.makeSubtask(
"scaleVariance")
180 self.detectionSchema = afwTable.SourceCatalog(self.schema)
182 def runQuantum(self, butlerQC, inputRefs, outputRefs):
183 inputs = butlerQC.get(inputRefs)
184 exposureIdInfo = ExposureIdInfo.fromDataId(butlerQC.quantum.dataId,
"tract_patch_band")
185 inputs[
"idFactory"] = exposureIdInfo.makeSourceIdFactory()
186 inputs[
"expId"] = exposureIdInfo.expId
187 outputs = self.run(**inputs)
188 butlerQC.put(outputs, outputRefs)
190 def run(self, exposure, idFactory, expId):
191 """Run detection on an exposure.
193 First scale the variance plane to match the observed variance
194 using ``ScaleVarianceTask``. Then invoke the ``SourceDetectionTask_`` "detection" subtask to
200 Exposure on which to detect (may be backround-subtracted
and scaled,
201 depending on configuration).
203 IdFactory to set source identifiers.
205 Exposure identifier (integer)
for RNG seed.
209 result : `lsst.pipe.base.Struct`
210 Results
as a struct
with attributes:
215 List of backgrounds (`list`).
217 if self.config.doScaleVariance:
218 varScale = self.scaleVariance.run(exposure.maskedImage)
219 exposure.getMetadata().add(
"VARIANCE_SCALE", varScale)
220 backgrounds = afwMath.BackgroundList()
221 table = afwTable.SourceTable.make(self.schema, idFactory)
222 detections = self.detection.run(table, exposure, expId=expId)
223 sources = detections.sources
224 fpSets = detections.fpSets
225 if hasattr(fpSets,
"background")
and fpSets.background:
226 for bg
in fpSets.background:
227 backgrounds.append(bg)
228 return Struct(outputSources=sources, outputBackgrounds=backgrounds, outputExposure=exposure)
234class DeblendCoaddSourcesConfig(Config):
235 """Configuration parameters for the `DeblendCoaddSourcesTask`.
238 singleBandDeblend = ConfigurableField(target=SourceDeblendTask,
239 doc="Deblend sources separately in each band")
240 multiBandDeblend = ConfigurableField(target=ScarletDeblendTask,
241 doc=
"Deblend sources simultaneously across bands")
242 simultaneous = Field(dtype=bool,
244 doc=
"Simultaneously deblend all bands? "
245 "True uses `multibandDeblend` while False uses `singleBandDeblend`")
246 coaddName = Field(dtype=str, default=
"deep", doc=
"Name of coadd")
247 hasFakes = Field(dtype=bool,
249 doc=
"Should be set to True if fake sources have been inserted into the input data.")
251 def setDefaults(self):
252 Config.setDefaults(self)
253 self.singleBandDeblend.propagateAllPeaks =
True
256class MeasureMergedCoaddSourcesConnections(PipelineTaskConnections, dimensions=(
"tract",
"patch",
"band",
"skymap"),
257 defaultTemplates={
"inputCoaddName":
"deep",
258 "outputCoaddName":
"deep",
259 "deblendedCatalog":
"deblendedFlux"}):
260 inputSchema = cT.InitInput(
261 doc=
"Input schema for measure merged task produced by a deblender or detection task",
262 name=
"{inputCoaddName}Coadd_deblendedFlux_schema",
263 storageClass=
"SourceCatalog"
265 outputSchema = cT.InitOutput(
266 doc=
"Output schema after all new fields are added by task",
267 name=
"{inputCoaddName}Coadd_meas_schema",
268 storageClass=
"SourceCatalog"
270 refCat = cT.PrerequisiteInput(
271 doc=
"Reference catalog used to match measured sources against known sources",
273 storageClass=
"SimpleCatalog",
274 dimensions=(
"skypix",),
279 doc=
"Input coadd image",
280 name=
"{inputCoaddName}Coadd_calexp",
281 storageClass=
"ExposureF",
282 dimensions=(
"tract",
"patch",
"band",
"skymap")
285 doc=
"SkyMap to use in processing",
286 name=BaseSkyMap.SKYMAP_DATASET_TYPE_NAME,
287 storageClass=
"SkyMap",
288 dimensions=(
"skymap",),
290 visitCatalogs = cT.Input(
291 doc=
"Source catalogs for visits which overlap input tract, patch, band. Will be "
292 "further filtered in the task for the purpose of propagating flags from image calibration "
293 "and characterization to coadd objects. Only used in legacy PropagateVisitFlagsTask.",
295 dimensions=(
"instrument",
"visit",
"detector"),
296 storageClass=
"SourceCatalog",
299 sourceTableHandles = cT.Input(
300 doc=(
"Source tables that are derived from the ``CalibrateTask`` sources. "
301 "These tables contain astrometry and photometry flags, and optionally "
303 name=
"sourceTable_visit",
304 storageClass=
"DataFrame",
305 dimensions=(
"instrument",
"visit"),
309 finalizedSourceTableHandles = cT.Input(
310 doc=(
"Finalized source tables from ``FinalizeCalibrationTask``. These "
311 "tables contain PSF flags from the finalized PSF estimation."),
312 name=
"finalized_src_table",
313 storageClass=
"DataFrame",
314 dimensions=(
"instrument",
"visit"),
318 inputCatalog = cT.Input(
319 doc=(
"Name of the input catalog to use."
320 "If the single band deblender was used this should be 'deblendedFlux."
321 "If the multi-band deblender was used this should be 'deblendedModel, "
322 "or deblendedFlux if the multiband deblender was configured to output "
323 "deblended flux catalogs. If no deblending was performed this should "
325 name=
"{inputCoaddName}Coadd_{deblendedCatalog}",
326 storageClass=
"SourceCatalog",
327 dimensions=(
"tract",
"patch",
"band",
"skymap"),
329 scarletCatalog = cT.Input(
330 doc=
"Catalogs produced by multiband deblending",
331 name=
"{inputCoaddName}Coadd_deblendedCatalog",
332 storageClass=
"SourceCatalog",
333 dimensions=(
"tract",
"patch",
"skymap"),
335 scarletModels = cT.Input(
336 doc=
"Multiband scarlet models produced by the deblender",
337 name=
"{inputCoaddName}Coadd_scarletModelData",
338 storageClass=
"ScarletModelData",
339 dimensions=(
"tract",
"patch",
"skymap"),
341 outputSources = cT.Output(
342 doc=
"Source catalog containing all the measurement information generated in this task",
343 name=
"{outputCoaddName}Coadd_meas",
344 dimensions=(
"tract",
"patch",
"band",
"skymap"),
345 storageClass=
"SourceCatalog",
347 matchResult = cT.Output(
348 doc=
"Match catalog produced by configured matcher, optional on doMatchSources",
349 name=
"{outputCoaddName}Coadd_measMatch",
350 dimensions=(
"tract",
"patch",
"band",
"skymap"),
351 storageClass=
"Catalog",
353 denormMatches = cT.Output(
354 doc=
"Denormalized Match catalog produced by configured matcher, optional on "
355 "doWriteMatchesDenormalized",
356 name=
"{outputCoaddName}Coadd_measMatchFull",
357 dimensions=(
"tract",
"patch",
"band",
"skymap"),
358 storageClass=
"Catalog",
361 def __init__(self, *, config=None):
362 super().__init__(config=config)
363 if config.doPropagateFlags
is False:
364 self.inputs -= set((
"visitCatalogs",))
365 self.inputs -= set((
"sourceTableHandles",))
366 self.inputs -= set((
"finalizedSourceTableHandles",))
367 elif config.propagateFlags.target == PropagateSourceFlagsTask:
369 self.inputs -= set((
"visitCatalogs",))
371 if not config.propagateFlags.source_flags:
372 self.inputs -= set((
"sourceTableHandles",))
373 if not config.propagateFlags.finalized_source_flags:
374 self.inputs -= set((
"finalizedSourceTableHandles",))
377 self.inputs -= set((
"sourceTableHandles",))
378 self.inputs -= set((
"finalizedSourceTableHandles",))
380 if config.inputCatalog ==
"deblendedCatalog":
381 self.inputs -= set((
"inputCatalog",))
383 if not config.doAddFootprints:
384 self.inputs -= set((
"scarletModels",))
386 self.inputs -= set((
"deblendedCatalog"))
387 self.inputs -= set((
"scarletModels",))
389 if config.doMatchSources
is False:
390 self.outputs -= set((
"matchResult",))
392 if config.doWriteMatchesDenormalized
is False:
393 self.outputs -= set((
"denormMatches",))
396class MeasureMergedCoaddSourcesConfig(PipelineTaskConfig,
397 pipelineConnections=MeasureMergedCoaddSourcesConnections):
398 """Configuration parameters for the MeasureMergedCoaddSourcesTask
400 inputCatalog = ChoiceField(
402 default="deblendedCatalog",
404 "deblendedCatalog":
"Output catalog from ScarletDeblendTask",
405 "deblendedFlux":
"Output catalog from SourceDeblendTask",
406 "mergeDet":
"The merged detections before deblending."
408 doc=
"The name of the input catalog.",
410 doAddFootprints = Field(dtype=bool,
412 doc=
"Whether or not to add footprints to the input catalog from scarlet models. "
413 "This should be true whenever using the multi-band deblender, "
414 "otherwise this should be False.")
415 doConserveFlux = Field(dtype=bool, default=
True,
416 doc=
"Whether to use the deblender models as templates to re-distribute the flux "
417 "from the 'exposure' (True), or to perform measurements on the deblender "
419 doStripFootprints = Field(dtype=bool, default=
True,
420 doc=
"Whether to strip footprints from the output catalog before "
422 "This is usually done when using scarlet models to save disk space.")
423 measurement = ConfigurableField(target=SingleFrameMeasurementTask, doc=
"Source measurement")
424 setPrimaryFlags = ConfigurableField(target=SetPrimaryFlagsTask, doc=
"Set flags for primary tract/patch")
425 doPropagateFlags = Field(
426 dtype=bool, default=
True,
427 doc=
"Whether to match sources to CCD catalogs to propagate flags (to e.g. identify PSF stars)"
429 propagateFlags = ConfigurableField(target=PropagateSourceFlagsTask, doc=
"Propagate source flags to coadd")
430 doMatchSources = Field(dtype=bool, default=
True, doc=
"Match sources to reference catalog?")
431 match = ConfigurableField(target=DirectMatchTask, doc=
"Matching to reference catalog")
432 doWriteMatchesDenormalized = Field(
435 doc=(
"Write reference matches in denormalized format? "
436 "This format uses more disk space, but is more convenient to read."),
438 coaddName = Field(dtype=str, default=
"deep", doc=
"Name of coadd")
439 psfCache = Field(dtype=int, default=100, doc=
"Size of psfCache")
440 checkUnitsParseStrict = Field(
441 doc=
"Strictness of Astropy unit compatibility check, can be 'raise', 'warn' or 'silent'",
448 doc=
"Apply aperture corrections"
450 applyApCorr = ConfigurableField(
451 target=ApplyApCorrTask,
452 doc=
"Subtask to apply aperture corrections"
454 doRunCatalogCalculation = Field(
457 doc=
'Run catalogCalculation task'
459 catalogCalculation = ConfigurableField(
460 target=CatalogCalculationTask,
461 doc=
"Subtask to run catalogCalculation plugins on catalog"
467 doc=
"Should be set to True if fake sources have been inserted into the input data."
471 def refObjLoader(self):
472 return self.match.refObjLoader
474 def setDefaults(self):
475 super().setDefaults()
476 self.measurement.plugins.names |= [
'base_InputCount',
478 'base_LocalPhotoCalib',
480 self.measurement.plugins[
'base_PixelFlags'].masksFpAnywhere = [
'CLIPPED',
'SENSOR_EDGE',
482 self.measurement.plugins[
'base_PixelFlags'].masksFpCenter = [
'CLIPPED',
'SENSOR_EDGE',
486class MeasureMergedCoaddSourcesTask(PipelineTask):
487 """Deblend sources from main catalog in each coadd seperately and measure.
489 Use peaks and footprints
from a master catalog to perform deblending
and
490 measurement
in each coadd.
492 Given a master input catalog of sources (peaks
and footprints)
or deblender
493 outputs(including a HeavyFootprint
in each band), measure each source on
494 the coadd. Repeating this procedure
with the same master catalog across
495 multiple coadds will generate a consistent set of child sources.
497 The deblender retains all peaks
and deblends any missing peaks (dropouts
in
498 that band)
as PSFs. Source properties are measured
and the
@c is-primary
499 flag (indicating sources
with no children)
is set. Visit flags are
500 propagated to the coadd sources.
502 Optionally, we can match the coadd sources to an external reference
505 After MeasureMergedCoaddSourcesTask has been run on multiple coadds, we
506 have a set of per-band catalogs. The next stage
in the multi-band
507 processing procedure will merge these measurements into a suitable catalog
508 for driving forced photometry.
512 butler : `lsst.daf.butler.Butler`
or `
None`, optional
513 A butler used to read the input schemas
from disk
or construct the reference
514 catalog loader,
if schema
or peakSchema
or refObjLoader
is None.
516 The schema of the merged detection catalog used
as input to this one.
518 The schema of the PeakRecords
in the Footprints
in the merged detection catalog.
519 refObjLoader : `lsst.meas.algorithms.ReferenceObjectLoader`, optional
520 An instance of LoadReferenceObjectsTasks that supplies an external reference
521 catalog. May be
None if the loader can be constructed
from the butler argument
or all steps
522 requiring a reference catalog are disabled.
523 initInputs : `dict`, optional
524 Dictionary that can contain a key ``inputSchema`` containing the
525 input schema. If present will override the value of ``schema``.
527 Additional keyword arguments.
530 _DefaultName = "measureCoaddSources"
531 ConfigClass = MeasureMergedCoaddSourcesConfig
532 getSchemaCatalogs = _makeGetSchemaCatalogs(
"meas")
534 def __init__(self, butler=None, schema=None, peakSchema=None, refObjLoader=None, initInputs=None,
536 super().__init__(**kwargs)
537 self.deblended = self.config.inputCatalog.startswith(
"deblended")
538 self.inputCatalog =
"Coadd_" + self.config.inputCatalog
539 if initInputs
is not None:
540 schema = initInputs[
'inputSchema'].schema
542 assert butler
is not None,
"Neither butler nor schema is defined"
543 schema = butler.get(self.config.coaddName + self.inputCatalog +
"_schema", immediate=
True).schema
544 self.schemaMapper = afwTable.SchemaMapper(schema)
545 self.schemaMapper.addMinimalSchema(schema)
546 self.schema = self.schemaMapper.getOutputSchema()
548 self.makeSubtask(
"measurement", schema=self.schema, algMetadata=self.algMetadata)
549 self.makeSubtask(
"setPrimaryFlags", schema=self.schema)
550 if self.config.doMatchSources:
551 self.makeSubtask(
"match", butler=butler, refObjLoader=refObjLoader)
552 if self.config.doPropagateFlags:
553 self.makeSubtask(
"propagateFlags", schema=self.schema)
554 self.schema.checkUnits(parse_strict=self.config.checkUnitsParseStrict)
555 if self.config.doApCorr:
556 self.makeSubtask(
"applyApCorr", schema=self.schema)
557 if self.config.doRunCatalogCalculation:
558 self.makeSubtask(
"catalogCalculation", schema=self.schema)
560 self.outputSchema = afwTable.SourceCatalog(self.schema)
562 def runQuantum(self, butlerQC, inputRefs, outputRefs):
563 inputs = butlerQC.get(inputRefs)
565 refObjLoader = ReferenceObjectLoader([ref.datasetRef.dataId
for ref
in inputRefs.refCat],
566 inputs.pop(
'refCat'),
567 name=self.config.connections.refCat,
568 config=self.config.refObjLoader,
570 self.match.setRefObjLoader(refObjLoader)
574 inputs[
'exposure'].getPsf().setCacheCapacity(self.config.psfCache)
577 exposureIdInfo = ExposureIdInfo.fromDataId(butlerQC.quantum.dataId,
"tract_patch")
578 inputs[
'exposureId'] = exposureIdInfo.expId
579 idFactory = exposureIdInfo.makeSourceIdFactory()
581 table = afwTable.SourceTable.make(self.schema, idFactory)
582 sources = afwTable.SourceCatalog(table)
584 if "scarletCatalog" in inputs:
585 inputCatalog = inputs.pop(
"scarletCatalog")
586 catalogRef = inputRefs.scarletCatalog
588 inputCatalog = inputs.pop(
"inputCatalog")
589 catalogRef = inputRefs.inputCatalog
590 sources.extend(inputCatalog, self.schemaMapper)
593 if self.config.doAddFootprints:
594 modelData = inputs.pop(
'scarletModels')
595 if self.config.doConserveFlux:
596 redistributeImage = inputs[
'exposure'].image
598 redistributeImage =
None
599 modelData.updateCatalogFootprints(
601 band=inputRefs.exposure.dataId[
"band"],
602 psfModel=inputs[
'exposure'].getPsf(),
603 redistributeImage=redistributeImage,
604 removeScarletData=
True,
606 table = sources.getTable()
607 table.setMetadata(self.algMetadata)
608 inputs[
'sources'] = sources
610 skyMap = inputs.pop(
'skyMap')
611 tractNumber = catalogRef.dataId[
'tract']
612 tractInfo = skyMap[tractNumber]
613 patchInfo = tractInfo.getPatchInfo(catalogRef.dataId[
'patch'])
618 wcs=tractInfo.getWcs(),
619 bbox=patchInfo.getOuterBBox()
621 inputs[
'skyInfo'] = skyInfo
623 if self.config.doPropagateFlags:
624 if self.config.propagateFlags.target == PropagateSourceFlagsTask:
626 ccdInputs = inputs[
"exposure"].getInfo().getCoaddInputs().ccds
627 inputs[
"ccdInputs"] = ccdInputs
629 if "sourceTableHandles" in inputs:
630 sourceTableHandles = inputs.pop(
"sourceTableHandles")
631 sourceTableHandleDict = {handle.dataId[
"visit"]: handle
632 for handle
in sourceTableHandles}
633 inputs[
"sourceTableHandleDict"] = sourceTableHandleDict
634 if "finalizedSourceTableHandles" in inputs:
635 finalizedSourceTableHandles = inputs.pop(
"finalizedSourceTableHandles")
636 finalizedSourceTableHandleDict = {handle.dataId[
"visit"]: handle
637 for handle
in finalizedSourceTableHandles}
638 inputs[
"finalizedSourceTableHandleDict"] = finalizedSourceTableHandleDict
642 ccdInputs = inputs[
'exposure'].getInfo().getCoaddInputs().ccds
643 visitKey = ccdInputs.schema.find(
"visit").key
644 ccdKey = ccdInputs.schema.find(
"ccd").key
645 inputVisitIds = set()
647 for ccdRecord
in ccdInputs:
648 visit = ccdRecord.get(visitKey)
649 ccd = ccdRecord.get(ccdKey)
650 inputVisitIds.add((visit, ccd))
651 ccdRecordsWcs[(visit, ccd)] = ccdRecord.getWcs()
653 inputCatalogsToKeep = []
654 inputCatalogWcsUpdate = []
655 for i, dataRef
in enumerate(inputRefs.visitCatalogs):
656 key = (dataRef.dataId[
'visit'], dataRef.dataId[
'detector'])
657 if key
in inputVisitIds:
658 inputCatalogsToKeep.append(inputs[
'visitCatalogs'][i])
659 inputCatalogWcsUpdate.append(ccdRecordsWcs[key])
660 inputs[
'visitCatalogs'] = inputCatalogsToKeep
661 inputs[
'wcsUpdates'] = inputCatalogWcsUpdate
662 inputs[
'ccdInputs'] = ccdInputs
664 outputs = self.run(**inputs)
666 sources = outputs.outputSources
667 butlerQC.put(outputs, outputRefs)
669 def run(self, exposure, sources, skyInfo, exposureId, ccdInputs=None, visitCatalogs=None, wcsUpdates=None,
670 butler=None, sourceTableHandleDict=None, finalizedSourceTableHandleDict=None):
671 """Run measurement algorithms on the input exposure, and optionally populate the
672 resulting catalog with extra information.
676 exposure : `lsst.afw.exposure.Exposure`
677 The input exposure on which measurements are to be performed.
679 A catalog built
from the results of merged detections,
or
681 skyInfo : `lsst.pipe.base.Struct`
682 A struct containing information about the position of the input exposure within
683 a `SkyMap`, the `SkyMap`, its `Wcs`,
and its bounding box.
684 exposureId : `int`
or `bytes`
685 Packed unique number
or bytes unique to the input exposure.
687 Catalog containing information on the individual visits which went into making
689 visitCatalogs : `list` of `lsst.afw.table.SourceCatalogs`, optional
690 A list of source catalogs corresponding to measurements made on the individual
691 visits which went into the input exposure. If
None and butler
is `
None` then
692 the task cannot propagate visit flags to the output catalog.
693 Deprecated, to be removed
with PropagateVisitFlagsTask.
695 If visitCatalogs
is not `
None` this should be a list of wcs objects which correspond
696 to the input visits. Used to put all coordinates to common system. If `
None`
and
697 butler
is `
None` then the task cannot propagate visit flags to the output catalog.
698 Deprecated, to be removed
with PropagateVisitFlagsTask.
699 butler : `
None`, optional
700 This was a Gen2 butler used to load visit catalogs.
701 No longer used
and should
not be set. Will be removed
in the
703 sourceTableHandleDict : `dict` [`int`, `lsst.daf.butler.DeferredDatasetHandle`], optional
704 Dict
for sourceTable_visit handles (key
is visit)
for propagating flags.
705 These tables are derived
from the ``CalibrateTask`` sources,
and contain
706 astrometry
and photometry flags,
and optionally PSF flags.
707 finalizedSourceTableHandleDict : `dict` [`int`, `lsst.daf.butler.DeferredDatasetHandle`], optional
708 Dict
for finalized_src_table handles (key
is visit)
for propagating flags.
709 These tables are derived
from ``FinalizeCalibrationTask``
and contain
710 PSF flags
from the finalized PSF estimation.
714 results : `lsst.pipe.base.Struct`
715 Results of running measurement task. Will contain the catalog
in the
716 sources attribute. Optionally will have results of matching to a
717 reference catalog
in the matchResults attribute,
and denormalized
718 matches
in the denormMatches attribute.
720 if butler
is not None:
721 warnings.warn(
"The 'butler' parameter is no longer used and can be safely removed.",
722 category=FutureWarning, stacklevel=2)
725 self.measurement.run(sources, exposure, exposureId=exposureId)
727 if self.config.doApCorr:
728 self.applyApCorr.run(
730 apCorrMap=exposure.getInfo().getApCorrMap()
737 if not sources.isContiguous():
738 sources = sources.copy(deep=
True)
740 if self.config.doRunCatalogCalculation:
741 self.catalogCalculation.run(sources)
743 self.setPrimaryFlags.run(sources, skyMap=skyInfo.skyMap, tractInfo=skyInfo.tractInfo,
744 patchInfo=skyInfo.patchInfo)
745 if self.config.doPropagateFlags:
746 if self.config.propagateFlags.target == PropagateSourceFlagsTask:
748 self.propagateFlags.run(
751 sourceTableHandleDict,
752 finalizedSourceTableHandleDict
756 self.propagateFlags.run(
767 if self.config.doMatchSources:
768 matchResult = self.match.run(sources, exposure.getInfo().getFilter().bandLabel)
769 matches = afwTable.packMatches(matchResult.matches)
770 matches.table.setMetadata(matchResult.matchMeta)
771 results.matchResult = matches
772 if self.config.doWriteMatchesDenormalized:
773 if matchResult.matches:
774 denormMatches = denormalizeMatches(matchResult.matches, matchResult.matchMeta)
776 self.log.warning(
"No matches, so generating dummy denormalized matches file")
777 denormMatches = afwTable.BaseCatalog(afwTable.Schema())
779 denormMatches.getMetadata().add(
"COMMENT",
780 "This catalog is empty because no matches were found.")
781 results.denormMatches = denormMatches
782 results.denormMatches = denormMatches
784 results.outputSources = sources