lsst.pipe.tasks g584c84fe5e+a38c3b9d15
multiBand.py
# This file is part of pipe_tasks.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

__all__ = ["DetectCoaddSourcesConfig", "DetectCoaddSourcesTask"]

import warnings

from lsst.pipe.base import (Struct, PipelineTask, PipelineTaskConfig, PipelineTaskConnections)
import lsst.pipe.base.connectionTypes as cT
from lsst.pex.config import Config, Field, ConfigurableField, ChoiceField
from lsst.meas.algorithms import DynamicDetectionTask, ReferenceObjectLoader, ScaleVarianceTask
from lsst.meas.base import SingleFrameMeasurementTask, ApplyApCorrTask, CatalogCalculationTask
from lsst.meas.deblender import SourceDeblendTask
from lsst.meas.extensions.scarlet import ScarletDeblendTask
from lsst.meas.astrom import DirectMatchTask, denormalizeMatches
from lsst.pipe.tasks.fakes import BaseFakeSourcesTask
from lsst.pipe.tasks.setPrimaryFlags import SetPrimaryFlagsTask
from lsst.pipe.tasks.propagateSourceFlags import PropagateSourceFlagsTask
import lsst.afw.table as afwTable
import lsst.afw.math as afwMath
from lsst.daf.base import PropertyList
from lsst.skymap import BaseSkyMap
from lsst.obs.base import ExposureIdInfo

# NOTE: these imports are a convenience so multiband users only have to import this file.
from .mergeDetections import MergeDetectionsConfig, MergeDetectionsTask  # noqa: F401
from .mergeMeasurements import MergeMeasurementsConfig, MergeMeasurementsTask  # noqa: F401
from .multiBandUtils import CullPeaksConfig, _makeGetSchemaCatalogs  # noqa: F401
from .deblendCoaddSourcesPipeline import DeblendCoaddSourcesSingleConfig  # noqa: F401
from .deblendCoaddSourcesPipeline import DeblendCoaddSourcesSingleTask  # noqa: F401
from .deblendCoaddSourcesPipeline import DeblendCoaddSourcesMultiConfig  # noqa: F401
from .deblendCoaddSourcesPipeline import DeblendCoaddSourcesMultiTask  # noqa: F401


"""
New dataset types:
* deepCoadd_det: detections from what used to be processCoadd (tract, patch, filter)
* deepCoadd_mergeDet: merged detections (tract, patch)
* deepCoadd_meas: measurements of merged detections (tract, patch, filter)
* deepCoadd_ref: reference sources (tract, patch)
All of these have associated *_schema catalogs that require no data ID and hold no records.

In addition, we have a schema-only dataset, which saves the schema for the PeakRecords in
the mergeDet, meas, and ref dataset Footprints:
* deepCoadd_peak_schema
"""


class DetectCoaddSourcesConnections(PipelineTaskConnections,
                                    dimensions=("tract", "patch", "band", "skymap"),
                                    defaultTemplates={"inputCoaddName": "deep", "outputCoaddName": "deep"}):
    detectionSchema = cT.InitOutput(
        doc="Schema of the detection catalog",
        name="{outputCoaddName}Coadd_det_schema",
        storageClass="SourceCatalog",
    )
    exposure = cT.Input(
        doc="Exposure on which detections are to be performed",
        name="{inputCoaddName}Coadd",
        storageClass="ExposureF",
        dimensions=("tract", "patch", "band", "skymap")
    )
    outputBackgrounds = cT.Output(
        doc="Output Backgrounds used in detection",
        name="{outputCoaddName}Coadd_calexp_background",
        storageClass="Background",
        dimensions=("tract", "patch", "band", "skymap")
    )
    outputSources = cT.Output(
        doc="Detected sources catalog",
        name="{outputCoaddName}Coadd_det",
        storageClass="SourceCatalog",
        dimensions=("tract", "patch", "band", "skymap")
    )
    outputExposure = cT.Output(
        doc="Exposure post detection",
        name="{outputCoaddName}Coadd_calexp",
        storageClass="ExposureF",
        dimensions=("tract", "patch", "band", "skymap")
    )


class DetectCoaddSourcesConfig(PipelineTaskConfig, pipelineConnections=DetectCoaddSourcesConnections):
    """Configuration parameters for the DetectCoaddSourcesTask
    """

    doScaleVariance = Field(dtype=bool, default=True, doc="Scale variance plane using empirical noise?")
    scaleVariance = ConfigurableField(target=ScaleVarianceTask, doc="Variance rescaling")
    detection = ConfigurableField(target=DynamicDetectionTask, doc="Source detection")
    coaddName = Field(dtype=str, default="deep", doc="Name of coadd")
    doInsertFakes = Field(dtype=bool, default=False,
                          doc="Run fake sources injection task",
                          deprecated=("doInsertFakes is no longer supported. This config will be removed "
                                      "after v24."))
    insertFakes = ConfigurableField(target=BaseFakeSourcesTask,
                                    doc="Injection of fake sources for testing "
                                        "purposes (must be retargeted)",
                                    deprecated=("insertFakes is no longer supported. This config will "
                                                "be removed after v24."))
    hasFakes = Field(
        dtype=bool,
        default=False,
        doc="Should be set to True if fake sources have been inserted into the input data.",
    )

    def setDefaults(self):
        super().setDefaults()
        self.detection.thresholdType = "pixel_stdev"
        self.detection.isotropicGrow = True
        # Coadds are made from background-subtracted CCDs, so any background subtraction should be very basic
        self.detection.reEstimateBackground = False
        self.detection.background.useApprox = False
        self.detection.background.binSize = 4096
        self.detection.background.undersampleStyle = 'REDUCE_INTERP_ORDER'
        self.detection.doTempWideBackground = True  # Suppress large footprints that overwhelm the deblender
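
# A minimal sketch (illustrative values, not recommended defaults) of how the
# detection settings above can be overridden in a pipeline config file:
#
#     config.detection.thresholdValue = 5.0
#     config.detection.background.binSize = 2048
#     config.doScaleVariance = False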


class DetectCoaddSourcesTask(PipelineTask):
    """Detect sources on a single filter coadd.

    Coadding individual visits requires each exposure to be warped. This
    introduces covariance in the noise properties across pixels. Before
    detection, we correct the coadd variance by scaling the variance plane in
    the coadd to match the observed variance. This is an approximate
    approach -- strictly, we should propagate the full covariance matrix --
    but it is simple and works well in practice.

    After scaling the variance plane, we detect sources and generate footprints
    by delegating to the ``detection`` subtask.

    DetectCoaddSourcesTask is meant to be run after assembling a coadded image
    in a given band. The purpose of the task is to update the background,
    detect all sources in a single band and generate a set of parent
    footprints. Subsequent tasks in the multi-band processing procedure will
    merge sources across bands and, eventually, perform forced photometry.

    Parameters
    ----------
    schema : `lsst.afw.table.Schema`, optional
        Initial schema for the output catalog, modified in place to include all
        fields set by this task. If `None`, the minimal source schema will be used.
    **kwargs
        Additional keyword arguments.
    """

    _DefaultName = "detectCoaddSources"
    ConfigClass = DetectCoaddSourcesConfig
    getSchemaCatalogs = _makeGetSchemaCatalogs("det")

    def __init__(self, schema=None, **kwargs):
        # N.B. Super is used here to handle the multiple inheritance of PipelineTasks;
        # the init tree call structure has been reviewed carefully to be sure super
        # will work as intended.
        super().__init__(**kwargs)
        if schema is None:
            schema = afwTable.SourceTable.makeMinimalSchema()
        self.schema = schema
        self.makeSubtask("detection", schema=self.schema)
        if self.config.doScaleVariance:
            self.makeSubtask("scaleVariance")

        self.detectionSchema = afwTable.SourceCatalog(self.schema)

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        inputs = butlerQC.get(inputRefs)
        exposureIdInfo = ExposureIdInfo.fromDataId(butlerQC.quantum.dataId, "tract_patch_band")
        inputs["idFactory"] = exposureIdInfo.makeSourceIdFactory()
        inputs["expId"] = exposureIdInfo.expId
        outputs = self.run(**inputs)
        butlerQC.put(outputs, outputRefs)

    def run(self, exposure, idFactory, expId):
        """Run detection on an exposure.

        First scale the variance plane to match the observed variance
        using ``ScaleVarianceTask``. Then invoke the ``detection`` subtask to
        detect sources.

        Parameters
        ----------
        exposure : `lsst.afw.image.Exposure`
            Exposure on which to detect (may be background-subtracted and scaled,
            depending on configuration).
        idFactory : `lsst.afw.table.IdFactory`
            IdFactory to set source identifiers.
        expId : `int`
            Exposure identifier (integer) for RNG seed.

        Returns
        -------
        result : `lsst.pipe.base.Struct`
            Results as a struct with attributes:

            ``outputSources``
                Catalog of detections (`lsst.afw.table.SourceCatalog`).
            ``outputBackgrounds``
                List of backgrounds (`lsst.afw.math.BackgroundList`).
            ``outputExposure``
                The input exposure, updated with detection masks
                (`lsst.afw.image.Exposure`).
        """
        if self.config.doScaleVariance:
            varScale = self.scaleVariance.run(exposure.maskedImage)
            exposure.getMetadata().add("VARIANCE_SCALE", varScale)
        backgrounds = afwMath.BackgroundList()
        table = afwTable.SourceTable.make(self.schema, idFactory)
        detections = self.detection.run(table, exposure, expId=expId)
        sources = detections.sources
        fpSets = detections.fpSets
        if hasattr(fpSets, "background") and fpSets.background:
            for bg in fpSets.background:
                backgrounds.append(bg)
        return Struct(outputSources=sources, outputBackgrounds=backgrounds, outputExposure=exposure)
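
# A minimal sketch (hypothetical `coaddExposure`; the pipeline middleware
# normally drives this task through runQuantum) of calling the task directly:
#
#     from lsst.obs.base import ExposureIdInfo
#
#     task = DetectCoaddSourcesTask()
#     idInfo = ExposureIdInfo(expId=42, expBits=16)
#     result = task.run(coaddExposure, idInfo.makeSourceIdFactory(), idInfo.expId)
#     catalog = result.outputSources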


class DeblendCoaddSourcesConfig(Config):
    """Configuration parameters for the `DeblendCoaddSourcesTask`.
    """

    singleBandDeblend = ConfigurableField(target=SourceDeblendTask,
                                          doc="Deblend sources separately in each band")
    multiBandDeblend = ConfigurableField(target=ScarletDeblendTask,
                                         doc="Deblend sources simultaneously across bands")
    simultaneous = Field(dtype=bool,
                         default=True,
                         doc="Simultaneously deblend all bands? "
                             "True uses `multiBandDeblend` while False uses `singleBandDeblend`")
    coaddName = Field(dtype=str, default="deep", doc="Name of coadd")
    hasFakes = Field(dtype=bool,
                     default=False,
                     doc="Should be set to True if fake sources have been inserted into the input data.")

    def setDefaults(self):
        Config.setDefaults(self)
        self.singleBandDeblend.propagateAllPeaks = True
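
# A minimal sketch (illustrative) of selecting the single-band deblender
# instead of the default simultaneous scarlet deblend in a config override:
#
#     config.simultaneous = False
#     config.singleBandDeblend.maxFootprintArea = 10000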


class MeasureMergedCoaddSourcesConnections(PipelineTaskConnections,
                                           dimensions=("tract", "patch", "band", "skymap"),
                                           defaultTemplates={"inputCoaddName": "deep",
                                                             "outputCoaddName": "deep",
                                                             "deblendedCatalog": "deblendedFlux"}):
    warnings.warn("MeasureMergedCoaddSourcesConnections.defaultTemplates is deprecated and no longer used. "
                  "Use MeasureMergedCoaddSourcesConfig.inputCatalog.")
    inputSchema = cT.InitInput(
        doc="Input schema for measure merged task produced by a deblender or detection task",
        name="{inputCoaddName}Coadd_deblendedFlux_schema",
        storageClass="SourceCatalog"
    )
    outputSchema = cT.InitOutput(
        doc="Output schema after all new fields are added by task",
        name="{inputCoaddName}Coadd_meas_schema",
        storageClass="SourceCatalog"
    )
    refCat = cT.PrerequisiteInput(
        doc="Reference catalog used to match measured sources against known sources",
        name="ref_cat",
        storageClass="SimpleCatalog",
        dimensions=("skypix",),
        deferLoad=True,
        multiple=True
    )
    exposure = cT.Input(
        doc="Input coadd image",
        name="{inputCoaddName}Coadd_calexp",
        storageClass="ExposureF",
        dimensions=("tract", "patch", "band", "skymap")
    )
    skyMap = cT.Input(
        doc="SkyMap to use in processing",
        name=BaseSkyMap.SKYMAP_DATASET_TYPE_NAME,
        storageClass="SkyMap",
        dimensions=("skymap",),
    )
    visitCatalogs = cT.Input(
        doc="Source catalogs for visits which overlap input tract, patch, band. Will be "
            "further filtered in the task for the purpose of propagating flags from image calibration "
            "and characterization to coadd objects. Only used in legacy PropagateVisitFlagsTask.",
        name="src",
        dimensions=("instrument", "visit", "detector"),
        storageClass="SourceCatalog",
        multiple=True
    )
    sourceTableHandles = cT.Input(
        doc=("Source tables that are derived from the ``CalibrateTask`` sources. "
             "These tables contain astrometry and photometry flags, and optionally "
             "PSF flags."),
        name="sourceTable_visit",
        storageClass="DataFrame",
        dimensions=("instrument", "visit"),
        multiple=True,
        deferLoad=True,
    )
    finalizedSourceTableHandles = cT.Input(
        doc=("Finalized source tables from ``FinalizeCalibrationTask``. These "
             "tables contain PSF flags from the finalized PSF estimation."),
        name="finalized_src_table",
        storageClass="DataFrame",
        dimensions=("instrument", "visit"),
        multiple=True,
        deferLoad=True,
    )
    inputCatalog = cT.Input(
        doc=("Name of the input catalog to use. "
             "If the single band deblender was used this should be 'deblendedFlux'. "
             "If the multi-band deblender was used this should be 'deblendedModel', "
             "or 'deblendedFlux' if the multiband deblender was configured to output "
             "deblended flux catalogs. If no deblending was performed this should "
             "be 'mergeDet'."),
        name="{inputCoaddName}Coadd_{deblendedCatalog}",
        storageClass="SourceCatalog",
        dimensions=("tract", "patch", "band", "skymap"),
    )
    scarletCatalog = cT.Input(
        doc="Catalogs produced by multiband deblending",
        name="{inputCoaddName}Coadd_deblendedCatalog",
        storageClass="SourceCatalog",
        dimensions=("tract", "patch", "skymap"),
    )
    scarletModels = cT.Input(
        doc="Multiband scarlet models produced by the deblender",
        name="{inputCoaddName}Coadd_scarletModelData",
        storageClass="ScarletModelData",
        dimensions=("tract", "patch", "skymap"),
    )
    outputSources = cT.Output(
        doc="Source catalog containing all the measurement information generated in this task",
        name="{outputCoaddName}Coadd_meas",
        dimensions=("tract", "patch", "band", "skymap"),
        storageClass="SourceCatalog",
    )
    matchResult = cT.Output(
        doc="Match catalog produced by configured matcher, optional on doMatchSources",
        name="{outputCoaddName}Coadd_measMatch",
        dimensions=("tract", "patch", "band", "skymap"),
        storageClass="Catalog",
    )
    denormMatches = cT.Output(
        doc="Denormalized Match catalog produced by configured matcher, optional on "
            "doWriteMatchesDenormalized",
        name="{outputCoaddName}Coadd_measMatchFull",
        dimensions=("tract", "patch", "band", "skymap"),
        storageClass="Catalog",
    )

    def __init__(self, *, config=None):
        super().__init__(config=config)
        if config.doPropagateFlags is False:
            self.inputs -= set(("visitCatalogs",))
            self.inputs -= set(("sourceTableHandles",))
            self.inputs -= set(("finalizedSourceTableHandles",))
        elif config.propagateFlags.target == PropagateSourceFlagsTask:
            # New PropagateSourceFlagsTask does not use visitCatalogs.
            self.inputs -= set(("visitCatalogs",))
            # Check for types of flags required.
            if not config.propagateFlags.source_flags:
                self.inputs -= set(("sourceTableHandles",))
            if not config.propagateFlags.finalized_source_flags:
                self.inputs -= set(("finalizedSourceTableHandles",))
        else:
            # Deprecated PropagateVisitFlagsTask uses visitCatalogs.
            self.inputs -= set(("sourceTableHandles",))
            self.inputs -= set(("finalizedSourceTableHandles",))

        if config.inputCatalog == "deblendedCatalog":
            self.inputs -= set(("inputCatalog",))

            if not config.doAddFootprints:
                self.inputs -= set(("scarletModels",))
        else:
            self.inputs -= set(("scarletCatalog",))
            self.inputs -= set(("scarletModels",))

        if config.doMatchSources is False:
            self.outputs -= set(("matchResult",))

        if config.doWriteMatchesDenormalized is False:
            self.outputs -= set(("denormMatches",))
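
# A minimal sketch (illustrative) of how the pruning above plays out: with the
# single-band deblender catalog selected and flag propagation disabled, the
# visit-level and scarlet connections are dropped from the quantum's inputs.
#
#     config = MeasureMergedCoaddSourcesConfig()
#     config.inputCatalog = "deblendedFlux"
#     config.doPropagateFlags = False
#     connections = MeasureMergedCoaddSourcesConnections(config=config)
#     # "scarletCatalog", "scarletModels", "visitCatalogs", and the source
#     # table handles are now absent from connections.inputs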


class MeasureMergedCoaddSourcesConfig(PipelineTaskConfig,
                                      pipelineConnections=MeasureMergedCoaddSourcesConnections):
    """Configuration parameters for the MeasureMergedCoaddSourcesTask
    """
    inputCatalog = ChoiceField(
        dtype=str,
        default="deblendedCatalog",
        allowed={
            "deblendedCatalog": "Output catalog from ScarletDeblendTask",
            "deblendedFlux": "Output catalog from SourceDeblendTask",
            "mergeDet": "The merged detections before deblending."
        },
        doc="The name of the input catalog.",
    )
    doAddFootprints = Field(dtype=bool,
                            default=True,
                            doc="Whether or not to add footprints to the input catalog from scarlet models. "
                                "This should be True when using the multi-band deblender "
                                "and False otherwise.")
    doConserveFlux = Field(dtype=bool, default=True,
                           doc="Whether to use the deblender models as templates to re-distribute the flux "
                               "from the 'exposure' (True), or to perform measurements on the deblender "
                               "model footprints.")
    doStripFootprints = Field(dtype=bool, default=True,
                              doc="Whether to strip footprints from the output catalog before "
                                  "saving to disk. "
                                  "This is usually done when using scarlet models to save disk space.")
    measurement = ConfigurableField(target=SingleFrameMeasurementTask, doc="Source measurement")
    setPrimaryFlags = ConfigurableField(target=SetPrimaryFlagsTask, doc="Set flags for primary tract/patch")
    doPropagateFlags = Field(
        dtype=bool, default=True,
        doc="Whether to match sources to CCD catalogs to propagate flags (to e.g. identify PSF stars)"
    )
    propagateFlags = ConfigurableField(target=PropagateSourceFlagsTask, doc="Propagate source flags to coadd")
    doMatchSources = Field(dtype=bool, default=True, doc="Match sources to reference catalog?")
    match = ConfigurableField(target=DirectMatchTask, doc="Matching to reference catalog")
    doWriteMatchesDenormalized = Field(
        dtype=bool,
        default=False,
        doc=("Write reference matches in denormalized format? "
             "This format uses more disk space, but is more convenient to read."),
    )
    coaddName = Field(dtype=str, default="deep", doc="Name of coadd")
    psfCache = Field(dtype=int, default=100, doc="Size of psfCache")
    checkUnitsParseStrict = Field(
        doc="Strictness of Astropy unit compatibility check, can be 'raise', 'warn' or 'silent'",
        dtype=str,
        default="raise",
    )
    doApCorr = Field(
        dtype=bool,
        default=True,
        doc="Apply aperture corrections"
    )
    applyApCorr = ConfigurableField(
        target=ApplyApCorrTask,
        doc="Subtask to apply aperture corrections"
    )
    doRunCatalogCalculation = Field(
        dtype=bool,
        default=True,
        doc='Run catalogCalculation task'
    )
    catalogCalculation = ConfigurableField(
        target=CatalogCalculationTask,
        doc="Subtask to run catalogCalculation plugins on catalog"
    )

    hasFakes = Field(
        dtype=bool,
        default=False,
        doc="Should be set to True if fake sources have been inserted into the input data."
    )

    @property
    def refObjLoader(self):
        return self.match.refObjLoader

    def setDefaults(self):
        super().setDefaults()
        self.measurement.plugins.names |= ['base_InputCount',
                                           'base_Variance',
                                           'base_LocalPhotoCalib',
                                           'base_LocalWcs']
        self.measurement.plugins['base_PixelFlags'].masksFpAnywhere = ['CLIPPED', 'SENSOR_EDGE',
                                                                       'INEXACT_PSF']
        self.measurement.plugins['base_PixelFlags'].masksFpCenter = ['CLIPPED', 'SENSOR_EDGE',
                                                                     'INEXACT_PSF']
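
# A minimal sketch (illustrative) of common overrides for this config, for
# example writing denormalized reference matches and shrinking the PSF cache:
#
#     config.doWriteMatchesDenormalized = True
#     config.psfCache = 50
#     config.match.matchRadius = 0.5  # arcsec (DirectMatchConfig parameter)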


class MeasureMergedCoaddSourcesTask(PipelineTask):
    """Deblend sources from master catalog in each coadd separately and measure.

    Use peaks and footprints from a master catalog to perform deblending and
    measurement in each coadd.

    Given a master input catalog of sources (peaks and footprints) or deblender
    outputs (including a HeavyFootprint in each band), measure each source on
    the coadd. Repeating this procedure with the same master catalog across
    multiple coadds will generate a consistent set of child sources.

    The deblender retains all peaks and deblends any missing peaks (dropouts in
    that band) as PSFs. Source properties are measured and the ``is-primary``
    flag (indicating sources with no children) is set. Visit flags are
    propagated to the coadd sources.

    Optionally, we can match the coadd sources to an external reference
    catalog.

    After MeasureMergedCoaddSourcesTask has been run on multiple coadds, we
    have a set of per-band catalogs. The next stage in the multi-band
    processing procedure will merge these measurements into a suitable catalog
    for driving forced photometry.

    Parameters
    ----------
    butler : `lsst.daf.butler.Butler` or `None`, optional
        A butler used to read the input schemas from disk or construct the reference
        catalog loader, if schema or peakSchema or refObjLoader is None.
    schema : `lsst.afw.table.Schema`, optional
        The schema of the merged detection catalog used as input to this one.
    peakSchema : `lsst.afw.table.Schema`, optional
        The schema of the PeakRecords in the Footprints in the merged detection catalog.
    refObjLoader : `lsst.meas.algorithms.ReferenceObjectLoader`, optional
        An instance of ReferenceObjectLoader that supplies an external reference
        catalog. May be `None` if the loader can be constructed from the butler argument or all steps
        requiring a reference catalog are disabled.
    initInputs : `dict`, optional
        Dictionary that can contain a key ``inputSchema`` containing the
        input schema. If present will override the value of ``schema``.
    **kwargs
        Additional keyword arguments.
    """

    _DefaultName = "measureCoaddSources"
    ConfigClass = MeasureMergedCoaddSourcesConfig
    getSchemaCatalogs = _makeGetSchemaCatalogs("meas")

    def __init__(self, butler=None, schema=None, peakSchema=None, refObjLoader=None, initInputs=None,
                 **kwargs):
        super().__init__(**kwargs)
        self.deblended = self.config.inputCatalog.startswith("deblended")
        self.inputCatalog = "Coadd_" + self.config.inputCatalog
        if initInputs is not None:
            schema = initInputs['inputSchema'].schema
        if schema is None:
            assert butler is not None, "Neither butler nor schema is defined"
            schema = butler.get(self.config.coaddName + self.inputCatalog + "_schema", immediate=True).schema
        self.schemaMapper = afwTable.SchemaMapper(schema)
        self.schemaMapper.addMinimalSchema(schema)
        self.schema = self.schemaMapper.getOutputSchema()
        self.algMetadata = PropertyList()
        self.makeSubtask("measurement", schema=self.schema, algMetadata=self.algMetadata)
        self.makeSubtask("setPrimaryFlags", schema=self.schema)
        if self.config.doMatchSources:
            self.makeSubtask("match", butler=butler, refObjLoader=refObjLoader)
        if self.config.doPropagateFlags:
            self.makeSubtask("propagateFlags", schema=self.schema)
        self.schema.checkUnits(parse_strict=self.config.checkUnitsParseStrict)
        if self.config.doApCorr:
            self.makeSubtask("applyApCorr", schema=self.schema)
        if self.config.doRunCatalogCalculation:
            self.makeSubtask("catalogCalculation", schema=self.schema)

        self.outputSchema = afwTable.SourceCatalog(self.schema)

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        inputs = butlerQC.get(inputRefs)

        if self.config.doMatchSources:
            refObjLoader = ReferenceObjectLoader([ref.datasetRef.dataId for ref in inputRefs.refCat],
                                                 inputs.pop('refCat'),
                                                 name=self.config.connections.refCat,
                                                 config=self.config.refObjLoader,
                                                 log=self.log)
            self.match.setRefObjLoader(refObjLoader)
        else:
            # The "match" subtask is only constructed when matching is
            # enabled, so discard the reference catalog handles here.
            inputs.pop('refCat', None)

        # Set psfCache; move this to run after Gen2 deprecation
        inputs['exposure'].getPsf().setCacheCapacity(self.config.psfCache)

        # Get unique integer ID for IdFactory and RNG seeds
        exposureIdInfo = ExposureIdInfo.fromDataId(butlerQC.quantum.dataId, "tract_patch")
        inputs['exposureId'] = exposureIdInfo.expId
        idFactory = exposureIdInfo.makeSourceIdFactory()
        # Transform inputCatalog
        table = afwTable.SourceTable.make(self.schema, idFactory)
        sources = afwTable.SourceCatalog(table)
        # Load the correct input catalog
        if "scarletCatalog" in inputs:
            inputCatalog = inputs.pop("scarletCatalog")
            catalogRef = inputRefs.scarletCatalog
        else:
            inputCatalog = inputs.pop("inputCatalog")
            catalogRef = inputRefs.inputCatalog
        sources.extend(inputCatalog, self.schemaMapper)
        del inputCatalog
        # Add the HeavyFootprints to the deblended sources
        if self.config.doAddFootprints:
            modelData = inputs.pop('scarletModels')
            if self.config.doConserveFlux:
                redistributeImage = inputs['exposure'].image
            else:
                redistributeImage = None
            modelData.updateCatalogFootprints(
                catalog=sources,
                band=inputRefs.exposure.dataId["band"],
                psfModel=inputs['exposure'].getPsf(),
                redistributeImage=redistributeImage,
                removeScarletData=True,
            )
        table = sources.getTable()
        table.setMetadata(self.algMetadata)  # Capture algorithm metadata to write out to the source catalog.
        inputs['sources'] = sources

        skyMap = inputs.pop('skyMap')
        tractNumber = catalogRef.dataId['tract']
        tractInfo = skyMap[tractNumber]
        patchInfo = tractInfo.getPatchInfo(catalogRef.dataId['patch'])
        skyInfo = Struct(
            skyMap=skyMap,
            tractInfo=tractInfo,
            patchInfo=patchInfo,
            wcs=tractInfo.getWcs(),
            bbox=patchInfo.getOuterBBox()
        )
        inputs['skyInfo'] = skyInfo

        if self.config.doPropagateFlags:
            if self.config.propagateFlags.target == PropagateSourceFlagsTask:
                # New version
                ccdInputs = inputs["exposure"].getInfo().getCoaddInputs().ccds
                inputs["ccdInputs"] = ccdInputs

                if "sourceTableHandles" in inputs:
                    sourceTableHandles = inputs.pop("sourceTableHandles")
                    sourceTableHandleDict = {handle.dataId["visit"]: handle
                                             for handle in sourceTableHandles}
                    inputs["sourceTableHandleDict"] = sourceTableHandleDict
                if "finalizedSourceTableHandles" in inputs:
                    finalizedSourceTableHandles = inputs.pop("finalizedSourceTableHandles")
                    finalizedSourceTableHandleDict = {handle.dataId["visit"]: handle
                                                      for handle in finalizedSourceTableHandles}
                    inputs["finalizedSourceTableHandleDict"] = finalizedSourceTableHandleDict
            else:
                # Deprecated legacy version
                # Filter out any visit catalog that is not coadd inputs
                ccdInputs = inputs['exposure'].getInfo().getCoaddInputs().ccds
                visitKey = ccdInputs.schema.find("visit").key
                ccdKey = ccdInputs.schema.find("ccd").key
                inputVisitIds = set()
                ccdRecordsWcs = {}
                for ccdRecord in ccdInputs:
                    visit = ccdRecord.get(visitKey)
                    ccd = ccdRecord.get(ccdKey)
                    inputVisitIds.add((visit, ccd))
                    ccdRecordsWcs[(visit, ccd)] = ccdRecord.getWcs()

                inputCatalogsToKeep = []
                inputCatalogWcsUpdate = []
                for i, dataRef in enumerate(inputRefs.visitCatalogs):
                    key = (dataRef.dataId['visit'], dataRef.dataId['detector'])
                    if key in inputVisitIds:
                        inputCatalogsToKeep.append(inputs['visitCatalogs'][i])
                        inputCatalogWcsUpdate.append(ccdRecordsWcs[key])
                inputs['visitCatalogs'] = inputCatalogsToKeep
                inputs['wcsUpdates'] = inputCatalogWcsUpdate
                inputs['ccdInputs'] = ccdInputs

        outputs = self.run(**inputs)
        # Strip HeavyFootprints from child sources to save space on disk
        if self.config.doStripFootprints:
            sources = outputs.outputSources
            for source in sources[sources["parent"] != 0]:
                source.setFootprint(None)
        butlerQC.put(outputs, outputRefs)

    def run(self, exposure, sources, skyInfo, exposureId, ccdInputs=None, visitCatalogs=None, wcsUpdates=None,
            butler=None, sourceTableHandleDict=None, finalizedSourceTableHandleDict=None):
        """Run measurement algorithms on the input exposure, and optionally populate the
        resulting catalog with extra information.

        Parameters
        ----------
        exposure : `lsst.afw.image.Exposure`
            The input exposure on which measurements are to be performed.
        sources : `lsst.afw.table.SourceCatalog`
            A catalog built from the results of merged detections, or
            deblender outputs.
        skyInfo : `lsst.pipe.base.Struct`
            A struct containing information about the position of the input exposure within
            a `SkyMap`, the `SkyMap`, its `Wcs`, and its bounding box.
        exposureId : `int` or `bytes`
            Packed unique number or bytes unique to the input exposure.
        ccdInputs : `lsst.afw.table.ExposureCatalog`, optional
            Catalog containing information on the individual visits which went into making
            the coadd.
        visitCatalogs : `list` of `lsst.afw.table.SourceCatalog`, optional
            A list of source catalogs corresponding to measurements made on the individual
            visits which went into the input exposure. If `None` and ``butler`` is `None` then
            the task cannot propagate visit flags to the output catalog.
            Deprecated, to be removed with PropagateVisitFlagsTask.
        wcsUpdates : `list` of `lsst.afw.geom.SkyWcs`, optional
            If ``visitCatalogs`` is not `None` this should be a list of wcs objects which correspond
            to the input visits. Used to put all coordinates into a common system. If `None` and
            ``butler`` is `None` then the task cannot propagate visit flags to the output catalog.
            Deprecated, to be removed with PropagateVisitFlagsTask.
        butler : `None`, optional
            This was a Gen2 butler used to load visit catalogs.
            No longer used and should not be set. Will be removed in the
            future.
        sourceTableHandleDict : `dict` [`int`, `lsst.daf.butler.DeferredDatasetHandle`], optional
            Dict for sourceTable_visit handles (key is visit) for propagating flags.
            These tables are derived from the ``CalibrateTask`` sources, and contain
            astrometry and photometry flags, and optionally PSF flags.
        finalizedSourceTableHandleDict : `dict` [`int`, `lsst.daf.butler.DeferredDatasetHandle`], optional
            Dict for finalized_src_table handles (key is visit) for propagating flags.
            These tables are derived from ``FinalizeCalibrationTask`` and contain
            PSF flags from the finalized PSF estimation.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            Results of running the measurement task. Will contain the catalog in the
            ``outputSources`` attribute. Optionally will have results of matching to a
            reference catalog in the ``matchResult`` attribute, and denormalized
            matches in the ``denormMatches`` attribute.
        """
        if butler is not None:
            warnings.warn("The 'butler' parameter is no longer used and can be safely removed.",
                          category=FutureWarning, stacklevel=2)
            butler = None

        self.measurement.run(sources, exposure, exposureId=exposureId)

        if self.config.doApCorr:
            self.applyApCorr.run(
                catalog=sources,
                apCorrMap=exposure.getInfo().getApCorrMap()
            )

        # TODO DM-11568: this contiguous check-and-copy could go away if we
        # reserve enough space during SourceDetection and/or SourceDeblend.
        # NOTE: sourceSelectors require contiguous catalogs, so ensure
        # contiguity now, so views are preserved from here on.
        if not sources.isContiguous():
            sources = sources.copy(deep=True)

        if self.config.doRunCatalogCalculation:
            self.catalogCalculation.run(sources)

        self.setPrimaryFlags.run(sources, skyMap=skyInfo.skyMap, tractInfo=skyInfo.tractInfo,
                                 patchInfo=skyInfo.patchInfo)
        if self.config.doPropagateFlags:
            if self.config.propagateFlags.target == PropagateSourceFlagsTask:
                # New version
                self.propagateFlags.run(
                    sources,
                    ccdInputs,
                    sourceTableHandleDict,
                    finalizedSourceTableHandleDict
                )
            else:
                # Legacy deprecated version
                self.propagateFlags.run(
                    butler,
                    sources,
                    ccdInputs,
                    exposure.getWcs(),
                    visitCatalogs,
                    wcsUpdates
                )

        results = Struct()

        if self.config.doMatchSources:
            matchResult = self.match.run(sources, exposure.getInfo().getFilter().bandLabel)
            matches = afwTable.packMatches(matchResult.matches)
            matches.table.setMetadata(matchResult.matchMeta)
            results.matchResult = matches
            if self.config.doWriteMatchesDenormalized:
                if matchResult.matches:
                    denormMatches = denormalizeMatches(matchResult.matches, matchResult.matchMeta)
                else:
                    self.log.warning("No matches, so generating dummy denormalized matches file")
                    denormMatches = afwTable.BaseCatalog(afwTable.Schema())
                    denormMatches.setMetadata(PropertyList())
                    denormMatches.getMetadata().add("COMMENT",
                                                    "This catalog is empty because no matches were found.")
                results.denormMatches = denormMatches

        results.outputSources = sources
        return results
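
# A minimal sketch (hypothetical `coaddExposure`, `sourceCatalog`, `skyInfo`,
# and `inputSchema`; the pipeline middleware normally drives this task through
# runQuantum) of calling the measurement step directly:
#
#     config = MeasureMergedCoaddSourcesConfig()
#     config.doPropagateFlags = False
#     config.doMatchSources = False
#     task = MeasureMergedCoaddSourcesTask(schema=inputSchema, config=config)
#     result = task.run(coaddExposure, sourceCatalog, skyInfo, exposureId=12345)
#     measCatalog = result.outputSources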