lsst.pipe.tasks g0bc41560cc+192e7efc5f
multiBand.py
# This file is part of pipe_tasks.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

__all__ = ["DetectCoaddSourcesConfig", "DetectCoaddSourcesTask"]

import warnings

from lsst.pipe.base import (Struct, PipelineTask, PipelineTaskConfig, PipelineTaskConnections)
import lsst.pipe.base.connectionTypes as cT
from lsst.pex.config import Field, ConfigurableField, ChoiceField
from lsst.meas.algorithms import DynamicDetectionTask, ReferenceObjectLoader, ScaleVarianceTask
from lsst.meas.base import (
    SingleFrameMeasurementTask,
    ApplyApCorrTask,
    CatalogCalculationTask,
    SkyMapIdGeneratorConfig,
)
from lsst.meas.astrom import DirectMatchTask, denormalizeMatches
from lsst.pipe.tasks.fakes import BaseFakeSourcesTask
from lsst.pipe.tasks.setPrimaryFlags import SetPrimaryFlagsTask
from lsst.pipe.tasks.propagateSourceFlags import PropagateSourceFlagsTask
import lsst.afw.table as afwTable
import lsst.afw.math as afwMath
from lsst.daf.base import PropertyList
from lsst.skymap import BaseSkyMap

# NOTE: these imports are a convenience so multiband users only have to import this file.
from .mergeDetections import MergeDetectionsConfig, MergeDetectionsTask  # noqa: F401
from .mergeMeasurements import MergeMeasurementsConfig, MergeMeasurementsTask  # noqa: F401
from .multiBandUtils import CullPeaksConfig  # noqa: F401
from .deblendCoaddSourcesPipeline import DeblendCoaddSourcesSingleConfig  # noqa: F401
from .deblendCoaddSourcesPipeline import DeblendCoaddSourcesSingleTask  # noqa: F401
from .deblendCoaddSourcesPipeline import DeblendCoaddSourcesMultiConfig  # noqa: F401
from .deblendCoaddSourcesPipeline import DeblendCoaddSourcesMultiTask  # noqa: F401


"""
New dataset types:
* deepCoadd_det: detections from what used to be processCoadd (tract, patch, filter)
* deepCoadd_mergeDet: merged detections (tract, patch)
* deepCoadd_meas: measurements of merged detections (tract, patch, filter)
* deepCoadd_ref: reference sources (tract, patch)
All of these have associated *_schema catalogs that require no data ID and hold no records.

In addition, we have a schema-only dataset, which saves the schema for the PeakRecords in
the mergeDet, meas, and ref dataset Footprints:
* deepCoadd_peak_schema
"""


class DetectCoaddSourcesConnections(PipelineTaskConnections,
                                    dimensions=("tract", "patch", "band", "skymap"),
                                    defaultTemplates={"inputCoaddName": "deep", "outputCoaddName": "deep"}):
    detectionSchema = cT.InitOutput(
        doc="Schema of the detection catalog",
        name="{outputCoaddName}Coadd_det_schema",
        storageClass="SourceCatalog",
    )
    exposure = cT.Input(
        doc="Exposure on which detections are to be performed",
        name="{inputCoaddName}Coadd",
        storageClass="ExposureF",
        dimensions=("tract", "patch", "band", "skymap")
    )
    outputBackgrounds = cT.Output(
        doc="Output backgrounds used in detection",
        name="{outputCoaddName}Coadd_calexp_background",
        storageClass="Background",
        dimensions=("tract", "patch", "band", "skymap")
    )
    outputSources = cT.Output(
        doc="Detected sources catalog",
        name="{outputCoaddName}Coadd_det",
        storageClass="SourceCatalog",
        dimensions=("tract", "patch", "band", "skymap")
    )
    outputExposure = cT.Output(
        doc="Exposure post detection",
        name="{outputCoaddName}Coadd_calexp",
        storageClass="ExposureF",
        dimensions=("tract", "patch", "band", "skymap")
    )


class DetectCoaddSourcesConfig(PipelineTaskConfig, pipelineConnections=DetectCoaddSourcesConnections):
    """Configuration parameters for the DetectCoaddSourcesTask
    """

    doScaleVariance = Field(dtype=bool, default=True, doc="Scale variance plane using empirical noise?")
    scaleVariance = ConfigurableField(target=ScaleVarianceTask, doc="Variance rescaling")
    detection = ConfigurableField(target=DynamicDetectionTask, doc="Source detection")
    coaddName = Field(dtype=str, default="deep", doc="Name of coadd")
    doInsertFakes = Field(dtype=bool, default=False,
                          doc="Run fake sources injection task",
                          deprecated=("doInsertFakes is no longer supported. This config will be removed "
                                      "after v24."))
    insertFakes = ConfigurableField(target=BaseFakeSourcesTask,
                                    doc="Injection of fake sources for testing "
                                        "purposes (must be retargeted)",
                                    deprecated=("insertFakes is no longer supported. This config will "
                                                "be removed after v24."))
    hasFakes = Field(
        dtype=bool,
        default=False,
        doc="Should be set to True if fake sources have been inserted into the input data.",
    )
    idGenerator = SkyMapIdGeneratorConfig.make_field()

    def setDefaults(self):
        super().setDefaults()
        self.detection.thresholdType = "pixel_stdev"
        self.detection.isotropicGrow = True
        # Coadds are made from background-subtracted CCDs, so any background subtraction should be very basic
        self.detection.reEstimateBackground = False
        self.detection.background.useApprox = False
        self.detection.background.binSize = 4096
        self.detection.background.undersampleStyle = 'REDUCE_INTERP_ORDER'
        self.detection.doTempWideBackground = True  # Suppress large footprints that overwhelm the deblender
        # Include band in packed data IDs that go into object IDs (None -> "as
        # many bands as are defined", rather than the default of zero).
        self.idGenerator.packer.n_bands = None


class DetectCoaddSourcesTask(PipelineTask):
    """Detect sources on a single filter coadd.

    Coadding individual visits requires each exposure to be warped. This
    introduces covariance in the noise properties across pixels. Before
    detection, we correct the coadd variance by scaling the variance plane in
    the coadd to match the observed variance. This is an approximate
    approach -- strictly, we should propagate the full covariance matrix --
    but it is simple and works well in practice.

    After scaling the variance plane, we detect sources and generate footprints
    by delegating to the @ref SourceDetectionTask_ "detection" subtask.

    DetectCoaddSourcesTask is meant to be run after assembling a coadded image
    in a given band. The purpose of the task is to update the background,
    detect all sources in a single band and generate a set of parent
    footprints. Subsequent tasks in the multi-band processing procedure will
    merge sources across bands and, eventually, perform forced photometry.

    Parameters
    ----------
    schema : `lsst.afw.table.Schema`, optional
        Initial schema for the output catalog, modified in place to include all
        fields set by this task. If None, the source minimal schema will be used.
    **kwargs
        Additional keyword arguments.
    """

    _DefaultName = "detectCoaddSources"
    ConfigClass = DetectCoaddSourcesConfig

    def __init__(self, schema=None, **kwargs):
        # N.B. Super is used here to handle the multiple inheritance of PipelineTasks; the init tree
        # call structure has been reviewed carefully to be sure super will work as intended.
        super().__init__(**kwargs)
        if schema is None:
            schema = afwTable.SourceTable.makeMinimalSchema()
        self.schema = schema
        self.makeSubtask("detection", schema=self.schema)
        if self.config.doScaleVariance:
            self.makeSubtask("scaleVariance")

        self.detectionSchema = afwTable.SourceCatalog(self.schema)

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        inputs = butlerQC.get(inputRefs)
        idGenerator = self.config.idGenerator.apply(butlerQC.quantum.dataId)
        inputs["idFactory"] = idGenerator.make_table_id_factory()
        inputs["expId"] = idGenerator.catalog_id
        outputs = self.run(**inputs)
        butlerQC.put(outputs, outputRefs)

    def run(self, exposure, idFactory, expId):
        """Run detection on an exposure.

        First scale the variance plane to match the observed variance
        using ``ScaleVarianceTask``. Then invoke the ``SourceDetectionTask_``
        "detection" subtask to detect sources.

        Parameters
        ----------
        exposure : `lsst.afw.image.Exposure`
            Exposure on which to detect (may be background-subtracted and scaled,
            depending on configuration).
        idFactory : `lsst.afw.table.IdFactory`
            IdFactory to set source identifiers.
        expId : `int`
            Exposure identifier (integer) for RNG seed.

        Returns
        -------
        result : `lsst.pipe.base.Struct`
            Results as a struct with attributes:

            ``outputSources``
                Catalog of detections (`lsst.afw.table.SourceCatalog`).
            ``outputBackgrounds``
                Backgrounds used in detection (`lsst.afw.math.BackgroundList`).
            ``outputExposure``
                The input exposure, with metadata and variance updated as
                configured (`lsst.afw.image.Exposure`).
        """
        if self.config.doScaleVariance:
            varScale = self.scaleVariance.run(exposure.maskedImage)
            exposure.getMetadata().add("VARIANCE_SCALE", varScale)
        backgrounds = afwMath.BackgroundList()
        table = afwTable.SourceTable.make(self.schema, idFactory)
        detections = self.detection.run(table, exposure, expId=expId)
        sources = detections.sources
        if hasattr(detections, "background") and detections.background:
            for bg in detections.background:
                backgrounds.append(bg)
        return Struct(outputSources=sources, outputBackgrounds=backgrounds, outputExposure=exposure)
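
    # Hedged example of driving ``run`` directly on an in-memory coadd (the
    # ``coadd`` variable is assumed to be an `lsst.afw.image.ExposureF`; the
    # expId seed is arbitrary):
    #
    #     task = DetectCoaddSourcesTask()
    #     idFactory = afwTable.IdFactory.makeSimple()
    #     result = task.run(coadd, idFactory=idFactory, expId=0)
    #     detections = result.outputSources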


class MeasureMergedCoaddSourcesConnections(PipelineTaskConnections,
                                           dimensions=("tract", "patch", "band", "skymap"),
                                           defaultTemplates={"inputCoaddName": "deep",
                                                             "outputCoaddName": "deep",
                                                             "deblendedCatalog": "deblendedFlux"}):
    inputSchema = cT.InitInput(
        doc="Input schema for measure merged task produced by a deblender or detection task",
        name="{inputCoaddName}Coadd_deblendedFlux_schema",
        storageClass="SourceCatalog"
    )
    outputSchema = cT.InitOutput(
        doc="Output schema after all new fields are added by task",
        name="{inputCoaddName}Coadd_meas_schema",
        storageClass="SourceCatalog"
    )
    refCat = cT.PrerequisiteInput(
        doc="Reference catalog used to match measured sources against known sources",
        name="ref_cat",
        storageClass="SimpleCatalog",
        dimensions=("skypix",),
        deferLoad=True,
        multiple=True
    )
    exposure = cT.Input(
        doc="Input coadd image",
        name="{inputCoaddName}Coadd_calexp",
        storageClass="ExposureF",
        dimensions=("tract", "patch", "band", "skymap")
    )
    skyMap = cT.Input(
        doc="SkyMap to use in processing",
        name=BaseSkyMap.SKYMAP_DATASET_TYPE_NAME,
        storageClass="SkyMap",
        dimensions=("skymap",),
    )
    visitCatalogs = cT.Input(
        doc="Source catalogs for visits which overlap input tract, patch, band. Will be "
            "further filtered in the task for the purpose of propagating flags from image calibration "
            "and characterization to coadd objects. Only used in legacy PropagateVisitFlagsTask.",
        name="src",
        dimensions=("instrument", "visit", "detector"),
        storageClass="SourceCatalog",
        multiple=True
    )
    sourceTableHandles = cT.Input(
        doc=("Source tables that are derived from the ``CalibrateTask`` sources. "
             "These tables contain astrometry and photometry flags, and optionally "
             "PSF flags."),
        name="sourceTable_visit",
        storageClass="DataFrame",
        dimensions=("instrument", "visit"),
        multiple=True,
        deferLoad=True,
    )
    finalizedSourceTableHandles = cT.Input(
        doc=("Finalized source tables from ``FinalizeCalibrationTask``. These "
             "tables contain PSF flags from the finalized PSF estimation."),
        name="finalized_src_table",
        storageClass="DataFrame",
        dimensions=("instrument", "visit"),
        multiple=True,
        deferLoad=True,
    )
    inputCatalog = cT.Input(
        doc=("Name of the input catalog to use. "
             "If the single band deblender was used this should be 'deblendedFlux'. "
             "If the multi-band deblender was used this should be 'deblendedModel', "
             "or 'deblendedFlux' if the multiband deblender was configured to output "
             "deblended flux catalogs. If no deblending was performed this should "
             "be 'mergeDet'."),
        name="{inputCoaddName}Coadd_{deblendedCatalog}",
        storageClass="SourceCatalog",
        dimensions=("tract", "patch", "band", "skymap"),
    )
    scarletCatalog = cT.Input(
        doc="Catalogs produced by multiband deblending",
        name="{inputCoaddName}Coadd_deblendedCatalog",
        storageClass="SourceCatalog",
        dimensions=("tract", "patch", "skymap"),
    )
    scarletModels = cT.Input(
        doc="Multiband scarlet models produced by the deblender",
        name="{inputCoaddName}Coadd_scarletModelData",
        storageClass="ScarletModelData",
        dimensions=("tract", "patch", "skymap"),
    )
    outputSources = cT.Output(
        doc="Source catalog containing all the measurement information generated in this task",
        name="{outputCoaddName}Coadd_meas",
        dimensions=("tract", "patch", "band", "skymap"),
        storageClass="SourceCatalog",
    )
    matchResult = cT.Output(
        doc="Match catalog produced by configured matcher, optional on doMatchSources",
        name="{outputCoaddName}Coadd_measMatch",
        dimensions=("tract", "patch", "band", "skymap"),
        storageClass="Catalog",
    )
    denormMatches = cT.Output(
        doc="Denormalized Match catalog produced by configured matcher, optional on "
            "doWriteMatchesDenormalized",
        name="{outputCoaddName}Coadd_measMatchFull",
        dimensions=("tract", "patch", "band", "skymap"),
        storageClass="Catalog",
    )

    def __init__(self, *, config=None):
        super().__init__(config=config)
        if config.doPropagateFlags is False:
            self.inputs -= set(("visitCatalogs",))
            self.inputs -= set(("sourceTableHandles",))
            self.inputs -= set(("finalizedSourceTableHandles",))
        elif config.propagateFlags.target == PropagateSourceFlagsTask:
            # New PropagateSourceFlagsTask does not use visitCatalogs.
            self.inputs -= set(("visitCatalogs",))
            # Check for types of flags required.
            if not config.propagateFlags.source_flags:
                self.inputs -= set(("sourceTableHandles",))
            if not config.propagateFlags.finalized_source_flags:
                self.inputs -= set(("finalizedSourceTableHandles",))
        else:
            # Deprecated PropagateVisitFlagsTask uses visitCatalogs.
            self.inputs -= set(("sourceTableHandles",))
            self.inputs -= set(("finalizedSourceTableHandles",))

        if config.inputCatalog == "deblendedCatalog":
            self.inputs -= set(("inputCatalog",))
            # The scarlet models are only needed when footprints are added.
            if not config.doAddFootprints:
                self.inputs -= set(("scarletModels",))
        else:
            self.inputs -= set(("deblendedCatalog",))
            self.inputs -= set(("scarletModels",))

        if config.doMatchSources is False:
            self.outputs -= set(("matchResult",))

        if config.doWriteMatchesDenormalized is False:
            self.outputs -= set(("denormMatches",))
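
        # Net effect (illustrative, assuming otherwise-default configuration):
        # with inputCatalog="deblendedCatalog" and doAddFootprints=True the
        # quantum reads the scarlet catalog and model data; with
        # inputCatalog="deblendedFlux" or "mergeDet" both scarlet inputs are
        # pruned, and the flag-propagation inputs above are pruned according
        # to the propagateFlags configuration.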


class MeasureMergedCoaddSourcesConfig(PipelineTaskConfig,
                                      pipelineConnections=MeasureMergedCoaddSourcesConnections):
    """Configuration parameters for the MeasureMergedCoaddSourcesTask
    """
    inputCatalog = ChoiceField(
        dtype=str,
        default="deblendedCatalog",
        allowed={
            "deblendedCatalog": "Output catalog from ScarletDeblendTask",
            "deblendedFlux": "Output catalog from SourceDeblendTask",
            "mergeDet": "The merged detections before deblending."
        },
        doc="The name of the input catalog.",
    )
    doAddFootprints = Field(dtype=bool,
                            default=True,
                            doc="Whether or not to add footprints to the input catalog from scarlet models. "
                                "This should be true whenever using the multi-band deblender, "
                                "otherwise this should be False.")
    doConserveFlux = Field(dtype=bool, default=True,
                           doc="Whether to use the deblender models as templates to re-distribute the flux "
                               "from the 'exposure' (True), or to perform measurements on the deblender "
                               "model footprints.")
    doStripFootprints = Field(dtype=bool, default=True,
                              doc="Whether to strip footprints from the output catalog before "
                                  "saving to disk. "
                                  "This is usually done when using scarlet models to save disk space.")
    measurement = ConfigurableField(target=SingleFrameMeasurementTask, doc="Source measurement")
    setPrimaryFlags = ConfigurableField(target=SetPrimaryFlagsTask, doc="Set flags for primary tract/patch")
    doPropagateFlags = Field(
        dtype=bool, default=True,
        doc="Whether to match sources to CCD catalogs to propagate flags (to e.g. identify PSF stars)"
    )
    propagateFlags = ConfigurableField(target=PropagateSourceFlagsTask, doc="Propagate source flags to coadd")
    doMatchSources = Field(dtype=bool, default=True, doc="Match sources to reference catalog?")
    match = ConfigurableField(target=DirectMatchTask, doc="Matching to reference catalog")
    doWriteMatchesDenormalized = Field(
        dtype=bool,
        default=False,
        doc=("Write reference matches in denormalized format? "
             "This format uses more disk space, but is more convenient to read."),
    )
    coaddName = Field(dtype=str, default="deep", doc="Name of coadd")
    psfCache = Field(dtype=int, default=100, doc="Size of psfCache")
    checkUnitsParseStrict = Field(
        doc="Strictness of Astropy unit compatibility check, can be 'raise', 'warn' or 'silent'",
        dtype=str,
        default="raise",
    )
    doApCorr = Field(
        dtype=bool,
        default=True,
        doc="Apply aperture corrections"
    )
    applyApCorr = ConfigurableField(
        target=ApplyApCorrTask,
        doc="Subtask to apply aperture corrections"
    )
    doRunCatalogCalculation = Field(
        dtype=bool,
        default=True,
        doc='Run catalogCalculation task'
    )
    catalogCalculation = ConfigurableField(
        target=CatalogCalculationTask,
        doc="Subtask to run catalogCalculation plugins on catalog"
    )

    hasFakes = Field(
        dtype=bool,
        default=False,
        doc="Should be set to True if fake sources have been inserted into the input data."
    )
    idGenerator = SkyMapIdGeneratorConfig.make_field()

    @property
    def refObjLoader(self):
        return self.match.refObjLoader

    def setDefaults(self):
        super().setDefaults()
        self.measurement.plugins.names |= ['base_InputCount',
                                           'base_Variance',
                                           'base_LocalPhotoCalib',
                                           'base_LocalWcs']
        self.measurement.plugins['base_PixelFlags'].masksFpAnywhere = ['CLIPPED', 'SENSOR_EDGE',
                                                                       'INEXACT_PSF']
        self.measurement.plugins['base_PixelFlags'].masksFpCenter = ['CLIPPED', 'SENSOR_EDGE',
                                                                     'INEXACT_PSF']
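
# A hedged configuration sketch for running this task on single-band deblender
# outputs (illustrative values; scarlet model data is unavailable in this mode):
#
#     config.inputCatalog = "deblendedFlux"
#     config.doAddFootprints = False
#     config.doWriteMatchesDenormalized = True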


class MeasureMergedCoaddSourcesTask(PipelineTask):
    """Deblend sources from main catalog in each coadd separately and measure.

    Use peaks and footprints from a master catalog to perform deblending and
    measurement in each coadd.

    Given a master input catalog of sources (peaks and footprints) or deblender
    outputs (including a HeavyFootprint in each band), measure each source on
    the coadd. Repeating this procedure with the same master catalog across
    multiple coadds will generate a consistent set of child sources.

    The deblender retains all peaks and deblends any missing peaks (dropouts in
    that band) as PSFs. Source properties are measured and the @c is-primary
    flag (indicating sources with no children) is set. Visit flags are
    propagated to the coadd sources.

    Optionally, we can match the coadd sources to an external reference
    catalog.

    After MeasureMergedCoaddSourcesTask has been run on multiple coadds, we
    have a set of per-band catalogs. The next stage in the multi-band
    processing procedure will merge these measurements into a suitable catalog
    for driving forced photometry.

    Parameters
    ----------
    butler : `lsst.daf.butler.Butler` or `None`, optional
        A butler used to read the input schemas from disk or construct the reference
        catalog loader, if schema or peakSchema or refObjLoader is None.
    schema : `lsst.afw.table.Schema`, optional
        The schema of the merged detection catalog used as input to this one.
    peakSchema : `lsst.afw.table.Schema`, optional
        The schema of the PeakRecords in the Footprints in the merged detection catalog.
    refObjLoader : `lsst.meas.algorithms.ReferenceObjectLoader`, optional
        An instance of a reference object loader that supplies an external reference
        catalog. May be None if the loader can be constructed from the butler argument or all steps
        requiring a reference catalog are disabled.
    initInputs : `dict`, optional
        Dictionary that can contain a key ``inputSchema`` containing the
        input schema. If present will override the value of ``schema``.
    **kwargs
        Additional keyword arguments.
    """

    _DefaultName = "measureCoaddSources"
    ConfigClass = MeasureMergedCoaddSourcesConfig

    def __init__(self, butler=None, schema=None, peakSchema=None, refObjLoader=None, initInputs=None,
                 **kwargs):
        super().__init__(**kwargs)
        self.deblended = self.config.inputCatalog.startswith("deblended")
        self.inputCatalog = "Coadd_" + self.config.inputCatalog
        if initInputs is not None:
            schema = initInputs['inputSchema'].schema
        if schema is None:
            assert butler is not None, "Neither butler nor schema is defined"
            schema = butler.get(self.config.coaddName + self.inputCatalog + "_schema").schema
        self.schemaMapper = afwTable.SchemaMapper(schema)
        self.schemaMapper.addMinimalSchema(schema)
        self.schema = self.schemaMapper.getOutputSchema()
        self.algMetadata = PropertyList()
        self.makeSubtask("measurement", schema=self.schema, algMetadata=self.algMetadata)
        self.makeSubtask("setPrimaryFlags", schema=self.schema)
        if self.config.doMatchSources:
            self.makeSubtask("match", butler=butler, refObjLoader=refObjLoader)
        if self.config.doPropagateFlags:
            self.makeSubtask("propagateFlags", schema=self.schema)
        self.schema.checkUnits(parse_strict=self.config.checkUnitsParseStrict)
        if self.config.doApCorr:
            self.makeSubtask("applyApCorr", schema=self.schema)
        if self.config.doRunCatalogCalculation:
            self.makeSubtask("catalogCalculation", schema=self.schema)

        self.outputSchema = afwTable.SourceCatalog(self.schema)
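
    # Standalone-construction sketch (hedged; ordinarily the middleware passes
    # ``initInputs`` with the schema, so this path is mainly for interactive use):
    #
    #     schema = butler.get("deepCoadd_deblendedFlux_schema").schema
    #     task = MeasureMergedCoaddSourcesTask(schema=schema)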

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        inputs = butlerQC.get(inputRefs)

        refObjLoader = ReferenceObjectLoader([ref.datasetRef.dataId for ref in inputRefs.refCat],
                                             inputs.pop('refCat'),
                                             name=self.config.connections.refCat,
                                             config=self.config.refObjLoader,
                                             log=self.log)
        self.match.setRefObjLoader(refObjLoader)

        # Set psfCache
        # move this to run after gen2 deprecation
        inputs['exposure'].getPsf().setCacheCapacity(self.config.psfCache)

        # Get unique integer ID for IdFactory and RNG seeds; only the latter
        # should really be used as the IDs all come from the input catalog.
        idGenerator = self.config.idGenerator.apply(butlerQC.quantum.dataId)
        inputs['exposureId'] = idGenerator.catalog_id

        # Transform inputCatalog
        table = afwTable.SourceTable.make(self.schema, idGenerator.make_table_id_factory())
        sources = afwTable.SourceCatalog(table)
        # Load the correct input catalog
        if "scarletCatalog" in inputs:
            inputCatalog = inputs.pop("scarletCatalog")
            catalogRef = inputRefs.scarletCatalog
        else:
            inputCatalog = inputs.pop("inputCatalog")
            catalogRef = inputRefs.inputCatalog
        sources.extend(inputCatalog, self.schemaMapper)
        del inputCatalog
        # Add the HeavyFootprints to the deblended sources
        if self.config.doAddFootprints:
            modelData = inputs.pop('scarletModels')
            if self.config.doConserveFlux:
                redistributeImage = inputs['exposure'].image
            else:
                redistributeImage = None
            modelData.updateCatalogFootprints(
                catalog=sources,
                band=inputRefs.exposure.dataId["band"],
                psfModel=inputs['exposure'].getPsf(),
                redistributeImage=redistributeImage,
                removeScarletData=True,
            )
        table = sources.getTable()
        table.setMetadata(self.algMetadata)  # Capture algorithm metadata to write out to the source catalog.
        inputs['sources'] = sources

        skyMap = inputs.pop('skyMap')
        tractNumber = catalogRef.dataId['tract']
        tractInfo = skyMap[tractNumber]
        patchInfo = tractInfo.getPatchInfo(catalogRef.dataId['patch'])
        skyInfo = Struct(
            skyMap=skyMap,
            tractInfo=tractInfo,
            patchInfo=patchInfo,
            wcs=tractInfo.getWcs(),
            bbox=patchInfo.getOuterBBox()
        )
        inputs['skyInfo'] = skyInfo

        if self.config.doPropagateFlags:
            if self.config.propagateFlags.target == PropagateSourceFlagsTask:
                # New version
                ccdInputs = inputs["exposure"].getInfo().getCoaddInputs().ccds
                inputs["ccdInputs"] = ccdInputs

                if "sourceTableHandles" in inputs:
                    sourceTableHandles = inputs.pop("sourceTableHandles")
                    sourceTableHandleDict = {handle.dataId["visit"]: handle
                                             for handle in sourceTableHandles}
                    inputs["sourceTableHandleDict"] = sourceTableHandleDict
                if "finalizedSourceTableHandles" in inputs:
                    finalizedSourceTableHandles = inputs.pop("finalizedSourceTableHandles")
                    finalizedSourceTableHandleDict = {handle.dataId["visit"]: handle
                                                      for handle in finalizedSourceTableHandles}
                    inputs["finalizedSourceTableHandleDict"] = finalizedSourceTableHandleDict
            else:
                # Deprecated legacy version
                # Filter out any visit catalog that is not coadd inputs
                ccdInputs = inputs['exposure'].getInfo().getCoaddInputs().ccds
                visitKey = ccdInputs.schema.find("visit").key
                ccdKey = ccdInputs.schema.find("ccd").key
                inputVisitIds = set()
                ccdRecordsWcs = {}
                for ccdRecord in ccdInputs:
                    visit = ccdRecord.get(visitKey)
                    ccd = ccdRecord.get(ccdKey)
                    inputVisitIds.add((visit, ccd))
                    ccdRecordsWcs[(visit, ccd)] = ccdRecord.getWcs()

                inputCatalogsToKeep = []
                inputCatalogWcsUpdate = []
                for i, dataRef in enumerate(inputRefs.visitCatalogs):
                    key = (dataRef.dataId['visit'], dataRef.dataId['detector'])
                    if key in inputVisitIds:
                        inputCatalogsToKeep.append(inputs['visitCatalogs'][i])
                        inputCatalogWcsUpdate.append(ccdRecordsWcs[key])
                inputs['visitCatalogs'] = inputCatalogsToKeep
                inputs['wcsUpdates'] = inputCatalogWcsUpdate
                inputs['ccdInputs'] = ccdInputs

        outputs = self.run(**inputs)
        # Strip HeavyFootprints to save space on disk
        sources = outputs.outputSources
        butlerQC.put(outputs, outputRefs)

    def run(self, exposure, sources, skyInfo, exposureId, ccdInputs=None, visitCatalogs=None, wcsUpdates=None,
            butler=None, sourceTableHandleDict=None, finalizedSourceTableHandleDict=None):
        """Run measurement algorithms on the input exposure, and optionally populate the
        resulting catalog with extra information.

        Parameters
        ----------
        exposure : `lsst.afw.image.Exposure`
            The input exposure on which measurements are to be performed.
        sources : `lsst.afw.table.SourceCatalog`
            A catalog built from the results of merged detections, or
            deblender outputs.
        skyInfo : `lsst.pipe.base.Struct`
            A struct containing information about the position of the input exposure within
            a `SkyMap`, the `SkyMap`, its `Wcs`, and its bounding box.
        exposureId : `int` or `bytes`
            Packed unique number or bytes unique to the input exposure.
        ccdInputs : `lsst.afw.table.ExposureCatalog`, optional
            Catalog containing information on the individual visits which went into making
            the coadd.
        visitCatalogs : `list` of `lsst.afw.table.SourceCatalog`, optional
            A list of source catalogs corresponding to measurements made on the individual
            visits which went into the input exposure. If None and butler is `None` then
            the task cannot propagate visit flags to the output catalog.
            Deprecated, to be removed with PropagateVisitFlagsTask.
        wcsUpdates : `list` of `lsst.afw.geom.SkyWcs`, optional
            If visitCatalogs is not `None` this should be a list of wcs objects which correspond
            to the input visits. Used to put all coordinates to common system. If `None` and
            butler is `None` then the task cannot propagate visit flags to the output catalog.
            Deprecated, to be removed with PropagateVisitFlagsTask.
        butler : `None`, optional
            This was a Gen2 butler used to load visit catalogs.
            No longer used and should not be set. Will be removed in the
            future.
        sourceTableHandleDict : `dict` [`int`, `lsst.daf.butler.DeferredDatasetHandle`], optional
            Dict for sourceTable_visit handles (key is visit) for propagating flags.
            These tables are derived from the ``CalibrateTask`` sources, and contain
            astrometry and photometry flags, and optionally PSF flags.
        finalizedSourceTableHandleDict : `dict` [`int`, `lsst.daf.butler.DeferredDatasetHandle`], optional
            Dict for finalized_src_table handles (key is visit) for propagating flags.
            These tables are derived from ``FinalizeCalibrationTask`` and contain
            PSF flags from the finalized PSF estimation.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            Results of running measurement task. Will contain the catalog in the
            ``outputSources`` attribute. Optionally will have results of matching to a
            reference catalog in the ``matchResult`` attribute, and denormalized
            matches in the ``denormMatches`` attribute.
        """
        if butler is not None:
            warnings.warn("The 'butler' parameter is no longer used and can be safely removed.",
                          category=FutureWarning, stacklevel=2)
            butler = None

        self.measurement.run(sources, exposure, exposureId=exposureId)

        if self.config.doApCorr:
            self.applyApCorr.run(
                catalog=sources,
                apCorrMap=exposure.getInfo().getApCorrMap()
            )

        # TODO DM-11568: this contiguous check-and-copy could go away if we
        # reserve enough space during SourceDetection and/or SourceDeblend.
        # NOTE: sourceSelectors require contiguous catalogs, so ensure
        # contiguity now, so views are preserved from here on.
        if not sources.isContiguous():
            sources = sources.copy(deep=True)

        if self.config.doRunCatalogCalculation:
            self.catalogCalculation.run(sources)

        self.setPrimaryFlags.run(sources, skyMap=skyInfo.skyMap, tractInfo=skyInfo.tractInfo,
                                 patchInfo=skyInfo.patchInfo)
        if self.config.doPropagateFlags:
            if self.config.propagateFlags.target == PropagateSourceFlagsTask:
                # New version
                self.propagateFlags.run(
                    sources,
                    ccdInputs,
                    sourceTableHandleDict,
                    finalizedSourceTableHandleDict
                )
            else:
                # Legacy deprecated version
                self.propagateFlags.run(
                    butler,
                    sources,
                    ccdInputs,
                    exposure.getWcs(),
                    visitCatalogs,
                    wcsUpdates
                )

        results = Struct()

        if self.config.doMatchSources:
            matchResult = self.match.run(sources, exposure.getInfo().getFilter().bandLabel)
            matches = afwTable.packMatches(matchResult.matches)
            matches.table.setMetadata(matchResult.matchMeta)
            results.matchResult = matches
            if self.config.doWriteMatchesDenormalized:
                if matchResult.matches:
                    denormMatches = denormalizeMatches(matchResult.matches, matchResult.matchMeta)
                else:
                    self.log.warning("No matches, so generating dummy denormalized matches file")
                    denormMatches = afwTable.BaseCatalog(afwTable.Schema())
                    denormMatches.setMetadata(PropertyList())
                    denormMatches.getMetadata().add("COMMENT",
                                                    "This catalog is empty because no matches were found.")
                results.denormMatches = denormMatches

        results.outputSources = sources
        return results
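
    # Hedged end-to-end sketch of calling ``run`` outside the middleware. All
    # of the inputs (``coadd``, ``mergedSources``, ``skyInfo``) are assumed to
    # have been assembled beforehand, e.g. as in ``runQuantum`` above:
    #
    #     task = MeasureMergedCoaddSourcesTask(schema=mergedSources.schema)
    #     results = task.run(exposure=coadd, sources=mergedSources,
    #                        skyInfo=skyInfo, exposureId=42)
    #     measCat = results.outputSources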