Coverage for python/lsst/meas/base/forcedPhotCoadd.py: 29%
171 statements
« prev ^ index » next coverage.py v6.4.1, created at 2022-06-17 02:19 -0700
1# This file is part of meas_base.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <https://www.gnu.org/licenses/>.
22import lsst.pex.config
23import lsst.afw.table
25import lsst.coadd.utils as coaddUtils
26import lsst.pipe.base as pipeBase
27from lsst.obs.base import ExposureIdInfo
29from .references import MultiBandReferencesTask
30from .forcedMeasurement import ForcedMeasurementTask
31from .applyApCorr import ApplyApCorrTask
32from .catalogCalculation import CatalogCalculationTask
34__all__ = ("ForcedPhotCoaddConfig", "ForcedPhotCoaddTask")
class ForcedPhotCoaddRunner(pipeBase.ButlerInitializedTaskRunner):
    """Task runner that forwards the ``--psfCache`` command-line value to
    `ForcedPhotCoaddTask`.
    """

    @staticmethod
    def getTargetList(parsedCmd, **kwargs):
        # Delegate to the base runner, injecting the psfCache setting parsed
        # from the command line so each task invocation receives it.
        baseRunner = pipeBase.ButlerInitializedTaskRunner
        return baseRunner.getTargetList(parsedCmd, psfCache=parsedCmd.psfCache)
class ForcedPhotCoaddConnections(pipeBase.PipelineTaskConnections,
                                 dimensions=("band", "skymap", "tract", "patch"),
                                 defaultTemplates={"inputCoaddName": "deep",
                                                   "outputCoaddName": "deep"}):
    """Butler connections for `ForcedPhotCoaddTask`.

    The footprint-source connections (``scarletModels`` and
    ``footprintCatInBand``) are mutually exclusive; ``__init__`` prunes
    whichever one the configured ``footprintDatasetName`` does not select.
    """

    inputSchema = pipeBase.connectionTypes.InitInput(
        doc="Schema for the input measurement catalogs.",
        name="{inputCoaddName}Coadd_ref_schema",
        storageClass="SourceCatalog",
    )
    outputSchema = pipeBase.connectionTypes.InitOutput(
        doc="Schema for the output forced measurement catalogs.",
        name="{outputCoaddName}Coadd_forced_src_schema",
        storageClass="SourceCatalog",
    )
    exposure = pipeBase.connectionTypes.Input(
        doc="Input exposure to perform photometry on.",
        name="{inputCoaddName}Coadd_calexp",
        storageClass="ExposureF",
        dimensions=["band", "skymap", "tract", "patch"],
    )
    refCat = pipeBase.connectionTypes.Input(
        doc="Catalog of shapes and positions at which to force photometry.",
        name="{inputCoaddName}Coadd_ref",
        storageClass="SourceCatalog",
        dimensions=["skymap", "tract", "patch"],
    )
    refCatInBand = pipeBase.connectionTypes.Input(
        doc="Catalog of shapes and positions in the band having forced photometry done",
        name="{inputCoaddName}Coadd_meas",
        storageClass="SourceCatalog",
        dimensions=("band", "skymap", "tract", "patch")
    )
    footprintCatInBand = pipeBase.connectionTypes.Input(
        doc="Catalog of footprints to attach to sources",
        name="{inputCoaddName}Coadd_deblendedFlux",
        storageClass="SourceCatalog",
        dimensions=("band", "skymap", "tract", "patch")
    )
    scarletModels = pipeBase.connectionTypes.Input(
        doc="Multiband scarlet models produced by the deblender",
        name="{inputCoaddName}Coadd_scarletModelData",
        storageClass="ScarletModelData",
        dimensions=("tract", "patch", "skymap"),
    )
    refWcs = pipeBase.connectionTypes.Input(
        doc="Reference world coordinate system.",
        name="{inputCoaddName}Coadd.wcs",
        storageClass="Wcs",
        dimensions=["band", "skymap", "tract", "patch"],
    )  # used in place of a skymap wcs because of DM-28880
    measCat = pipeBase.connectionTypes.Output(
        doc="Output forced photometry catalog.",
        name="{outputCoaddName}Coadd_forced_src",
        storageClass="SourceCatalog",
        dimensions=["band", "skymap", "tract", "patch"],
    )

    def __init__(self, *, config=None):
        super().__init__(config=config)
        # Keep only the footprint input selected by the configuration;
        # leaving an unused connection in place would make the butler
        # require a dataset this task never reads.
        if config.footprintDatasetName != "ScarletModelData":
            self.inputs.remove("scarletModels")
        if config.footprintDatasetName != "DeblendedFlux":
            self.inputs.remove("footprintCatInBand")
class ForcedPhotCoaddConfig(pipeBase.PipelineTaskConfig,
                            pipelineConnections=ForcedPhotCoaddConnections):
    """Configuration for `ForcedPhotCoaddTask`."""

    references = lsst.pex.config.ConfigurableField(
        target=MultiBandReferencesTask,
        doc="subtask to retrieve reference source catalog"
    )
    measurement = lsst.pex.config.ConfigurableField(
        target=ForcedMeasurementTask,
        doc="subtask to do forced measurement"
    )
    coaddName = lsst.pex.config.Field(
        doc="coadd name: typically one of deep or goodSeeing",
        dtype=str,
        default="deep",
    )
    doApCorr = lsst.pex.config.Field(
        dtype=bool,
        default=True,
        doc="Run subtask to apply aperture corrections"
    )
    applyApCorr = lsst.pex.config.ConfigurableField(
        target=ApplyApCorrTask,
        doc="Subtask to apply aperture corrections"
    )
    catalogCalculation = lsst.pex.config.ConfigurableField(
        target=CatalogCalculationTask,
        doc="Subtask to run catalogCalculation plugins on catalog"
    )
    footprintDatasetName = lsst.pex.config.Field(
        doc="Dataset (without coadd prefix) that should be used to obtain (Heavy)Footprints for sources. "
            "Must have IDs that match those of the reference catalog."
            "If None, Footprints will be generated by transforming the reference Footprints.",
        dtype=str,
        default="ScarletModelData",
        optional=True
    )
    doConserveFlux = lsst.pex.config.Field(
        dtype=bool,
        default=True,
        doc="Whether to use the deblender models as templates to re-distribute the flux "
            "from the 'exposure' (True), or to perform measurements on the deblender model footprints. "
            "If footprintDatasetName != 'ScarletModelData' then this field is ignored.")
    doStripFootprints = lsst.pex.config.Field(
        dtype=bool,
        default=True,
        doc="Whether to strip footprints from the output catalog before "
            "saving to disk. "
            "This is usually done when using scarlet models to save disk space.")
    hasFakes = lsst.pex.config.Field(
        dtype=bool,
        default=False,
        doc="Should be set to True if fake sources have been inserted into the input data."
    )

    def setDefaults(self):
        # Docstring inherited.
        # Make catalogCalculation a no-op by default as no modelFlux is setup by default in
        # ForcedMeasurementTask
        super().setDefaults()

        self.catalogCalculation.plugins.names = []
        # Propagate the reference-catalog IDs and parent links so forced
        # sources can be matched back to their reference sources.
        self.measurement.copyColumns["id"] = "id"
        self.measurement.copyColumns["parent"] = "parent"
        self.references.removePatchOverlaps = False  # see validate() for why
        self.measurement.plugins.names |= ['base_InputCount', 'base_Variance']
        # Flag footprints touching coadd-specific mask planes.
        self.measurement.plugins['base_PixelFlags'].masksFpAnywhere = ['CLIPPED', 'SENSOR_EDGE',
                                                                       'REJECTED', 'INEXACT_PSF']
        self.measurement.plugins['base_PixelFlags'].masksFpCenter = ['CLIPPED', 'SENSOR_EDGE',
                                                                     'REJECTED', 'INEXACT_PSF']

    def validate(self):
        # Docstring inherited.
        super().validate()
        # Deblended footprints pair parents with children; dropping parents
        # via removePatchOverlaps while noise-replacement is active would
        # leave orphaned children, so forbid the combination.
        if (self.measurement.doReplaceWithNoise and self.footprintDatasetName is not None
                and self.references.removePatchOverlaps):
            raise ValueError("Cannot use removePatchOverlaps=True with deblended footprints, as parent "
                             "sources may be rejected while their children are not.")
class ForcedPhotCoaddTask(pipeBase.PipelineTask, pipeBase.CmdLineTask):
    """A command-line driver for performing forced measurement on coadd images.

    Parameters
    ----------
    butler : `lsst.daf.persistence.butler.Butler`, optional
        A Butler which will be passed to the references subtask to allow it to
        load its schema from disk. Optional, but must be specified if
        ``refSchema`` is not; if both are specified, ``refSchema`` takes
        precedence.
    refSchema : `lsst.afw.table.Schema`, optional
        The schema of the reference catalog, passed to the constructor of the
        references subtask. Optional, but must be specified if ``butler`` is
        not; if both are specified, ``refSchema`` takes precedence.
    **kwds
        Keyword arguments are passed to the supertask constructor.
    """

    ConfigClass = ForcedPhotCoaddConfig
    RunnerClass = ForcedPhotCoaddRunner
    _DefaultName = "forcedPhotCoadd"
    dataPrefix = "deepCoadd_"

    def __init__(self, butler=None, refSchema=None, initInputs=None, **kwds):
        super().__init__(**kwds)

        # In Gen3 the reference schema arrives via initInputs; it takes
        # precedence over any explicitly supplied refSchema.
        if initInputs is not None:
            refSchema = initInputs['inputSchema'].schema

        self.makeSubtask("references", butler=butler, schema=refSchema)
        if refSchema is None:
            refSchema = self.references.schema
        self.makeSubtask("measurement", refSchema=refSchema)
        # It is necessary to get the schema internal to the forced measurement task until such a time
        # that the schema is not owned by the measurement task, but is passed in by an external caller
        if self.config.doApCorr:
            self.makeSubtask("applyApCorr", schema=self.measurement.schema)
        self.makeSubtask('catalogCalculation', schema=self.measurement.schema)
        self.outputSchema = lsst.afw.table.SourceCatalog(self.measurement.schema)

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        inputs = butlerQC.get(inputRefs)

        refCatInBand = inputs.pop('refCatInBand')
        # Pull out whichever footprint dataset the connections class kept
        # (see ForcedPhotCoaddConnections.__init__); None means footprints
        # come from refCatInBand itself.
        if self.config.footprintDatasetName == "ScarletModelData":
            footprintData = inputs.pop("scarletModels")
        elif self.config.footprintDatasetName == "DeblendedFlux":
            # Fixed typo: was "footprintCatIndBand", which raised KeyError
            # whenever footprintDatasetName == "DeblendedFlux".
            footprintData = inputs.pop("footprintCatInBand")
        else:
            footprintData = None
        inputs['measCat'], inputs['exposureId'] = self.generateMeasCat(inputRefs.exposure.dataId,
                                                                       inputs['exposure'],
                                                                       inputs['refCat'],
                                                                       refCatInBand,
                                                                       inputs['refWcs'],
                                                                       "tract_patch",
                                                                       footprintData)
        outputs = self.run(**inputs)
        # Strip HeavyFootprints to save space on disk
        if self.config.footprintDatasetName == "ScarletModelData" and self.config.doStripFootprints:
            sources = outputs.measCat
            for source in sources[sources["parent"] != 0]:
                source.setFootprint(None)
        butlerQC.put(outputs, outputRefs)

    def generateMeasCat(self, exposureDataId, exposure, refCat, refCatInBand, refWcs, idPackerName,
                        footprintData):
        """Generate a measurement catalog for Gen3.

        Parameters
        ----------
        exposureDataId : `DataId`
            Butler dataId for this exposure.
        exposure : `lsst.afw.image.exposure.Exposure`
            Exposure to generate the catalog for.
        refCat : `lsst.afw.table.SourceCatalog`
            Catalog of shapes and positions at which to force photometry.
        refCatInBand : `lsst.afw.table.SourceCatalog`
            Catalog of shapes and position in the band forced photometry is
            currently being performed
        refWcs : `lsst.afw.image.SkyWcs`
            Reference world coordinate system.
        idPackerName : `str`
            Type of ID packer to construct from the registry.
        footprintData : `ScarletDataModel` or `lsst.afw.table.SourceCatalog`
            Either the scarlet data models or the deblended catalog
            containing footprints.
            If `footprintData` is `None` then the footprints contained
            in `refCatInBand` are used.

        Returns
        -------
        measCat : `lsst.afw.table.SourceCatalog`
            Catalog of forced sources to measure.
        expId : `int`
            Unique binary id associated with the input exposure

        Raises
        ------
        LookupError
            Raised if a footprint with a given source id was in the reference
            catalog but not in the reference catalog in band (meaning there
            was some sort of mismatch in the two input catalogs)
        """
        exposureIdInfo = ExposureIdInfo.fromDataId(exposureDataId, idPackerName)
        idFactory = exposureIdInfo.makeSourceIdFactory()

        measCat = self.measurement.generateMeasCat(exposure, refCat, refWcs,
                                                   idFactory=idFactory)
        # attach footprints here, as the attachFootprints method is geared for gen2
        # and is not worth modifying, as this can naturally live inside this method
        if self.config.footprintDatasetName == "ScarletModelData":
            # Load the scarlet models
            self._attachScarletFootprints(
                catalog=measCat,
                modelData=footprintData,
                exposure=exposure,
                band=exposureDataId["band"]
            )
        else:
            if self.config.footprintDatasetName is None:
                footprintCat = refCatInBand
            else:
                footprintCat = footprintData
            for srcRecord in measCat:
                fpRecord = footprintCat.find(srcRecord.getId())
                if fpRecord is None:
                    raise LookupError("Cannot find Footprint for source {}; please check that {} "
                                      "IDs are compatible with reference source IDs"
                                      .format(srcRecord.getId(), footprintCat))
                srcRecord.setFootprint(fpRecord.getFootprint())
        return measCat, exposureIdInfo.expId

    def runDataRef(self, dataRef, psfCache=None):
        """Perform forced measurement on a single exposure.

        Parameters
        ----------
        dataRef : `lsst.daf.persistence.ButlerDataRef`
            Passed to the ``references`` subtask to obtain the reference WCS,
            the ``getExposure`` method (implemented by derived classes) to
            read the measurment image, and the ``fetchReferences`` method to
            get the exposure and load the reference catalog (see
            :lsst-task`lsst.meas.base.references.CoaddSrcReferencesTask`).
            Refer to derived class documentation for details of the datasets
            and data ID keys which are used.
        psfCache : `int`, optional
            Size of PSF cache, or `None`. The size of the PSF cache can have
            a significant effect upon the runtime for complicated PSF models.

        Notes
        -----
        Sources are generated with ``generateMeasCat`` in the ``measurement``
        subtask. These are passed to ``measurement``'s ``run`` method, which
        fills the source catalog with the forced measurement results. The
        sources are then passed to the ``writeOutputs`` method (implemented by
        derived classes) which writes the outputs.
        """
        refWcs = self.references.getWcs(dataRef)
        exposure = self.getExposure(dataRef)
        if psfCache is not None:
            exposure.getPsf().setCacheCapacity(psfCache)
        refCat = self.fetchReferences(dataRef, exposure)

        exposureId = coaddUtils.getGen3CoaddExposureId(dataRef, coaddName=self.config.coaddName,
                                                       includeBand=False, log=self.log)
        measCat = self.measurement.generateMeasCat(
            exposure, refCat, refWcs, idFactory=self.makeIdFactory(dataRef, exposureId=exposureId))
        self.log.info("Performing forced measurement on %s", dataRef.dataId)
        self.attachFootprints(measCat, refCat, exposure, refWcs, dataRef)

        forcedPhotResult = self.run(measCat, exposure, refCat, refWcs, exposureId=exposureId)

        self.writeOutput(dataRef, forcedPhotResult.measCat)

    def run(self, measCat, exposure, refCat, refWcs, exposureId=None):
        """Perform forced measurement on a single exposure.

        Parameters
        ----------
        measCat : `lsst.afw.table.SourceCatalog`
            The measurement catalog, based on the sources listed in the
            reference catalog.
        exposure : `lsst.afw.image.Exposure`
            The measurement image upon which to perform forced detection.
        refCat : `lsst.afw.table.SourceCatalog`
            The reference catalog of sources to measure.
        refWcs : `lsst.afw.image.SkyWcs`
            The WCS for the references.
        exposureId : `int`
            Optional unique exposureId used for random seed in measurement
            task.

        Returns
        -------
        result : ~`lsst.pipe.base.Struct`
            Structure with fields:

            ``measCat``
                Catalog of forced measurement results
                (`lsst.afw.table.SourceCatalog`).
        """
        self.measurement.run(measCat, exposure, refCat, refWcs, exposureId=exposureId)
        if self.config.doApCorr:
            self.applyApCorr.run(
                catalog=measCat,
                apCorrMap=exposure.getInfo().getApCorrMap()
            )
        self.catalogCalculation.run(measCat)

        return pipeBase.Struct(measCat=measCat)

    def makeIdFactory(self, dataRef, exposureId):
        """Create an object that generates globally unique source IDs.

        Source IDs are created based on a per-CCD ID and the ID of the CCD
        itself.

        Parameters
        ----------
        dataRef : `lsst.daf.persistence.ButlerDataRef`
            Butler data reference. The "CoaddId_bits" and "CoaddId" datasets
            are accessed. The data ID must have tract and patch keys.
        """
        # With the default configuration, this IdFactory doesn't do anything,
        # because the IDs it generates are immediately overwritten by the ID
        # from the reference catalog (since that's in
        # config.measurement.copyColumns). But we create one here anyway, to
        # allow us to revert back to the old behavior of generating new forced
        # source IDs, just by renaming the ID in config.copyColumns to
        # "object_id".
        exposureIdInfo = ExposureIdInfo(exposureId, dataRef.get(self.config.coaddName + "CoaddId_bits"))
        return exposureIdInfo.makeSourceIdFactory()

    def fetchReferences(self, dataRef, exposure):
        """Return an iterable of reference sources which overlap the exposure.

        Parameters
        ----------
        dataRef : `lsst.daf.persistence.ButlerDataRef`
            Butler data reference corresponding to the image to be measured;
            should have tract, patch, and filter keys.

        exposure : `lsst.afw.image.Exposure`
            Unused.

        Notes
        -----
        All work is delegated to the references subtask; see
        `CoaddSrcReferencesTask` for information about the default behavior.
        """
        skyMap = dataRef.get(self.dataPrefix + "skyMap", immediate=True)
        tractInfo = skyMap[dataRef.dataId["tract"]]
        # Gen2 patch data IDs are "x,y" strings; convert to an int tuple.
        patch = tuple(int(v) for v in dataRef.dataId["patch"].split(","))
        patchInfo = tractInfo.getPatchInfo(patch)
        references = lsst.afw.table.SourceCatalog(self.references.schema)
        references.extend(self.references.fetchInPatches(dataRef, patchList=[patchInfo]))
        return references

    def attachFootprints(self, sources, refCat, exposure, refWcs, dataRef):
        r"""Attach Footprints to source records.

        For coadd forced photometry, we use the deblended "heavy"
        `~lsst.afw.detection.Footprint`\ s from the single-band measurements
        of the same band - because we've guaranteed that the peaks (and hence
        child sources) will be consistent across all bands before we get to
        measurement, this should yield reasonable deblending for most sources.
        Its most likely limitation is that it will not provide good flux
        upper limits for sources that were not detected in this band but were
        blended with sources that were.
        """
        if self.config.footprintDatasetName is None:
            return self.measurement.attachTransformedFootprints(sources, refCat, exposure, refWcs)

        self.log.info("Loading deblended footprints for sources from %s, %s",
                      self.config.footprintDatasetName, dataRef.dataId)

        if self.config.footprintDatasetName == "ScarletModelData":
            # Load the scarlet models
            dataModel = dataRef.get("%sCoadd_%s" % (self.config.coaddName, self.config.footprintDatasetName),
                                    immediate=True)
            self._attachScarletFootprints(refCat, dataModel, exposure, dataRef.dataId["band"])
        else:
            fpCat = dataRef.get("%sCoadd_%s" % (self.config.coaddName, self.config.footprintDatasetName),
                                immediate=True)
            for refRecord, srcRecord in zip(refCat, sources):
                fpRecord = fpCat.find(refRecord.getId())
                if fpRecord is None:
                    raise LookupError("Cannot find Footprint for source %s; please check that %sCoadd_%s "
                                      "IDs are compatible with reference source IDs" %
                                      (srcRecord.getId(), self.config.coaddName,
                                       self.config.footprintDatasetName))
                srcRecord.setFootprint(fpRecord.getFootprint())

    def _attachScarletFootprints(self, catalog, modelData, exposure, band):
        """Attach scarlet models as HeavyFootprints
        """
        if self.config.doConserveFlux:
            redistributeImage = exposure.image
        else:
            redistributeImage = None
        # Attach the footprints
        modelData.updateCatalogFootprints(
            catalog=catalog,
            band=band,
            psfModel=exposure.getPsf(),
            redistributeImage=redistributeImage,
            removeScarletData=True,
            updateFluxColumns=False,
        )

    def getExposure(self, dataRef):
        """Read input exposure on which measurement will be performed.

        Parameters
        ----------
        dataRef : `lsst.daf.persistence.ButlerDataRef`
            Butler data reference.
        """
        if self.config.hasFakes:
            name = "fakes_" + self.config.coaddName + "Coadd_calexp"
        else:
            name = self.config.coaddName + "Coadd_calexp"

        return dataRef.get(name) if dataRef.datasetExists(name) else None

    def writeOutput(self, dataRef, sources):
        """Write forced source table

        Parameters
        ----------
        dataRef : `lsst.daf.persistence.ButlerDataRef`
            Butler data reference. The forced_src dataset (with
            self.dataPrefix prepended) is all that will be modified.
        sources : `lsst.afw.table.SourceCatalog`
            Catalog of sources to save.
        """
        dataRef.put(sources, self.dataPrefix + "forced_src", flags=lsst.afw.table.SOURCE_IO_NO_FOOTPRINTS)

    def getSchemaCatalogs(self):
        """The schema catalogs that will be used by this task.

        Returns
        -------
        schemaCatalogs : `dict`
            Dictionary mapping dataset type to schema catalog.

        Notes
        -----
        There is only one schema for each type of forced measurement. The
        dataset type for this measurement is defined in the mapper.
        """
        catalog = lsst.afw.table.SourceCatalog(self.measurement.schema)
        catalog.getTable().setMetadata(self.measurement.algMetadata)
        datasetType = self.dataPrefix + "forced_src"
        return {datasetType: catalog}

    def _getConfigName(self):
        # Documented in superclass
        return self.dataPrefix + "forced_config"

    def _getMetadataName(self):
        # Documented in superclass
        return self.dataPrefix + "forced_metadata"

    @classmethod
    def _makeArgumentParser(cls):
        parser = pipeBase.ArgumentParser(name=cls._DefaultName)
        parser.add_id_argument("--id", "deepCoadd_forced_src", help="data ID, with raw CCD keys + tract",
                               ContainerClass=coaddUtils.CoaddDataIdContainer)
        parser.add_argument("--psfCache", type=int, default=100, help="Size of CoaddPsf cache")
        return parser