Coverage for python/lsst/meas/base/tests.py: 15% (397 statements)

# This file is part of meas_base.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

import numpy as np

import lsst.geom
import lsst.afw.table
import lsst.afw.image
import lsst.afw.detection
import lsst.afw.geom
import lsst.daf.base  # for PropertyList, used by the AlgorithmTestCase helpers below
import lsst.pex.exceptions

from .sfm import SingleFrameMeasurementTask
from .forcedMeasurement import ForcedMeasurementTask
from . import CentroidResultKey

__all__ = ("BlendContext", "TestDataset", "AlgorithmTestCase", "TransformTestCase",
           "SingleFramePluginTransformSetupHelper", "ForcedPluginTransformSetupHelper",
           "FluxTransformTestCase", "CentroidTransformTestCase")


class BlendContext:
    """Context manager which adds multiple overlapping sources and a parent.

    Notes
    -----
    This is used as the return value for `TestDataset.addBlend`, and this is
    the only way it should be used.
    """

    def __init__(self, owner):
        self.owner = owner
        self.parentRecord = self.owner.catalog.addNew()
        self.parentImage = lsst.afw.image.ImageF(self.owner.exposure.getBBox())
        self.children = []

    def __enter__(self):
        # BlendContext is its own context manager, so we just return self.
        return self

    def addChild(self, instFlux, centroid, shape=None):
        """Add a child to the blend; return corresponding truth catalog record.

        Parameters
        ----------
        instFlux : `float`
            Total instFlux of the source to be added.
        centroid : `lsst.geom.Point2D`
            Position of the source to be added.
        shape : `lsst.afw.geom.Quadrupole`
            Second moments of the source before PSF convolution. Note that
            the truth catalog records post-convolution moments.
        """
        record, image = self.owner.addSource(instFlux, centroid, shape)
        record.set(self.owner.keys["parent"], self.parentRecord.getId())
        self.parentImage += image
        self.children.append((record, image))
        return record

    def __exit__(self, type_, value, tb):
        # We're not using the context manager for any kind of exception safety
        # or guarantees; we just want the nice "with" statement syntax.

        if type_ is not None:
            # exception was raised; just skip all this and let it propagate
            return

        # On exit, compute and set the truth values for the parent object.
        self.parentRecord.set(self.owner.keys["nChild"], len(self.children))
        # Compute instFlux from sum of component fluxes
        instFlux = 0.0
        for record, image in self.children:
            instFlux += record.get(self.owner.keys["instFlux"])
        self.parentRecord.set(self.owner.keys["instFlux"], instFlux)
        # Compute centroid from instFlux-weighted mean of component centroids
        x = 0.0
        y = 0.0
        for record, image in self.children:
            w = record.get(self.owner.keys["instFlux"])/instFlux
            x += record.get(self.owner.keys["centroid"].getX())*w
            y += record.get(self.owner.keys["centroid"].getY())*w
        self.parentRecord.set(self.owner.keys["centroid"], lsst.geom.Point2D(x, y))
        # Compute shape from instFlux-weighted mean of offset component shapes
        xx = 0.0
        yy = 0.0
        xy = 0.0
        for record, image in self.children:
            w = record.get(self.owner.keys["instFlux"])/instFlux
            dx = record.get(self.owner.keys["centroid"].getX()) - x
            dy = record.get(self.owner.keys["centroid"].getY()) - y
            xx += (record.get(self.owner.keys["shape"].getIxx()) + dx**2)*w
            yy += (record.get(self.owner.keys["shape"].getIyy()) + dy**2)*w
            xy += (record.get(self.owner.keys["shape"].getIxy()) + dx*dy)*w
        self.parentRecord.set(self.owner.keys["shape"], lsst.afw.geom.Quadrupole(xx, yy, xy))
        # Run detection on the parent image to get the parent Footprint.
        self.owner._installFootprint(self.parentRecord, self.parentImage)
        # Create perfect HeavyFootprints for all children; these will need to
        # be modified later to account for the noise we'll add to the image.
        deblend = lsst.afw.image.MaskedImageF(self.owner.exposure.getMaskedImage(), True)
        for record, image in self.children:
            deblend.getImage().getArray()[:, :] = image.getArray()
            heavyFootprint = lsst.afw.detection.HeavyFootprintF(self.parentRecord.getFootprint(), deblend)
            record.setFootprint(heavyFootprint)


class TestDataset:
    """A simulated dataset consisting of a test image and truth catalog.

    TestDataset creates an idealized image made of pure Gaussians (including a
    Gaussian PSF), with simple noise and idealized Footprints/HeavyFootprints
    that simulate the outputs of detection and deblending. Multiple noise
    realizations can be created from the same underlying sources, allowing
    uncertainty estimates to be verified via Monte Carlo.

    Parameters
    ----------
    bbox : `lsst.geom.Box2I` or `lsst.geom.Box2D`
        Bounding box of the test image.
    threshold : `float`
        Threshold absolute value used to determine footprints for
        simulated sources. This thresholding will be applied before noise is
        actually added to images (or before the noise level is even known), so
        this will necessarily produce somewhat artificial footprints.
    exposure : `lsst.afw.image.ExposureF`
        The image to which test sources should be added. Ownership should
        be considered transferred from the caller to the TestDataset.
        Must have a Gaussian PSF for truth catalog shapes to be exact.
    **kwds
        Keyword arguments forwarded to `makeEmptyExposure` if ``exposure`` is
        `None`.

    Notes
    -----
    Typical usage:

    .. code-block:: py

        bbox = lsst.geom.Box2I(lsst.geom.Point2I(0, 0), lsst.geom.Point2I(100, 100))
        dataset = TestDataset(bbox)
        dataset.addSource(instFlux=1E5, centroid=lsst.geom.Point2D(25, 26))
        dataset.addSource(instFlux=2E5, centroid=lsst.geom.Point2D(75, 24),
                          shape=lsst.afw.geom.Quadrupole(8, 7, 2))
        with dataset.addBlend() as family:
            family.addChild(instFlux=2E5, centroid=lsst.geom.Point2D(50, 72))
            family.addChild(instFlux=1.5E5, centroid=lsst.geom.Point2D(51, 74))
        exposure, catalog = dataset.realize(noise=100.0,
                                            schema=TestDataset.makeMinimalSchema())
    """

    @classmethod
    def makeMinimalSchema(cls):
        """Return the minimal schema needed to hold truth catalog fields.

        Notes
        -----
        When `TestDataset.realize` is called, the schema must include at least
        these fields. Usually it will include additional fields for
        measurement algorithm outputs, allowing the same catalog to be used
        for both truth values (the fields from the minimal schema) and the
        measurements.
        """
        if not hasattr(cls, "_schema"):
            schema = lsst.afw.table.SourceTable.makeMinimalSchema()
            cls.keys = {}
            cls.keys["parent"] = schema.find("parent").key
            cls.keys["nChild"] = schema.addField("deblend_nChild", type=np.int32)
            cls.keys["instFlux"] = schema.addField("truth_instFlux", type=np.float64,
                                                   doc="true instFlux", units="count")
            cls.keys["centroid"] = lsst.afw.table.Point2DKey.addFields(
                schema, "truth", "true simulated centroid", "pixel"
            )
            cls.keys["centroid_sigma"] = lsst.afw.table.CovarianceMatrix2fKey.addFields(
                schema, "truth", ['x', 'y'], "pixel"
            )
            cls.keys["centroid_flag"] = schema.addField("truth_flag", type="Flag",
                                                        doc="set if the object is a star")
            cls.keys["shape"] = lsst.afw.table.QuadrupoleKey.addFields(
                schema, "truth", "true shape after PSF convolution", lsst.afw.table.CoordinateType.PIXEL
            )
            cls.keys["isStar"] = schema.addField("truth_isStar", type="Flag",
                                                 doc="set if the object is a star")
            schema.getAliasMap().set("slot_Shape", "truth")
            schema.getAliasMap().set("slot_Centroid", "truth")
            schema.getAliasMap().set("slot_ModelFlux", "truth")
            cls._schema = schema
        schema = lsst.afw.table.Schema(cls._schema)
        schema.disconnectAliases()
        return schema

    @staticmethod
    def makePerturbedWcs(oldWcs, minScaleFactor=1.2, maxScaleFactor=1.5,
                         minRotation=None, maxRotation=None,
                         minRefShift=None, maxRefShift=None,
                         minPixShift=2.0, maxPixShift=4.0, randomSeed=1):
        """Return a perturbed version of the input WCS.

        Create a new undistorted TAN WCS that is similar but not identical to
        another, with random scaling, rotation, and offset (in both pixel
        position and reference position).

        Parameters
        ----------
        oldWcs : `lsst.afw.geom.SkyWcs`
            The input WCS.
        minScaleFactor : `float`
            Minimum scale factor to apply to the input WCS.
        maxScaleFactor : `float`
            Maximum scale factor to apply to the input WCS.
        minRotation : `lsst.geom.Angle` or `None`
            Minimum rotation to apply to the input WCS. If `None`, defaults to
            30 degrees.
        maxRotation : `lsst.geom.Angle` or `None`
            Maximum rotation to apply to the input WCS. If `None`, defaults to
            60 degrees.
        minRefShift : `lsst.geom.Angle` or `None`
            Minimum shift to apply to the input WCS reference value. If
            `None`, defaults to 0.5 arcsec.
        maxRefShift : `lsst.geom.Angle` or `None`
            Maximum shift to apply to the input WCS reference value. If
            `None`, defaults to 1.0 arcsec.
        minPixShift : `float`
            Minimum shift to apply to the input WCS reference pixel.
        maxPixShift : `float`
            Maximum shift to apply to the input WCS reference pixel.
        randomSeed : `int`
            Random seed.

        Returns
        -------
        newWcs : `lsst.afw.geom.SkyWcs`
            A perturbed version of the input WCS.

        Notes
        -----
        The maximum and minimum arguments are interpreted as absolute values
        for a split range that covers both positive and negative values (as
        this method is used in testing, it is typically most important to
        avoid perturbations near zero). Scale factors are treated somewhat
        differently: the actual scale factor is chosen between
        ``minScaleFactor`` and ``maxScaleFactor`` OR (``1/maxScaleFactor``)
        and (``1/minScaleFactor``).

        The default range for rotation is 30-60 degrees, and the default range
        for reference shift is 0.5-1.0 arcseconds (these cannot be safely
        included directly as default values because Angle objects are
        mutable).

        The random number generator is primed with the seed given. If
        `None`, a seed is automatically chosen.
        """
        random_state = np.random.RandomState(randomSeed)
        if minRotation is None:
            minRotation = 30.0*lsst.geom.degrees
        if maxRotation is None:
            maxRotation = 60.0*lsst.geom.degrees
        if minRefShift is None:
            minRefShift = 0.5*lsst.geom.arcseconds
        if maxRefShift is None:
            maxRefShift = 1.0*lsst.geom.arcseconds
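
        # splitRandom draws uniformly from one of two ranges, chosen with
        # equal probability: [min1, max1] or [min2, max2]. When the second
        # range is not given it defaults to the negation of the first, so the
        # perturbation samples both signs while staying away from zero.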
        def splitRandom(min1, max1, min2=None, max2=None):
            if min2 is None:
                min2 = -max1
            if max2 is None:
                max2 = -min1
            if random_state.uniform() > 0.5:
                return float(random_state.uniform(min1, max1))
            else:
                return float(random_state.uniform(min2, max2))
        # Generate random perturbations
        scaleFactor = splitRandom(minScaleFactor, maxScaleFactor, 1.0/maxScaleFactor, 1.0/minScaleFactor)
        rotation = splitRandom(minRotation.asRadians(), maxRotation.asRadians())*lsst.geom.radians
        refShiftRa = splitRandom(minRefShift.asRadians(), maxRefShift.asRadians())*lsst.geom.radians
        refShiftDec = splitRandom(minRefShift.asRadians(), maxRefShift.asRadians())*lsst.geom.radians
        pixShiftX = splitRandom(minPixShift, maxPixShift)
        pixShiftY = splitRandom(minPixShift, maxPixShift)
        # Compute new CD matrix
        oldTransform = lsst.geom.LinearTransform(oldWcs.getCdMatrix())
        rTransform = lsst.geom.LinearTransform.makeRotation(rotation)
        sTransform = lsst.geom.LinearTransform.makeScaling(scaleFactor)
        newTransform = oldTransform*rTransform*sTransform
        matrix = newTransform.getMatrix()
        # Compute new reference sky position (CRVAL)
        oldSkyOrigin = oldWcs.getSkyOrigin()
        newSkyOrigin = lsst.geom.SpherePoint(oldSkyOrigin.getRa() + refShiftRa,
                                             oldSkyOrigin.getDec() + refShiftDec)
        # Compute new reference pixel (CRPIX)
        oldPixOrigin = oldWcs.getPixelOrigin()
        newPixOrigin = lsst.geom.Point2D(oldPixOrigin.getX() + pixShiftX,
                                         oldPixOrigin.getY() + pixShiftY)
        return lsst.afw.geom.makeSkyWcs(crpix=newPixOrigin, crval=newSkyOrigin, cdMatrix=matrix)

    @staticmethod
    def makeEmptyExposure(bbox, wcs=None, crval=None, cdelt=None, psfSigma=2.0, psfDim=17, calibration=4):
        """Create an Exposure, with a PhotoCalib, Wcs, and Psf, but no pixel values.

        Parameters
        ----------
        bbox : `lsst.geom.Box2I` or `lsst.geom.Box2D`
            Bounding box of the image in image coordinates.
        wcs : `lsst.afw.geom.SkyWcs`, optional
            New WCS for the exposure (created from CRVAL and CDELT if `None`).
        crval : `lsst.geom.SpherePoint`, optional
            ICRS center of the TAN WCS attached to the image. If `None`, (45
            degrees, 45 degrees) is assumed.
        cdelt : `lsst.geom.Angle`, optional
            Pixel scale of the image. If `None`, 0.2 arcsec is assumed.
        psfSigma : `float`, optional
            Radius (sigma) of the Gaussian PSF attached to the image.
        psfDim : `int`, optional
            Width and height, in pixels, of the Gaussian PSF attached to the
            image.
        calibration : `float`, optional
            The spatially-constant calibration (in nJy/count) to set the
            PhotoCalib of the exposure.

        Returns
        -------
        exposure : `lsst.afw.image.ExposureF`
            An empty image.
        """
        if wcs is None:
            if crval is None:
                crval = lsst.geom.SpherePoint(45.0, 45.0, lsst.geom.degrees)
            if cdelt is None:
                cdelt = 0.2*lsst.geom.arcseconds
            crpix = lsst.geom.Box2D(bbox).getCenter()
            wcs = lsst.afw.geom.makeSkyWcs(crpix=crpix, crval=crval,
                                           cdMatrix=lsst.afw.geom.makeCdMatrix(scale=cdelt))
        exposure = lsst.afw.image.ExposureF(bbox)
        psf = lsst.afw.detection.GaussianPsf(psfDim, psfDim, psfSigma)
        photoCalib = lsst.afw.image.PhotoCalib(calibration)
        exposure.setWcs(wcs)
        exposure.setPsf(psf)
        exposure.setPhotoCalib(photoCalib)
        return exposure

    @staticmethod
    def drawGaussian(bbox, instFlux, ellipse):
        """Create an image of an elliptical Gaussian.

        Parameters
        ----------
        bbox : `lsst.geom.Box2I` or `lsst.geom.Box2D`
            Bounding box of image to create.
        instFlux : `float`
            Total instrumental flux of the Gaussian (normalized analytically,
            not using pixel values).
        ellipse : `lsst.afw.geom.Ellipse`
            Defines the centroid and shape.

        Returns
        -------
        image : `lsst.afw.image.ImageF`
            An image of the Gaussian.
        """
        x, y = np.meshgrid(np.arange(bbox.getBeginX(), bbox.getEndX()),
                           np.arange(bbox.getBeginY(), bbox.getEndY()))
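        # The ellipse's grid transform maps pixel coordinates into a frame in
        # which the ellipse becomes the unit circle, so the Gaussian exponent
        # below is just -0.5*(xt**2 + yt**2); dividing by twice the ellipse
        # core area normalizes the analytic integral to instFlux (see the
        # docstring note about analytic, not pixel-summed, normalization).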
        t = ellipse.getGridTransform()
        xt = t[t.XX] * x + t[t.XY] * y + t[t.X]
        yt = t[t.YX] * x + t[t.YY] * y + t[t.Y]
        image = lsst.afw.image.ImageF(bbox)
        image.getArray()[:, :] = np.exp(-0.5*(xt**2 + yt**2))*instFlux/(2.0*ellipse.getCore().getArea())
        return image

    def __init__(self, bbox, threshold=10.0, exposure=None, **kwds):
        if exposure is None:
            exposure = self.makeEmptyExposure(bbox, **kwds)
        self.threshold = lsst.afw.detection.Threshold(threshold, lsst.afw.detection.Threshold.VALUE)
        self.exposure = exposure
        self.psfShape = self.exposure.getPsf().computeShape(bbox.getCenter())
        self.schema = self.makeMinimalSchema()
        self.catalog = lsst.afw.table.SourceCatalog(self.schema)

    def _installFootprint(self, record, image, setPeakSignificance=True):
        """Create simulated Footprint and add it to a truth catalog record.
        """
        schema = lsst.afw.detection.PeakTable.makeMinimalSchema()
        if setPeakSignificance:
            schema.addField("significance", type=float,
                            doc="Ratio of peak value to configured standard deviation.")
        # Run detection on the single-source image
        fpSet = lsst.afw.detection.FootprintSet(image, self.threshold, peakSchema=schema)
        # the call below to the FootprintSet ctor is actually a grow operation
        fpSet = lsst.afw.detection.FootprintSet(fpSet, int(self.psfShape.getDeterminantRadius() + 1.0), True)
        if setPeakSignificance:
            # This isn't a traditional significance, since we're using the VALUE
            # threshold type, but it's the best we can do in that case.
            for footprint in fpSet.getFootprints():
                footprint.updatePeakSignificance(self.threshold.getValue())
        # Update the full exposure's mask plane to indicate the detection
        fpSet.setMask(self.exposure.getMaskedImage().getMask(), "DETECTED")
        # Attach the new footprint to the record
        if len(fpSet.getFootprints()) > 1:
            raise RuntimeError("Threshold value results in multiple Footprints for a single object")
        if len(fpSet.getFootprints()) == 0:
            raise RuntimeError("Threshold value results in zero Footprints for object")
        record.setFootprint(fpSet.getFootprints()[0])

    def addSource(self, instFlux, centroid, shape=None, setPeakSignificance=True):
        """Add a source to the simulation.

        Parameters
        ----------
        instFlux : `float`
            Total instFlux of the source to be added.
        centroid : `lsst.geom.Point2D`
            Position of the source to be added.
        shape : `lsst.afw.geom.Quadrupole`
            Second moments of the source before PSF convolution. Note that the
            truth catalog records post-convolution moments. If `None`, a point
            source will be added.
        setPeakSignificance : `bool`
            Set the ``significance`` field for peaks in the footprints?
            See ``lsst.meas.algorithms.SourceDetectionTask.setPeakSignificance``
            for how this field is computed for real datasets.

        Returns
        -------
        record : `lsst.afw.table.SourceRecord`
            A truth catalog record.
        image : `lsst.afw.image.ImageF`
            Single-source image corresponding to the new source.
        """
        # Create and set the truth catalog fields
        record = self.catalog.addNew()
        record.set(self.keys["instFlux"], instFlux)
        record.set(self.keys["centroid"], centroid)
        covariance = np.random.normal(0, 0.1, 4).reshape(2, 2)
        covariance[0, 1] = covariance[1, 0]  # CovarianceMatrixKey assumes symmetric x_y_Cov
        record.set(self.keys["centroid_sigma"], covariance.astype(np.float32))
        if shape is None:
            record.set(self.keys["isStar"], True)
            fullShape = self.psfShape
        else:
            record.set(self.keys["isStar"], False)
            fullShape = shape.convolve(self.psfShape)
        record.set(self.keys["shape"], fullShape)
        # Create an image containing just this source
        image = self.drawGaussian(self.exposure.getBBox(), instFlux,
                                  lsst.afw.geom.Ellipse(fullShape, centroid))
        # Generate a footprint for this source
        self._installFootprint(record, image, setPeakSignificance)
        # Actually add the source to the full exposure
        self.exposure.getMaskedImage().getImage().getArray()[:, :] += image.getArray()
        return record, image

    def addBlend(self):
        """Return a context manager which can add a blend of multiple sources.

        Notes
        -----
        Note that nothing stops you from creating overlapping sources just
        using the `addSource` method, but `addBlend` is necessary to create a
        parent object and deblended HeavyFootprints of the type produced by
        the detection and deblending pipelines.

        Examples
        --------
        .. code-block:: py

            d = TestDataset(...)
            with d.addBlend() as b:
                b.addChild(flux1, centroid1)
                b.addChild(flux2, centroid2, shape2)
        """
        return BlendContext(self)

    def transform(self, wcs, **kwds):
        """Copy this dataset transformed to a new WCS, with new Psf and PhotoCalib.

        Parameters
        ----------
        wcs : `lsst.afw.geom.SkyWcs`
            WCS for the new dataset.
        **kwds
            Additional keyword arguments passed on to
            `TestDataset.makeEmptyExposure`. If not specified, these revert
            to the defaults for `~TestDataset.makeEmptyExposure`, not the
            values in the current dataset.

        Returns
        -------
        newDataset : `TestDataset`
            Transformed copy of this dataset.
        """
        bboxD = lsst.geom.Box2D()
        xyt = lsst.afw.geom.makeWcsPairTransform(self.exposure.getWcs(), wcs)
        for corner in lsst.geom.Box2D(self.exposure.getBBox()).getCorners():
            bboxD.include(xyt.applyForward(lsst.geom.Point2D(corner)))
        bboxI = lsst.geom.Box2I(bboxD)
        result = TestDataset(bbox=bboxI, wcs=wcs, **kwds)
        oldPhotoCalib = self.exposure.getPhotoCalib()
        newPhotoCalib = result.exposure.getPhotoCalib()
        oldPsfShape = self.exposure.getPsf().computeShape(bboxD.getCenter())
        for record in self.catalog:
            if record.get(self.keys["nChild"]):
                raise NotImplementedError("Transforming blended sources in TestDatasets is not supported")
            magnitude = oldPhotoCalib.instFluxToMagnitude(record.get(self.keys["instFlux"]))
            newFlux = newPhotoCalib.magnitudeToInstFlux(magnitude)
            oldCentroid = record.get(self.keys["centroid"])
            newCentroid = xyt.applyForward(oldCentroid)
            if record.get(self.keys["isStar"]):
                newDeconvolvedShape = None
            else:
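                # Subtract the old PSF moments to recover the intrinsic
                # (pre-convolution) shape, map it through the local linear
                # approximation to the pixel-to-pixel transform, and let
                # addSource re-convolve it with the new dataset's PSF.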
                affine = lsst.afw.geom.linearizeTransform(xyt, oldCentroid)
                oldFullShape = record.get(self.keys["shape"])
                oldDeconvolvedShape = lsst.afw.geom.Quadrupole(
                    oldFullShape.getIxx() - oldPsfShape.getIxx(),
                    oldFullShape.getIyy() - oldPsfShape.getIyy(),
                    oldFullShape.getIxy() - oldPsfShape.getIxy(),
                    False
                )
                newDeconvolvedShape = oldDeconvolvedShape.transform(affine.getLinear())
            result.addSource(newFlux, newCentroid, newDeconvolvedShape)
        return result

    def realize(self, noise, schema, randomSeed=1):
        r"""Simulate an exposure and detection catalog for this dataset.

        The simulation includes noise, and the detection catalog includes
        `~lsst.afw.detection.heavyFootprint.HeavyFootprint`\ s.

        Parameters
        ----------
        noise : `float`
            Standard deviation of noise to be added to the exposure. The
            noise will be Gaussian and constant, appropriate for the
            sky-limited regime.
        schema : `lsst.afw.table.Schema`
            Schema of the new catalog to be created. Must start with
            ``self.schema`` (i.e. ``schema.contains(self.schema)`` must be
            `True`), but typically contains fields for already-configured
            measurement algorithms as well.
        randomSeed : `int`, optional
            Seed for the random number generator.
            If `None`, a seed is chosen automatically.

        Returns
        -------
        exposure : `lsst.afw.image.ExposureF`
            Simulated image.
        catalog : `lsst.afw.table.SourceCatalog`
            Simulated detection catalog.
        """
        random_state = np.random.RandomState(randomSeed)
        assert schema.contains(self.schema)
        mapper = lsst.afw.table.SchemaMapper(self.schema)
        mapper.addMinimalSchema(self.schema, True)
        exposure = self.exposure.clone()
        exposure.getMaskedImage().getVariance().getArray()[:, :] = noise**2
        exposure.getMaskedImage().getImage().getArray()[:, :] \
            += random_state.randn(exposure.getHeight(), exposure.getWidth())*noise
        catalog = lsst.afw.table.SourceCatalog(schema)
        catalog.extend(self.catalog, mapper=mapper)
        # Loop over sources and generate new HeavyFootprints that divide up
        # the noisy pixels, not the ideal no-noise pixels.
        for record in catalog:
            # parent objects have non-Heavy Footprints, which don't need to be
            # updated after adding noise.
            if record.getParent() == 0:
                continue
            # get flattened arrays that correspond to the no-noise and noisy
            # parent images
            parent = catalog.find(record.getParent())
            footprint = parent.getFootprint()
            parentFluxArrayNoNoise = np.zeros(footprint.getArea(), dtype=np.float32)
            footprint.spans.flatten(parentFluxArrayNoNoise,
                                    self.exposure.getMaskedImage().getImage().getArray(),
                                    self.exposure.getXY0())
            parentFluxArrayNoisy = np.zeros(footprint.getArea(), dtype=np.float32)
            footprint.spans.flatten(parentFluxArrayNoisy,
                                    exposure.getMaskedImage().getImage().getArray(),
                                    exposure.getXY0())
            oldHeavy = record.getFootprint()
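            # Give each child the same fraction of the noisy parent pixels as
            # it had of the noise-free parent pixels; since the noise-free
            # parent is the sum of its children, the new children still sum to
            # the noisy parent within the footprint.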
            fraction = (oldHeavy.getImageArray() / parentFluxArrayNoNoise)
            # N.B. this isn't a copy ctor - it's a copy from a vanilla
            # Footprint, so it doesn't copy the arrays we don't want to
            # change, and hence we have to do that ourselves below.
            newHeavy = lsst.afw.detection.HeavyFootprintF(oldHeavy)
            newHeavy.getImageArray()[:] = parentFluxArrayNoisy*fraction
            newHeavy.getMaskArray()[:] = oldHeavy.getMaskArray()
            newHeavy.getVarianceArray()[:] = oldHeavy.getVarianceArray()
            record.setFootprint(newHeavy)
        return exposure, catalog


class AlgorithmTestCase:

    def makeSingleFrameMeasurementConfig(self, plugin=None, dependencies=()):
        """Create an instance of `SingleFrameMeasurementTask.ConfigClass`.

        Only the specified plugin and its dependencies will be run; the
        Centroid, Shape, and ModelFlux slots will be set to the truth fields
        generated by the `TestDataset` class.

        Parameters
        ----------
        plugin : `str`
            Name of measurement plugin to enable.
        dependencies : iterable of `str`, optional
            Names of dependencies of the measurement plugin.

        Returns
        -------
        config : `SingleFrameMeasurementTask.ConfigClass`
            The resulting task configuration.
        """
        config = SingleFrameMeasurementTask.ConfigClass()
        config.slots.centroid = "truth"
        config.slots.shape = "truth"
        config.slots.modelFlux = None
        config.slots.apFlux = None
        config.slots.psfFlux = None
        config.slots.gaussianFlux = None
        config.slots.calibFlux = None
        config.plugins.names = (plugin,) + tuple(dependencies)
        return config

    def makeSingleFrameMeasurementTask(self, plugin=None, dependencies=(), config=None, schema=None,
                                       algMetadata=None):
        """Create a configured instance of `SingleFrameMeasurementTask`.

        Parameters
        ----------
        plugin : `str`, optional
            Name of measurement plugin to enable. If `None`, a configuration
            must be supplied as the ``config`` parameter. If both are
            specified, ``config`` takes precedence.
        dependencies : iterable of `str`, optional
            Names of dependencies of the specified measurement plugin.
        config : `SingleFrameMeasurementTask.ConfigClass`, optional
            Configuration for the task. If `None`, a measurement plugin must
            be supplied as the ``plugin`` parameter. If both are specified,
            ``config`` takes precedence.
        schema : `lsst.afw.table.Schema`, optional
            Measurement table schema. If `None`, a default schema is
            generated.
        algMetadata : `lsst.daf.base.PropertyList`, optional
            Measurement algorithm metadata. If `None`, a default container
            will be generated.

        Returns
        -------
        task : `SingleFrameMeasurementTask`
            A configured instance of the measurement task.
        """
        if config is None:
            if plugin is None:
                raise ValueError("Either plugin or config argument must not be None")
            config = self.makeSingleFrameMeasurementConfig(plugin=plugin, dependencies=dependencies)
        if schema is None:
            schema = TestDataset.makeMinimalSchema()
            # Clear all aliases so only those defined by config are set.
            schema.setAliasMap(None)
        if algMetadata is None:
            algMetadata = lsst.daf.base.PropertyList()
        return SingleFrameMeasurementTask(schema=schema, algMetadata=algMetadata, config=config)
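
    # Illustrative use in a unit test (a sketch only; the plugin name, noise
    # level, and the ``self.dataset`` attribute are assumptions, not part of
    # this module):
    #
    #     task = self.makeSingleFrameMeasurementTask("base_SdssCentroid")
    #     exposure, catalog = self.dataset.realize(noise=100.0,
    #                                              schema=task.schema,
    #                                              randomSeed=0)
    #     task.run(catalog, exposure)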

    def makeForcedMeasurementConfig(self, plugin=None, dependencies=()):
        """Create an instance of `ForcedMeasurementTask.ConfigClass`.

        In addition to the plugins specified in the plugin and dependencies
        arguments, the `TransformedCentroid` and `TransformedShape` plugins
        will be run and used as the centroid and shape slots; these simply
        transform the reference catalog centroid and shape to the measurement
        coordinate system.

        Parameters
        ----------
        plugin : `str`
            Name of measurement plugin to enable.
        dependencies : iterable of `str`, optional
            Names of dependencies of the measurement plugin.

        Returns
        -------
        config : `ForcedMeasurementTask.ConfigClass`
            The resulting task configuration.
        """

        config = ForcedMeasurementTask.ConfigClass()
        config.slots.centroid = "base_TransformedCentroid"
        config.slots.shape = "base_TransformedShape"
        config.slots.modelFlux = None
        config.slots.apFlux = None
        config.slots.psfFlux = None
        config.slots.gaussianFlux = None
        config.plugins.names = (plugin,) + tuple(dependencies) + ("base_TransformedCentroid",
                                                                  "base_TransformedShape")
        return config

    def makeForcedMeasurementTask(self, plugin=None, dependencies=(), config=None, refSchema=None,
                                  algMetadata=None):
        """Create a configured instance of `ForcedMeasurementTask`.

        Parameters
        ----------
        plugin : `str`, optional
            Name of measurement plugin to enable. If `None`, a configuration
            must be supplied as the ``config`` parameter. If both are
            specified, ``config`` takes precedence.
        dependencies : iterable of `str`, optional
            Names of dependencies of the specified measurement plugin.
        config : `ForcedMeasurementTask.ConfigClass`, optional
            Configuration for the task. If `None`, a measurement plugin must
            be supplied as the ``plugin`` parameter. If both are specified,
            ``config`` takes precedence.
        refSchema : `lsst.afw.table.Schema`, optional
            Reference table schema. If `None`, a default schema is
            generated.
        algMetadata : `lsst.daf.base.PropertyList`, optional
            Measurement algorithm metadata. If `None`, a default container
            will be generated.

        Returns
        -------
        task : `ForcedMeasurementTask`
            A configured instance of the measurement task.
        """
        if config is None:
            if plugin is None:
                raise ValueError("Either plugin or config argument must not be None")
            config = self.makeForcedMeasurementConfig(plugin=plugin, dependencies=dependencies)
        if refSchema is None:
            refSchema = TestDataset.makeMinimalSchema()
        if algMetadata is None:
            algMetadata = lsst.daf.base.PropertyList()
        return ForcedMeasurementTask(refSchema=refSchema, algMetadata=algMetadata, config=config)


class TransformTestCase:
    """Base class for testing measurement transformations.

    Notes
    -----
    We test both that the transform itself operates successfully (fluxes are
    converted to magnitudes, flags are propagated properly) and that the
    transform is registered as the default for the appropriate measurement
    algorithms.

    In the simple case of one-measurement-per-transformation, the developer
    need not directly write any tests themselves: simply customizing the class
    variables is all that is required. More complex measurements (e.g.
    multiple aperture fluxes) require extra effort.
    """
    name = "MeasurementTransformTest"
    """The name used for the measurement algorithm (str).

    Notes
    -----
    This determines the names of the fields in the resulting catalog. This
    default should generally be fine, but subclasses can override if
    required.
    """

    # These should be customized by subclassing.
    controlClass = None
    algorithmClass = None
    transformClass = None

    flagNames = ("flag",)
    """Flags which may be set by the algorithm being tested (iterable of `str`).
    """

    # The plugin being tested should be registered under these names for
    # single frame and forced measurement. Should be customized by
    # subclassing.
    singleFramePlugins = ()
    forcedPlugins = ()
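
    # A subclass typically only fills in the class variables above; an
    # illustrative sketch (every name here is an assumption, not a real
    # plugin or class):
    #
    #     class ExampleFluxTransformTestCase(FluxTransformTestCase,
    #                                        SingleFramePluginTransformSetupHelper,
    #                                        lsst.utils.tests.TestCase):
    #         controlClass = ExampleFluxControl
    #         algorithmClass = ExampleFluxAlgorithm
    #         transformClass = ExampleFluxTransform
    #         singleFramePlugins = ("base_ExampleFlux",)
    #         forcedPlugins = ("base_ExampleFlux",)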

    def setUp(self):
        bbox = lsst.geom.Box2I(lsst.geom.Point2I(0, 0), lsst.geom.Point2I(200, 200))
        self.calexp = TestDataset.makeEmptyExposure(bbox)
        self._setupTransform()

    def tearDown(self):
        del self.calexp
        del self.inputCat
        del self.mapper
        del self.transform
        del self.outputCat

    def _populateCatalog(self, baseNames):
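        # Create one record with the algorithm's flags set and one with them
        # clear, so flag propagation is exercised in both states; the
        # subclass hook _setFieldsInRecords then fills the measurement fields.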
        records = []
        for flagValue in (True, False):
            records.append(self.inputCat.addNew())
            for baseName in baseNames:
                for flagName in self.flagNames:
                    if records[-1].schema.join(baseName, flagName) in records[-1].schema:
                        records[-1].set(records[-1].schema.join(baseName, flagName), flagValue)
        self._setFieldsInRecords(records, baseName)

    def _checkOutput(self, baseNames):
        for inSrc, outSrc in zip(self.inputCat, self.outputCat):
            for baseName in baseNames:
                self._compareFieldsInRecords(inSrc, outSrc, baseName)
                for flagName in self.flagNames:
                    keyName = outSrc.schema.join(baseName, flagName)
                    if keyName in inSrc.schema:
                        self.assertEqual(outSrc.get(keyName), inSrc.get(keyName))
                    else:
                        self.assertFalse(keyName in outSrc.schema)

    def _runTransform(self, doExtend=True):
        if doExtend:
            self.outputCat.extend(self.inputCat, mapper=self.mapper)
        self.transform(self.inputCat, self.outputCat, self.calexp.getWcs(), self.calexp.getPhotoCalib())

    def testTransform(self, baseNames=None):
        """Test the transformation on a catalog containing random data.

        Parameters
        ----------
        baseNames : iterable of `str`
            Iterable of the initial parts of measurement field names.

        Notes
        -----
        We check that:

        - An appropriate exception is raised on an attempt to transform
          between catalogs with different numbers of rows;
        - Otherwise, all appropriate conversions are properly applied and
          flags have been propagated.

        The ``baseNames`` argument requires some explanation. This should be
        an iterable of the leading parts of the field names for each
        measurement; that is, everything that appears before ``_instFlux``,
        ``_flag``, etc. In the simple case of a single measurement per plugin,
        this is simply equal to ``self.name`` (thus measurements are stored as
        ``self.name + "_instFlux"``, etc). More generally, the developer may
        specify whatever iterable they require. For example, to handle
        multiple apertures, we could have ``(self.name + "_0", self.name +
        "_1", ...)``.
        """
        baseNames = baseNames or [self.name]
        self._populateCatalog(baseNames)
        self.assertRaises(lsst.pex.exceptions.LengthError, self._runTransform, False)
        self._runTransform()
        self._checkOutput(baseNames)

    def _checkRegisteredTransform(self, registry, name):
        # If this is a Python-based transform, we can compare directly; if
        # it's wrapped C++, we need to compare the wrapped class.
        self.assertEqual(registry[name].PluginClass.getTransformClass(), self.transformClass)

    def testRegistration(self):
        """Test that the transformation is appropriately registered.
        """
        for pluginName in self.singleFramePlugins:
            self._checkRegisteredTransform(lsst.meas.base.SingleFramePlugin.registry, pluginName)
        for pluginName in self.forcedPlugins:
            self._checkRegisteredTransform(lsst.meas.base.ForcedPlugin.registry, pluginName)


class SingleFramePluginTransformSetupHelper:

    def _setupTransform(self):
        self.control = self.controlClass()
        inputSchema = lsst.afw.table.SourceTable.makeMinimalSchema()
        # Trick algorithms that depend on the slot centroid or alias into
        # thinking they've been defined; it doesn't matter for this test since
        # we won't actually use the plugins for anything besides defining the
        # schema.
        inputSchema.getAliasMap().set("slot_Centroid", "dummy")
        inputSchema.getAliasMap().set("slot_Shape", "dummy")
        self.algorithmClass(self.control, self.name, inputSchema)
        inputSchema.getAliasMap().erase("slot_Centroid")
        inputSchema.getAliasMap().erase("slot_Shape")
        self.inputCat = lsst.afw.table.SourceCatalog(inputSchema)
        self.mapper = lsst.afw.table.SchemaMapper(inputSchema)
        self.transform = self.transformClass(self.control, self.name, self.mapper)
        self.outputCat = lsst.afw.table.BaseCatalog(self.mapper.getOutputSchema())


class ForcedPluginTransformSetupHelper:

    def _setupTransform(self):
        self.control = self.controlClass()
        inputMapper = lsst.afw.table.SchemaMapper(lsst.afw.table.SourceTable.makeMinimalSchema(),
                                                  lsst.afw.table.SourceTable.makeMinimalSchema())
        # Trick algorithms that depend on the slot centroid or alias into
        # thinking they've been defined; it doesn't matter for this test since
        # we won't actually use the plugins for anything besides defining the
        # schema.
        inputMapper.editOutputSchema().getAliasMap().set("slot_Centroid", "dummy")
        inputMapper.editOutputSchema().getAliasMap().set("slot_Shape", "dummy")
        self.algorithmClass(self.control, self.name, inputMapper, lsst.daf.base.PropertyList())
        inputMapper.editOutputSchema().getAliasMap().erase("slot_Centroid")
        inputMapper.editOutputSchema().getAliasMap().erase("slot_Shape")
        self.inputCat = lsst.afw.table.SourceCatalog(inputMapper.getOutputSchema())
        self.mapper = lsst.afw.table.SchemaMapper(inputMapper.getOutputSchema())
        self.transform = self.transformClass(self.control, self.name, self.mapper)
        self.outputCat = lsst.afw.table.BaseCatalog(self.mapper.getOutputSchema())


class FluxTransformTestCase(TransformTestCase):

    def _setFieldsInRecords(self, records, name):
        for record in records:
            record[record.schema.join(name, 'instFlux')] = np.random.random()
            record[record.schema.join(name, 'instFluxErr')] = np.random.random()

        # Negative instFluxes should be converted to NaNs.
        assert len(records) > 1
        records[0][record.schema.join(name, 'instFlux')] = -1

    def _compareFieldsInRecords(self, inSrc, outSrc, name):
        instFluxName = inSrc.schema.join(name, 'instFlux')
        instFluxErrName = inSrc.schema.join(name, 'instFluxErr')
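        # PhotoCalib.instFluxToMagnitude(instFlux, instFluxErr) returns a
        # Measurement with .value (the magnitude) and .error, which the
        # transform is expected to have written to the mag/magErr fields.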
        if inSrc[instFluxName] > 0:
            mag = self.calexp.getPhotoCalib().instFluxToMagnitude(inSrc[instFluxName],
                                                                  inSrc[instFluxErrName])
            self.assertEqual(outSrc[outSrc.schema.join(name, 'mag')], mag.value)
            self.assertEqual(outSrc[outSrc.schema.join(name, 'magErr')], mag.error)
        else:
            # negative instFlux results in NaN magnitude, but can still have
            # a finite error
            self.assertTrue(np.isnan(outSrc[outSrc.schema.join(name, 'mag')]))
            if np.isnan(inSrc[instFluxErrName]):
                self.assertTrue(np.isnan(outSrc[outSrc.schema.join(name, 'magErr')]))
            else:
                mag = self.calexp.getPhotoCalib().instFluxToMagnitude(inSrc[instFluxName],
                                                                      inSrc[instFluxErrName])
                self.assertEqual(outSrc[outSrc.schema.join(name, 'magErr')], mag.error)


class CentroidTransformTestCase(TransformTestCase):

    def _setFieldsInRecords(self, records, name):
        for record in records:
            record[record.schema.join(name, 'x')] = np.random.random()
            record[record.schema.join(name, 'y')] = np.random.random()
            # Some algorithms set no errors; some set only sigma on x & y;
            # some provide a full covariance matrix. Set only those fields
            # which exist in the schema.
            for fieldSuffix in ('xErr', 'yErr', 'x_y_Cov'):
                fieldName = record.schema.join(name, fieldSuffix)
                if fieldName in record.schema:
                    record[fieldName] = np.random.random()

    def _compareFieldsInRecords(self, inSrc, outSrc, name):
        centroidResultKey = CentroidResultKey(inSrc.schema[self.name])
        centroidResult = centroidResultKey.get(inSrc)

        coord = lsst.afw.table.CoordKey(outSrc.schema[self.name]).get(outSrc)
        coordTruth = self.calexp.getWcs().pixelToSky(centroidResult.getCentroid())
        self.assertEqual(coordTruth, coord)

        # If the centroid has an associated uncertainty matrix, the coordinate
        # must have one too, and vice versa.
        try:
            coordErr = lsst.afw.table.CovarianceMatrix2fKey(outSrc.schema[self.name],
                                                            ["ra", "dec"]).get(outSrc)
        except lsst.pex.exceptions.NotFoundError:
            self.assertFalse(centroidResultKey.getCentroidErr().isValid())
        else:
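            # Propagate the pixel-space covariance to sky coordinates with the
            # local linearization of the WCS: coordErrTruth = J * Cov * J^T,
            # where J is the pixel-to-sky Jacobian at the true position.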
            transform = self.calexp.getWcs().linearizePixelToSky(coordTruth, lsst.geom.radians)
            coordErrTruth = np.dot(np.dot(transform.getLinear().getMatrix(),
                                          centroidResult.getCentroidErr()),
                                   transform.getLinear().getMatrix().transpose())
            np.testing.assert_array_almost_equal(np.array(coordErrTruth), coordErr)