Coverage for python/lsst/cp/pipe/linearity.py: 15%

# This file is part of cp_pipe.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
import numpy as np

import lsst.afw.image as afwImage
import lsst.afw.math as afwMath
import lsst.pipe.base as pipeBase
import lsst.pipe.base.connectionTypes as cT
import lsst.pex.config as pexConfig

from lsstDebug import getDebugFrame
from lsst.ip.isr import (Linearizer, IsrProvenance)

from .utils import (fitLeastSq, funcPolynomial)

__all__ = ["LinearitySolveTask", "LinearitySolveConfig", "MeasureLinearityTask"]


class LinearitySolveConnections(pipeBase.PipelineTaskConnections,
                                dimensions=("instrument", "detector")):
    inputPtc = cT.Input(
        name="inputPtc",
        doc="Input PTC dataset.",
        storageClass="StructuredDataDict",
        dimensions=("instrument", "detector"),
        multiple=False,
    )
    camera = cT.Input(
        name="camera",
        doc="Camera Geometry definition.",
        storageClass="Camera",
        dimensions=("instrument", ),
    )
    outputLinearizer = cT.Output(
        name="linearity",
        doc="Output linearity measurements.",
        storageClass="Linearizer",
        dimensions=("instrument", "detector"),
        isCalibration=True,
    )


class LinearitySolveConfig(pipeBase.PipelineTaskConfig,
                           pipelineConnections=LinearitySolveConnections):
    """Configuration for solving the linearity from a PTC dataset.
    """
    linearityType = pexConfig.ChoiceField(
        dtype=str,
        doc="Type of linearizer to construct.",
        default="Squared",
        allowed={
            "LookupTable": "Create a lookup table solution.",
            "Polynomial": "Create an arbitrary polynomial solution.",
            "Squared": "Create a single order squared solution.",
            "Spline": "Create a spline based solution.",
            "None": "Create a dummy solution.",
        }
    )
    polynomialOrder = pexConfig.Field(
        dtype=int,
        doc="Degree of polynomial to fit.",
        default=3,
    )
    splineKnots = pexConfig.Field(
        dtype=int,
        doc="Number of spline knots to use in fit.",
        default=10,
    )
    maxLookupTableAdu = pexConfig.Field(
        dtype=int,
        doc="Maximum DN value for a LookupTable linearizer.",
        default=2**18,
    )
    maxLinearAdu = pexConfig.Field(
        dtype=float,
        doc="Maximum DN value to use to estimate linear term.",
        default=20000.0,
    )
    minLinearAdu = pexConfig.Field(
        dtype=float,
        doc="Minimum DN value to use to estimate linear term.",
        default=2000.0,
    )
    nSigmaClipLinear = pexConfig.Field(
        dtype=float,
        doc="Maximum deviation from linear solution for Poissonian noise.",
        default=5.0,
    )
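
# A minimal configuration sketch (illustrative values only): these fields
# would normally be set in a pexConfig override file or a pipeline
# definition, where ``config`` is the LinearitySolveConfig instance being
# configured, e.g.:
#
#     config.linearityType = "Spline"
#     config.splineKnots = 15
#     config.maxLinearAdu = 30000.0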


class LinearitySolveTask(pipeBase.PipelineTask, pipeBase.CmdLineTask):
    """Fit the linearity from the PTC dataset.
    """
    ConfigClass = LinearitySolveConfig
    _DefaultName = 'cpLinearitySolve'

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        """Ensure that the input and output dimensions are passed along.

        Parameters
        ----------
        butlerQC : `lsst.daf.butler.butlerQuantumContext.ButlerQuantumContext`
            Butler to operate on.
        inputRefs : `lsst.pipe.base.connections.InputQuantizedConnection`
            Input data refs to load.
        outputRefs : `lsst.pipe.base.connections.OutputQuantizedConnection`
            Output data refs to persist.
        """
        inputs = butlerQC.get(inputRefs)

        # Use the dimensions to set calib/provenance information.
        inputs['inputDims'] = [exp.dataId.byName() for exp in inputRefs.inputPtc]

        outputs = self.run(**inputs)
        butlerQC.put(outputs, outputRefs)

    def run(self, inputPtc, camera, inputDims):
        """Fit non-linearity to PTC data, returning the correct Linearizer
        object.

        Parameters
        ----------
        inputPtc : `lsst.cp.pipe.PtcDataset`
            Pre-measured PTC dataset.
        camera : `lsst.afw.cameraGeom.Camera`
            Camera geometry.
        inputDims : `lsst.daf.butler.DataCoordinate` or `dict`
            DataIds to use to populate the output calibration.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The results struct containing:

            ``outputLinearizer`` : `lsst.ip.isr.Linearizer`
                Final linearizer calibration.
            ``outputProvenance`` : `lsst.ip.isr.IsrProvenance`
                Provenance data for the new calibration.

        Notes
        -----
        This task currently fits only polynomial-defined corrections,
        where the correction coefficients are defined such that:
            corrImage = uncorrImage + sum_i c_i uncorrImage^(2 + i)
        These `c_i` are defined in terms of the direct polynomial fit:
            meanVector ~ P(x=timeVector) = sum_j k_j x^j
        such that c_(j-2) = -k_j/(k_1^j) in units of DN^(1-j) (cf.
        Eq. 37 of 2003.05978). The `config.polynomialOrder` or
        `config.splineKnots` define the maximum order of x^j to fit.
        As k_0 and k_1 are degenerate with bias level and gain, they
        are not included in the non-linearity correction.
        """
        detector = camera[inputDims['detector']]

        if self.config.linearityType == 'LookupTable':
            table = np.zeros((len(detector), self.config.maxLookupTableAdu), dtype=np.float32)
            tableIndex = 0
        else:
            table = None
            tableIndex = None  # This will fail if we increment it.

        if self.config.linearityType == 'Spline':
            fitOrder = self.config.splineKnots
        else:
            fitOrder = self.config.polynomialOrder

        # Initialize the linearizer.
        linearizer = Linearizer(detector=detector, table=table, log=self.log)

        for i, amp in enumerate(detector):
            ampName = amp.getName()
            if (len(inputPtc.expIdMask[ampName]) == 0):
                self.log.warn(f"Mask not found for {ampName} in non-linearity fit. Using all points.")
                mask = np.ones(len(inputPtc.rawExpTimes[ampName]), dtype=bool)
            else:
                mask = inputPtc.expIdMask[ampName]

            inputAbscissa = np.array(inputPtc.rawExpTimes[ampName])[mask]
            inputOrdinate = np.array(inputPtc.rawMeans[ampName])[mask]

            # Determine proxy-to-linear-flux transformation.
            fluxMask = inputOrdinate < self.config.maxLinearAdu
            lowMask = inputOrdinate > self.config.minLinearAdu
            fluxMask = fluxMask & lowMask
            linearAbscissa = inputAbscissa[fluxMask]
            linearOrdinate = inputOrdinate[fluxMask]

            linearFit, linearFitErr, chiSq, weights = self.irlsFit([0.0, 100.0], linearAbscissa,
                                                                   linearOrdinate, funcPolynomial)
            # Convert this proxy-to-flux fit into an expected linear flux.
            linearOrdinate = linearFit[0] + linearFit[1] * inputAbscissa

            # Exclude low end outliers.
            threshold = self.config.nSigmaClipLinear * np.sqrt(linearOrdinate)
            fluxMask = np.abs(inputOrdinate - linearOrdinate) < threshold
            linearOrdinate = linearOrdinate[fluxMask]
            fitOrdinate = inputOrdinate[fluxMask]
            self.debugFit('linearFit', inputAbscissa, inputOrdinate, linearOrdinate, fluxMask, ampName)

            # Do fits.
            if self.config.linearityType in ['Polynomial', 'Squared', 'LookupTable']:
                polyFit = np.zeros(fitOrder + 1)
                polyFit[1] = 1.0
                polyFit, polyFitErr, chiSq, weights = self.irlsFit(polyFit, linearOrdinate,
                                                                   fitOrdinate, funcPolynomial)

                # Truncate the polynomial fit.
                k1 = polyFit[1]
                linearityFit = [-coeff/(k1**order) for order, coeff in enumerate(polyFit)]
                significant = np.where(np.abs(linearityFit) > 1e-10, True, False)
                self.log.info(f"Significant polynomial fits: {significant}")

                modelOrdinate = funcPolynomial(polyFit, linearOrdinate)
                self.debugFit('polyFit', linearOrdinate, fitOrdinate, modelOrdinate, None, ampName)

                if self.config.linearityType == 'Squared':
                    linearityFit = [linearityFit[2]]
                elif self.config.linearityType == 'LookupTable':
                    # Use the linear part to get the time at which the
                    # signal reaches self.config.maxLookupTableAdu DN.
                    tMax = (self.config.maxLookupTableAdu - polyFit[0])/polyFit[1]
                    timeRange = np.linspace(0, tMax, self.config.maxLookupTableAdu)
                    signalIdeal = polyFit[0] + polyFit[1]*timeRange
                    signalUncorrected = funcPolynomial(polyFit, timeRange)
                    lookupTableRow = signalIdeal - signalUncorrected  # LinearizerLookupTable has correction

                    linearizer.tableData[tableIndex, :] = lookupTableRow
                    linearityFit = [tableIndex, 0]
                    tableIndex += 1
            elif self.config.linearityType in ['Spline']:
                # See discussion in `lsst.ip.isr.linearize.py` before modifying.
                numPerBin, binEdges = np.histogram(linearOrdinate, bins=fitOrder)
                with np.errstate(invalid="ignore"):
                    # Algorithm note: With the counts of points per
                    # bin above, the next histogram calculates the
                    # values to put in each bin by weighting each
                    # point by the correction value.
                    values = np.histogram(linearOrdinate, bins=fitOrder,
                                          weights=(inputOrdinate[fluxMask] - linearOrdinate))[0]/numPerBin

                    # After this is done, the binCenters are
                    # calculated by weighting by the value we're
                    # binning over. This ensures that widely
                    # spaced/poorly sampled data aren't assigned to
                    # the midpoint of the bin (as could be done using
                    # the binEdges above), but to the weighted mean of
                    # the inputs. Note that both histograms are
                    # scaled by the count per bin to normalize what
                    # the histogram returns (a sum of the points
                    # inside) into an average.
                    binCenters = np.histogram(linearOrdinate, bins=fitOrder,
                                              weights=linearOrdinate)[0]/numPerBin
                    values = values[numPerBin > 0]
                    binCenters = binCenters[numPerBin > 0]
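
                # Illustrative example (hypothetical numbers): if a bin
                # spanning [0, 600) DN contains only two points with
                # linearOrdinate values 10 and 20 and residuals 1.0 and
                # 3.0, then numPerBin = 2, values = (1.0 + 3.0)/2 = 2.0,
                # and binCenters = (10 + 20)/2 = 15, rather than the bin
                # midpoint of 300.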

                self.debugFit('splineFit', binCenters, np.abs(values), values, None, ampName)
                interp = afwMath.makeInterpolate(binCenters.tolist(), values.tolist(),
                                                 afwMath.stringToInterpStyle("AKIMA_SPLINE"))
                modelOrdinate = linearOrdinate + interp.interpolate(linearOrdinate)
                self.debugFit('splineFit', linearOrdinate, fitOrdinate, modelOrdinate, None, ampName)

                # If we exclude a lot of points, we may end up with
                # fewer than fitOrder points. Pad out the low-flux end
                # to ensure equal lengths.
                if len(binCenters) != fitOrder:
                    padN = fitOrder - len(binCenters)
                    binCenters = np.pad(binCenters, (padN, 0), 'linear_ramp',
                                        end_values=(binCenters.min() - 1.0, ))
                    # This stores the correction, which is zero at low values.
                    values = np.pad(values, (padN, 0))

                # Pack the spline into a single array.
                linearityFit = np.concatenate((binCenters.tolist(), values.tolist())).tolist()
                polyFit = [0.0]
                polyFitErr = [0.0]
                chiSq = np.nan
            else:
                polyFit = [0.0]
                polyFitErr = [0.0]
                chiSq = np.nan
                linearityFit = [0.0]

            linearizer.linearityType[ampName] = self.config.linearityType
            linearizer.linearityCoeffs[ampName] = np.array(linearityFit)
            linearizer.linearityBBox[ampName] = amp.getBBox()
            linearizer.fitParams[ampName] = np.array(polyFit)
            linearizer.fitParamsErr[ampName] = np.array(polyFitErr)
            linearizer.fitChiSq[ampName] = chiSq
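
            # Apply the just-derived correction to the full set of input
            # means, packed into a 1-D ImageF, so that the corrected
            # values can be compared against the linear solution in the
            # 'solution' debug plot below.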
            image = afwImage.ImageF(len(inputOrdinate), 1)
            image.getArray()[:, :] = inputOrdinate
            linearizeFunction = linearizer.getLinearityTypeByName(linearizer.linearityType[ampName])
            linearizeFunction()(image,
                                **{'coeffs': linearizer.linearityCoeffs[ampName],
                                   'table': linearizer.tableData,
                                   'log': linearizer.log})
            linearizeModel = image.getArray()[0, :]

            self.debugFit('solution', inputOrdinate[fluxMask], linearOrdinate,
                          linearizeModel[fluxMask], None, ampName)

        linearizer.hasLinearity = True
        linearizer.validate()
        linearizer.updateMetadata(camera=camera, detector=detector, filterName='NONE')
        linearizer.updateMetadata(setDate=True, setCalibId=True)
        provenance = IsrProvenance(calibType='linearizer')

        return pipeBase.Struct(
            outputLinearizer=linearizer,
            outputProvenance=provenance,
        )

    def irlsFit(self, initialParams, dataX, dataY, function, weightsY=None):
        """Iteratively reweighted least squares fit.

        This uses `lsst.cp.pipe.utils.fitLeastSq`, but applies weights
        based on the Cauchy distribution to the fitter. See, e.g.,
        Holland and Welsch, 1977, doi:10.1080/03610927708827533.

        Parameters
        ----------
        initialParams : `list` [`float`]
            Starting parameters.
        dataX : `numpy.array` [`float`]
            Abscissa data.
        dataY : `numpy.array` [`float`]
            Ordinate data.
        function : callable
            Function to fit.
        weightsY : `numpy.array` [`float`], optional
            Weights to apply to the data.

        Returns
        -------
        polyFit : `list` [`float`]
            Final best fit parameters.
        polyFitErr : `list` [`float`]
            Final errors on fit parameters.
        chiSq : `float`
            Reduced chi squared.
        weightsY : `list` [`float`]
            Final weights used for each point.
        """
        if weightsY is None:
            weightsY = np.ones_like(dataX)

        polyFit, polyFitErr, chiSq = fitLeastSq(initialParams, dataX, dataY, function, weightsY=weightsY)
        for iteration in range(10):
            # Use Cauchy weights.
            resid = np.abs(dataY - function(polyFit, dataX)) / np.sqrt(dataY)
            weightsY = 1.0 / (1.0 + np.sqrt(resid / 2.385))
            polyFit, polyFitErr, chiSq = fitLeastSq(initialParams, dataX, dataY, function, weightsY=weightsY)

        return polyFit, polyFitErr, chiSq, weightsY
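
    # Usage sketch for `irlsFit` (illustrative only, not executed here):
    # with `task` an assumed LinearitySolveTask instance and the module's
    # `funcPolynomial`,
    #
    #     x = np.linspace(1.0, 10.0, 20)
    #     y = 5.0 + 100.0 * x
    #     fit, fitErr, chiSq, weights = task.irlsFit([0.0, 90.0], x, y,
    #                                                funcPolynomial)
    #
    # should return `fit` close to [5.0, 100.0].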

    def debugFit(self, stepname, xVector, yVector, yModel, mask, ampName):
        """Debug method for linearity fitting.

        Parameters
        ----------
        stepname : `str`
            A label to use to check if we care to debug at a given
            line of code.
        xVector : `numpy.array`
            The values to use as the independent variable in the
            linearity fit.
        yVector : `numpy.array`
            The values to use as the dependent variable in the
            linearity fit.
        yModel : `numpy.array`
            The values to use as the linearized result.
        mask : `numpy.array` [`bool`], optional
            A mask to indicate which entries of ``xVector`` and
            ``yVector`` to keep.
        ampName : `str`
            Amplifier name used to look up linearity correction values.
        """
        frame = getDebugFrame(self._display, stepname)
        if frame:
            import matplotlib.pyplot as plt
            fig, axs = plt.subplots(2)

            if mask is None:
                mask = np.ones_like(xVector, dtype=bool)

            fig.suptitle(f"{stepname} {ampName} {self.config.linearityType}")
            if stepname == 'linearFit':
                axs[0].set_xlabel("Input Abscissa (time or mondiode)")
                axs[0].set_ylabel("Input Ordinate (flux)")
                axs[1].set_xlabel("Linear Ordinate (linear flux)")
                axs[1].set_ylabel("Flux Difference: (input - linear)")
            elif stepname in ('polyFit', 'splineFit'):
                axs[0].set_xlabel("Linear Abscissa (linear flux)")
                axs[0].set_ylabel("Input Ordinate (flux)")
                axs[1].set_xlabel("Linear Ordinate (linear flux)")
                axs[1].set_ylabel("Flux Difference: (input - full model fit)")
            elif stepname == 'solution':
                axs[0].set_xlabel("Input Abscissa (time or mondiode)")
                axs[0].set_ylabel("Linear Ordinate (linear flux)")
                axs[1].set_xlabel("Model flux (linear flux)")
                axs[1].set_ylabel("Flux Difference: (linear - model)")

            axs[0].set_yscale('log')
            axs[0].set_xscale('log')
            axs[0].scatter(xVector, yVector)
            axs[0].scatter(xVector[~mask], yVector[~mask], c='red', marker='x')
            axs[1].set_xscale('log')

            axs[1].scatter(yModel, yVector[mask] - yModel)
            fig.show()

            prompt = "Press Enter or c to continue [chpx]..."
            while True:
                ans = input(prompt).lower()
                if ans in ("", " ", "c",):
                    break
                elif ans in ("p", ):
                    import pdb
                    pdb.set_trace()
                elif ans in ("h", ):
                    print("[h]elp [c]ontinue [p]db")
                elif ans in ('x', ):
                    exit()
            plt.close()


class MeasureLinearityConfig(pexConfig.Config):
    solver = pexConfig.ConfigurableField(
        target=LinearitySolveTask,
        doc="Task to convert PTC data to linearity solutions.",
    )


class MeasureLinearityTask(pipeBase.CmdLineTask):
    """Stand-alone Gen2 linearity measurement.

    This class wraps the Gen3 linearity task to allow it to be run as
    a Gen2 CmdLineTask.
    """
    ConfigClass = MeasureLinearityConfig
    _DefaultName = "measureLinearity"

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.makeSubtask("solver")

    def runDataRef(self, dataRef):
        """Run the linearity measurement for a Gen2 data reference.

        Parameters
        ----------
        dataRef : `lsst.daf.persistence.ButlerDataRef`
            Input data reference for the photon transfer curve data.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The results struct containing:

            ``outputLinearizer`` : `lsst.ip.isr.Linearizer`
                Final linearizer calibration.
            ``outputProvenance`` : `lsst.ip.isr.IsrProvenance`
                Provenance data for the new calibration.
        """
        ptc = dataRef.get('photonTransferCurveDataset')
        camera = dataRef.get('camera')
        inputDims = dataRef.dataId  # This is the closest gen2 has.
        linearityResults = self.solver.run(ptc, camera=camera, inputDims=inputDims)

        inputDims['calibDate'] = linearityResults.outputLinearizer.getMetadata().get('CALIBDATE')
        butler = dataRef.getButler()
        butler.put(linearityResults.outputLinearizer, "linearizer", inputDims)
        return linearityResults
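

# Usage sketch for the Gen2 path (illustrative; the repository path and
# dataId below are hypothetical, and assume a Gen2 repo containing a
# `photonTransferCurveDataset`):
#
#     from lsst.daf.persistence import Butler
#     from lsst.cp.pipe.linearity import MeasureLinearityTask
#
#     butler = Butler("/path/to/gen2/repo")
#     dataRef = butler.dataRef("photonTransferCurveDataset",
#                              dataId={"detector": 0})
#     task = MeasureLinearityTask()
#     results = task.runDataRef(dataRef)
#     linearizer = results.outputLinearizer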