Coverage for python/lsst/cp/pipe/utils.py: 11%
341 statements
coverage.py v7.4.0, created at 2024-01-20 12:45 +0000
1# This file is part of cp_pipe.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <https://www.gnu.org/licenses/>.
21#
23__all__ = ['ddict2dict', 'CovFastFourierTransform']
26import galsim
27import logging
28import numpy as np
29import itertools
30import numpy.polynomial.polynomial as poly
32from scipy.optimize import leastsq
33from scipy.stats import median_abs_deviation, norm
35from lsst.ip.isr import isrMock
36import lsst.afw.image
37import lsst.afw.math
40def sigmaClipCorrection(nSigClip):
41 """Correct measured sigma to account for clipping.
43 If we clip our input data and then measure sigma, then the
44 measured sigma is smaller than the true value because real
45 points beyond the clip threshold have been removed. This is a
46 small (1.5% at nSigClip=3) effect when nSigClip >~ 3, but the
47 default parameters for the crosstalk measurement use nSigClip=2.0.
48 This causes the measured sigma to be about 15% smaller than
49 the true value. This formula corrects the issue, for the symmetric case
50 (upper clip threshold equal to lower clip threshold).
52 Parameters
53 ----------
54 nSigClip : `float`
55 Number of sigma the measurement was clipped by.
57 Returns
58 -------
59 scaleFactor : `float`
60 Scale factor to increase the measured sigma by.
61 """
62 varFactor = 1.0 - (2 * nSigClip * norm.pdf(nSigClip)) / (norm.cdf(nSigClip) - norm.cdf(-nSigClip))
63 return 1.0 / np.sqrt(varFactor)
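# Illustrative check (a sketch, not part of the original module): clip a unit
# normal sample symmetrically at 2 sigma and confirm that the clipped standard
# deviation, scaled by sigmaClipCorrection(2.0), is close to the true value of 1.
#
#     >>> rng = np.random.RandomState(42)
#     >>> sample = rng.normal(0.0, 1.0, 1_000_000)
#     >>> clipped = sample[np.abs(sample) < 2.0]
#     >>> np.std(clipped) * sigmaClipCorrection(2.0)  # close to 1.0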
66def calculateWeightedReducedChi2(measured, model, weightsMeasured, nData, nParsModel):
67 """Calculate weighted reduced chi2.
69 Parameters
70 ----------
71 measured : `list`
72 List with measured data.
73 model : `list`
74 List with modeled data.
75 weightsMeasured : `list`
76 List with weights for the measured data.
77 nData : `int`
78 Number of data points.
79 nParsModel : `int`
80 Number of parameters in the model.
82 Returns
83 -------
84 redWeightedChi2 : `float`
85 Reduced weighted chi2.
86 """
87 wRes = (measured - model)*weightsMeasured
88 return ((wRes*wRes).sum())/(nData-nParsModel)
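# Worked example (sketch): five points with unit residuals and unit weights
# against a 2-parameter model give chi2 = 5 and a reduced chi2 of 5/3.
#
#     >>> measured = np.array([1., 2., 3., 4., 5.])
#     >>> model = measured + np.array([1., -1., 1., -1., 1.])
#     >>> calculateWeightedReducedChi2(measured, model, np.ones(5), 5, 2)  # 5 / 3 ~ 1.667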
91def makeMockFlats(expTime, gain=1.0, readNoiseElectrons=5, fluxElectrons=1000,
92 randomSeedFlat1=1984, randomSeedFlat2=666, powerLawBfParams=[],
93 expId1=0, expId2=1):
94 """Create a pair of mock flats with isrMock.
96 Parameters
97 ----------
98 expTime : `float`
99 Exposure time of the flats.
100 gain : `float`, optional
101 Gain, in e/ADU.
102 readNoiseElectrons : `float`, optional
103 Read noise rms, in electrons.
104 fluxElectrons : `float`, optional
105 Flux of flats, in electrons per second.
106 randomSeedFlat1 : `int`, optional
107 Random seed for the normal distributions for the mean signal
108 and noise (flat1).
109 randomSeedFlat2 : `int`, optional
110 Random seed for the normal distributions for the mean signal
111 and noise (flat2).
112 powerLawBfParams : `list`, optional
113 Parameters for `galsim.cdmodel.PowerLawCD` to simulate the
114 brighter-fatter effect.
115 expId1 : `int`, optional
116 Exposure ID for first flat.
117 expId2 : `int`, optional
118 Exposure ID for second flat.
120 Returns
121 -------
122 flatExp1 : `lsst.afw.image.exposure.ExposureF`
123 First exposure of flat field pair.
124 flatExp2 : `lsst.afw.image.exposure.ExposureF`
125 Second exposure of flat field pair.
127 Notes
128 -----
129 The parameters of `galsim.cdmodel.PowerLawCD` are `n, r0, t0, rx,
130 tx, r, t, alpha`. For more information about their meaning, see
131 the Galsim documentation
132 https://galsim-developers.github.io/GalSim/_build/html/_modules/galsim/cdmodel.html # noqa: W505
133 and Gruen+15 (1501.02802).
135 Example: galsim.cdmodel.PowerLawCD(8, 1.1e-7, 1.1e-7, 1.0e-8,
136 1.0e-8, 1.0e-9, 1.0e-9, 2.0)
137 """
138 flatFlux = fluxElectrons # e/s
139 flatMean = flatFlux*expTime # e
140 readNoise = readNoiseElectrons # e
142 mockImageConfig = isrMock.IsrMock.ConfigClass()
144 mockImageConfig.flatDrop = 0.99999
145 mockImageConfig.isTrimmed = True
147 flatExp1 = isrMock.FlatMock(config=mockImageConfig).run()
148 flatExp2 = flatExp1.clone()
149 (shapeY, shapeX) = flatExp1.getDimensions()
150 flatWidth = np.sqrt(flatMean)
152 rng1 = np.random.RandomState(randomSeedFlat1)
153 flatData1 = rng1.normal(flatMean, flatWidth, (shapeX, shapeY)) + rng1.normal(0.0, readNoise,
154 (shapeX, shapeY))
155 rng2 = np.random.RandomState(randomSeedFlat2)
156 flatData2 = rng2.normal(flatMean, flatWidth, (shapeX, shapeY)) + rng2.normal(0.0, readNoise,
157 (shapeX, shapeY))
158 # Simulate BF with power law model in galsim
159 if len(powerLawBfParams):
160 if not len(powerLawBfParams) == 8:
161 raise RuntimeError("Wrong number of parameters for `galsim.cdmodel.PowerLawCD`. "
162 f"Expected 8; passed {len(powerLawBfParams)}.")
163 cd = galsim.cdmodel.PowerLawCD(*powerLawBfParams)
164 tempFlatData1 = galsim.Image(flatData1)
165 temp2FlatData1 = cd.applyForward(tempFlatData1)
167 tempFlatData2 = galsim.Image(flatData2)
168 temp2FlatData2 = cd.applyForward(tempFlatData2)
170 flatExp1.image.array[:] = temp2FlatData1.array/gain # ADU
171 flatExp2.image.array[:] = temp2FlatData2.array/gain # ADU
172 else:
173 flatExp1.image.array[:] = flatData1/gain # ADU
174 flatExp2.image.array[:] = flatData2/gain # ADU
176 visitInfoExp1 = lsst.afw.image.VisitInfo(exposureTime=expTime)
177 visitInfoExp2 = lsst.afw.image.VisitInfo(exposureTime=expTime)
179 flatExp1.info.id = expId1
180 flatExp1.getInfo().setVisitInfo(visitInfoExp1)
181 flatExp2.info.id = expId2
182 flatExp2.getInfo().setVisitInfo(visitInfoExp2)
184 return flatExp1, flatExp2
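# Usage sketch (illustrative; requires the LSST Science Pipelines for
# lsst.ip.isr and lsst.afw): make a 10 s flat pair with a gain of 1.5 e/ADU
# and no brighter-fatter distortion.
#
#     >>> flat1, flat2 = makeMockFlats(expTime=10.0, gain=1.5,
#     ...                              fluxElectrons=2000)
#     >>> flat1.getInfo().getVisitInfo().getExposureTime()
#     10.0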
187def irlsFit(initialParams, dataX, dataY, function, weightsY=None, weightType='Cauchy', scaleResidual=True):
188 """Iteratively reweighted least squares fit.
190 This uses `lsst.cp.pipe.utils.fitLeastSq`, but applies weights
191 based on the Cauchy distribution by default. Other weight options
192 are implemented. See e.g. Holland and Welsch, 1977,
193 doi:10.1080/03610927708827533
195 Parameters
196 ----------
197 initialParams : `list` [`float`]
198 Starting parameters.
199 dataX : `numpy.array`, (N,)
200 Abscissa data.
201 dataY : `numpy.array`, (N,)
202 Ordinate data.
203 function : callable
204 Function to fit.
205 weightsY : `numpy.array`, (N,)
206 Weights to apply to the data.
207 weightType : `str`, optional
208 Type of weighting to use. One of Cauchy, Anderson, bisquare,
209 box, Welsch, Huber, logistic, or Fair.
210 scaleResidual : `bool`, optional
211 If true, the residual is scaled by the sqrt of the Y values.
213 Returns
214 -------
215 polyFit : `list` [`float`]
216 Final best fit parameters.
217 polyFitErr : `list` [`float`]
218 Final errors on fit parameters.
219 chiSq : `float`
220 Reduced chi squared.
221 weightsY : `list` [`float`]
222 Final weights used for each point.
224 Raises
225 ------
226 RuntimeError :
227 Raised if an unknown weightType string is passed.
228 """
229 if weightsY is None:
230 weightsY = np.ones_like(dataX)
232 polyFit, polyFitErr, chiSq = fitLeastSq(initialParams, dataX, dataY, function, weightsY=weightsY)
233 for iteration in range(10):
234 resid = np.abs(dataY - function(polyFit, dataX))
235 if scaleResidual:
236 resid = resid / np.sqrt(dataY)
237 if weightType == 'Cauchy':
238 # Use Cauchy weighting. This is a soft weight.
239 # At [2, 3, 5, 10] sigma, weights are [.59, .39, .19, .05].
240 Z = resid / 2.385
241 weightsY = 1.0 / (1.0 + np.square(Z))
242 elif weightType == 'Anderson':
243 # Anderson+1972 weighting. This is a hard weight.
244 # At [2, 3, 5, 10] sigma, weights are [.67, .35, 0.0, 0.0].
245 Z = resid / (1.339 * np.pi)
246 weightsY = np.where(Z < 1.0, np.sinc(Z), 0.0)
247 elif weightType == 'bisquare':
248 # Beaton and Tukey (1974) biweight. This is a hard weight.
249 # At [2, 3, 5, 10] sigma, weights are [.81, .59, 0.0, 0.0].
250 Z = resid / 4.685
251 weightsY = np.where(Z < 1.0, 1.0 - np.square(Z), 0.0)
252 elif weightType == 'box':
253 # Hinich and Talwar (1975). This is a hard weight.
254 # At [2, 3, 5, 10] sigma, weights are [1.0, 0.0, 0.0, 0.0].
255 weightsY = np.where(resid < 2.795, 1.0, 0.0)
256 elif weightType == 'Welsch':
257 # Dennis and Welsch (1976). This is a hard weight.
258 # At [2, 3, 5, 10] sigma, weights are [.64, .36, .06, 1e-5].
259 Z = resid / 2.985
260 weightsY = np.exp(-1.0 * np.square(Z))
261 elif weightType == 'Huber':
262 # Huber (1964) weighting. This is a soft weight.
263 # At [2, 3, 5, 10] sigma, weights are [.67, .45, .27, .13].
264 Z = resid / 1.345
265 weightsY = np.where(Z < 1.0, 1.0, 1 / Z)
266 elif weightType == 'logistic':
267 # Logistic weighting. This is a soft weight.
268 # At [2, 3, 5, 10] sigma, weights are [.56, .40, .24, .12].
269 Z = resid / 1.205
270 weightsY = np.tanh(Z) / Z
271 elif weightType == 'Fair':
272 # Fair (1974) weighting. This is a soft weight.
273 # At [2, 3, 5, 10] sigma, weights are [.41, .32, .22, .12].
274 Z = resid / 1.4
275 weightsY = (1.0 / (1.0 + (Z)))
276 else:
277 raise RuntimeError(f"Unknown weighting type: {weightType}")
278 polyFit, polyFitErr, chiSq = fitLeastSq(initialParams, dataX, dataY, function, weightsY=weightsY)
280 return polyFit, polyFitErr, chiSq, weightsY
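# Example sketch: robust fit of a straight line with two gross outliers using
# the default Cauchy weighting and the funcPolynomial model defined below.
# scaleResidual=False because the ordinate values here are not Poisson counts.
#
#     >>> x = np.linspace(1., 10., 20)
#     >>> y = 2.0 + 3.0 * x
#     >>> y[5] += 50.
#     >>> y[15] -= 50.
#     >>> pars, parErrs, chiSq, weights = irlsFit([1., 1.], x, y,
#     ...                                         funcPolynomial,
#     ...                                         scaleResidual=False)
#     >>> pars  # approximately [2., 3.]; the outliers are strongly down-weighted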
283def fitLeastSq(initialParams, dataX, dataY, function, weightsY=None):
284 """Do a fit and estimate the parameter errors using
285 scipy.optimize.leastsq.
287 optimize.leastsq returns the fractional covariance matrix. To
288 estimate the standard deviation of the fit parameters, multiply
289 the entries of this matrix by the unweighted reduced chi squared
290 and take the square root of the diagonal elements.
292 Parameters
293 ----------
294 initialParams : `list` [`float`]
295 initial values for fit parameters. For ptcFitType=POLYNOMIAL,
296 its length determines the degree of the polynomial.
297 dataX : `numpy.array`, (N,)
298 Data in the abscissa axis.
299 dataY : `numpy.array`, (N,)
300 Data in the ordinate axis.
301 function : callable object (function)
302 Function to fit the data with.
303 weightsY : `numpy.array`, (N,)
304 Weights of the data in the ordinate axis.
306 Returns
307 -------
308 pFitSingleLeastSquares : `list` [`float`]
309 List with fitted parameters.
310 pErrSingleLeastSquares : `list` [`float`]
311 List with errors for fitted parameters.
313 reducedChiSqSingleLeastSquares : `float`
314 Reduced chi squared, unweighted if weightsY is not provided.
315 """
316 if weightsY is None:
317 weightsY = np.ones(len(dataX))
319 def errFunc(p, x, y, weightsY=None):
320 if weightsY is None:
321 weightsY = np.ones(len(x))
322 return (function(p, x) - y)*weightsY
324 pFit, pCov, infoDict, errMessage, success = leastsq(errFunc, initialParams,
325 args=(dataX, dataY, weightsY), full_output=1,
326 epsfcn=0.0001)
328 if (len(dataY) > len(initialParams)) and pCov is not None:
329 reducedChiSq = calculateWeightedReducedChi2(dataY, function(pFit, dataX), weightsY, len(dataY),
330 len(initialParams))
331 pCov *= reducedChiSq
332 else:
333 pCov = np.zeros((len(initialParams), len(initialParams)))
334 pCov[:, :] = np.nan
335 reducedChiSq = np.nan
337 errorVec = []
338 for i in range(len(pFit)):
339 errorVec.append(np.fabs(pCov[i][i])**0.5)
341 pFitSingleLeastSquares = pFit
342 pErrSingleLeastSquares = np.array(errorVec)
344 return pFitSingleLeastSquares, pErrSingleLeastSquares, reducedChiSq
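# Example sketch: fit a noiseless quadratic with funcPolynomial (defined
# below). The parameter errors are the square roots of the diagonal of the
# leastsq covariance matrix scaled by the reduced chi2, as described above.
#
#     >>> x = np.linspace(0., 10., 50)
#     >>> y = 1.0 + 2.0 * x + 0.5 * x**2
#     >>> pars, parErrs, redChi2 = fitLeastSq([1., 1., 1.], x, y,
#     ...                                     funcPolynomial)
#     >>> pars  # approximately [1.0, 2.0, 0.5]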
347def fitBootstrap(initialParams, dataX, dataY, function, weightsY=None, confidenceSigma=1.):
348 """Do a fit using least squares and bootstrap to estimate parameter errors.
350 The bootstrap error bars are calculated by fitting 100 random data sets.
352 Parameters
353 ----------
354 initialParams : `list` [`float`]
355 initial values for fit parameters. For ptcFitType=POLYNOMIAL,
356 its length determines the degree of the polynomial.
357 dataX : `numpy.array`, (N,)
358 Data in the abscissa axis.
359 dataY : `numpy.array`, (N,)
360 Data in the ordinate axis.
361 function : callable object (function)
362 Function to fit the data with.
363 weightsY : `numpy.array`, (N,), optional.
364 Weights of the data in the ordinate axis.
365 confidenceSigma : `float`, optional.
366 Number of sigmas that determine confidence interval for the
367 bootstrap errors.
369 Returns
370 -------
371 pFitBootstrap : `list` [`float`]
372 List with fitted parameters.
373 pErrBootstrap : `list` [`float`]
374 List with errors for fitted parameters.
375 reducedChiSqBootstrap : `float`
376 Reduced chi squared, unweighted if weightsY is not provided.
377 """
378 if weightsY is None:
379 weightsY = np.ones(len(dataX))
381 def errFunc(p, x, y, weightsY):
382 if weightsY is None:
383 weightsY = np.ones(len(x))
384 return (function(p, x) - y)*weightsY
386 # Fit first time
387 pFit, _ = leastsq(errFunc, initialParams, args=(dataX, dataY, weightsY), full_output=0)
389 # Get the stdev of the residuals
390 residuals = errFunc(pFit, dataX, dataY, weightsY)
391 # 100 random data sets are generated and fitted
392 pars = []
393 for i in range(100):
394 randomDelta = np.random.normal(0., np.fabs(residuals), len(dataY))
395 randomDataY = dataY + randomDelta
396 randomFit, _ = leastsq(errFunc, initialParams,
397 args=(dataX, randomDataY, weightsY), full_output=0)
398 pars.append(randomFit)
399 pars = np.array(pars)
400 meanPfit = np.mean(pars, 0)
402 # confidence interval for parameter estimates
403 errPfit = confidenceSigma*np.std(pars, 0)
404 pFitBootstrap = meanPfit
405 pErrBootstrap = errPfit
407 reducedChiSq = calculateWeightedReducedChi2(dataY, function(pFitBootstrap, dataX), weightsY, len(dataY),
408 len(initialParams))
409 return pFitBootstrap, pErrBootstrap, reducedChiSq
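# Usage sketch: same call pattern as fitLeastSq, but the parameter errors come
# from refitting 100 data sets perturbed by the fit residuals, so the result
# is not deterministic.
#
#     >>> x = np.linspace(0., 10., 50)
#     >>> y = 1.0 + 2.0 * x + 0.5 * x**2
#     >>> pars, parErrs, redChi2 = fitBootstrap([1., 1., 1.], x, y,
#     ...                                       funcPolynomial)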
412def funcPolynomial(pars, x):
413 """Polynomial function definition
414 Parameters
415 ----------
416 pars : `list`
417 Polynomial coefficients. Its length determines the polynomial order.
419 x : `numpy.array`, (N,)
420 Abscissa array.
422 Returns
423 -------
424 y : `numpy.array`, (N,)
425 Ordinate array after evaluating polynomial of order
426 len(pars)-1 at `x`.
427 """
428 return poly.polyval(x, [*pars])
431def funcAstier(pars, x):
432 """Single brighter-fatter parameter model for PTC; Equation 16 of
433 Astier+19.
435 Parameters
436 ----------
437 pars : `list`
438 Parameters of the model: a00 (brighter-fatter), gain (e/ADU),
439 and noise (e^2).
440 x : `numpy.array`, (N,)
441 Signal mu (ADU).
443 Returns
444 -------
445 y : `numpy.array`, (N,)
446 C_00 (variance) in ADU^2.
447 """
448 a00, gain, noise = pars
449 return 0.5/(a00*gain*gain)*(np.exp(2*a00*x*gain)-1) + noise/(gain*gain) # C_00
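# Example sketch: evaluate the PTC model at a few signal levels. For small
# a00*gain*mu the expression reduces to mu/gain + noise/gain**2, i.e. the
# familiar linear photon-transfer relation.
#
#     >>> mu = np.array([1000., 10000., 50000.])  # ADU
#     >>> funcAstier([1.0e-6, 1.5, 25.0], mu)  # [a00, gain (e/ADU), noise (e^2)]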
452def arrangeFlatsByExpTime(exposureList, exposureIdList, log=None):
453 """Arrange exposures by exposure time.
455 Parameters
456 ----------
457 exposureList : `list` [`lsst.pipe.base.connections.DeferredDatasetRef`]
458 Input list of exposure references.
459 exposureIdList : `list` [`int`]
460 List of exposure ids as obtained by dataId[`exposure`].
461 log : `lsst.utils.logging.LsstLogAdapter`, optional
462 Log object.
464 Returns
465 -------
466 flatsAtExpTime : `dict` [`float`,
467 `list`[(`lsst.pipe.base.connections.DeferredDatasetRef`,
468 `int`)]]
469 Dictionary that groups references to flat-field exposures
470 (and their IDs) that have the same exposure time (seconds).
471 """
472 flatsAtExpTime = {}
473 assert len(exposureList) == len(exposureIdList), "Different lengths for exp. list and exp. ID lists"
474 for expRef, expId in zip(exposureList, exposureIdList):
475 expTime = expRef.get(component='visitInfo').exposureTime
476 if not np.isfinite(expTime) and log is not None:
477 log.warning("Exposure %d has non-finite exposure time.", expId)
478 listAtExpTime = flatsAtExpTime.setdefault(expTime, [])
479 listAtExpTime.append((expRef, expId))
481 return flatsAtExpTime
484def arrangeFlatsByExpFlux(exposureList, exposureIdList, fluxKeyword, log=None):
485 """Arrange exposures by exposure flux.
487 Parameters
488 ----------
489 exposureList : `list` [`lsst.pipe.base.connections.DeferredDatasetRef`]
490 Input list of exposure references.
491 exposureIdList : `list` [`int`]
492 List of exposure ids as obtained by dataId[`exposure`].
493 fluxKeyword : `str`
494 Header keyword that contains the flux per exposure.
495 log : `lsst.utils.logging.LsstLogAdapter`, optional
496 Log object.
498 Returns
499 -------
500 flatsAtFlux : `dict` [`float`,
501 `list`[(`lsst.pipe.base.connections.DeferredDatasetRef`,
502 `int`)]]
503 Dictionary that groups references to flat-field exposures
504 (and their IDs) that have the same flux.
505 """
506 flatsAtExpFlux = {}
507 assert len(exposureList) == len(exposureIdList), "Different lengths for exp. list and exp. ID lists"
508 for expRef, expId in zip(exposureList, exposureIdList):
509 # Get flux from header, assuming it is in the metadata.
510 try:
511 expFlux = expRef.get().getMetadata()[fluxKeyword]
512 except KeyError:
513 # If it's missing from the header, continue; it will
514 # be caught and rejected when pairing exposures.
515 expFlux = None
516 if expFlux is None:
517 if log is not None:
518 log.warning("Exposure %d does not have valid header keyword %s.", expId, fluxKeyword)
519 expFlux = np.nan
520 listAtExpFlux = flatsAtExpFlux.setdefault(expFlux, [])
521 listAtExpFlux.append((expRef, expId))
523 return flatsAtExpFlux
526def arrangeFlatsByExpId(exposureList, exposureIdList):
527 """Arrange exposures by exposure ID.
529 There is no guarantee that this will properly group exposures, but
530 it allows a sequence of flats that have different illumination
531 (despite having the same exposure time) to be processed.
533 Parameters
534 ----------
535 exposureList : `list`[`lsst.pipe.base.connections.DeferredDatasetRef`]
536 Input list of exposure references.
537 exposureIdList : `list`[`int`]
538 List of exposure ids as obtained by dataId[`exposure`].
540 Returns
541 -------
542 flatsAtExpId : `dict` [`float`,
543 `list`[(`lsst.pipe.base.connections.DeferredDatasetRef`,
544 `int`)]]
545 Dictionary that groups references to flat-field exposures (and their
546 IDs) sequentially by their exposure id.
548 Notes
549 -----
551 This algorithm sorts the input exposure references by their exposure
552 id, and then assigns each pair of exposure references (exp_j, exp_{j+1})
553 to pair k, such that 2*k = j, where j is the python index of one of the
554 exposure references (starting from zero). By checking for the IndexError
555 while appending, we can ensure that there will only ever be fully
556 populated pairs.
557 """
558 flatsAtExpId = {}
559 assert len(exposureList) == len(exposureIdList), "Different lengths for exp. list and exp. ID lists"
560 # Sort exposures by expIds, which are in the second list `exposureIdList`.
561 sortedExposures = sorted(zip(exposureList, exposureIdList), key=lambda pair: pair[1])
563 for jPair, expTuple in enumerate(sortedExposures):
564 if (jPair + 1) % 2:
565 kPair = jPair // 2
566 listAtExpId = flatsAtExpId.setdefault(kPair, [])
567 try:
568 listAtExpId.append(expTuple)
569 listAtExpId.append(sortedExposures[jPair + 1])
570 except IndexError:
571 pass
573 return flatsAtExpId
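# Pairing sketch (illustrative): the references are only sorted by exposure ID
# and never dereferenced here, so plain strings stand in for dataset references.
# With an even number of inputs every key holds one complete flat pair.
#
#     >>> refs = ['refD', 'refB', 'refA', 'refC']
#     >>> ids = [4, 2, 1, 3]
#     >>> arrangeFlatsByExpId(refs, ids)
#     {0: [('refA', 1), ('refB', 2)], 1: [('refC', 3), ('refD', 4)]}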
576class CovFastFourierTransform:
577 """A class to compute (via FFT) the nearby pixels correlation function.
579 Implements appendix of Astier+19.
581 Parameters
582 ----------
583 diff : `numpy.array`
584 Image where to calculate the covariances (e.g., the difference
585 image of two flats).
586 w : `numpy.array`
587 Weight image (mask): it should consist of 1's (good pixel) and
588 0's (bad pixels).
589 fftShape : `tuple`
590 2d-tuple with the shape of the FFT
591 maxRangeCov : `int`
592 Maximum range for the covariances.
593 """
595 def __init__(self, diff, w, fftShape, maxRangeCov):
596 # check that the zero padding implied by "fft_shape"
597 # is large enough for the required correlation range
598 assert fftShape[0] > diff.shape[0]+maxRangeCov+1
599 assert fftShape[1] > diff.shape[1]+maxRangeCov+1
600 # for some reason related to numpy.fft.rfftn,
601 # the second dimension should be even, so
602 if fftShape[1]%2 == 1:
603 fftShape = (fftShape[0], fftShape[1]+1)
604 tIm = np.fft.rfft2(diff*w, fftShape)
605 tMask = np.fft.rfft2(w, fftShape)
606 # sum of "squares"
607 self.pCov = np.fft.irfft2(tIm*tIm.conjugate())
608 # sum of values
609 self.pMean = np.fft.irfft2(tIm*tMask.conjugate())
610 # number of w!=0 pixels.
611 self.pCount = np.fft.irfft2(tMask*tMask.conjugate())
613 def cov(self, dx, dy):
614 """Covariance for dx,dy averaged with dx,-dy if both non zero.
616 Implements appendix of Astier+19.
618 Parameters
619 ----------
620 dx : `int`
621 Lag in x
622 dy : `int`
623 Lag in y
625 Returns
626 -------
627 0.5*(cov1+cov2) : `float`
628 Covariance at (dx, dy) lag
629 npix1+npix2 : `int`
630 Number of pixels used in covariance calculation.
632 Raises
633 ------
634 ValueError if number of pixels for a given lag is 0.
635 """
636 # compensate rounding errors
637 nPix1 = int(round(self.pCount[dy, dx]))
638 if nPix1 == 0:
639 raise ValueError(f"Could not compute covariance term {dy}, {dx}, as there are no good pixels.")
640 cov1 = self.pCov[dy, dx]/nPix1-self.pMean[dy, dx]*self.pMean[-dy, -dx]/(nPix1*nPix1)
641 if (dx == 0 or dy == 0):
642 return cov1, nPix1
643 nPix2 = int(round(self.pCount[-dy, dx]))
644 if nPix2 == 0:
645 raise ValueError(f"Could not compute covariance term {dy}, {dx}, as there are no good pixels.")
646 cov2 = self.pCov[-dy, dx]/nPix2-self.pMean[-dy, dx]*self.pMean[dy, -dx]/(nPix2*nPix2)
647 return 0.5*(cov1+cov2), nPix1+nPix2
649 def reportCovFastFourierTransform(self, maxRange):
650 """Produce a list of tuples with covariances.
652 Implements appendix of Astier+19.
654 Parameters
655 ----------
656 maxRange : `int`
657 Maximum range of covariances.
659 Returns
660 -------
661 tupleVec : `list`
662 List with covariance tuples.
663 """
664 tupleVec = []
665 # (dy,dx) = (0,0) has to be first
666 for dy in range(maxRange+1):
667 for dx in range(maxRange+1):
668 cov, npix = self.cov(dx, dy)
669 if (dx == 0 and dy == 0):
670 var = cov
671 tupleVec.append((dx, dy, var, cov, npix))
672 return tupleVec
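# Usage sketch: autocovariance of a white-noise "difference image". The
# zero-padded FFT shape must exceed the image shape by more than
# maxRangeCov + 1 in each dimension; here every pixel is flagged as good.
#
#     >>> rng = np.random.RandomState(1234)
#     >>> diff = rng.normal(0.0, 10.0, (200, 200))
#     >>> w = np.ones_like(diff)
#     >>> covFft = CovFastFourierTransform(diff, w, (256, 256), 8)
#     >>> tuples = covFft.reportCovFastFourierTransform(8)
#     >>> tuples[0][2]  # variance at lag (0, 0), close to 100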
675def getFitDataFromCovariances(i, j, mu, fullCov, fullCovModel, fullCovSqrtWeights, gain=1.0,
676 divideByMu=False, returnMasked=False):
677 """Get measured signal and covariance, cov model, weights, and mask at
678 covariance lag (i, j).
680 Parameters
681 ----------
682 i : `int`
683 Lag for covariance matrix.
684 j : `int`
685 Lag for covariance matrix.
686 mu : `list`
687 Mean signal values.
688 fullCov : `list` of `numpy.array`
689 Measured covariance matrices at each mean signal level in mu.
690 fullCovSqrtWeights : `list` of `numpy.array`
691 List of square root of measured covariances at each mean
692 signal level in mu.
693 fullCovModel : `list` of `numpy.array`
694 List of modeled covariances at each mean signal level in mu.
695 gain : `float`, optional
696 Gain, in e-/ADU. If other than 1.0 (default), the returned
697 quantities will be in electrons or powers of electrons.
698 divideByMu : `bool`, optional
699 Divide returned covariance, model, and weights by the mean
700 signal mu?
701 returnMasked : `bool`, optional
702 Use mask (based on weights) in returned arrays (mu,
703 covariance, and model)?
705 Returns
706 -------
707 mu : `numpy.array`
708 list of signal values at (i, j).
709 covariance : `numpy.array`
710 Covariance at (i, j) at each mean signal mu value (fullCov[:, i, j]).
711 covarianceModel : `numpy.array`
712 Covariance model at (i, j).
713 weights : `numpy.array`
714 Weights at (i, j).
715 maskFromWeights : `numpy.array`, optional
716 Boolean mask of the covariance at (i,j), where the weights
717 differ from 0.
718 """
719 mu = np.array(mu)
720 fullCov = np.array(fullCov)
721 fullCovModel = np.array(fullCovModel)
722 fullCovSqrtWeights = np.array(fullCovSqrtWeights)
723 covariance = fullCov[:, i, j]*(gain**2)
724 covarianceModel = fullCovModel[:, i, j]*(gain**2)
725 weights = fullCovSqrtWeights[:, i, j]/(gain**2)
727 maskFromWeights = weights != 0
728 if returnMasked:
729 weights = weights[maskFromWeights]
730 covarianceModel = covarianceModel[maskFromWeights]
731 mu = mu[maskFromWeights]
732 covariance = covariance[maskFromWeights]
734 if divideByMu:
735 covariance /= mu
736 covarianceModel /= mu
737 weights *= mu
738 return mu, covariance, covarianceModel, weights, maskFromWeights
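# Example sketch with toy inputs: two signal levels with 3x3 covariance
# matrices; extract the (0, 0) lag (the variance) and divide by the mean
# signal, as is done when plotting PTC-like quantities.
#
#     >>> mu = [1000., 2000.]
#     >>> fullCov = [np.full((3, 3), 900.), np.full((3, 3), 1800.)]
#     >>> fullCovModel = [np.full((3, 3), 910.), np.full((3, 3), 1810.)]
#     >>> sqrtWeights = [np.ones((3, 3)), np.ones((3, 3))]
#     >>> out = getFitDataFromCovariances(0, 0, mu, fullCov, fullCovModel,
#     ...                                 sqrtWeights, divideByMu=True)
#     >>> out[1]  # covariance / mu, i.e. array([0.9, 0.9])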
741def symmetrize(inputArray):
742 """Copy array over 4 quadrants prior to convolution.
744 Parameters
745 ----------
746 inputArray : `numpy.array`
747 Input array to symmetrize.
749 Returns
750 -------
751 aSym : `numpy.array`
752 Symmetrized array.
753 """
754 targetShape = list(inputArray.shape)
755 r1, r2 = inputArray.shape[-1], inputArray.shape[-2]
756 targetShape[-1] = 2*r1-1
757 targetShape[-2] = 2*r2-1
758 aSym = np.ndarray(tuple(targetShape))
759 aSym[..., r2-1:, r1-1:] = inputArray
760 aSym[..., r2-1:, r1-1::-1] = inputArray
761 aSym[..., r2-1::-1, r1-1::-1] = inputArray
762 aSym[..., r2-1::-1, r1-1:] = inputArray
764 return aSym
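# Example sketch: a 2x2 input is mirrored about its first row and column into
# a 3x3 array whose central element is inputArray[0, 0].
#
#     >>> a = np.array([[1., 2.], [3., 4.]])
#     >>> symmetrize(a)
#     array([[4., 3., 4.],
#            [2., 1., 2.],
#            [4., 3., 4.]])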
767def ddict2dict(d):
768 """Convert nested default dictionaries to regular dictionaries.
770 This is needed to prevent yaml persistence issues.
772 Parameters
773 ----------
774 d : `defaultdict`
775 A possibly nested set of `defaultdict`.
777 Returns
778 -------
779 dict : `dict`
780 A possibly nested set of `dict`.
781 """
782 for k, v in d.items():
783 if isinstance(v, dict):
784 d[k] = ddict2dict(v)
785 return dict(d)
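# Example sketch: flatten a nested collections.defaultdict so that it can be
# persisted without the embedded default factory.
#
#     >>> from collections import defaultdict
#     >>> d = defaultdict(lambda: defaultdict(int))
#     >>> d['amp1']['cov'] = 3
#     >>> ddict2dict(d)
#     {'amp1': {'cov': 3}}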
788class Pol2D:
789 """2D Polynomial Regression.
791 Parameters
792 ----------
793 x : numpy.ndarray
794 Input array for the x-coordinate.
795 y : numpy.ndarray
796 Input array for the y-coordinate.
797 z : numpy.ndarray
798 Input array for the dependent variable.
799 order : int
800 Order of the polynomial.
801 w : numpy.ndarray, optional
802 Weight array for weighted regression. Default is None.
804 Notes
805 -----
806 Ported from https://gitlab.in2p3.fr/astier/bfptc by P. Astier.
808 Example:
809 >>> x = np.array([1, 2, 3])
810 >>> y = np.array([4, 5, 6])
811 >>> z = np.array([7, 8, 9])
812 >>> order = 2
813 >>> poly_reg = Pol2D(x, y, z, order)
814 >>> result = poly_reg.eval(2.5, 5.5)
815 """
816 def __init__(self, x, y, z, order, w=None):
817 """
818 orderx : `int`
819 Effective order in the x-direction.
820 ordery : `int`
821 Effective order in the y-direction.
822 coeff : `numpy.ndarray`
823 Coefficients of the polynomial regression.
824 """
825 self.orderx = min(order, x.shape[0] - 1)
826 self.ordery = min(order, x.shape[1] - 1)
827 G = self.monomials(x.ravel(), y.ravel())
828 if w is None:
829 self.coeff, _, rank, _ = np.linalg.lstsq(G, z.ravel(), rcond=None)
830 else:
831 self.coeff, _, rank, _ = np.linalg.lstsq((w.ravel() * G.T).T, z.ravel() * w.ravel(), rcond=None)
833 def monomials(self, x, y):
834 """
835 Generate the monomials matrix for the given x and y.
837 Parameters
838 ----------
839 x : numpy.ndarray
840 Input array for the x-coordinate.
841 y : numpy.ndarray
842 Input array for the y-coordinate.
844 Returns
845 -------
846 G : numpy.ndarray
847 Monomials matrix.
848 """
849 ncols = (self.orderx + 1) * (self.ordery + 1)
850 G = np.zeros(x.shape + (ncols,))
851 ij = itertools.product(range(self.orderx + 1), range(self.ordery + 1))
852 for k, (i, j) in enumerate(ij):
853 G[..., k] = x**i * y**j
854 return G
856 def eval(self, x, y):
857 """
858 Evaluate the polynomial at the given x and y coordinates.
860 Parameters
861 ----------
862 x : `float`
863 x-coordinate for evaluation.
864 y : `float`
865 y-coordinate for evaluation.
867 Returns
868 -------
869 result : `float`
870 Result of the polynomial evaluation.
871 """
872 G = self.monomials(x, y)
873 return np.dot(G, self.coeff)
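# Usage sketch: recover a known plane z = 1 + 2*x + 3*y on a mesh grid and
# evaluate it at an arbitrary point. Note that both the constructor and eval
# expect numpy inputs (eval uses the .shape attribute of its arguments).
#
#     >>> xg, yg = np.meshgrid(np.linspace(0., 1., 4), np.linspace(0., 1., 5))
#     >>> zg = 1.0 + 2.0 * xg + 3.0 * yg
#     >>> plane = Pol2D(xg, yg, zg, order=1)
#     >>> plane.eval(np.array([0.5]), np.array([0.25]))  # ~ array([2.75])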
876class AstierSplineLinearityFitter:
877 """Class to fit the Astier spline linearity model.
879 This is a spline fit with photodiode data based on a model
880 from Pierre Astier, referenced in June 2023 from
881 https://me.lsst.eu/astier/bot/7224D/model_nonlin.py
883 This model fits a spline with (optional) nuisance parameters
884 to allow for different linearity coefficients with different
885 photodiode settings. The minimization is a least-squares
886 fit with the residual of
887 Sum[(S(mu_i) + mu_i)/(k_j * D_i) - 1]**2, where S(mu_i) is
888 an Akima Spline function of mu_i, the observed flat-pair
889 mean; D_i is the photodiode measurement corresponding to
890 that flat pair; and k_j is a constant of proportionality
891 that is indexed by j because it is allowed to differ
892 between photodiode settings (e.g. CCOBCURR).
895 The fit has additional constraints to ensure that the spline
896 goes through the (0, 0) point, as well as a normalization
897 condition so that the average of the spline over the full
898 range is 0. The normalization ensures that the spline only
899 fits deviations from linearity, rather than the linear
900 function itself which is degenerate with the gain.
902 Parameters
903 ----------
904 nodes : `np.ndarray` (N,)
905 Array of spline node locations.
906 grouping_values : `np.ndarray` (M,)
907 Array of values to group values for different proportionality
908 constants (e.g. CCOBCURR).
909 pd : `np.ndarray` (M,)
910 Array of photodiode measurements.
911 mu : `np.ndarray` (M,)
912 Array of flat mean values.
913 mask : `np.ndarray` (M,), optional
914 Input mask (True is good point, False is bad point).
915 log : `logging.Logger`, optional
916 Logger object to use for logging.
917 """
918 def __init__(self, nodes, grouping_values, pd, mu, mask=None, log=None):
919 self._pd = pd
920 self._mu = mu
921 self._grouping_values = grouping_values
922 self.log = log if log else logging.getLogger(__name__)
924 self._nodes = nodes
925 if nodes[0] != 0.0:
926 raise ValueError("First node must be 0.0")
927 if not np.all(np.diff(nodes) > 0):
928 raise ValueError("Nodes must be sorted with no repeats.")
930 # Check if sorted (raise otherwise)
931 if not np.all(np.diff(self._grouping_values) >= 0):
932 raise ValueError("Grouping values must be sorted.")
934 _, uindex, ucounts = np.unique(self._grouping_values, return_index=True, return_counts=True)
935 self.ngroup = len(uindex)
937 self.group_indices = []
938 for i in range(self.ngroup):
939 self.group_indices.append(np.arange(uindex[i], uindex[i] + ucounts[i]))
941 # Outlier weight values. Will be 1 (in) or 0 (out).
942 self._w = np.ones(len(self._pd))
944 if mask is not None:
945 self._w[~mask] = 0.0
947 # Values to regularize spline fit.
948 self._x_regularize = np.linspace(0.0, self._mu[self.mask].max(), 100)
950 def estimate_p0(self):
951 """Estimate initial fit parameters.
953 Returns
954 -------
955 p0 : `np.ndarray`
956 Parameter array, with spline values (one for each node) followed
957 by proportionality constants (one for each group).
958 """
959 npt = len(self._nodes) + self.ngroup
960 p0 = np.zeros(npt)
962 # Do a simple linear fit and set all the constants to this.
963 linfit = np.polyfit(self._pd[self.mask], self._mu[self.mask], 1)
964 p0[-self.ngroup:] = linfit[0]
966 # Look at the residuals...
967 ratio_model = self.compute_ratio_model(
968 self._nodes,
969 self.group_indices,
970 p0,
971 self._pd,
972 self._mu,
973 )
974 # ...and adjust the linear parameters accordingly.
975 p0[-self.ngroup:] *= np.median(ratio_model[self.mask])
977 # Re-compute the residuals.
978 ratio_model2 = self.compute_ratio_model(
979 self._nodes,
980 self.group_indices,
981 p0,
982 self._pd,
983 self._mu,
984 )
986 # And compute a first guess of the spline nodes.
987 bins = np.searchsorted(self._nodes, self._mu[self.mask])
988 tot_arr = np.zeros(len(self._nodes))
989 n_arr = np.zeros(len(self._nodes), dtype=int)
990 np.add.at(tot_arr, bins, ratio_model2[self.mask])
991 np.add.at(n_arr, bins, 1)
993 ratio = np.ones(len(self._nodes))
994 ratio[n_arr > 0] = tot_arr[n_arr > 0]/n_arr[n_arr > 0]
995 ratio[0] = 1.0
996 p0[0: len(self._nodes)] = (ratio - 1) * self._nodes
998 return p0
1000 @staticmethod
1001 def compute_ratio_model(nodes, group_indices, pars, pd, mu, return_spline=False):
1002 """Compute the ratio model values.
1004 Parameters
1005 ----------
1006 nodes : `np.ndarray` (M,)
1007 Array of node positions.
1008 group_indices : `list` [`np.ndarray`]
1009 List of group indices, one array for each group.
1010 pars : `np.ndarray`
1011 Parameter array, with spline values (one for each node) followed
1012 by proportionality constants (one for each group.)
1013 pd : `np.ndarray` (N,)
1014 Array of photodiode measurements.
1015 mu : `np.ndarray` (N,)
1016 Array of flat means.
1017 return_spline : `bool`, optional
1018 Return the spline interpolation as well as the model ratios?
1020 Returns
1021 -------
1022 ratio_models : `np.ndarray` (N,)
1023 Model ratio, (mu_i - S(mu_i))/(k_j * D_i)
1024 spl : `lsst.afw.math.Interpolate`
1025 Spline interpolator (returned if return_spline=True).
1026 """
1027 spl = lsst.afw.math.makeInterpolate(
1028 nodes,
1029 pars[0: len(nodes)],
1030 lsst.afw.math.stringToInterpStyle("AKIMA_SPLINE"),
1031 )
1033 numerator = mu - spl.interpolate(mu)
1034 denominator = pd.copy()
1035 ngroup = len(group_indices)
1036 kj = pars[-ngroup:]
1037 for j in range(ngroup):
1038 denominator[group_indices[j]] *= kj[j]
1040 if return_spline:
1041 return numerator / denominator, spl
1042 else:
1043 return numerator / denominator
1045 def fit(self, p0, min_iter=3, max_iter=20, max_rejection_per_iteration=5, n_sigma_clip=5.0):
1046 """
1047 Perform iterative fit for linear + spline model with offsets.
1049 Parameters
1050 ----------
1051 p0 : `np.ndarray`
1052 Initial fit parameters (one for each knot, followed by one for
1053 each grouping).
1054 min_iter : `int`, optional
1055 Minimum number of fit iterations.
1056 max_iter : `int`, optional
1057 Maximum number of fit iterations.
1058 max_rejection_per_iteration : `int`, optional
1059 Maximum number of points to reject per iteration.
1060 n_sigma_clip : `float`, optional
1061 Number of sigma to do clipping in each iteration.
1062 """
1063 init_params = p0
1064 for k in range(max_iter):
1065 params, cov_params, _, msg, ierr = leastsq(
1066 self,
1067 init_params,
1068 full_output=True,
1069 ftol=1e-5,
1070 maxfev=12000,
1071 )
1072 init_params = params.copy()
1074 # We need to cut off the constraints at the end (there are more
1075 # residuals than data points.)
1076 res = self(params)[: len(self._w)]
1077 std_res = median_abs_deviation(res[self.good_points], scale="normal")
1078 sample = len(self.good_points)
1080 # We don't want to reject too many outliers at once.
1081 if sample > max_rejection_per_iteration:
1082 sres = np.sort(np.abs(res))
1083 cut = max(sres[-max_rejection_per_iteration], std_res*n_sigma_clip)
1084 else:
1085 cut = std_res*n_sigma_clip
1087 outliers = np.abs(res) > cut
1088 self._w[outliers] = 0
1089 if outliers.sum() != 0:
1090 self.log.info(
1091 "After iteration %d there are %d outliers (of %d).",
1092 k,
1093 outliers.sum(),
1094 sample,
1095 )
1096 elif k >= min_iter:
1097 self.log.info("After iteration %d there are no more outliers.", k)
1098 break
1100 return params
1102 @property
1103 def mask(self):
1104 return (self._w > 0)
1106 @property
1107 def good_points(self):
1108 return self.mask.nonzero()[0]
1110 def __call__(self, pars):
1112 ratio_model, spl = self.compute_ratio_model(
1113 self._nodes,
1114 self.group_indices,
1115 pars,
1116 self._pd,
1117 self._mu,
1118 return_spline=True,
1119 )
1121 resid = self._w*(ratio_model - 1.0)
1122 # Ensure masked points have 0 residual.
1123 resid[~self.mask] = 0.0
1125 constraint = [1e3 * np.mean(spl.interpolate(self._x_regularize))]
1126 # 0 should transform to 0
1127 constraint.append(spl.interpolate(0)*1e10)
1129 return np.hstack([resid, constraint])
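# Usage sketch (illustrative; requires lsst.afw.math from the LSST Science
# Pipelines). Fit the spline linearity model to synthetic flat-pair means with
# a mild quadratic nonlinearity, all taken with a single photodiode setting
# (one group); the node positions and amplitudes below are arbitrary.
#
#     >>> pd = np.linspace(1.0, 100.0, 50)
#     >>> trueSignal = 1000.0 * pd
#     >>> mu = trueSignal * (1.0 - 2.0e-7 * trueSignal)
#     >>> nodes = np.linspace(0.0, mu.max(), 5)
#     >>> grouping = np.zeros(len(pd))
#     >>> fitter = AstierSplineLinearityFitter(nodes, grouping, pd, mu)
#     >>> pars = fitter.fit(fitter.estimate_p0())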