# This file is part of cp_pipe.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
__all__ = ['ddict2dict', 'CovFastFourierTransform']

import numpy as np
from scipy.optimize import leastsq
import numpy.polynomial.polynomial as poly
from scipy.stats import norm

from lsst.ip.isr import isrMock
import lsst.afw.image

import galsim

def sigmaClipCorrection(nSigClip):
    """Correct measured sigma to account for clipping.

    If we clip our input data and then measure sigma, the measured
    sigma is smaller than the true value because real points beyond
    the clip threshold have been removed. This is a small effect
    (1.5% at nSigClip=3) when nSigClip >~ 3, but the default
    parameters for crosstalk measurement use nSigClip=2.0, which
    causes the measured sigma to be about 15% smaller than the real
    value. This formula corrects the issue, for the symmetric case
    (upper clip threshold equal to lower clip threshold).

    Parameters
    ----------
    nSigClip : `float`
        Number of sigma the measurement was clipped by.

    Returns
    -------
    scaleFactor : `float`
        Scale factor to increase the measured sigma by.
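
    Examples
    --------
    A minimal usage sketch; ``measuredSigma`` is a hypothetical
    sigma measured with a 2-sigma clip:

    >>> measuredSigma = 10.0
    >>> trueSigma = measuredSigma * sigmaClipCorrection(2.0)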
57 """
58 varFactor = 1.0 - (2 * nSigClip * norm.pdf(nSigClip)) / (norm.cdf(nSigClip) - norm.cdf(-nSigClip))
59 return 1.0 / np.sqrt(varFactor)
def calculateWeightedReducedChi2(measured, model, weightsMeasured, nData, nParsModel):
    """Calculate the weighted reduced chi2.

    Parameters
    ----------
    measured : `numpy.array`
        Array with measured data.
    model : `numpy.array`
        Array with modeled data.
    weightsMeasured : `numpy.array`
        Array with weights for the measured data.
    nData : `int`
        Number of data points.
    nParsModel : `int`
        Number of parameters in the model.

    Returns
    -------
    redWeightedChi2 : `float`
        Reduced weighted chi2.
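
    Examples
    --------
    A minimal sketch with hypothetical data, a one-parameter model,
    and unit weights:

    >>> import numpy as np
    >>> measured = np.array([1.0, 2.1, 2.9])
    >>> model = np.array([1.0, 2.0, 3.0])
    >>> weights = np.ones(3)
    >>> chi2 = calculateWeightedReducedChi2(measured, model, weights, 3, 1)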
82 """
83 wRes = (measured - model)*weightsMeasured
84 return ((wRes*wRes).sum())/(nData-nParsModel)
def makeMockFlats(expTime, gain=1.0, readNoiseElectrons=5, fluxElectrons=1000,
                  randomSeedFlat1=1984, randomSeedFlat2=666, powerLawBfParams=[],
                  expId1=0, expId2=1):
    """Create a pair of mock flats with isrMock.

    Parameters
    ----------
    expTime : `float`
        Exposure time of the flats.
    gain : `float`, optional
        Gain, in e/ADU.
    readNoiseElectrons : `float`, optional
        Read noise rms, in electrons.
    fluxElectrons : `float`, optional
        Flux of flats, in electrons per second.
    randomSeedFlat1 : `int`, optional
        Random seed for the normal distributions for the mean signal
        and noise (flat1).
    randomSeedFlat2 : `int`, optional
        Random seed for the normal distributions for the mean signal
        and noise (flat2).
    powerLawBfParams : `list`, optional
        Parameters for `galsim.cdmodel.PowerLawCD` to simulate the
        brighter-fatter effect.
    expId1 : `int`, optional
        Exposure ID for the first flat.
    expId2 : `int`, optional
        Exposure ID for the second flat.

    Returns
    -------
    flatExp1 : `lsst.afw.image.exposure.ExposureF`
        First exposure of the flat field pair.
    flatExp2 : `lsst.afw.image.exposure.ExposureF`
        Second exposure of the flat field pair.

    Notes
    -----
    The parameters of `galsim.cdmodel.PowerLawCD` are `n, r0, t0, rx,
    tx, r, t, alpha`. For more information about their meaning, see
    the Galsim documentation
    https://galsim-developers.github.io/GalSim/_build/html/_modules/galsim/cdmodel.html # noqa: W505
    and Gruen+15 (1501.02802).

    Example: galsim.cdmodel.PowerLawCD(8, 1.1e-7, 1.1e-7, 1.0e-8,
    1.0e-8, 1.0e-9, 1.0e-9, 2.0)
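
    A minimal usage sketch (values are hypothetical; this requires
    the LSST stack at runtime, so it is skipped in doctests):

    >>> flat1, flat2 = makeMockFlats(expTime=15.0, gain=1.5)  # doctest: +SKIP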
133 """
134 flatFlux = fluxElectrons # e/s
135 flatMean = flatFlux*expTime # e
136 readNoise = readNoiseElectrons # e
138 mockImageConfig = isrMock.IsrMock.ConfigClass()
140 mockImageConfig.flatDrop = 0.99999
141 mockImageConfig.isTrimmed = True
143 flatExp1 = isrMock.FlatMock(config=mockImageConfig).run()
144 flatExp2 = flatExp1.clone()
145 (shapeY, shapeX) = flatExp1.getDimensions()
146 flatWidth = np.sqrt(flatMean)
148 rng1 = np.random.RandomState(randomSeedFlat1)
149 flatData1 = rng1.normal(flatMean, flatWidth, (shapeX, shapeY)) + rng1.normal(0.0, readNoise,
150 (shapeX, shapeY))
151 rng2 = np.random.RandomState(randomSeedFlat2)
152 flatData2 = rng2.normal(flatMean, flatWidth, (shapeX, shapeY)) + rng2.normal(0.0, readNoise,
153 (shapeX, shapeY))
154 # Simulate BF with power law model in galsim
155 if len(powerLawBfParams):
156 if not len(powerLawBfParams) == 8:
157 raise RuntimeError("Wrong number of parameters for `galsim.cdmodel.PowerLawCD`. "
158 f"Expected 8; passed {len(powerLawBfParams)}.")
159 cd = galsim.cdmodel.PowerLawCD(*powerLawBfParams)
160 tempFlatData1 = galsim.Image(flatData1)
161 temp2FlatData1 = cd.applyForward(tempFlatData1)
163 tempFlatData2 = galsim.Image(flatData2)
164 temp2FlatData2 = cd.applyForward(tempFlatData2)
166 flatExp1.image.array[:] = temp2FlatData1.array/gain # ADU
167 flatExp2.image.array[:] = temp2FlatData2.array/gain # ADU
168 else:
169 flatExp1.image.array[:] = flatData1/gain # ADU
170 flatExp2.image.array[:] = flatData2/gain # ADU
172 visitInfoExp1 = lsst.afw.image.VisitInfo(exposureId=expId1, exposureTime=expTime)
173 visitInfoExp2 = lsst.afw.image.VisitInfo(exposureId=expId2, exposureTime=expTime)
175 flatExp1.info.id = expId1
176 flatExp1.getInfo().setVisitInfo(visitInfoExp1)
177 flatExp2.info.id = expId2
178 flatExp2.getInfo().setVisitInfo(visitInfoExp2)
180 return flatExp1, flatExp2
def irlsFit(initialParams, dataX, dataY, function, weightsY=None, weightType='Cauchy', scaleResidual=True):
    """Iteratively reweighted least squares fit.

    This uses `lsst.cp.pipe.utils.fitLeastSq`, but applies weights
    based on the Cauchy distribution by default. Other weight options
    are implemented. See e.g. Holland and Welsch, 1977,
    doi:10.1080/03610927708827533.

    Parameters
    ----------
    initialParams : `list` [`float`]
        Starting parameters.
    dataX : `numpy.array`, (N,)
        Abscissa data.
    dataY : `numpy.array`, (N,)
        Ordinate data.
    function : callable
        Function to fit.
    weightsY : `numpy.array`, (N,), optional
        Weights to apply to the data.
    weightType : `str`, optional
        Type of weighting to use. One of Cauchy, Anderson, bisquare,
        box, Welsch, Huber, logistic, or Fair.
    scaleResidual : `bool`, optional
        If True, the residual is scaled by the sqrt of the Y values.

    Returns
    -------
    polyFit : `list` [`float`]
        Final best fit parameters.
    polyFitErr : `list` [`float`]
        Final errors on fit parameters.
    chiSq : `float`
        Reduced chi squared.
    weightsY : `list` [`float`]
        Final weights used for each point.

    Raises
    ------
    RuntimeError
        Raised if an unknown weightType string is passed.
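
    Examples
    --------
    A minimal sketch fitting a straight line with the default Cauchy
    weighting (hypothetical, noise-free data):

    >>> import numpy as np
    >>> x = np.linspace(1.0, 10.0, 20)
    >>> y = 2.0*x + 1.0
    >>> pars, parErrs, chiSq, weights = irlsFit([1.0, 1.0], x, y, funcPolynomial)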
224 """
225 if not weightsY:
226 weightsY = np.ones_like(dataX)
228 polyFit, polyFitErr, chiSq = fitLeastSq(initialParams, dataX, dataY, function, weightsY=weightsY)
229 for iteration in range(10):
230 resid = np.abs(dataY - function(polyFit, dataX))
231 if scaleResidual:
232 resid = resid / np.sqrt(dataY)
233 if weightType == 'Cauchy':
234 # Use Cauchy weighting. This is a soft weight.
235 # At [2, 3, 5, 10] sigma, weights are [.59, .39, .19, .05].
236 Z = resid / 2.385
237 weightsY = 1.0 / (1.0 + np.square(Z))
238 elif weightType == 'Anderson':
239 # Anderson+1972 weighting. This is a hard weight.
240 # At [2, 3, 5, 10] sigma, weights are [.67, .35, 0.0, 0.0].
241 Z = resid / (1.339 * np.pi)
242 weightsY = np.where(Z < 1.0, np.sinc(Z), 0.0)
243 elif weightType == 'bisquare':
244 # Beaton and Tukey (1974) biweight. This is a hard weight.
245 # At [2, 3, 5, 10] sigma, weights are [.81, .59, 0.0, 0.0].
246 Z = resid / 4.685
247 weightsY = np.where(Z < 1.0, 1.0 - np.square(Z), 0.0)
248 elif weightType == 'box':
249 # Hinich and Talwar (1975). This is a hard weight.
250 # At [2, 3, 5, 10] sigma, weights are [1.0, 0.0, 0.0, 0.0].
251 weightsY = np.where(resid < 2.795, 1.0, 0.0)
252 elif weightType == 'Welsch':
253 # Dennis and Welsch (1976). This is a hard weight.
254 # At [2, 3, 5, 10] sigma, weights are [.64, .36, .06, 1e-5].
255 Z = resid / 2.985
256 weightsY = np.exp(-1.0 * np.square(Z))
257 elif weightType == 'Huber':
258 # Huber (1964) weighting. This is a soft weight.
259 # At [2, 3, 5, 10] sigma, weights are [.67, .45, .27, .13].
260 Z = resid / 1.345
261 weightsY = np.where(Z < 1.0, 1.0, 1 / Z)
262 elif weightType == 'logistic':
263 # Logistic weighting. This is a soft weight.
264 # At [2, 3, 5, 10] sigma, weights are [.56, .40, .24, .12].
265 Z = resid / 1.205
266 weightsY = np.tanh(Z) / Z
267 elif weightType == 'Fair':
268 # Fair (1974) weighting. This is a soft weight.
269 # At [2, 3, 5, 10] sigma, weights are [.41, .32, .22, .12].
270 Z = resid / 1.4
271 weightsY = (1.0 / (1.0 + (Z)))
272 else:
273 raise RuntimeError(f"Unknown weighting type: {weightType}")
274 polyFit, polyFitErr, chiSq = fitLeastSq(initialParams, dataX, dataY, function, weightsY=weightsY)
276 return polyFit, polyFitErr, chiSq, weightsY
def fitLeastSq(initialParams, dataX, dataY, function, weightsY=None):
    """Do a fit and estimate the parameter errors using
    scipy.optimize.leastsq.

    optimize.leastsq returns the fractional covariance matrix. To
    estimate the standard deviation of the fit parameters, multiply
    the entries of this matrix by the unweighted reduced chi squared
    and take the square root of the diagonal elements.

    Parameters
    ----------
    initialParams : `list` [`float`]
        Initial values for the fit parameters. For ptcFitType=POLYNOMIAL,
        its length determines the degree of the polynomial.
    dataX : `numpy.array`, (N,)
        Data in the abscissa axis.
    dataY : `numpy.array`, (N,)
        Data in the ordinate axis.
    function : callable object (function)
        Function to fit the data with.
    weightsY : `numpy.array`, (N,), optional
        Weights of the data in the ordinate axis.

    Returns
    -------
    pFitSingleLeastSquares : `list` [`float`]
        List with fitted parameters.
    pErrSingleLeastSquares : `list` [`float`]
        List with errors for fitted parameters.
    reducedChiSqSingleLeastSquares : `float`
        Reduced chi squared, unweighted if weightsY is not provided.
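
    Examples
    --------
    A minimal sketch fitting a first-order polynomial to hypothetical
    data:

    >>> import numpy as np
    >>> x = np.arange(10, dtype=float)
    >>> y = 3.0 + 0.5*x
    >>> pars, parErrs, chiSq = fitLeastSq([1.0, 1.0], x, y, funcPolynomial)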
311 """
312 if weightsY is None:
313 weightsY = np.ones(len(dataX))
315 def errFunc(p, x, y, weightsY=None):
316 if weightsY is None:
317 weightsY = np.ones(len(x))
318 return (function(p, x) - y)*weightsY
320 pFit, pCov, infoDict, errMessage, success = leastsq(errFunc, initialParams,
321 args=(dataX, dataY, weightsY), full_output=1,
322 epsfcn=0.0001)
324 if (len(dataY) > len(initialParams)) and pCov is not None:
325 reducedChiSq = calculateWeightedReducedChi2(dataY, function(pFit, dataX), weightsY, len(dataY),
326 len(initialParams))
327 pCov *= reducedChiSq
328 else:
329 pCov = np.zeros((len(initialParams), len(initialParams)))
330 pCov[:, :] = np.nan
331 reducedChiSq = np.nan
333 errorVec = []
334 for i in range(len(pFit)):
335 errorVec.append(np.fabs(pCov[i][i])**0.5)
337 pFitSingleLeastSquares = pFit
338 pErrSingleLeastSquares = np.array(errorVec)
340 return pFitSingleLeastSquares, pErrSingleLeastSquares, reducedChiSq
def fitBootstrap(initialParams, dataX, dataY, function, weightsY=None, confidenceSigma=1.):
    """Do a fit using least squares and bootstrap to estimate parameter errors.

    The bootstrap error bars are calculated by fitting 100 random data sets.

    Parameters
    ----------
    initialParams : `list` [`float`]
        Initial values for the fit parameters. For ptcFitType=POLYNOMIAL,
        its length determines the degree of the polynomial.
    dataX : `numpy.array`, (N,)
        Data in the abscissa axis.
    dataY : `numpy.array`, (N,)
        Data in the ordinate axis.
    function : callable object (function)
        Function to fit the data with.
    weightsY : `numpy.array`, (N,), optional
        Weights of the data in the ordinate axis.
    confidenceSigma : `float`, optional
        Number of sigmas that determine the confidence interval for
        the bootstrap errors.

    Returns
    -------
    pFitBootstrap : `list` [`float`]
        List with fitted parameters.
    pErrBootstrap : `list` [`float`]
        List with errors for fitted parameters.
    reducedChiSqBootstrap : `float`
        Reduced chi squared, unweighted if weightsY is not provided.
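
    Examples
    --------
    A minimal sketch on hypothetical, noise-free data; with zero
    residuals the bootstrap errors collapse to zero:

    >>> import numpy as np
    >>> x = np.arange(20, dtype=float)
    >>> y = 1.0 + 2.0*x
    >>> pars, parErrs, chiSq = fitBootstrap([1.0, 1.0], x, y, funcPolynomial)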
373 """
374 if weightsY is None:
375 weightsY = np.ones(len(dataX))
377 def errFunc(p, x, y, weightsY):
378 if weightsY is None:
379 weightsY = np.ones(len(x))
380 return (function(p, x) - y)*weightsY
382 # Fit first time
383 pFit, _ = leastsq(errFunc, initialParams, args=(dataX, dataY, weightsY), full_output=0)
385 # Get the stdev of the residuals
386 residuals = errFunc(pFit, dataX, dataY, weightsY)
387 # 100 random data sets are generated and fitted
388 pars = []
389 for i in range(100):
390 randomDelta = np.random.normal(0., np.fabs(residuals), len(dataY))
391 randomDataY = dataY + randomDelta
392 randomFit, _ = leastsq(errFunc, initialParams,
393 args=(dataX, randomDataY, weightsY), full_output=0)
394 pars.append(randomFit)
395 pars = np.array(pars)
396 meanPfit = np.mean(pars, 0)
398 # confidence interval for parameter estimates
399 errPfit = confidenceSigma*np.std(pars, 0)
400 pFitBootstrap = meanPfit
401 pErrBootstrap = errPfit
403 reducedChiSq = calculateWeightedReducedChi2(dataY, function(pFitBootstrap, dataX), weightsY, len(dataY),
404 len(initialParams))
405 return pFitBootstrap, pErrBootstrap, reducedChiSq
def funcPolynomial(pars, x):
    """Polynomial function definition.

    Parameters
    ----------
    pars : `list`
        Polynomial coefficients. Its length determines the polynomial order.
    x : `numpy.array`, (N,)
        Abscissa array.

    Returns
    -------
    y : `numpy.array`, (N,)
        Ordinate array after evaluating a polynomial of order
        len(pars)-1 at `x`.
    """
    return poly.polyval(x, [*pars])


def funcAstier(pars, x):
    """Single brighter-fatter parameter model for PTC; Equation 16 of
    Astier+19.

    Parameters
    ----------
    pars : `list`
        Parameters of the model: a00 (brighter-fatter), gain (e/ADU),
        and noise (e^2).
    x : `numpy.array`, (N,)
        Signal mu (ADU).

    Returns
    -------
    y : `numpy.array`, (N,)
        C_00 (variance) in ADU^2.
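
    Examples
    --------
    A minimal sketch evaluating the PTC model at hypothetical
    parameter values (a00, gain, noise):

    >>> import numpy as np
    >>> mu = np.linspace(100.0, 50000.0, 10)  # signal in ADU
    >>> var = funcAstier([1.0e-6, 1.5, 5.0], mu)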
443 """
444 a00, gain, noise = pars
445 return 0.5/(a00*gain*gain)*(np.exp(2*a00*x*gain)-1) + noise/(gain*gain) # C_00
def arrangeFlatsByExpTime(exposureList, exposureIdList):
    """Arrange exposures by exposure time.

    Parameters
    ----------
    exposureList : `list` [`lsst.pipe.base.connections.DeferredDatasetRef`]
        Input list of exposure references.
    exposureIdList : `list` [`int`]
        List of exposure IDs as obtained by dataId[`exposure`].

    Returns
    -------
    flatsAtExpTime : `dict` [`float`,
                     `list` [(`lsst.pipe.base.connections.DeferredDatasetRef`,
                     `int`)]]
        Dictionary that groups references to flat-field exposures
        (and their IDs) that have the same exposure time (seconds).
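
    Examples
    --------
    A minimal sketch; ``_FakeRef`` is a hypothetical stand-in for the
    `DeferredDatasetRef` objects used in production:

    >>> import types
    >>> class _FakeRef:
    ...     def __init__(self, expTime):
    ...         self.visitInfo = types.SimpleNamespace(exposureTime=expTime)
    ...     def get(self, component=None):
    ...         return self.visitInfo
    >>> refs = [_FakeRef(5.0), _FakeRef(5.0), _FakeRef(10.0)]
    >>> groups = arrangeFlatsByExpTime(refs, [1, 2, 3])
    >>> sorted(groups.keys())
    [5.0, 10.0]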
465 """
466 flatsAtExpTime = {}
467 assert len(exposureList) == len(exposureIdList), "Different lengths for exp. list and exp. ID lists"
468 for expRef, expId in zip(exposureList, exposureIdList):
469 expTime = expRef.get(component='visitInfo').exposureTime
470 listAtExpTime = flatsAtExpTime.setdefault(expTime, [])
471 listAtExpTime.append((expRef, expId))
473 return flatsAtExpTime
def arrangeFlatsByExpFlux(exposureList, exposureIdList, fluxKeyword):
    """Arrange exposures by exposure flux.

    Parameters
    ----------
    exposureList : `list` [`lsst.pipe.base.connections.DeferredDatasetRef`]
        Input list of exposure references.
    exposureIdList : `list` [`int`]
        List of exposure IDs as obtained by dataId[`exposure`].
    fluxKeyword : `str`
        Header keyword that contains the flux per exposure.

    Returns
    -------
    flatsAtExpFlux : `dict` [`float`,
                     `list` [(`lsst.pipe.base.connections.DeferredDatasetRef`,
                     `int`)]]
        Dictionary that groups references to flat-field exposures
        (and their IDs) that have the same flux.
    """
    flatsAtExpFlux = {}
    assert len(exposureList) == len(exposureIdList), "Different lengths for exp. list and exp. ID lists"
    for expRef, expId in zip(exposureList, exposureIdList):
        # Get the flux from the header, assuming it is in the metadata.
        expFlux = expRef.get().getMetadata()[fluxKeyword]
        listAtExpFlux = flatsAtExpFlux.setdefault(expFlux, [])
        listAtExpFlux.append((expRef, expId))

    return flatsAtExpFlux


def arrangeFlatsByExpId(exposureList, exposureIdList):
    """Arrange exposures by exposure ID.

    There is no guarantee that this will properly group exposures, but
    it allows a sequence of flats that have different illumination
    (despite having the same exposure time) to be processed.

    Parameters
    ----------
    exposureList : `list` [`lsst.pipe.base.connections.DeferredDatasetRef`]
        Input list of exposure references.
    exposureIdList : `list` [`int`]
        List of exposure IDs as obtained by dataId[`exposure`].

    Returns
    -------
    flatsAtExpId : `dict` [`int`,
                   `list` [(`lsst.pipe.base.connections.DeferredDatasetRef`,
                   `int`)]]
        Dictionary that groups references to flat-field exposures (and their
        IDs) sequentially by their exposure ID.

    Notes
    -----
    This algorithm sorts the input exposure references by their exposure
    ID, and then assigns each pair of exposure references (exp_j, exp_{j+1})
    to pair k, such that 2*k = j, where j is the Python index of one of the
    exposure references (starting from zero). By checking for an IndexError
    when looking up the partner exposure, we ensure that only fully
    populated pairs are returned.
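
    Examples
    --------
    A minimal sketch; plain strings stand in for the dataset
    references, since only the IDs are used for pairing. The odd,
    unpartnered exposure (ID 5) is dropped:

    >>> refs = ['refA', 'refB', 'refC', 'refD', 'refE']
    >>> pairs = arrangeFlatsByExpId(refs, [2, 1, 4, 3, 5])
    >>> pairs[0]
    [('refB', 1), ('refA', 2)]
    >>> sorted(pairs.keys())
    [0, 1]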
538 """
539 flatsAtExpId = {}
540 assert len(exposureList) == len(exposureIdList), "Different lengths for exp. list and exp. ID lists"
541 # Sort exposures by expIds, which are in the second list `exposureIdList`.
542 sortedExposures = sorted(zip(exposureList, exposureIdList), key=lambda pair: pair[1])
544 for jPair, expTuple in enumerate(sortedExposures):
545 if (jPair + 1) % 2:
546 kPair = jPair // 2
547 listAtExpId = flatsAtExpId.setdefault(kPair, [])
548 try:
549 listAtExpId.append(expTuple)
550 listAtExpId.append(sortedExposures[jPair + 1])
551 except IndexError:
552 pass
554 return flatsAtExpId
class CovFastFourierTransform:
    """A class to compute (via FFT) the correlation function of nearby
    pixels.

    Implements the appendix of Astier+19.

    Parameters
    ----------
    diff : `numpy.array`
        Image in which to calculate the covariances (e.g., the
        difference image of two flats).
    w : `numpy.array`
        Weight image (mask): it should consist of 1's (good pixels)
        and 0's (bad pixels).
    fftShape : `tuple`
        2d-tuple with the shape of the FFT.
    maxRangeCov : `int`
        Maximum range for the covariances.
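
    Examples
    --------
    A minimal sketch on a small random image with every pixel
    unmasked; the FFT shape must exceed the image shape by more than
    maxRangeCov + 1 in each dimension:

    >>> import numpy as np
    >>> rng = np.random.RandomState(42)
    >>> diff = rng.normal(0.0, 1.0, (64, 64))
    >>> w = np.ones_like(diff)
    >>> covFft = CovFastFourierTransform(diff, w, (96, 96), 8)
    >>> covTuples = covFft.reportCovFastFourierTransform(8)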
574 """
576 def __init__(self, diff, w, fftShape, maxRangeCov):
577 # check that the zero padding implied by "fft_shape"
578 # is large enough for the required correlation range
579 assert(fftShape[0] > diff.shape[0]+maxRangeCov+1)
580 assert(fftShape[1] > diff.shape[1]+maxRangeCov+1)
581 # for some reason related to numpy.fft.rfftn,
582 # the second dimension should be even, so
583 if fftShape[1]%2 == 1:
584 fftShape = (fftShape[0], fftShape[1]+1)
585 tIm = np.fft.rfft2(diff*w, fftShape)
586 tMask = np.fft.rfft2(w, fftShape)
587 # sum of "squares"
588 self.pCov = np.fft.irfft2(tIm*tIm.conjugate())
589 # sum of values
590 self.pMean = np.fft.irfft2(tIm*tMask.conjugate())
591 # number of w!=0 pixels.
592 self.pCount = np.fft.irfft2(tMask*tMask.conjugate())

    def cov(self, dx, dy):
        """Covariance for (dx, dy), averaged with (dx, -dy) if both are
        non-zero.

        Implements the appendix of Astier+19.

        Parameters
        ----------
        dx : `int`
            Lag in x.
        dy : `int`
            Lag in y.

        Returns
        -------
        0.5*(cov1+cov2) : `float`
            Covariance at (dx, dy) lag.
        npix1+npix2 : `int`
            Number of pixels used in the covariance calculation.
        """
        # Compensate for rounding errors.
        nPix1 = int(round(self.pCount[dy, dx]))
        cov1 = self.pCov[dy, dx]/nPix1 - self.pMean[dy, dx]*self.pMean[-dy, -dx]/(nPix1*nPix1)
        if (dx == 0 or dy == 0):
            return cov1, nPix1
        nPix2 = int(round(self.pCount[-dy, dx]))
        cov2 = self.pCov[-dy, dx]/nPix2 - self.pMean[-dy, dx]*self.pMean[dy, -dx]/(nPix2*nPix2)
        return 0.5*(cov1 + cov2), nPix1 + nPix2

    def reportCovFastFourierTransform(self, maxRange):
        """Produce a list of tuples with covariances.

        Implements the appendix of Astier+19.

        Parameters
        ----------
        maxRange : `int`
            Maximum range of covariances.

        Returns
        -------
        tupleVec : `list`
            List with covariance tuples.
        """
        tupleVec = []
        # (dy, dx) = (0, 0) has to be first.
        for dy in range(maxRange + 1):
            for dx in range(maxRange + 1):
                cov, npix = self.cov(dx, dy)
                if (dx == 0 and dy == 0):
                    var = cov
                tupleVec.append((dx, dy, var, cov, npix))
        return tupleVec


def getFitDataFromCovariances(i, j, mu, fullCov, fullCovModel, fullCovSqrtWeights, gain=1.0,
                              divideByMu=False, returnMasked=False):
    """Get measured signal and covariance, covariance model, weights,
    and mask at covariance lag (i, j).

    Parameters
    ----------
    i : `int`
        Lag for the covariance matrix.
    j : `int`
        Lag for the covariance matrix.
    mu : `list`
        Mean signal values.
    fullCov : `list` of `numpy.array`
        Measured covariance matrices at each mean signal level in mu.
    fullCovModel : `list` of `numpy.array`
        List of modeled covariances at each mean signal level in mu.
    fullCovSqrtWeights : `list` of `numpy.array`
        List of square root of measured covariances at each mean
        signal level in mu.
    gain : `float`, optional
        Gain, in e-/ADU. If other than 1.0 (default), the returned
        quantities will be in electrons or powers of electrons.
    divideByMu : `bool`, optional
        Divide the returned covariance, model, and weights by the
        mean signal mu?
    returnMasked : `bool`, optional
        Use the mask (based on weights) in the returned arrays (mu,
        covariance, and model)?

    Returns
    -------
    mu : `numpy.array`
        List of signal values at (i, j).
    covariance : `numpy.array`
        Covariance at (i, j) at each mean signal mu value (fullCov[:, i, j]).
    covarianceModel : `numpy.array`
        Covariance model at (i, j).
    weights : `numpy.array`
        Weights at (i, j).
    maskFromWeights : `numpy.array`, optional
        Boolean mask of the covariance at (i, j), where the weights
        differ from 0.
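
    Examples
    --------
    A minimal sketch with small, hypothetical covariance stacks (one
    3x3 covariance matrix per signal level):

    >>> import numpy as np
    >>> nPts, r = 5, 3
    >>> mu = np.linspace(1000.0, 5000.0, nPts)
    >>> fullCov = np.random.rand(nPts, r, r)
    >>> fullCovModel = fullCov.copy()
    >>> fullCovSqrtWeights = np.ones((nPts, r, r))
    >>> out = getFitDataFromCovariances(0, 0, mu, fullCov, fullCovModel,
    ...                                 fullCovSqrtWeights, divideByMu=True)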
691 """
692 mu = np.array(mu)
693 fullCov = np.array(fullCov)
694 fullCovModel = np.array(fullCovModel)
695 fullCovSqrtWeights = np.array(fullCovSqrtWeights)
696 covariance = fullCov[:, i, j]*(gain**2)
697 covarianceModel = fullCovModel[:, i, j]*(gain**2)
698 weights = fullCovSqrtWeights[:, i, j]/(gain**2)
700 maskFromWeights = weights != 0
701 if returnMasked:
702 weights = weights[maskFromWeights]
703 covarianceModel = covarianceModel[maskFromWeights]
704 mu = mu[maskFromWeights]
705 covariance = covariance[maskFromWeights]
707 if divideByMu:
708 covariance /= mu
709 covarianceModel /= mu
710 weights *= mu
711 return mu, covariance, covarianceModel, weights, maskFromWeights
def symmetrize(inputArray):
    """Copy array over 4 quadrants prior to convolution.

    Parameters
    ----------
    inputArray : `numpy.array`
        Input array to symmetrize.

    Returns
    -------
    aSym : `numpy.array`
        Symmetrized array.
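
    Examples
    --------
    A minimal sketch; the last two dimensions (r2, r1) grow to
    (2*r2 - 1, 2*r1 - 1):

    >>> import numpy as np
    >>> a = np.arange(6.0).reshape(2, 3)
    >>> symmetrize(a).shape
    (3, 5)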
726 """
727 targetShape = list(inputArray.shape)
728 r1, r2 = inputArray.shape[-1], inputArray.shape[-2]
729 targetShape[-1] = 2*r1-1
730 targetShape[-2] = 2*r2-1
731 aSym = np.ndarray(tuple(targetShape))
732 aSym[..., r2-1:, r1-1:] = inputArray
733 aSym[..., r2-1:, r1-1::-1] = inputArray
734 aSym[..., r2-1::-1, r1-1::-1] = inputArray
735 aSym[..., r2-1::-1, r1-1:] = inputArray
737 return aSym
def ddict2dict(d):
    """Convert nested default dictionaries to regular dictionaries.

    This is needed to prevent yaml persistence issues.

    Parameters
    ----------
    d : `defaultdict`
        A possibly nested set of `defaultdict`.

    Returns
    -------
    dict : `dict`
        A possibly nested set of `dict`.
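
    Examples
    --------
    A minimal sketch converting a nested `defaultdict`:

    >>> from collections import defaultdict
    >>> d = defaultdict(lambda: defaultdict(int))
    >>> d['a']['b'] = 1
    >>> type(ddict2dict(d)) is dict
    True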
754 """
755 for k, v in d.items():
756 if isinstance(v, dict):
757 d[k] = ddict2dict(v)
758 return dict(d)