Coverage for python/lsst/cp/pipe/utils.py: 9% of 214 statements (coverage.py v6.5.0, created at 2022-12-23 02:31 -0800)
# This file is part of cp_pipe.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#

__all__ = ['ddict2dict', 'CovFastFourierTransform']

import numpy as np
from scipy.optimize import leastsq
import numpy.polynomial.polynomial as poly
from scipy.stats import norm

from lsst.ip.isr import isrMock
import lsst.afw.image

import galsim


def sigmaClipCorrection(nSigClip):
    """Correct measured sigma to account for clipping.

    If we clip our input data and then measure sigma, the measured
    sigma is smaller than the true value because real points beyond
    the clip threshold have been removed. This is a small effect
    (1.5% at nSigClip=3) when nSigClip >~ 3, but the default
    parameters for crosstalk measurement use nSigClip=2.0, which
    causes the measured sigma to be about 15% smaller than the real
    value. This formula corrects the issue for the symmetric case
    (upper clip threshold equal to lower clip threshold).

    Parameters
    ----------
    nSigClip : `float`
        Number of sigma the measurement was clipped by.

    Returns
    -------
    scaleFactor : `float`
        Scale factor to increase the measured sigma by.
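
    Examples
    --------
    A minimal illustrative check; clipping at 2 sigma requires
    scaling the measured sigma up by roughly 14%:

    >>> round(float(sigmaClipCorrection(2.0)), 2)
    1.14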
57 """
58 varFactor = 1.0 - (2 * nSigClip * norm.pdf(nSigClip)) / (norm.cdf(nSigClip) - norm.cdf(-nSigClip))
59 return 1.0 / np.sqrt(varFactor)


def calculateWeightedReducedChi2(measured, model, weightsMeasured, nData, nParsModel):
    """Calculate weighted reduced chi2.

    Parameters
    ----------
    measured : `list`
        List with measured data.
    model : `list`
        List with modeled data.
    weightsMeasured : `list`
        List with weights for the measured data.
    nData : `int`
        Number of data points.
    nParsModel : `int`
        Number of parameters in the model.

    Returns
    -------
    redWeightedChi2 : `float`
        Reduced weighted chi2.
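
    Examples
    --------
    A small worked example: a single residual of 1 with unit weight,
    divided by (nData - nParsModel) = 2:

    >>> measured = np.array([1.0, 2.0, 3.0])
    >>> model = np.array([1.0, 2.0, 2.0])
    >>> float(calculateWeightedReducedChi2(measured, model,
    ...                                    np.ones(3), 3, 1))
    0.5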
82 """
83 wRes = (measured - model)*weightsMeasured
84 return ((wRes*wRes).sum())/(nData-nParsModel)


def makeMockFlats(expTime, gain=1.0, readNoiseElectrons=5, fluxElectrons=1000,
                  randomSeedFlat1=1984, randomSeedFlat2=666, powerLawBfParams=[],
                  expId1=0, expId2=1):
    """Create a pair of mock flats with isrMock.

    Parameters
    ----------
    expTime : `float`
        Exposure time of the flats.
    gain : `float`, optional
        Gain, in e/ADU.
    readNoiseElectrons : `float`, optional
        Read noise rms, in electrons.
    fluxElectrons : `float`, optional
        Flux of flats, in electrons per second.
    randomSeedFlat1 : `int`, optional
        Random seed for the normal distributions for the mean signal
        and noise (flat1).
    randomSeedFlat2 : `int`, optional
        Random seed for the normal distributions for the mean signal
        and noise (flat2).
    powerLawBfParams : `list`, optional
        Parameters for `galsim.cdmodel.PowerLawCD` to simulate the
        brighter-fatter effect.
    expId1 : `int`, optional
        Exposure ID for the first flat.
    expId2 : `int`, optional
        Exposure ID for the second flat.

    Returns
    -------
    flatExp1 : `lsst.afw.image.exposure.ExposureF`
        First exposure of the flat field pair.
    flatExp2 : `lsst.afw.image.exposure.ExposureF`
        Second exposure of the flat field pair.

    Notes
    -----
    The parameters of `galsim.cdmodel.PowerLawCD` are `n, r0, t0, rx,
    tx, r, t, alpha`. For more information about their meaning, see
    the Galsim documentation
    https://galsim-developers.github.io/GalSim/_build/html/_modules/galsim/cdmodel.html # noqa: W505
    and Gruen+15 (1501.02802).

    Example: galsim.cdmodel.PowerLawCD(8, 1.1e-7, 1.1e-7, 1.0e-8,
    1.0e-8, 1.0e-9, 1.0e-9, 2.0)
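
    Examples
    --------
    An illustrative call (skipped by doctest, since it requires the
    LSST stack at run time):

    >>> flatExp1, flatExp2 = makeMockFlats(15.0, gain=1.5)  # doctest: +SKIP
    >>> flatExp1.getInfo().getVisitInfo().exposureTime  # doctest: +SKIP
    15.0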
133 """
134 flatFlux = fluxElectrons # e/s
135 flatMean = flatFlux*expTime # e
136 readNoise = readNoiseElectrons # e
138 mockImageConfig = isrMock.IsrMock.ConfigClass()
140 mockImageConfig.flatDrop = 0.99999
141 mockImageConfig.isTrimmed = True
143 flatExp1 = isrMock.FlatMock(config=mockImageConfig).run()
144 flatExp2 = flatExp1.clone()
145 (shapeY, shapeX) = flatExp1.getDimensions()
146 flatWidth = np.sqrt(flatMean)
148 rng1 = np.random.RandomState(randomSeedFlat1)
149 flatData1 = rng1.normal(flatMean, flatWidth, (shapeX, shapeY)) + rng1.normal(0.0, readNoise,
150 (shapeX, shapeY))
151 rng2 = np.random.RandomState(randomSeedFlat2)
152 flatData2 = rng2.normal(flatMean, flatWidth, (shapeX, shapeY)) + rng2.normal(0.0, readNoise,
153 (shapeX, shapeY))
154 # Simulate BF with power law model in galsim
155 if len(powerLawBfParams):
156 if not len(powerLawBfParams) == 8:
157 raise RuntimeError("Wrong number of parameters for `galsim.cdmodel.PowerLawCD`. "
158 f"Expected 8; passed {len(powerLawBfParams)}.")
159 cd = galsim.cdmodel.PowerLawCD(*powerLawBfParams)
160 tempFlatData1 = galsim.Image(flatData1)
161 temp2FlatData1 = cd.applyForward(tempFlatData1)
163 tempFlatData2 = galsim.Image(flatData2)
164 temp2FlatData2 = cd.applyForward(tempFlatData2)
166 flatExp1.image.array[:] = temp2FlatData1.array/gain # ADU
167 flatExp2.image.array[:] = temp2FlatData2.array/gain # ADU
168 else:
169 flatExp1.image.array[:] = flatData1/gain # ADU
170 flatExp2.image.array[:] = flatData2/gain # ADU
172 visitInfoExp1 = lsst.afw.image.VisitInfo(exposureId=expId1, exposureTime=expTime)
173 visitInfoExp2 = lsst.afw.image.VisitInfo(exposureId=expId2, exposureTime=expTime)
175 flatExp1.info.id = expId1
176 flatExp1.getInfo().setVisitInfo(visitInfoExp1)
177 flatExp2.info.id = expId2
178 flatExp2.getInfo().setVisitInfo(visitInfoExp2)
180 return flatExp1, flatExp2


def irlsFit(initialParams, dataX, dataY, function, weightsY=None, weightType='Cauchy', scaleResidual=True):
    """Iteratively reweighted least squares fit.

    This uses `lsst.cp.pipe.utils.fitLeastSq`, but applies weights
    based on the Cauchy distribution by default. Other weight options
    are implemented; see e.g. Holland and Welsch, 1977,
    doi:10.1080/03610927708827533.

    Parameters
    ----------
    initialParams : `list` [`float`]
        Starting parameters.
    dataX : `numpy.array`, (N,)
        Abscissa data.
    dataY : `numpy.array`, (N,)
        Ordinate data.
    function : callable
        Function to fit.
    weightsY : `numpy.array`, (N,), optional
        Weights to apply to the data.
    weightType : `str`, optional
        Type of weighting to use. One of Cauchy, Anderson, bisquare,
        box, Welsch, Huber, logistic, or Fair.
    scaleResidual : `bool`, optional
        If True, the residual is scaled by the sqrt of the Y values.

    Returns
    -------
    polyFit : `list` [`float`]
        Final best fit parameters.
    polyFitErr : `list` [`float`]
        Final errors on fit parameters.
    chiSq : `float`
        Reduced chi squared.
    weightsY : `list` [`float`]
        Final weights used for each point.

    Raises
    ------
    RuntimeError
        Raised if an unknown weightType string is passed.
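
    Examples
    --------
    A minimal sketch fitting a straight line with `funcPolynomial`
    (defined later in this module); with noiseless input the Cauchy
    weights stay at unity and the fit recovers intercept 1, slope 2:

    >>> dataX = np.arange(1, 10, dtype=float)
    >>> dataY = 2.0*dataX + 1.0
    >>> pars, parErrs, chiSq, weights = irlsFit([0.0, 1.5], dataX,
    ...                                         dataY, funcPolynomial)
    >>> bool(np.allclose(pars, [1.0, 2.0]))
    True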
224 """
225 if not weightsY:
226 weightsY = np.ones_like(dataX)
228 polyFit, polyFitErr, chiSq = fitLeastSq(initialParams, dataX, dataY, function, weightsY=weightsY)
    for iteration in range(10):
        resid = np.abs(dataY - function(polyFit, dataX))
        if scaleResidual:
            resid = resid / np.sqrt(dataY)
        if weightType == 'Cauchy':
            # Use Cauchy weighting. This is a soft weight.
            # At [2, 3, 5, 10] sigma, weights are [.59, .39, .19, .05].
            Z = resid / 2.385
            weightsY = 1.0 / (1.0 + np.square(Z))
        elif weightType == 'Anderson':
            # Anderson+1972 weighting. This is a hard weight.
            # At [2, 3, 5, 10] sigma, weights are [.67, .35, 0.0, 0.0].
            Z = resid / (1.339 * np.pi)
            weightsY = np.where(Z < 1.0, np.sinc(Z), 0.0)
        elif weightType == 'bisquare':
            # Beaton and Tukey (1974) biweight. This is a hard weight.
            # At [2, 3, 5, 10] sigma, weights are [.81, .59, 0.0, 0.0].
            Z = resid / 4.685
            weightsY = np.where(Z < 1.0, 1.0 - np.square(Z), 0.0)
        elif weightType == 'box':
            # Hinich and Talwar (1975). This is a hard weight.
            # At [2, 3, 5, 10] sigma, weights are [1.0, 0.0, 0.0, 0.0].
            weightsY = np.where(resid < 2.795, 1.0, 0.0)
        elif weightType == 'Welsch':
            # Dennis and Welsch (1976). This is a hard weight.
            # At [2, 3, 5, 10] sigma, weights are [.64, .36, .06, 1e-5].
            Z = resid / 2.985
            weightsY = np.exp(-1.0 * np.square(Z))
        elif weightType == 'Huber':
            # Huber (1964) weighting. This is a soft weight.
            # At [2, 3, 5, 10] sigma, weights are [.67, .45, .27, .13].
            Z = resid / 1.345
            weightsY = np.where(Z < 1.0, 1.0, 1 / Z)
        elif weightType == 'logistic':
            # Logistic weighting. This is a soft weight.
            # At [2, 3, 5, 10] sigma, weights are [.56, .40, .24, .12].
            Z = resid / 1.205
            weightsY = np.tanh(Z) / Z
        elif weightType == 'Fair':
            # Fair (1974) weighting. This is a soft weight.
            # At [2, 3, 5, 10] sigma, weights are [.41, .32, .22, .12].
            Z = resid / 1.4
            weightsY = 1.0 / (1.0 + Z)
        else:
            raise RuntimeError(f"Unknown weighting type: {weightType}")
        polyFit, polyFitErr, chiSq = fitLeastSq(initialParams, dataX, dataY, function, weightsY=weightsY)

    return polyFit, polyFitErr, chiSq, weightsY


def fitLeastSq(initialParams, dataX, dataY, function, weightsY=None):
    """Do a fit and estimate the parameter errors using
    scipy.optimize.leastsq.

    optimize.leastsq returns the fractional covariance matrix. To
    estimate the standard deviation of the fit parameters, multiply
    the entries of this matrix by the unweighted reduced chi squared
    and take the square root of the diagonal elements.

    Parameters
    ----------
    initialParams : `list` [`float`]
        Initial values for fit parameters. For ptcFitType=POLYNOMIAL,
        its length determines the degree of the polynomial.
    dataX : `numpy.array`, (N,)
        Data in the abscissa axis.
    dataY : `numpy.array`, (N,)
        Data in the ordinate axis.
    function : callable object (function)
        Function to fit the data with.
    weightsY : `numpy.array`, (N,), optional
        Weights of the data in the ordinate axis.

    Returns
    -------
    pFitSingleLeastSquares : `list` [`float`]
        List with fitted parameters.
    pErrSingleLeastSquares : `list` [`float`]
        List with errors for fitted parameters.
    reducedChiSqSingleLeastSquares : `float`
        Reduced chi squared, unweighted if weightsY is not provided.
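
    Examples
    --------
    A minimal sketch fitting a line with `funcPolynomial` (defined
    later in this module):

    >>> dataX = np.arange(10, dtype=float)
    >>> dataY = 3.0 + 0.5*dataX
    >>> pars, parErrs, chiSq = fitLeastSq([1.0, 1.0], dataX, dataY,
    ...                                   funcPolynomial)
    >>> bool(np.allclose(pars, [3.0, 0.5]))
    True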
311 """
312 if weightsY is None:
313 weightsY = np.ones(len(dataX))
315 def errFunc(p, x, y, weightsY=None):
316 if weightsY is None:
317 weightsY = np.ones(len(x))
318 return (function(p, x) - y)*weightsY
320 pFit, pCov, infoDict, errMessage, success = leastsq(errFunc, initialParams,
321 args=(dataX, dataY, weightsY), full_output=1,
322 epsfcn=0.0001)
324 if (len(dataY) > len(initialParams)) and pCov is not None:
325 reducedChiSq = calculateWeightedReducedChi2(dataY, function(pFit, dataX), weightsY, len(dataY),
326 len(initialParams))
327 pCov *= reducedChiSq
328 else:
329 pCov = np.zeros((len(initialParams), len(initialParams)))
330 pCov[:, :] = np.nan
331 reducedChiSq = np.nan
333 errorVec = []
334 for i in range(len(pFit)):
335 errorVec.append(np.fabs(pCov[i][i])**0.5)
337 pFitSingleLeastSquares = pFit
338 pErrSingleLeastSquares = np.array(errorVec)
340 return pFitSingleLeastSquares, pErrSingleLeastSquares, reducedChiSq


def fitBootstrap(initialParams, dataX, dataY, function, weightsY=None, confidenceSigma=1.):
    """Do a fit using least squares and bootstrap to estimate parameter errors.

    The bootstrap error bars are calculated by fitting 100 random data sets.

    Parameters
    ----------
    initialParams : `list` [`float`]
        Initial values for fit parameters. For ptcFitType=POLYNOMIAL,
        its length determines the degree of the polynomial.
    dataX : `numpy.array`, (N,)
        Data in the abscissa axis.
    dataY : `numpy.array`, (N,)
        Data in the ordinate axis.
    function : callable object (function)
        Function to fit the data with.
    weightsY : `numpy.array`, (N,), optional
        Weights of the data in the ordinate axis.
    confidenceSigma : `float`, optional
        Number of sigmas that determine the confidence interval for
        the bootstrap errors.

    Returns
    -------
    pFitBootstrap : `list` [`float`]
        List with fitted parameters.
    pErrBootstrap : `list` [`float`]
        List with errors for fitted parameters.
    reducedChiSqBootstrap : `float`
        Reduced chi squared, unweighted if weightsY is not provided.
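
    Examples
    --------
    A minimal sketch with noiseless data, for which the resampled
    fits all agree and the bootstrap errors are essentially zero:

    >>> dataX = np.arange(10, dtype=float)
    >>> dataY = 3.0 + 0.5*dataX
    >>> pars, parErrs, chiSq = fitBootstrap([1.0, 1.0], dataX, dataY,
    ...                                     funcPolynomial)
    >>> bool(np.allclose(pars, [3.0, 0.5]))
    True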
373 """
374 if weightsY is None:
375 weightsY = np.ones(len(dataX))
377 def errFunc(p, x, y, weightsY):
378 if weightsY is None:
379 weightsY = np.ones(len(x))
380 return (function(p, x) - y)*weightsY
382 # Fit first time
383 pFit, _ = leastsq(errFunc, initialParams, args=(dataX, dataY, weightsY), full_output=0)
385 # Get the stdev of the residuals
386 residuals = errFunc(pFit, dataX, dataY, weightsY)
387 # 100 random data sets are generated and fitted
388 pars = []
389 for i in range(100):
390 randomDelta = np.random.normal(0., np.fabs(residuals), len(dataY))
391 randomDataY = dataY + randomDelta
392 randomFit, _ = leastsq(errFunc, initialParams,
393 args=(dataX, randomDataY, weightsY), full_output=0)
394 pars.append(randomFit)
395 pars = np.array(pars)
396 meanPfit = np.mean(pars, 0)
398 # confidence interval for parameter estimates
399 errPfit = confidenceSigma*np.std(pars, 0)
400 pFitBootstrap = meanPfit
401 pErrBootstrap = errPfit
403 reducedChiSq = calculateWeightedReducedChi2(dataY, function(pFitBootstrap, dataX), weightsY, len(dataY),
404 len(initialParams))
405 return pFitBootstrap, pErrBootstrap, reducedChiSq


def funcPolynomial(pars, x):
    """Polynomial function definition.

    Parameters
    ----------
    pars : `list`
        Polynomial coefficients. Its length determines the polynomial order.
    x : `numpy.array`, (N,)
        Abscissa array.

    Returns
    -------
    y : `numpy.array`, (N,)
        Ordinate array after evaluating polynomial of order
        len(pars)-1 at `x`.
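
    Examples
    --------
    Evaluating 1 + 2x + 3x^2:

    >>> funcPolynomial([1.0, 2.0, 3.0], np.array([0.0, 1.0, 2.0]))
    array([ 1.,  6., 17.])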
423 """
424 return poly.polyval(x, [*pars])


def funcAstier(pars, x):
    """Single brighter-fatter parameter model for PTC; Equation 16 of
    Astier+19.

    Parameters
    ----------
    pars : `list`
        Parameters of the model: a00 (brighter-fatter), gain (e/ADU),
        and noise (e^2).
    x : `numpy.array`, (N,)
        Signal mu (ADU).

    Returns
    -------
    y : `numpy.array`, (N,)
        C_00 (variance) in ADU^2.
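
    Examples
    --------
    At mu = 0 the exponential term vanishes and the model reduces to
    noise/gain^2; here 22.5 e^2 at gain 1.5 e/ADU gives 10 ADU^2:

    >>> float(funcAstier([1e-6, 1.5, 22.5], np.array([0.0]))[0])
    10.0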
443 """
444 a00, gain, noise = pars
445 return 0.5/(a00*gain*gain)*(np.exp(2*a00*x*gain)-1) + noise/(gain*gain) # C_00


def arrangeFlatsByExpTime(exposureList, exposureIdList):
    """Arrange exposures by exposure time.

    Parameters
    ----------
    exposureList : `list` [`lsst.pipe.base.connections.DeferredDatasetRef`]
        Input list of exposure references.
    exposureIdList : `list` [`int`]
        List of exposure IDs as obtained by dataId["exposure"].

    Returns
    -------
    flatsAtExpTime : `dict` [`float`,
            `list` [(`lsst.pipe.base.connections.DeferredDatasetRef`, `int`)]]
        Dictionary that groups references to flat-field exposures
        (and their IDs) that have the same exposure time (seconds).
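
    Examples
    --------
    A minimal sketch using a hypothetical stand-in for the dataset
    references; any object whose ``get(component='visitInfo')``
    returns something with an ``exposureTime`` attribute will do:

    >>> from types import SimpleNamespace
    >>> class FakeRef:  # hypothetical stand-in, not a stack class
    ...     def __init__(self, expTime):
    ...         self.expTime = expTime
    ...     def get(self, component=None):
    ...         return SimpleNamespace(exposureTime=self.expTime)
    >>> refs = [FakeRef(1.0), FakeRef(1.0), FakeRef(5.0)]
    >>> grouped = arrangeFlatsByExpTime(refs, [10, 11, 12])
    >>> sorted(grouped.keys())
    [1.0, 5.0]
    >>> [expId for _, expId in grouped[1.0]]
    [10, 11]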
465 """
466 flatsAtExpTime = {}
467 assert len(exposureList) == len(exposureIdList), "Different lengths for exp. list and exp. ID lists"
468 for expRef, expId in zip(exposureList, exposureIdList):
469 expTime = expRef.get(component='visitInfo').exposureTime
470 listAtExpTime = flatsAtExpTime.setdefault(expTime, [])
471 listAtExpTime.append((expRef, expId))
473 return flatsAtExpTime


def arrangeFlatsByExpId(exposureList, exposureIdList):
    """Arrange exposures by exposure ID.

    There is no guarantee that this will properly group exposures,
    but it allows a sequence of flats that have different
    illumination (despite having the same exposure time) to be
    processed.

    Parameters
    ----------
    exposureList : `list` [`lsst.pipe.base.connections.DeferredDatasetRef`]
        Input list of exposure references.
    exposureIdList : `list` [`int`]
        List of exposure IDs as obtained by dataId["exposure"].

    Returns
    -------
    flatsAtExpId : `dict` [`float`,
            `list` [(`lsst.pipe.base.connections.DeferredDatasetRef`, `int`)]]
        Dictionary that groups references to flat-field exposures (and their
        IDs) sequentially by their exposure ID.

    Notes
    -----
    This algorithm sorts the input exposure references by their exposure
    ID, and then assigns each pair of exposure references (exp_j, exp_{j+1})
    to pair k, such that 2*k = j, where j is the python index of one of the
    exposure references (starting from zero). Checking for the IndexError
    when looking up the partner exposure ensures that there will only ever
    be fully populated pairs.
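
    Examples
    --------
    A minimal sketch; the "references" here are plain strings, since
    this function only sorts and pairs them. An unpaired trailing
    exposure would be dropped:

    >>> pairs = arrangeFlatsByExpId(['a', 'b', 'c', 'd'], [3, 1, 2, 4])
    >>> pairs[0]
    [('b', 1), ('c', 2)]
    >>> pairs[1]
    [('a', 3), ('d', 4)]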
507 """
508 flatsAtExpId = {}
509 assert len(exposureList) == len(exposureIdList), "Different lengths for exp. list and exp. ID lists"
510 # Sort exposures by expIds, which are in the second list `exposureIdList`.
511 sortedExposures = sorted(zip(exposureList, exposureIdList), key=lambda pair: pair[1])
513 for jPair, expTuple in enumerate(sortedExposures):
514 if (jPair + 1) % 2:
515 kPair = jPair // 2
516 listAtExpId = flatsAtExpId.setdefault(kPair, [])
517 try:
518 listAtExpId.append(expTuple)
519 listAtExpId.append(sortedExposures[jPair + 1])
520 except IndexError:
521 pass
523 return flatsAtExpId


class CovFastFourierTransform:
    """A class to compute (via FFT) the nearby pixels correlation function.

    Implements the appendix of Astier+19.

    Parameters
    ----------
    diff : `numpy.array`
        Image where to calculate the covariances (e.g., the difference
        image of two flats).
    w : `numpy.array`
        Weight image (mask): it should consist of 1's (good pixels) and
        0's (bad pixels).
    fftShape : `tuple`
        2d-tuple with the shape of the FFT.
    maxRangeCov : `int`
        Maximum range for the covariances.
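
    Examples
    --------
    A minimal sketch on a pure-noise "difference image" with all
    pixels unmasked; the lag-(0, 0) covariance is the sample variance
    of the unit-variance noise:

    >>> rng = np.random.RandomState(42)
    >>> diff = rng.normal(0.0, 1.0, (50, 50))
    >>> covFft = CovFastFourierTransform(diff, np.ones_like(diff),
    ...                                  (64, 64), 8)
    >>> var, nPix = covFft.cov(0, 0)
    >>> nPix
    2500
    >>> bool(0.8 < var < 1.2)
    True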
543 """
545 def __init__(self, diff, w, fftShape, maxRangeCov):
546 # check that the zero padding implied by "fft_shape"
547 # is large enough for the required correlation range
548 assert(fftShape[0] > diff.shape[0]+maxRangeCov+1)
549 assert(fftShape[1] > diff.shape[1]+maxRangeCov+1)
550 # for some reason related to numpy.fft.rfftn,
551 # the second dimension should be even, so
552 if fftShape[1]%2 == 1:
553 fftShape = (fftShape[0], fftShape[1]+1)
554 tIm = np.fft.rfft2(diff*w, fftShape)
555 tMask = np.fft.rfft2(w, fftShape)
556 # sum of "squares"
557 self.pCov = np.fft.irfft2(tIm*tIm.conjugate())
558 # sum of values
559 self.pMean = np.fft.irfft2(tIm*tMask.conjugate())
560 # number of w!=0 pixels.
561 self.pCount = np.fft.irfft2(tMask*tMask.conjugate())

    def cov(self, dx, dy):
        """Covariance for (dx, dy), averaged with (dx, -dy) if both
        are non-zero.

        Implements the appendix of Astier+19.

        Parameters
        ----------
        dx : `int`
            Lag in x.
        dy : `int`
            Lag in y.

        Returns
        -------
        0.5*(cov1+cov2) : `float`
            Covariance at (dx, dy) lag.
        npix1+npix2 : `int`
            Number of pixels used in the covariance calculation.
        """
        # Compensate for rounding errors.
        nPix1 = int(round(self.pCount[dy, dx]))
        cov1 = self.pCov[dy, dx]/nPix1 - self.pMean[dy, dx]*self.pMean[-dy, -dx]/(nPix1*nPix1)
        if (dx == 0 or dy == 0):
            return cov1, nPix1
        nPix2 = int(round(self.pCount[-dy, dx]))
        cov2 = self.pCov[-dy, dx]/nPix2 - self.pMean[-dy, dx]*self.pMean[dy, -dx]/(nPix2*nPix2)
        return 0.5*(cov1 + cov2), nPix1 + nPix2

    def reportCovFastFourierTransform(self, maxRange):
        """Produce a list of tuples with covariances.

        Implements the appendix of Astier+19.

        Parameters
        ----------
        maxRange : `int`
            Maximum range of covariances.

        Returns
        -------
        tupleVec : `list`
            List with covariance tuples.
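
        Examples
        --------
        A minimal sketch; with a uniform weight image the pixel
        counts are set purely by the lags (the (1, 1) entry combines
        the two diagonal lags, hence the doubled count):

        >>> rng = np.random.RandomState(7)
        >>> diff = rng.normal(0.0, 1.0, (40, 40))
        >>> covFft = CovFastFourierTransform(diff, np.ones_like(diff),
        ...                                  (64, 64), 10)
        >>> [(dx, dy, npix) for dx, dy, _, _, npix in
        ...         covFft.reportCovFastFourierTransform(1)]
        [(0, 0, 1600), (1, 0, 1560), (0, 1, 1560), (1, 1, 3042)]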
605 """
606 tupleVec = []
607 # (dy,dx) = (0,0) has to be first
608 for dy in range(maxRange+1):
609 for dx in range(maxRange+1):
610 cov, npix = self.cov(dx, dy)
611 if (dx == 0 and dy == 0):
612 var = cov
613 tupleVec.append((dx, dy, var, cov, npix))
614 return tupleVec


def getFitDataFromCovariances(i, j, mu, fullCov, fullCovModel, fullCovSqrtWeights, gain=1.0,
                              divideByMu=False, returnMasked=False):
    """Get measured signal and covariance, cov model, weights, and mask at
    covariance lag (i, j).

    Parameters
    ----------
    i : `int`
        Lag for covariance matrix.
    j : `int`
        Lag for covariance matrix.
    mu : `list`
        Mean signal values.
    fullCov : `list` of `numpy.array`
        Measured covariance matrices at each mean signal level in mu.
    fullCovModel : `list` of `numpy.array`
        List of modeled covariances at each mean signal level in mu.
    fullCovSqrtWeights : `list` of `numpy.array`
        List of square root of measured covariances at each mean
        signal level in mu.
    gain : `float`, optional
        Gain, in e-/ADU. If other than 1.0 (default), the returned
        quantities will be in electrons or powers of electrons.
    divideByMu : `bool`, optional
        Divide returned covariance, model, and weights by the mean
        signal mu?
    returnMasked : `bool`, optional
        Use mask (based on weights) in returned arrays (mu,
        covariance, and model)?

    Returns
    -------
    mu : `numpy.array`
        List of signal values at (i, j).
    covariance : `numpy.array`
        Covariance at (i, j) at each mean signal mu value (fullCov[:, i, j]).
    covarianceModel : `numpy.array`
        Covariance model at (i, j).
    weights : `numpy.array`
        Weights at (i, j).
    maskFromWeights : `numpy.array`, optional
        Boolean mask of the covariance at (i, j), where the weights
        differ from 0.
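
    Examples
    --------
    A minimal sketch with two mean signal levels and 2x2 covariance
    matrices, extracting the (0, 0) lag (i.e., the variances):

    >>> mu = [100.0, 200.0]
    >>> fullCov = [np.array([[100.0, 1.0], [1.0, 0.5]]),
    ...            np.array([[200.0, 2.0], [2.0, 1.0]])]
    >>> weights = [np.ones((2, 2)), np.ones((2, 2))]
    >>> muOut, cov, model, w, mask = getFitDataFromCovariances(
    ...     0, 0, mu, fullCov, fullCov, weights)
    >>> cov
    array([100., 200.])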
660 """
661 mu = np.array(mu)
662 fullCov = np.array(fullCov)
663 fullCovModel = np.array(fullCovModel)
664 fullCovSqrtWeights = np.array(fullCovSqrtWeights)
665 covariance = fullCov[:, i, j]*(gain**2)
666 covarianceModel = fullCovModel[:, i, j]*(gain**2)
667 weights = fullCovSqrtWeights[:, i, j]/(gain**2)
669 maskFromWeights = weights != 0
670 if returnMasked:
671 weights = weights[maskFromWeights]
672 covarianceModel = covarianceModel[maskFromWeights]
673 mu = mu[maskFromWeights]
674 covariance = covariance[maskFromWeights]
676 if divideByMu:
677 covariance /= mu
678 covarianceModel /= mu
679 weights *= mu
680 return mu, covariance, covarianceModel, weights, maskFromWeights


def symmetrize(inputArray):
    """Copy an array over 4 quadrants prior to convolution.

    Parameters
    ----------
    inputArray : `numpy.array`
        Input array to symmetrize.

    Returns
    -------
    aSym : `numpy.array`
        Symmetrized array.
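
    Examples
    --------
    A 2x2 input is mirrored about its first row and column into a
    3x3 symmetric array:

    >>> symmetrize(np.array([[1., 2.], [3., 4.]]))
    array([[4., 3., 4.],
           [2., 1., 2.],
           [4., 3., 4.]])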
695 """
696 targetShape = list(inputArray.shape)
697 r1, r2 = inputArray.shape[-1], inputArray.shape[-2]
698 targetShape[-1] = 2*r1-1
699 targetShape[-2] = 2*r2-1
700 aSym = np.ndarray(tuple(targetShape))
701 aSym[..., r2-1:, r1-1:] = inputArray
702 aSym[..., r2-1:, r1-1::-1] = inputArray
703 aSym[..., r2-1::-1, r1-1::-1] = inputArray
704 aSym[..., r2-1::-1, r1-1:] = inputArray
706 return aSym


def ddict2dict(d):
    """Convert nested default dictionaries to regular dictionaries.

    This is needed to prevent yaml persistence issues.

    Parameters
    ----------
    d : `defaultdict`
        A possibly nested set of `defaultdict`.

    Returns
    -------
    dict : `dict`
        A possibly nested set of `dict`.
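
    Examples
    --------
    A minimal illustration with one level of nesting:

    >>> from collections import defaultdict
    >>> d = defaultdict(lambda: defaultdict(int))
    >>> d['a']['b'] = 1
    >>> ddict2dict(d)
    {'a': {'b': 1}}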
723 """
724 for k, v in d.items():
725 if isinstance(v, dict):
726 d[k] = ddict2dict(v)
727 return dict(d)