Coverage for python/lsst/cp/pipe/utils.py: 10%

218 statements  

# This file is part of cp_pipe.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#

__all__ = ['countMaskedPixels', 'ddict2dict', 'CovFastFourierTransform']

import numpy as np
from scipy.optimize import leastsq
import numpy.polynomial.polynomial as poly
from scipy.stats import norm

from lsst.ip.isr import isrMock
import lsst.afw.image

import galsim

def sigmaClipCorrection(nSigClip):
    """Correct measured sigma to account for clipping.

    If we clip our input data and then measure sigma, the measured
    sigma is smaller than the true value because real points beyond
    the clip threshold have been removed. This is a small effect
    (1.5% at nSigClip=3) when nSigClip >~ 3, but the default
    parameters for measuring crosstalk use nSigClip=2.0, which causes
    the measured sigma to be about 15% smaller than the true value.
    This formula corrects the issue, for the symmetric case (upper
    clip threshold equal to lower clip threshold).

    Parameters
    ----------
    nSigClip : `float`
        Number of sigma the measurement was clipped by.

    Returns
    -------
    scaleFactor : `float`
        Scale factor to increase the measured sigma by.
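
    Examples
    --------
    A quick numerical check (values follow from the formula below,
    rounded to three decimals):

    >>> round(float(sigmaClipCorrection(3.0)), 3)
    1.014
    >>> round(float(sigmaClipCorrection(2.0)), 3)
    1.137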

    """
    varFactor = 1.0 - (2 * nSigClip * norm.pdf(nSigClip)) / (norm.cdf(nSigClip) - norm.cdf(-nSigClip))
    return 1.0 / np.sqrt(varFactor)

def calculateWeightedReducedChi2(measured, model, weightsMeasured, nData, nParsModel):
    """Calculate weighted reduced chi2.

    Parameters
    ----------
    measured : `list`
        List with measured data.
    model : `list`
        List with modeled data.
    weightsMeasured : `list`
        List with weights for the measured data.
    nData : `int`
        Number of data points.
    nParsModel : `int`
        Number of parameters in the model.

    Returns
    -------
    redWeightedChi2 : `float`
        Reduced weighted chi2.
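
    Examples
    --------
    A small worked example: three points offset from the model by 0.1,
    unit weights, one model parameter, so chi2 = 3*(0.1)^2 / (3 - 1):

    >>> measured = np.array([1.0, 2.0, 3.0])
    >>> model = measured + 0.1
    >>> round(float(calculateWeightedReducedChi2(measured, model, np.ones(3), 3, 1)), 3)
    0.015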

    """
    wRes = (measured - model)*weightsMeasured
    return ((wRes*wRes).sum())/(nData-nParsModel)

def makeMockFlats(expTime, gain=1.0, readNoiseElectrons=5, fluxElectrons=1000,
                  randomSeedFlat1=1984, randomSeedFlat2=666, powerLawBfParams=[],
                  expId1=0, expId2=1):
    """Create a pair of mock flats with isrMock.

    Parameters
    ----------
    expTime : `float`
        Exposure time of the flats.
    gain : `float`, optional
        Gain, in e/ADU.
    readNoiseElectrons : `float`, optional
        Read noise rms, in electrons.
    fluxElectrons : `float`, optional
        Flux of flats, in electrons per second.
    randomSeedFlat1 : `int`, optional
        Random seed for the normal distributions for the mean signal
        and noise (flat1).
    randomSeedFlat2 : `int`, optional
        Random seed for the normal distributions for the mean signal
        and noise (flat2).
    powerLawBfParams : `list`, optional
        Parameters for `galsim.cdmodel.PowerLawCD` to simulate the
        brighter-fatter effect.
    expId1 : `int`, optional
        Exposure ID for the first flat.
    expId2 : `int`, optional
        Exposure ID for the second flat.

    Returns
    -------
    flatExp1 : `lsst.afw.image.exposure.ExposureF`
        First exposure of the flat field pair.
    flatExp2 : `lsst.afw.image.exposure.ExposureF`
        Second exposure of the flat field pair.

    Notes
    -----
    The parameters of `galsim.cdmodel.PowerLawCD` are `n, r0, t0, rx,
    tx, r, t, alpha`. For more information about their meaning, see
    the Galsim documentation
    https://galsim-developers.github.io/GalSim/_build/html/_modules/galsim/cdmodel.html # noqa: W505
    and Gruen+15 (1501.02802).

    Example: galsim.cdmodel.PowerLawCD(8, 1.1e-7, 1.1e-7, 1.0e-8,
    1.0e-8, 1.0e-9, 1.0e-9, 2.0)
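
    A minimal usage sketch (it needs the LSST stack at runtime, so it
    is skipped under doctest):

    >>> flat1, flat2 = makeMockFlats(expTime=5.0, gain=1.5)  # doctest: +SKIP
    >>> diff = flat1.image.array - flat2.image.array  # doctest: +SKIP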

    """
    flatFlux = fluxElectrons  # e/s
    flatMean = flatFlux*expTime  # e
    readNoise = readNoiseElectrons  # e

    mockImageConfig = isrMock.IsrMock.ConfigClass()

    mockImageConfig.flatDrop = 0.99999
    mockImageConfig.isTrimmed = True

    flatExp1 = isrMock.FlatMock(config=mockImageConfig).run()
    flatExp2 = flatExp1.clone()
    (shapeY, shapeX) = flatExp1.getDimensions()
    flatWidth = np.sqrt(flatMean)

    rng1 = np.random.RandomState(randomSeedFlat1)
    flatData1 = rng1.normal(flatMean, flatWidth, (shapeX, shapeY)) + rng1.normal(0.0, readNoise,
                                                                                 (shapeX, shapeY))
    rng2 = np.random.RandomState(randomSeedFlat2)
    flatData2 = rng2.normal(flatMean, flatWidth, (shapeX, shapeY)) + rng2.normal(0.0, readNoise,
                                                                                 (shapeX, shapeY))
    # Simulate BF with power law model in galsim
    if len(powerLawBfParams):
        if not len(powerLawBfParams) == 8:
            raise RuntimeError("Wrong number of parameters for `galsim.cdmodel.PowerLawCD`. "
                               f"Expected 8; passed {len(powerLawBfParams)}.")
        cd = galsim.cdmodel.PowerLawCD(*powerLawBfParams)
        tempFlatData1 = galsim.Image(flatData1)
        temp2FlatData1 = cd.applyForward(tempFlatData1)

        tempFlatData2 = galsim.Image(flatData2)
        temp2FlatData2 = cd.applyForward(tempFlatData2)

        flatExp1.image.array[:] = temp2FlatData1.array/gain  # ADU
        flatExp2.image.array[:] = temp2FlatData2.array/gain  # ADU
    else:
        flatExp1.image.array[:] = flatData1/gain  # ADU
        flatExp2.image.array[:] = flatData2/gain  # ADU

    visitInfoExp1 = lsst.afw.image.VisitInfo(exposureId=expId1, exposureTime=expTime)
    visitInfoExp2 = lsst.afw.image.VisitInfo(exposureId=expId2, exposureTime=expTime)

    flatExp1.info.id = expId1
    flatExp1.getInfo().setVisitInfo(visitInfoExp1)
    flatExp2.info.id = expId2
    flatExp2.getInfo().setVisitInfo(visitInfoExp2)

    return flatExp1, flatExp2

def countMaskedPixels(maskedIm, maskPlane):
    """Count the number of pixels in a given mask plane.

    Parameters
    ----------
    maskedIm : `~lsst.afw.image.MaskedImage`
        Masked image to examine.
    maskPlane : `str`
        Name of the mask plane to examine.

    Returns
    -------
    nPix : `int`
        Number of pixels in the requested mask plane.
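
    Examples
    --------
    A usage sketch; ``exp`` is assumed to be an
    `lsst.afw.image.Exposure` obtained elsewhere:

    >>> nBad = countMaskedPixels(exp.maskedImage, "BAD")  # doctest: +SKIP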

    """
    maskBit = maskedIm.mask.getPlaneBitMask(maskPlane)
    nPix = np.where(np.bitwise_and(maskedIm.mask.array, maskBit))[0].flatten().size
    return nPix

def irlsFit(initialParams, dataX, dataY, function, weightsY=None, weightType='Cauchy', scaleResidual=True):
    """Iteratively reweighted least squares fit.

    This uses `lsst.cp.pipe.utils.fitLeastSq`, but applies weights
    based on the Cauchy distribution by default. Other weight options
    are implemented; see e.g. Holland and Welsch, 1977,
    doi:10.1080/03610927708827533.

    Parameters
    ----------
    initialParams : `list` [`float`]
        Starting parameters.
    dataX : `numpy.array`, (N,)
        Abscissa data.
    dataY : `numpy.array`, (N,)
        Ordinate data.
    function : callable
        Function to fit.
    weightsY : `numpy.array`, (N,), optional
        Weights to apply to the data.
    weightType : `str`, optional
        Type of weighting to use. One of Cauchy, Anderson, bisquare,
        box, Welsch, Huber, logistic, or Fair.
    scaleResidual : `bool`, optional
        If True, the residual is scaled by the sqrt of the Y values.

    Returns
    -------
    polyFit : `list` [`float`]
        Final best fit parameters.
    polyFitErr : `list` [`float`]
        Final errors on fit parameters.
    chiSq : `float`
        Reduced chi squared.
    weightsY : `list` [`float`]
        Final weights used for each point.

    Raises
    ------
    RuntimeError
        Raised if an unknown weightType string is passed.
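
    Examples
    --------
    A sketch of a robust linear fit with one outlier, using the
    `funcPolynomial` helper defined below:

    >>> x = np.arange(10, dtype=float)
    >>> y = funcPolynomial([3.0, 2.0], x)
    >>> y[5] += 50.0  # an outlier that plain least squares would chase
    >>> pars, parErrs, chiSq, weights = irlsFit([1.0, 1.0], x, y, funcPolynomial)
    >>> bool(weights[5] < weights[0])  # the outlier is strongly downweighted
    True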

    """
    # `not weightsY` is ambiguous for a numpy array, so test for None
    # explicitly (matching fitLeastSq below).
    if weightsY is None:
        weightsY = np.ones_like(dataX)

    polyFit, polyFitErr, chiSq = fitLeastSq(initialParams, dataX, dataY, function, weightsY=weightsY)
    for iteration in range(10):
        resid = np.abs(dataY - function(polyFit, dataX))
        if scaleResidual:
            resid = resid / np.sqrt(dataY)
        if weightType == 'Cauchy':
            # Use Cauchy weighting. This is a soft weight.
            # At [2, 3, 5, 10] sigma, weights are [.59, .39, .19, .05].
            Z = resid / 2.385
            weightsY = 1.0 / (1.0 + np.square(Z))
        elif weightType == 'Anderson':
            # Anderson+1972 weighting. This is a hard weight.
            # At [2, 3, 5, 10] sigma, weights are [.67, .35, 0.0, 0.0].
            Z = resid / (1.339 * np.pi)
            weightsY = np.where(Z < 1.0, np.sinc(Z), 0.0)
        elif weightType == 'bisquare':
            # Beaton and Tukey (1974) biweight. This is a hard weight.
            # At [2, 3, 5, 10] sigma, weights are [.81, .59, 0.0, 0.0].
            Z = resid / 4.685
            weightsY = np.where(Z < 1.0, 1.0 - np.square(Z), 0.0)
        elif weightType == 'box':
            # Hinich and Talwar (1975). This is a hard weight.
            # At [2, 3, 5, 10] sigma, weights are [1.0, 0.0, 0.0, 0.0].
            weightsY = np.where(resid < 2.795, 1.0, 0.0)
        elif weightType == 'Welsch':
            # Dennis and Welsch (1976). This is a hard weight.
            # At [2, 3, 5, 10] sigma, weights are [.64, .36, .06, 1e-5].
            Z = resid / 2.985
            weightsY = np.exp(-1.0 * np.square(Z))
        elif weightType == 'Huber':
            # Huber (1964) weighting. This is a soft weight.
            # At [2, 3, 5, 10] sigma, weights are [.67, .45, .27, .13].
            Z = resid / 1.345
            weightsY = np.where(Z < 1.0, 1.0, 1 / Z)
        elif weightType == 'logistic':
            # Logistic weighting. This is a soft weight.
            # At [2, 3, 5, 10] sigma, weights are [.56, .40, .24, .12].
            Z = resid / 1.205
            weightsY = np.tanh(Z) / Z
        elif weightType == 'Fair':
            # Fair (1974) weighting. This is a soft weight.
            # At [2, 3, 5, 10] sigma, weights are [.41, .32, .22, .12].
            Z = resid / 1.4
            weightsY = (1.0 / (1.0 + (Z)))
        else:
            raise RuntimeError(f"Unknown weighting type: {weightType}")
        polyFit, polyFitErr, chiSq = fitLeastSq(initialParams, dataX, dataY, function, weightsY=weightsY)

    return polyFit, polyFitErr, chiSq, weightsY

def fitLeastSq(initialParams, dataX, dataY, function, weightsY=None):
    """Do a fit and estimate the parameter errors using
    scipy.optimize.leastsq.

    optimize.leastsq returns the fractional covariance matrix. To
    estimate the standard deviation of the fit parameters, multiply
    the entries of this matrix by the unweighted reduced chi squared
    and take the square root of the diagonal elements.

    Parameters
    ----------
    initialParams : `list` [`float`]
        Initial values for fit parameters. For ptcFitType=POLYNOMIAL,
        its length determines the degree of the polynomial.
    dataX : `numpy.array`, (N,)
        Data in the abscissa axis.
    dataY : `numpy.array`, (N,)
        Data in the ordinate axis.
    function : callable object (function)
        Function to fit the data with.
    weightsY : `numpy.array`, (N,), optional
        Weights of the data in the ordinate axis.

    Returns
    -------
    pFitSingleLeastSquares : `list` [`float`]
        List with fitted parameters.
    pErrSingleLeastSquares : `list` [`float`]
        List with errors for fitted parameters.
    reducedChiSqSingleLeastSquares : `float`
        Reduced chi squared, unweighted if weightsY is not provided.
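
    Examples
    --------
    Fitting a straight line with the `funcPolynomial` helper defined
    below (noiseless data, so the fit recovers the inputs):

    >>> x = np.linspace(0.0, 10.0, 20)
    >>> y = funcPolynomial([1.0, 2.0], x)
    >>> pars, parErrs, chiSq = fitLeastSq([0.0, 0.0], x, y, funcPolynomial)
    >>> [round(float(p), 3) for p in pars]
    [1.0, 2.0]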

    """
    if weightsY is None:
        weightsY = np.ones(len(dataX))

    def errFunc(p, x, y, weightsY=None):
        if weightsY is None:
            weightsY = np.ones(len(x))
        return (function(p, x) - y)*weightsY

    pFit, pCov, infoDict, errMessage, success = leastsq(errFunc, initialParams,
                                                        args=(dataX, dataY, weightsY), full_output=1,
                                                        epsfcn=0.0001)

    if (len(dataY) > len(initialParams)) and pCov is not None:
        reducedChiSq = calculateWeightedReducedChi2(dataY, function(pFit, dataX), weightsY, len(dataY),
                                                    len(initialParams))
        pCov *= reducedChiSq
    else:
        pCov = np.zeros((len(initialParams), len(initialParams)))
        pCov[:, :] = np.nan
        reducedChiSq = np.nan

    errorVec = []
    for i in range(len(pFit)):
        errorVec.append(np.fabs(pCov[i][i])**0.5)

    pFitSingleLeastSquares = pFit
    pErrSingleLeastSquares = np.array(errorVec)

    return pFitSingleLeastSquares, pErrSingleLeastSquares, reducedChiSq

def fitBootstrap(initialParams, dataX, dataY, function, weightsY=None, confidenceSigma=1.):
    """Do a fit using least squares and bootstrap to estimate parameter errors.

    The bootstrap error bars are calculated by fitting 100 random data sets.

    Parameters
    ----------
    initialParams : `list` [`float`]
        Initial values for fit parameters. For ptcFitType=POLYNOMIAL,
        its length determines the degree of the polynomial.
    dataX : `numpy.array`, (N,)
        Data in the abscissa axis.
    dataY : `numpy.array`, (N,)
        Data in the ordinate axis.
    function : callable object (function)
        Function to fit the data with.
    weightsY : `numpy.array`, (N,), optional
        Weights of the data in the ordinate axis.
    confidenceSigma : `float`, optional
        Number of sigmas that determine the confidence interval for
        the bootstrap errors.

    Returns
    -------
    pFitBootstrap : `list` [`float`]
        List with fitted parameters.
    pErrBootstrap : `list` [`float`]
        List with errors for fitted parameters.
    reducedChiSqBootstrap : `float`
        Reduced chi squared, unweighted if weightsY is not provided.
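
    Examples
    --------
    A sketch on noisy linear data (the resampling makes the exact
    outputs random, so the fit line is skipped under doctest):

    >>> x = np.linspace(0.0, 10.0, 20)
    >>> y = funcPolynomial([1.0, 2.0], x) + np.random.normal(0.0, 0.1, 20)
    >>> pars, parErrs, chiSq = fitBootstrap([0.0, 0.0], x, y, funcPolynomial)  # doctest: +SKIP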

    """
    if weightsY is None:
        weightsY = np.ones(len(dataX))

    def errFunc(p, x, y, weightsY):
        if weightsY is None:
            weightsY = np.ones(len(x))
        return (function(p, x) - y)*weightsY

    # Fit first time
    pFit, _ = leastsq(errFunc, initialParams, args=(dataX, dataY, weightsY), full_output=0)

    # Get the stdev of the residuals
    residuals = errFunc(pFit, dataX, dataY, weightsY)
    # 100 random data sets are generated and fitted
    pars = []
    for i in range(100):
        randomDelta = np.random.normal(0., np.fabs(residuals), len(dataY))
        randomDataY = dataY + randomDelta
        randomFit, _ = leastsq(errFunc, initialParams,
                               args=(dataX, randomDataY, weightsY), full_output=0)
        pars.append(randomFit)
    pars = np.array(pars)
    meanPfit = np.mean(pars, 0)

    # confidence interval for parameter estimates
    errPfit = confidenceSigma*np.std(pars, 0)
    pFitBootstrap = meanPfit
    pErrBootstrap = errPfit

    reducedChiSq = calculateWeightedReducedChi2(dataY, function(pFitBootstrap, dataX), weightsY, len(dataY),
                                                len(initialParams))
    return pFitBootstrap, pErrBootstrap, reducedChiSq

def funcPolynomial(pars, x):
    """Polynomial function definition.

    Parameters
    ----------
    pars : `list`
        Polynomial coefficients. Its length determines the polynomial order.
    x : `numpy.array`, (N,)
        Abscissa array.

    Returns
    -------
    y : `numpy.array`, (N,)
        Ordinate array after evaluating a polynomial of order
        len(pars)-1 at `x`.
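
    Examples
    --------
    Evaluating 1 + 2x at three points:

    >>> funcPolynomial([1.0, 2.0], np.array([0.0, 1.0, 2.0]))
    array([1., 3., 5.])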

    """
    return poly.polyval(x, [*pars])

def funcAstier(pars, x):
    """Single brighter-fatter parameter model for PTC; Equation 16 of
    Astier+19.

    Parameters
    ----------
    pars : `list`
        Parameters of the model: a00 (brighter-fatter), gain (e/ADU),
        and noise (e^2).
    x : `numpy.array`, (N,)
        Signal mu (ADU).

    Returns
    -------
    y : `numpy.array`, (N,)
        C_00 (variance) in ADU^2.
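
    Examples
    --------
    With illustrative values a00=1e-6, gain=1.5 e/ADU, and noise=25 e^2:

    >>> var = funcAstier([1e-6, 1.5, 25.0], np.array([1000.0]))
    >>> round(float(var[0]), 1)
    678.8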

    """
    a00, gain, noise = pars
    return 0.5/(a00*gain*gain)*(np.exp(2*a00*x*gain)-1) + noise/(gain*gain)  # C_00

def arrangeFlatsByExpTime(exposureList, exposureIdList):
    """Arrange exposures by exposure time.

    Parameters
    ----------
    exposureList : `list` [`lsst.pipe.base.connections.DeferredDatasetRef`]
        Input list of exposure references.
    exposureIdList : `list` [`int`]
        List of exposure ids as obtained by dataId[`exposure`].

    Returns
    -------
    flatsAtExpTime : `dict` [`float`,
                     `list` [(`lsst.pipe.base.connections.DeferredDatasetRef`,
                     `int`)]]
        Dictionary that groups references to flat-field exposures
        (and their IDs) that have the same exposure time (seconds).
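
    Examples
    --------
    A usage sketch; ``flatRefs`` and ``flatIds`` stand in for the
    deferred dataset references and exposure ids from a butler query:

    >>> groups = arrangeFlatsByExpTime(flatRefs, flatIds)  # doctest: +SKIP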

    """
    flatsAtExpTime = {}
    assert len(exposureList) == len(exposureIdList), "Different lengths for exp. list and exp. ID lists"
    for expRef, expId in zip(exposureList, exposureIdList):
        expTime = expRef.get(component='visitInfo').exposureTime
        listAtExpTime = flatsAtExpTime.setdefault(expTime, [])
        listAtExpTime.append((expRef, expId))

    return flatsAtExpTime

def arrangeFlatsByExpId(exposureList, exposureIdList):
    """Arrange exposures by exposure ID.

    There is no guarantee that this will properly group exposures, but
    it allows a sequence of flats that have different illumination
    (despite having the same exposure time) to be processed.

    Parameters
    ----------
    exposureList : `list` [`lsst.pipe.base.connections.DeferredDatasetRef`]
        Input list of exposure references.
    exposureIdList : `list` [`int`]
        List of exposure ids as obtained by dataId[`exposure`].

    Returns
    -------
    flatsAtExpId : `dict` [`int`,
                   `list` [(`lsst.pipe.base.connections.DeferredDatasetRef`,
                   `int`)]]
        Dictionary that groups references to flat-field exposures (and
        their IDs) sequentially by pair index.

    Notes
    -----
    This algorithm sorts the input exposure references by their
    exposure id, and then assigns each pair of exposure references
    (exp_j, exp_{j+1}) to pair k, such that 2*k = j, where j is the
    python index of one of the exposure references (starting from
    zero). Looking up the partner reference before appending ensures
    that only fully populated pairs are returned.
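
    Examples
    --------
    Plain strings stand in for the dataset references here; only the
    ids drive the grouping:

    >>> refs = ['refA', 'refB', 'refC', 'refD']
    >>> ids = [103, 101, 102, 100]
    >>> arrangeFlatsByExpId(refs, ids)
    {0: [('refD', 100), ('refB', 101)], 1: [('refC', 102), ('refA', 103)]}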

    """
    flatsAtExpId = {}
    assert len(exposureList) == len(exposureIdList), "Different lengths for exp. list and exp. ID lists"
    # Sort exposures by expIds, which are in the second list `exposureIdList`.
    sortedExposures = sorted(zip(exposureList, exposureIdList), key=lambda pair: pair[1])

    for jPair, expTuple in enumerate(sortedExposures):
        if (jPair + 1) % 2:
            kPair = jPair // 2
            listAtExpId = flatsAtExpId.setdefault(kPair, [])
            try:
                # Look up the partner first, so that an unpaired final
                # exposure raises IndexError before anything is
                # appended and only full pairs are kept.
                nextTuple = sortedExposures[jPair + 1]
                listAtExpId.append(expTuple)
                listAtExpId.append(nextTuple)
            except IndexError:
                pass

    return flatsAtExpId

class CovFastFourierTransform:
    """A class to compute (via FFT) the nearby pixels correlation function.

    Implements the appendix of Astier+19.

    Parameters
    ----------
    diff : `numpy.array`
        Image where the covariances will be calculated (e.g., the
        difference image of two flats).
    w : `numpy.array`
        Weight image (mask): it should consist of 1's (good pixels)
        and 0's (bad pixels).
    fftShape : `tuple`
        2d-tuple with the shape of the FFT.
    maxRangeCov : `int`
        Maximum range for the covariances.
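
    Examples
    --------
    A small self-contained sketch with a pure-noise difference image
    and a fully good mask (the shapes are illustrative only):

    >>> rng = np.random.RandomState(42)
    >>> diff = rng.normal(0.0, 5.0, (100, 100))
    >>> w = np.ones_like(diff)
    >>> fftShape = (diff.shape[0] + 10, diff.shape[1] + 10)
    >>> covFft = CovFastFourierTransform(diff, w, fftShape, maxRangeCov=8)
    >>> covTuples = covFft.reportCovFastFourierTransform(8)
    >>> covTuples[0][:2]  # the (dx, dy) = (0, 0) lag comes first
    (0, 0)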

    """

    def __init__(self, diff, w, fftShape, maxRangeCov):
        # check that the zero padding implied by "fft_shape"
        # is large enough for the required correlation range
        assert fftShape[0] > diff.shape[0] + maxRangeCov + 1
        assert fftShape[1] > diff.shape[1] + maxRangeCov + 1
        # for some reason related to numpy.fft.rfftn,
        # the second dimension should be even, so
        if fftShape[1] % 2 == 1:
            fftShape = (fftShape[0], fftShape[1] + 1)
        tIm = np.fft.rfft2(diff*w, fftShape)
        tMask = np.fft.rfft2(w, fftShape)
        # sum of "squares"
        self.pCov = np.fft.irfft2(tIm*tIm.conjugate())
        # sum of values
        self.pMean = np.fft.irfft2(tIm*tMask.conjugate())
        # number of w != 0 pixels
        self.pCount = np.fft.irfft2(tMask*tMask.conjugate())

    def cov(self, dx, dy):
        """Covariance for dx,dy averaged with dx,-dy if both are non zero.

        Implements the appendix of Astier+19.

        Parameters
        ----------
        dx : `int`
            Lag in x.
        dy : `int`
            Lag in y.

        Returns
        -------
        0.5*(cov1+cov2) : `float`
            Covariance at (dx, dy) lag.
        npix1+npix2 : `int`
            Number of pixels used in the covariance calculation.
        """
        # compensate rounding errors
        nPix1 = int(round(self.pCount[dy, dx]))
        cov1 = self.pCov[dy, dx]/nPix1 - self.pMean[dy, dx]*self.pMean[-dy, -dx]/(nPix1*nPix1)
        if (dx == 0 or dy == 0):
            return cov1, nPix1
        nPix2 = int(round(self.pCount[-dy, dx]))
        cov2 = self.pCov[-dy, dx]/nPix2 - self.pMean[-dy, dx]*self.pMean[dy, -dx]/(nPix2*nPix2)
        return 0.5*(cov1+cov2), nPix1 + nPix2

    def reportCovFastFourierTransform(self, maxRange):
        """Produce a list of tuples with covariances.

        Implements the appendix of Astier+19.

        Parameters
        ----------
        maxRange : `int`
            Maximum range of covariances.

        Returns
        -------
        tupleVec : `list`
            List with covariance tuples of the form
            (dx, dy, var, cov, npix).
        """
        tupleVec = []
        # (dy,dx) = (0,0) has to be first
        for dy in range(maxRange + 1):
            for dx in range(maxRange + 1):
                cov, npix = self.cov(dx, dy)
                if (dx == 0 and dy == 0):
                    var = cov
                tupleVec.append((dx, dy, var, cov, npix))
        return tupleVec

def getFitDataFromCovariances(i, j, mu, fullCov, fullCovModel, fullCovSqrtWeights, gain=1.0,
                              divideByMu=False, returnMasked=False):
    """Get the measured signal and covariance, covariance model,
    weights, and mask at covariance lag (i, j).

    Parameters
    ----------
    i : `int`
        Lag for the covariance matrix.
    j : `int`
        Lag for the covariance matrix.
    mu : `list`
        Mean signal values.
    fullCov : `list` of `numpy.array`
        Measured covariance matrices at each mean signal level in mu.
    fullCovModel : `list` of `numpy.array`
        List of modeled covariances at each mean signal level in mu.
    fullCovSqrtWeights : `list` of `numpy.array`
        List of square root of measured covariances at each mean
        signal level in mu.
    gain : `float`, optional
        Gain, in e-/ADU. If other than 1.0 (default), the returned
        quantities will be in electrons or powers of electrons.
    divideByMu : `bool`, optional
        Divide the returned covariance, model, and weights by the
        mean signal mu?
    returnMasked : `bool`, optional
        Use the mask (based on weights) in the returned arrays (mu,
        covariance, and model)?

    Returns
    -------
    mu : `numpy.array`
        List of signal values at (i, j).
    covariance : `numpy.array`
        Covariance at (i, j) at each mean signal mu value (fullCov[:, i, j]).
    covarianceModel : `numpy.array`
        Covariance model at (i, j).
    weights : `numpy.array`
        Weights at (i, j).
    maskFromWeights : `numpy.array`, optional
        Boolean mask of the covariance at (i, j), where the weights
        differ from 0.
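
    Examples
    --------
    A usage sketch; ``muList``, ``covList``, ``covModelList``, and
    ``sqrtWList`` are assumed to come from a PTC covariance fit:

    >>> mu, cov, covModel, w, mask = getFitDataFromCovariances(
    ...     0, 0, muList, covList, covModelList, sqrtWList,
    ...     divideByMu=True)  # doctest: +SKIP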

    """
    mu = np.array(mu)
    fullCov = np.array(fullCov)
    fullCovModel = np.array(fullCovModel)
    fullCovSqrtWeights = np.array(fullCovSqrtWeights)
    covariance = fullCov[:, i, j]*(gain**2)
    covarianceModel = fullCovModel[:, i, j]*(gain**2)
    weights = fullCovSqrtWeights[:, i, j]/(gain**2)

    maskFromWeights = weights != 0
    if returnMasked:
        weights = weights[maskFromWeights]
        covarianceModel = covarianceModel[maskFromWeights]
        mu = mu[maskFromWeights]
        covariance = covariance[maskFromWeights]

    if divideByMu:
        covariance /= mu
        covarianceModel /= mu
        weights *= mu
    return mu, covariance, covarianceModel, weights, maskFromWeights

def symmetrize(inputArray):
    """Copy an array over 4 quadrants prior to convolution.

    Parameters
    ----------
    inputArray : `numpy.array`
        Input array to symmetrize.

    Returns
    -------
    aSym : `numpy.array`
        Symmetrized array.
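
    Examples
    --------
    The input becomes the lower-right quadrant of the mirrored output:

    >>> symmetrize(np.arange(4.0).reshape(2, 2))
    array([[3., 2., 3.],
           [1., 0., 1.],
           [3., 2., 3.]])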

    """
    targetShape = list(inputArray.shape)
    r1, r2 = inputArray.shape[-1], inputArray.shape[-2]
    targetShape[-1] = 2*r1 - 1
    targetShape[-2] = 2*r2 - 1
    aSym = np.ndarray(tuple(targetShape))
    aSym[..., r2-1:, r1-1:] = inputArray
    aSym[..., r2-1:, r1-1::-1] = inputArray
    aSym[..., r2-1::-1, r1-1::-1] = inputArray
    aSym[..., r2-1::-1, r1-1:] = inputArray

    return aSym

def ddict2dict(d):
    """Convert nested default dictionaries to regular dictionaries.

    This is needed to prevent yaml persistence issues.

    Parameters
    ----------
    d : `defaultdict`
        A possibly nested set of `defaultdict`.

    Returns
    -------
    dict : `dict`
        A possibly nested set of `dict`.
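
    Examples
    --------
    >>> from collections import defaultdict
    >>> d = defaultdict(lambda: defaultdict(int))
    >>> d['amp']['count'] = 1
    >>> ddict2dict(d)
    {'amp': {'count': 1}}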

    """
    for k, v in d.items():
        if isinstance(v, dict):
            d[k] = ddict2dict(v)
    return dict(d)