# This file is part of cp_pipe.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#

__all__ = ['countMaskedPixels', 'ddict2dict', 'CovFastFourierTransform']

import numpy as np
from scipy.optimize import leastsq
import numpy.polynomial.polynomial as poly
from scipy.stats import norm

from lsst.ip.isr import isrMock
import lsst.afw.image

import galsim


def sigmaClipCorrection(nSigClip):
    """Correct measured sigma to account for clipping.

    If we clip our input data and then measure sigma, the measured
    sigma is smaller than the true value because real points beyond
    the clip threshold have been removed. This is a small (1.5% at
    nSigClip=3) effect when nSigClip >~ 3, but the default parameters
    for the crosstalk measurement use nSigClip=2.0, which causes the
    measured sigma to be about 15% smaller than the true value. This
    formula corrects for the effect in the symmetric case (upper clip
    threshold equal to lower clip threshold).

    Parameters
    ----------
    nSigClip : `float`
        Number of sigma the measurement was clipped by.

    Returns
    -------
    scaleFactor : `float`
        Scale factor to increase the measured sigma by.
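
    Examples
    --------
    A minimal sketch of typical use; the rounded values below follow
    directly from the formula:

    >>> round(float(sigmaClipCorrection(2.0)), 3)
    1.137
    >>> round(float(sigmaClipCorrection(3.0)), 3)
    1.014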

57 """ 

58 varFactor = 1.0 - (2 * nSigClip * norm.pdf(nSigClip)) / (norm.cdf(nSigClip) - norm.cdf(-nSigClip)) 

59 return 1.0 / np.sqrt(varFactor) 

60 

61 

def calculateWeightedReducedChi2(measured, model, weightsMeasured, nData, nParsModel):
    """Calculate weighted reduced chi2.

    Parameters
    ----------
    measured : `numpy.array`
        Measured data.

    model : `numpy.array`
        Modeled data.

    weightsMeasured : `numpy.array`
        Weights for the measured data.

    nData : `int`
        Number of data points.

    nParsModel : `int`
        Number of parameters in the model.

    Returns
    -------
    redWeightedChi2 : `float`
        Reduced weighted chi2.
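
    Examples
    --------
    A small illustrative check with four points, unit weights, and a
    two-parameter model:

    >>> import numpy as np
    >>> measured = np.array([1.0, 2.0, 3.0, 4.0])
    >>> model = np.array([1.1, 1.9, 3.2, 3.8])
    >>> chi2 = calculateWeightedReducedChi2(measured, model, np.ones(4), 4, 2)
    >>> round(float(chi2), 3)
    0.05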

88 """ 

89 wRes = (measured - model)*weightsMeasured 

90 return ((wRes*wRes).sum())/(nData-nParsModel) 

91 

92 

def makeMockFlats(expTime, gain=1.0, readNoiseElectrons=5, fluxElectrons=1000,
                  randomSeedFlat1=1984, randomSeedFlat2=666, powerLawBfParams=[],
                  expId1=0, expId2=1):
    """Create a pair of mock flats with isrMock.

    Parameters
    ----------
    expTime : `float`
        Exposure time of the flats.

    gain : `float`, optional
        Gain, in e/ADU.

    readNoiseElectrons : `float`, optional
        Read noise rms, in electrons.

    fluxElectrons : `float`, optional
        Flux of flats, in electrons per second.

    randomSeedFlat1 : `int`, optional
        Random seed for the normal distributions for the mean signal
        and noise (flat1).

    randomSeedFlat2 : `int`, optional
        Random seed for the normal distributions for the mean signal
        and noise (flat2).

    powerLawBfParams : `list`, optional
        Parameters for `galsim.cdmodel.PowerLawCD` to simulate the
        brighter-fatter effect.

    expId1 : `int`, optional
        Exposure ID for first flat.

    expId2 : `int`, optional
        Exposure ID for second flat.

    Returns
    -------
    flatExp1 : `lsst.afw.image.exposure.ExposureF`
        First exposure of flat field pair.

    flatExp2 : `lsst.afw.image.exposure.ExposureF`
        Second exposure of flat field pair.

    Notes
    -----
    The parameters of `galsim.cdmodel.PowerLawCD` are `n, r0, t0, rx,
    tx, r, t, alpha`. For more information about their meaning, see
    the Galsim documentation
    https://galsim-developers.github.io/GalSim/_build/html/_modules/galsim/cdmodel.html # noqa: W505
    and Gruen+15 (1501.02802).

    Example: galsim.cdmodel.PowerLawCD(8, 1.1e-7, 1.1e-7, 1.0e-8,
    1.0e-8, 1.0e-9, 1.0e-9, 2.0)
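
    Examples
    --------
    A minimal sketch (requires the LSST stack; the brighter-fatter
    simulation is skipped because `powerLawBfParams` is left at its
    default):

    >>> flat1, flat2 = makeMockFlats(5.0, gain=1.2, fluxElectrons=500)
    >>> flat1.getInfo().getVisitInfo().exposureTime
    5.0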

149 """ 

150 flatFlux = fluxElectrons # e/s 

151 flatMean = flatFlux*expTime # e 

152 readNoise = readNoiseElectrons # e 

153 

154 mockImageConfig = isrMock.IsrMock.ConfigClass() 

155 

156 mockImageConfig.flatDrop = 0.99999 

157 mockImageConfig.isTrimmed = True 

158 

159 flatExp1 = isrMock.FlatMock(config=mockImageConfig).run() 

160 flatExp2 = flatExp1.clone() 

161 (shapeY, shapeX) = flatExp1.getDimensions() 

162 flatWidth = np.sqrt(flatMean) 

163 

164 rng1 = np.random.RandomState(randomSeedFlat1) 

165 flatData1 = rng1.normal(flatMean, flatWidth, (shapeX, shapeY)) + rng1.normal(0.0, readNoise, 

166 (shapeX, shapeY)) 

167 rng2 = np.random.RandomState(randomSeedFlat2) 

168 flatData2 = rng2.normal(flatMean, flatWidth, (shapeX, shapeY)) + rng2.normal(0.0, readNoise, 

169 (shapeX, shapeY)) 

170 # Simulate BF with power law model in galsim 

171 if len(powerLawBfParams): 

172 if not len(powerLawBfParams) == 8: 

173 raise RuntimeError("Wrong number of parameters for `galsim.cdmodel.PowerLawCD`. " 

174 f"Expected 8; passed {len(powerLawBfParams)}.") 

175 cd = galsim.cdmodel.PowerLawCD(*powerLawBfParams) 

176 tempFlatData1 = galsim.Image(flatData1) 

177 temp2FlatData1 = cd.applyForward(tempFlatData1) 

178 

179 tempFlatData2 = galsim.Image(flatData2) 

180 temp2FlatData2 = cd.applyForward(tempFlatData2) 

181 

182 flatExp1.image.array[:] = temp2FlatData1.array/gain # ADU 

183 flatExp2.image.array[:] = temp2FlatData2.array/gain # ADU 

184 else: 

185 flatExp1.image.array[:] = flatData1/gain # ADU 

186 flatExp2.image.array[:] = flatData2/gain # ADU 

187 

188 visitInfoExp1 = lsst.afw.image.VisitInfo(exposureId=expId1, exposureTime=expTime) 

189 visitInfoExp2 = lsst.afw.image.VisitInfo(exposureId=expId2, exposureTime=expTime) 

190 

191 flatExp1.info.id = expId1 

192 flatExp1.getInfo().setVisitInfo(visitInfoExp1) 

193 flatExp2.info.id = expId2 

194 flatExp2.getInfo().setVisitInfo(visitInfoExp2) 

195 

196 return flatExp1, flatExp2 

197 

198 

def countMaskedPixels(maskedIm, maskPlane):
    """Count the number of pixels in a given mask plane.

    Parameters
    ----------
    maskedIm : `~lsst.afw.image.MaskedImage`
        Masked image to examine.
    maskPlane : `str`
        Name of the mask plane to examine.

    Returns
    -------
    nPix : `int`
        Number of pixels in the requested mask plane.
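
    Examples
    --------
    A minimal sketch using a small blank image (requires the LSST
    stack; "BAD" is one of the default mask planes):

    >>> import lsst.afw.image as afwImage
    >>> mi = afwImage.MaskedImageF(10, 10)
    >>> mi.mask.array[0:2, :] = mi.mask.getPlaneBitMask("BAD")
    >>> countMaskedPixels(mi, "BAD")
    20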

213 """ 

214 maskBit = maskedIm.mask.getPlaneBitMask(maskPlane) 

215 nPix = np.where(np.bitwise_and(maskedIm.mask.array, maskBit))[0].flatten().size 

216 return nPix 

217 

218 

def irlsFit(initialParams, dataX, dataY, function, weightsY=None, weightType='Cauchy', scaleResidual=True):
    """Iteratively reweighted least squares fit.

    This uses `lsst.cp.pipe.utils.fitLeastSq`, but applies weights
    based on the Cauchy distribution by default. Other weight options
    are implemented; see e.g. Holland and Welsch, 1977,
    doi:10.1080/03610927708827533.

    Parameters
    ----------
    initialParams : `list` [`float`]
        Starting parameters.
    dataX : `numpy.array`, (N,)
        Abscissa data.
    dataY : `numpy.array`, (N,)
        Ordinate data.
    function : callable
        Function to fit.
    weightsY : `numpy.array`, (N,), optional
        Weights to apply to the data.
    weightType : `str`, optional
        Type of weighting to use. One of Cauchy, Anderson, bisquare,
        box, Welsch, Huber, logistic, or Fair.
    scaleResidual : `bool`, optional
        If True, the residual is scaled by the sqrt of the Y values.

    Returns
    -------
    polyFit : `list` [`float`]
        Final best fit parameters.
    polyFitErr : `list` [`float`]
        Final errors on fit parameters.
    chiSq : `float`
        Reduced chi squared.
    weightsY : `list` [`float`]
        Final weights used for each point.

    Raises
    ------
    RuntimeError
        Raised if an unknown weightType string is passed.
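
    Examples
    --------
    A minimal sketch: fit a straight line (via `funcPolynomial`) to
    data containing one outlier, which the default Cauchy weights
    suppress. The values here are illustrative only:

    >>> import numpy as np
    >>> dataX = np.arange(1.0, 11.0)
    >>> dataY = 2.0*dataX + 1.0
    >>> dataY[5] += 20.0  # inject an outlier
    >>> polyFit, polyFitErr, chiSq, weights = irlsFit(
    ...     [0.0, 1.0], dataX, dataY, funcPolynomial)
    >>> bool(weights[5] < min(weights[:5]))  # the outlier is down-weighted
    True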

260 """ 

261 if not weightsY: 

262 weightsY = np.ones_like(dataX) 

263 

264 polyFit, polyFitErr, chiSq = fitLeastSq(initialParams, dataX, dataY, function, weightsY=weightsY) 

265 for iteration in range(10): 

266 resid = np.abs(dataY - function(polyFit, dataX)) 

267 if scaleResidual: 

268 resid = resid / np.sqrt(dataY) 

269 if weightType == 'Cauchy': 

270 # Use Cauchy weighting. This is a soft weight. 

271 # At [2, 3, 5, 10] sigma, weights are [.59, .39, .19, .05]. 

272 Z = resid / 2.385 

273 weightsY = 1.0 / (1.0 + np.square(Z)) 

274 elif weightType == 'Anderson': 

275 # Anderson+1972 weighting. This is a hard weight. 

276 # At [2, 3, 5, 10] sigma, weights are [.67, .35, 0.0, 0.0]. 

277 Z = resid / (1.339 * np.pi) 

278 weightsY = np.where(Z < 1.0, np.sinc(Z), 0.0) 

279 elif weightType == 'bisquare': 

280 # Beaton and Tukey (1974) biweight. This is a hard weight. 

281 # At [2, 3, 5, 10] sigma, weights are [.81, .59, 0.0, 0.0]. 

282 Z = resid / 4.685 

283 weightsY = np.where(Z < 1.0, 1.0 - np.square(Z), 0.0) 

284 elif weightType == 'box': 

285 # Hinich and Talwar (1975). This is a hard weight. 

286 # At [2, 3, 5, 10] sigma, weights are [1.0, 0.0, 0.0, 0.0]. 

287 weightsY = np.where(resid < 2.795, 1.0, 0.0) 

288 elif weightType == 'Welsch': 

289 # Dennis and Welsch (1976). This is a hard weight. 

290 # At [2, 3, 5, 10] sigma, weights are [.64, .36, .06, 1e-5]. 

291 Z = resid / 2.985 

292 weightsY = np.exp(-1.0 * np.square(Z)) 

293 elif weightType == 'Huber': 

294 # Huber (1964) weighting. This is a soft weight. 

295 # At [2, 3, 5, 10] sigma, weights are [.67, .45, .27, .13]. 

296 Z = resid / 1.345 

297 weightsY = np.where(Z < 1.0, 1.0, 1 / Z) 

298 elif weightType == 'logistic': 

299 # Logistic weighting. This is a soft weight. 

300 # At [2, 3, 5, 10] sigma, weights are [.56, .40, .24, .12]. 

301 Z = resid / 1.205 

302 weightsY = np.tanh(Z) / Z 

303 elif weightType == 'Fair': 

304 # Fair (1974) weighting. This is a soft weight. 

305 # At [2, 3, 5, 10] sigma, weights are [.41, .32, .22, .12]. 

306 Z = resid / 1.4 

307 weightsY = (1.0 / (1.0 + (Z))) 

308 else: 

309 raise RuntimeError(f"Unknown weighting type: {weightType}") 

310 polyFit, polyFitErr, chiSq = fitLeastSq(initialParams, dataX, dataY, function, weightsY=weightsY) 

311 

312 return polyFit, polyFitErr, chiSq, weightsY 

313 

314 

def fitLeastSq(initialParams, dataX, dataY, function, weightsY=None):
    """Do a fit and estimate the parameter errors using
    scipy.optimize.leastsq.

    optimize.leastsq returns the fractional covariance matrix. To
    estimate the standard deviation of the fit parameters, multiply
    the entries of this matrix by the unweighted reduced chi squared
    and take the square root of the diagonal elements.

    Parameters
    ----------
    initialParams : `list` [`float`]
        Initial values for fit parameters. For ptcFitType=POLYNOMIAL,
        its length determines the degree of the polynomial.

    dataX : `numpy.array`, (N,)
        Data in the abscissa axis.

    dataY : `numpy.array`, (N,)
        Data in the ordinate axis.

    function : callable object (function)
        Function to fit the data with.

    weightsY : `numpy.array`, (N,), optional
        Weights of the data in the ordinate axis.

    Returns
    -------
    pFitSingleLeastSquares : `list` [`float`]
        List with fitted parameters.

    pErrSingleLeastSquares : `list` [`float`]
        List with errors for fitted parameters.

    reducedChiSqSingleLeastSquares : `float`
        Reduced chi squared, unweighted if weightsY is not provided.
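
    Examples
    --------
    A minimal sketch: recover the coefficients of an exact straight
    line with `funcPolynomial` (no noise, so the fit is essentially
    perfect):

    >>> import numpy as np
    >>> dataX = np.arange(10.0)
    >>> dataY = 2.0*dataX + 1.0
    >>> pFit, pErr, chiSq = fitLeastSq([0.0, 0.0], dataX, dataY, funcPolynomial)
    >>> [round(float(p), 3) for p in pFit]
    [1.0, 2.0]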

352 """ 

353 if weightsY is None: 

354 weightsY = np.ones(len(dataX)) 

355 

356 def errFunc(p, x, y, weightsY=None): 

357 if weightsY is None: 

358 weightsY = np.ones(len(x)) 

359 return (function(p, x) - y)*weightsY 

360 

361 pFit, pCov, infoDict, errMessage, success = leastsq(errFunc, initialParams, 

362 args=(dataX, dataY, weightsY), full_output=1, 

363 epsfcn=0.0001) 

364 

365 if (len(dataY) > len(initialParams)) and pCov is not None: 

366 reducedChiSq = calculateWeightedReducedChi2(dataY, function(pFit, dataX), weightsY, len(dataY), 

367 len(initialParams)) 

368 pCov *= reducedChiSq 

369 else: 

370 pCov = np.zeros((len(initialParams), len(initialParams))) 

371 pCov[:, :] = np.nan 

372 reducedChiSq = np.nan 

373 

374 errorVec = [] 

375 for i in range(len(pFit)): 

376 errorVec.append(np.fabs(pCov[i][i])**0.5) 

377 

378 pFitSingleLeastSquares = pFit 

379 pErrSingleLeastSquares = np.array(errorVec) 

380 

381 return pFitSingleLeastSquares, pErrSingleLeastSquares, reducedChiSq 

382 

383 

def fitBootstrap(initialParams, dataX, dataY, function, weightsY=None, confidenceSigma=1.):
    """Do a fit using least squares and bootstrap to estimate parameter errors.

    The bootstrap error bars are calculated by fitting 100 random data sets.

    Parameters
    ----------
    initialParams : `list` [`float`]
        Initial values for fit parameters. For ptcFitType=POLYNOMIAL,
        its length determines the degree of the polynomial.

    dataX : `numpy.array`, (N,)
        Data in the abscissa axis.

    dataY : `numpy.array`, (N,)
        Data in the ordinate axis.

    function : callable object (function)
        Function to fit the data with.

    weightsY : `numpy.array`, (N,), optional
        Weights of the data in the ordinate axis.

    confidenceSigma : `float`, optional
        Number of sigmas that determine confidence interval for the
        bootstrap errors.

    Returns
    -------
    pFitBootstrap : `list` [`float`]
        List with fitted parameters.

    pErrBootstrap : `list` [`float`]
        List with errors for fitted parameters.

    reducedChiSqBootstrap : `float`
        Reduced chi squared, unweighted if weightsY is not provided.
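
    Examples
    --------
    A minimal sketch on noisy linear data; the resampling uses
    `numpy.random`, so the fitted values and errors vary from run to
    run:

    >>> import numpy as np
    >>> rng = np.random.RandomState(17)
    >>> dataX = np.arange(20.0)
    >>> dataY = 2.0*dataX + 1.0 + rng.normal(0.0, 0.1, 20)
    >>> pFit, pErr, chiSq = fitBootstrap([0.0, 0.0], dataX, dataY, funcPolynomial)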

421 """ 

422 if weightsY is None: 

423 weightsY = np.ones(len(dataX)) 

424 

425 def errFunc(p, x, y, weightsY): 

426 if weightsY is None: 

427 weightsY = np.ones(len(x)) 

428 return (function(p, x) - y)*weightsY 

429 

430 # Fit first time 

431 pFit, _ = leastsq(errFunc, initialParams, args=(dataX, dataY, weightsY), full_output=0) 

432 

433 # Get the stdev of the residuals 

434 residuals = errFunc(pFit, dataX, dataY, weightsY) 

435 # 100 random data sets are generated and fitted 

436 pars = [] 

437 for i in range(100): 

438 randomDelta = np.random.normal(0., np.fabs(residuals), len(dataY)) 

439 randomDataY = dataY + randomDelta 

440 randomFit, _ = leastsq(errFunc, initialParams, 

441 args=(dataX, randomDataY, weightsY), full_output=0) 

442 pars.append(randomFit) 

443 pars = np.array(pars) 

444 meanPfit = np.mean(pars, 0) 

445 

446 # confidence interval for parameter estimates 

447 errPfit = confidenceSigma*np.std(pars, 0) 

448 pFitBootstrap = meanPfit 

449 pErrBootstrap = errPfit 

450 

451 reducedChiSq = calculateWeightedReducedChi2(dataY, function(pFitBootstrap, dataX), weightsY, len(dataY), 

452 len(initialParams)) 

453 return pFitBootstrap, pErrBootstrap, reducedChiSq 

454 

455 

def funcPolynomial(pars, x):
    """Polynomial function definition.

    Parameters
    ----------
    pars : `list`
        Polynomial coefficients. Its length determines the polynomial order.

    x : `numpy.array`, (N,)
        Abscissa array.

    Returns
    -------
    y : `numpy.array`, (N,)
        Ordinate array after evaluating polynomial of order
        len(pars)-1 at `x`.
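
    Examples
    --------
    Coefficients are ordered from the constant term upwards, so
    `[1.0, 2.0]` is the line y = 1 + 2x:

    >>> import numpy as np
    >>> funcPolynomial([1.0, 2.0], np.array([0.0, 1.0, 2.0]))
    array([1., 3., 5.])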

471 """ 

472 return poly.polyval(x, [*pars]) 

473 

474 

def funcAstier(pars, x):
    """Single brighter-fatter parameter model for PTC; Equation 16 of
    Astier+19.

    Parameters
    ----------
    pars : `list`
        Parameters of the model: a00 (brighter-fatter), gain (e/ADU),
        and noise (e^2).

    x : `numpy.array`, (N,)
        Signal mu (ADU).

    Returns
    -------
    y : `numpy.array`, (N,)
        C_00 (variance) in ADU^2.
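
    Examples
    --------
    An illustrative evaluation with made-up parameter values
    (a00=1e-6, gain=1.5 e/ADU, noise=25 e^2):

    >>> import numpy as np
    >>> var = funcAstier([1e-6, 1.5, 25.0], np.array([1000.0, 2000.0]))
    >>> [round(float(v), 1) for v in var]
    [678.8, 1348.5]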

492 """ 

493 a00, gain, noise = pars 

494 return 0.5/(a00*gain*gain)*(np.exp(2*a00*x*gain)-1) + noise/(gain*gain) # C_00 

495 

496 

def arrangeFlatsByExpTime(exposureList, exposureIdList):
    """Arrange exposures by exposure time.

    Parameters
    ----------
    exposureList : `list` [`lsst.pipe.base.connections.DeferredDatasetRef`]
        Input list of exposure references.

    exposureIdList : `list` [`int`]
        List of exposure ids as obtained by dataId[`exposure`].

    Returns
    -------
    flatsAtExpTime : `dict` [`float`,
                     `list` [(`lsst.pipe.base.connections.DeferredDatasetRef`, `int`)]]
        Dictionary that groups references to flat-field exposures
        (and their IDs) that have the same exposure time (seconds).
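
    Examples
    --------
    A minimal sketch with a hypothetical stand-in for the exposure
    references; anything exposing `get(component='visitInfo')` works
    (requires the LSST stack for `lsst.afw.image.VisitInfo`):

    >>> class FakeRef:
    ...     def __init__(self, expTime):
    ...         self._visitInfo = lsst.afw.image.VisitInfo(exposureTime=expTime)
    ...     def get(self, component=None):
    ...         return self._visitInfo
    >>> refs = [FakeRef(1.0), FakeRef(1.0), FakeRef(5.0)]
    >>> groups = arrangeFlatsByExpTime(refs, [10, 11, 12])
    >>> sorted(groups.keys())
    [1.0, 5.0]
    >>> len(groups[1.0])
    2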

515 """ 

516 flatsAtExpTime = {} 

517 assert len(exposureList) == len(exposureIdList), "Different lengths for exp. list and exp. ID lists" 

518 for expRef, expId in zip(exposureList, exposureIdList): 

519 expTime = expRef.get(component='visitInfo').exposureTime 

520 listAtExpTime = flatsAtExpTime.setdefault(expTime, []) 

521 listAtExpTime.append((expRef, expId)) 

522 

523 return flatsAtExpTime 

524 

525 

def arrangeFlatsByExpId(exposureList, exposureIdList):
    """Arrange exposures by exposure ID.

    There is no guarantee that this will properly group exposures, but
    it allows a sequence of flats that have different illumination
    (despite having the same exposure time) to be processed.

    Parameters
    ----------
    exposureList : `list` [`lsst.pipe.base.connections.DeferredDatasetRef`]
        Input list of exposure references.

    exposureIdList : `list` [`int`]
        List of exposure ids as obtained by dataId[`exposure`].

    Returns
    -------
    flatsAtExpId : `dict` [`int`,
                   `list` [(`lsst.pipe.base.connections.DeferredDatasetRef`, `int`)]]
        Dictionary that groups references to flat-field exposures (and their
        IDs) sequentially by their exposure id.

    Notes
    -----
    This algorithm sorts the input exposure references by their exposure
    id, and then assigns each pair of exposure references (exp_j, exp_{j+1})
    to pair k, such that 2*k = j, where j is the python index of one of the
    exposure references (starting from zero). Checking for the IndexError
    when fetching the partner exposure ensures that only fully populated
    pairs are kept.
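
    Examples
    --------
    A minimal sketch using plain strings as stand-ins for the
    exposure references:

    >>> groups = arrangeFlatsByExpId(['a', 'b', 'c', 'd'], [2, 1, 4, 3])
    >>> groups[0]
    [('b', 1), ('a', 2)]
    >>> groups[1]
    [('d', 3), ('c', 4)]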

558 """ 

559 flatsAtExpId = {} 

560 assert len(exposureList) == len(exposureIdList), "Different lengths for exp. list and exp. ID lists" 

561 # Sort exposures by expIds, which are in the second list `exposureIdList`. 

562 sortedExposures = sorted(zip(exposureList, exposureIdList), key=lambda pair: pair[1]) 

563 

564 for jPair, expTuple in enumerate(sortedExposures): 

565 if (jPair + 1) % 2: 

566 kPair = jPair // 2 

567 listAtExpId = flatsAtExpId.setdefault(kPair, []) 

568 try: 

569 listAtExpId.append(expTuple) 

570 listAtExpId.append(sortedExposures[jPair + 1]) 

571 except IndexError: 

572 pass 

573 

574 return flatsAtExpId 

575 

576 

class CovFastFourierTransform:
    """A class to compute (via FFT) the correlation function of
    nearby pixels.

    Implements appendix of Astier+19.

    Parameters
    ----------
    diff : `numpy.array`
        Image where to calculate the covariances (e.g., the difference
        image of two flats).

    w : `numpy.array`
        Weight image (mask): it should consist of 1's (good pixel) and
        0's (bad pixels).

    fftShape : `tuple`
        2d-tuple with the shape of the FFT.

    maxRangeCov : `int`
        Maximum range for the covariances.
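
    Examples
    --------
    A minimal sketch on white noise with all pixels unmasked; the
    padded FFT shape must exceed the image size plus the requested
    range:

    >>> import numpy as np
    >>> rng = np.random.RandomState(42)
    >>> diff = rng.normal(0.0, 1.0, (64, 64))
    >>> w = np.ones_like(diff)
    >>> covFft = CovFastFourierTransform(diff, w, (138, 138), maxRangeCov=8)
    >>> var, nPix = covFft.cov(0, 0)
    >>> nPix
    4096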

597 """ 

598 

599 def __init__(self, diff, w, fftShape, maxRangeCov): 

600 # check that the zero padding implied by "fft_shape" 

601 # is large enough for the required correlation range 

602 assert(fftShape[0] > diff.shape[0]+maxRangeCov+1) 

603 assert(fftShape[1] > diff.shape[1]+maxRangeCov+1) 

604 # for some reason related to numpy.fft.rfftn, 

605 # the second dimension should be even, so 

606 if fftShape[1]%2 == 1: 

607 fftShape = (fftShape[0], fftShape[1]+1) 

608 tIm = np.fft.rfft2(diff*w, fftShape) 

609 tMask = np.fft.rfft2(w, fftShape) 

610 # sum of "squares" 

611 self.pCov = np.fft.irfft2(tIm*tIm.conjugate()) 

612 # sum of values 

613 self.pMean = np.fft.irfft2(tIm*tMask.conjugate()) 

614 # number of w!=0 pixels. 

615 self.pCount = np.fft.irfft2(tMask*tMask.conjugate()) 

616 

    def cov(self, dx, dy):
        """Covariance for (dx, dy), averaged with (dx, -dy) if both
        are non-zero.

        Implements appendix of Astier+19.

        Parameters
        ----------
        dx : `int`
            Lag in x.

        dy : `int`
            Lag in y.

        Returns
        -------
        0.5*(cov1+cov2) : `float`
            Covariance at (dx, dy) lag.

        npix1+npix2 : `int`
            Number of pixels used in covariance calculation.
        """
        # Compensate for rounding errors.
        nPix1 = int(round(self.pCount[dy, dx]))
        cov1 = self.pCov[dy, dx]/nPix1 - self.pMean[dy, dx]*self.pMean[-dy, -dx]/(nPix1*nPix1)
        if (dx == 0 or dy == 0):
            return cov1, nPix1
        nPix2 = int(round(self.pCount[-dy, dx]))
        cov2 = self.pCov[-dy, dx]/nPix2 - self.pMean[-dy, dx]*self.pMean[dy, -dx]/(nPix2*nPix2)
        return 0.5*(cov1 + cov2), nPix1 + nPix2

    def reportCovFastFourierTransform(self, maxRange):
        """Produce a list of tuples with covariances.

        Implements appendix of Astier+19.

        Parameters
        ----------
        maxRange : `int`
            Maximum range of covariances.

        Returns
        -------
        tupleVec : `list`
            List of (dx, dy, var, cov, npix) covariance tuples.
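
        Examples
        --------
        A minimal sketch; (maxRange+1)**2 lags are reported:

        >>> import numpy as np
        >>> diff = np.random.RandomState(0).normal(size=(64, 64))
        >>> covFft = CovFastFourierTransform(diff, np.ones_like(diff), (138, 138), 8)
        >>> len(covFft.reportCovFastFourierTransform(8))
        81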

661 """ 

662 tupleVec = [] 

663 # (dy,dx) = (0,0) has to be first 

664 for dy in range(maxRange+1): 

665 for dx in range(maxRange+1): 

666 cov, npix = self.cov(dx, dy) 

667 if (dx == 0 and dy == 0): 

668 var = cov 

669 tupleVec.append((dx, dy, var, cov, npix)) 

670 return tupleVec 

671 

672 

def getFitDataFromCovariances(i, j, mu, fullCov, fullCovModel, fullCovSqrtWeights, gain=1.0,
                              divideByMu=False, returnMasked=False):
    """Get measured signal and covariance, cov model, weights, and mask at
    covariance lag (i, j).

    Parameters
    ----------
    i : `int`
        Lag for covariance matrix.

    j : `int`
        Lag for covariance matrix.

    mu : `list`
        Mean signal values.

    fullCov : `list` of `numpy.array`
        Measured covariance matrices at each mean signal level in mu.

    fullCovModel : `list` of `numpy.array`
        List of modeled covariances at each mean signal level in mu.

    fullCovSqrtWeights : `list` of `numpy.array`
        List of square-root weights for the measured covariances at
        each mean signal level in mu.

    gain : `float`, optional
        Gain, in e-/ADU. If other than 1.0 (default), the returned
        quantities will be in electrons or powers of electrons.

    divideByMu : `bool`, optional
        Divide returned covariance, model, and weights by the mean
        signal mu?

    returnMasked : `bool`, optional
        Use mask (based on weights) in returned arrays (mu,
        covariance, and model)?

    Returns
    -------
    mu : `numpy.array`
        List of signal values at (i, j).

    covariance : `numpy.array`
        Covariance at (i, j) at each mean signal mu value (fullCov[:, i, j]).

    covarianceModel : `numpy.array`
        Covariance model at (i, j).

    weights : `numpy.array`
        Weights at (i, j).

    maskFromWeights : `numpy.array`, optional
        Boolean mask of the covariance at (i, j), where the weights
        differ from 0.
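
    Examples
    --------
    A minimal sketch with two made-up 3x3 covariance matrices and
    unit weights:

    >>> import numpy as np
    >>> mu = [1000.0, 2000.0]
    >>> fullCov = [np.full((3, 3), 1.0), np.full((3, 3), 2.0)]
    >>> fullCovModel = [np.full((3, 3), 1.1), np.full((3, 3), 2.1)]
    >>> sqrtW = [np.ones((3, 3)), np.ones((3, 3))]
    >>> muOut, cov, covModel, w, mask = getFitDataFromCovariances(
    ...     0, 0, mu, fullCov, fullCovModel, sqrtW)
    >>> cov
    array([1., 2.])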

728 """ 

729 mu = np.array(mu) 

730 fullCov = np.array(fullCov) 

731 fullCovModel = np.array(fullCovModel) 

732 fullCovSqrtWeights = np.array(fullCovSqrtWeights) 

733 covariance = fullCov[:, i, j]*(gain**2) 

734 covarianceModel = fullCovModel[:, i, j]*(gain**2) 

735 weights = fullCovSqrtWeights[:, i, j]/(gain**2) 

736 

737 maskFromWeights = weights != 0 

738 if returnMasked: 

739 weights = weights[maskFromWeights] 

740 covarianceModel = covarianceModel[maskFromWeights] 

741 mu = mu[maskFromWeights] 

742 covariance = covariance[maskFromWeights] 

743 

744 if divideByMu: 

745 covariance /= mu 

746 covarianceModel /= mu 

747 weights *= mu 

748 return mu, covariance, covarianceModel, weights, maskFromWeights 

749 

750 

def symmetrize(inputArray):
    """Copy array over 4 quadrants prior to convolution.

    Parameters
    ----------
    inputArray : `numpy.array`
        Input array to symmetrize.

    Returns
    -------
    aSym : `numpy.array`
        Symmetrized array.
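
    Examples
    --------
    The (0, 0) element of the input becomes the center of the
    symmetrized array:

    >>> import numpy as np
    >>> symmetrize(np.array([[1.0, 2.0], [3.0, 4.0]]))
    array([[4., 3., 4.],
           [2., 1., 2.],
           [4., 3., 4.]])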

763 """ 

764 targetShape = list(inputArray.shape) 

765 r1, r2 = inputArray.shape[-1], inputArray.shape[-2] 

766 targetShape[-1] = 2*r1-1 

767 targetShape[-2] = 2*r2-1 

768 aSym = np.ndarray(tuple(targetShape)) 

769 aSym[..., r2-1:, r1-1:] = inputArray 

770 aSym[..., r2-1:, r1-1::-1] = inputArray 

771 aSym[..., r2-1::-1, r1-1::-1] = inputArray 

772 aSym[..., r2-1::-1, r1-1:] = inputArray 

773 

774 return aSym 

775 

776 

def ddict2dict(d):
    """Convert nested default dictionaries to regular dictionaries.

    This is needed to prevent yaml persistence issues.

    Parameters
    ----------
    d : `defaultdict`
        A possibly nested set of `defaultdict`.

    Returns
    -------
    dict : `dict`
        A possibly nested set of `dict`.
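
    Examples
    --------
    A minimal sketch with one level of nesting:

    >>> from collections import defaultdict
    >>> d = defaultdict(lambda: defaultdict(int))
    >>> d['a']['b'] = 1
    >>> ddict2dict(d)
    {'a': {'b': 1}}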

791 """ 

792 for k, v in d.items(): 

793 if isinstance(v, dict): 

794 d[k] = ddict2dict(v) 

795 return dict(d)