Coverage for python/lsst/cp/pipe/ptc/cpExtractPtcTask.py: 12%

238 statements  

coverage.py v6.5.0, created at 2023-03-03 02:35 -0800

# This file is part of cp_pipe.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
import numpy as np

import lsst.afw.math as afwMath
import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
from lsst.cp.pipe.utils import (arrangeFlatsByExpTime, arrangeFlatsByExpId,
                                arrangeFlatsByExpFlux, sigmaClipCorrection,
                                CovFastFourierTransform)

import lsst.pipe.base.connectionTypes as cT

from lsst.ip.isr import PhotonTransferCurveDataset
from lsst.ip.isr import IsrTask

__all__ = ['PhotonTransferCurveExtractConfig', 'PhotonTransferCurveExtractTask']



class PhotonTransferCurveExtractConnections(pipeBase.PipelineTaskConnections,
                                            dimensions=("instrument", "detector")):

    inputExp = cT.Input(
        name="ptcInputExposurePairs",
        doc="Input post-ISR processed exposure pairs (flats) to"
            " measure covariances from.",
        storageClass="Exposure",
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
        deferLoad=True,
    )
    taskMetadata = cT.Input(
        name="isr_metadata",
        doc="Input task metadata to extract statistics from.",
        storageClass="TaskMetadata",
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
    )
    outputCovariances = cT.Output(
        name="ptcCovariances",
        doc="Extracted flat (co)variances.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "exposure", "detector"),
        isCalibration=True,
        multiple=True,
    )



class PhotonTransferCurveExtractConfig(pipeBase.PipelineTaskConfig,
                                       pipelineConnections=PhotonTransferCurveExtractConnections):
    """Configuration for the measurement of covariances from flats.
    """
    matchExposuresType = pexConfig.ChoiceField(
        dtype=str,
        doc="Match input exposures by time, flux, or expId",
        default='TIME',
        allowed={
            "TIME": "Match exposures by exposure time.",
            "FLUX": "Match exposures by target flux. Use header keyword"
                    " in matchExposuresByFluxKeyword to find the flux.",
            "EXPID": "Match exposures by exposure ID."
        }
    )
    matchExposuresByFluxKeyword = pexConfig.Field(
        dtype=str,
        doc="Header keyword for flux if matchExposuresType is FLUX.",
        default='CCOBFLUX',
    )
    maximumRangeCovariancesAstier = pexConfig.Field(
        dtype=int,
        doc="Maximum range of covariances as in Astier+19",
        default=8,
    )
    binSize = pexConfig.Field(
        dtype=int,
        doc="Bin the image by this factor in both dimensions.",
        default=1,
    )
    minMeanSignal = pexConfig.DictField(
        keytype=str,
        itemtype=float,
        doc="Minimum values (inclusive) of mean signal (in ADU) per amp to use."
            " The same cut is applied to all amps if this parameter [`dict`] is passed as"
            " {'ALL_AMPS': value}",
        default={'ALL_AMPS': 0.0},
    )
    maxMeanSignal = pexConfig.DictField(
        keytype=str,
        itemtype=float,
        doc="Maximum values (inclusive) of mean signal (in ADU) below which to consider, per amp."
            " The same cut is applied to all amps if this dictionary is of the form"
            " {'ALL_AMPS': value}",
        default={'ALL_AMPS': 1e6},
    )
    maskNameList = pexConfig.ListField(
        dtype=str,
        doc="Mask list to exclude from statistics calculations.",
        default=['SUSPECT', 'BAD', 'NO_DATA', 'SAT'],
    )
    nSigmaClipPtc = pexConfig.Field(
        dtype=float,
        doc="Sigma cut for afwMath.StatisticsControl()",
        default=5.5,
    )
    nIterSigmaClipPtc = pexConfig.Field(
        dtype=int,
        doc="Number of sigma-clipping iterations for afwMath.StatisticsControl()",
        default=3,
    )
    minNumberGoodPixelsForCovariance = pexConfig.Field(
        dtype=int,
        doc="Minimum number of acceptable good pixels per amp to calculate the covariances (via FFT or"
            " directly).",
        default=10000,
    )
    thresholdDiffAfwVarVsCov00 = pexConfig.Field(
        dtype=float,
        doc="If the absolute fractional difference between afwMath.VARIANCECLIP and Cov00 "
            "for a region of a difference image is greater than this threshold (percentage), "
            "a warning will be issued.",
        default=1.,
    )
    detectorMeasurementRegion = pexConfig.ChoiceField(
        dtype=str,
        doc="Region of each exposure where to perform the calculations (amplifier or full image).",
        default='AMP',
        allowed={
            "AMP": "Amplifier of the detector.",
            "FULL": "Full image."
        }
    )
    numEdgeSuspect = pexConfig.Field(
        dtype=int,
        doc="Number of edge pixels to be flagged as untrustworthy.",
        default=0,
    )
    edgeMaskLevel = pexConfig.ChoiceField(
        dtype=str,
        doc="Mask edge pixels in which coordinate frame: DETECTOR or AMP?",
        default="DETECTOR",
        allowed={
            'DETECTOR': 'Mask only the edges of the full detector.',
            'AMP': 'Mask edges of each amplifier.',
        },
    )
    doGain = pexConfig.Field(
        dtype=bool,
        doc="Calculate a gain per input flat pair.",
        default=True,
    )
    gainCorrectionType = pexConfig.ChoiceField(
        dtype=str,
        doc="Correction type for the gain.",
        default='FULL',
        allowed={
            'NONE': 'No correction.',
            'SIMPLE': 'First order correction.',
            'FULL': 'Second order correction.'
        }
    )
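    # The fields above are ordinary pexConfig fields, so they can be tuned
    # with standard config overrides. A minimal, illustrative sketch
    # (values are examples only; the exact override mechanism depends on
    # how the pipeline is invoked):
    #
    #     config.matchExposuresType = "FLUX"
    #     config.matchExposuresByFluxKeyword = "CCOBFLUX"
    #     config.maximumRangeCovariancesAstier = 8
    #     config.maskNameList = ["SUSPECT", "BAD", "NO_DATA", "SAT"]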



class PhotonTransferCurveExtractTask(pipeBase.PipelineTask):
    """Task to measure covariances from flat fields.

    This task receives as input a list of flat-field images
    (flats), and sorts these flats into pairs taken at the
    same exposure time (the task will raise an exception if only
    one flat is found at a given exposure time, and it will discard
    extra flats if there are more than two per exposure time). This
    task measures the mean, variance, and covariances from a region
    (e.g., an amplifier) of the difference image of the two flats
    with the same exposure time (alternatively, all input images
    could have the same exposure time but different fluxes).

    The variance is calculated via afwMath, and the covariance
    via the methods in Astier+19 (appendix A). In theory,
    var = covariance[0, 0]. This should be validated, and in the
    future, we may decide to just keep one (covariance).
    At this moment, if the two values differ by more than the value
    of `thresholdDiffAfwVarVsCov00` (default: 1%), a warning will
    be issued.

    The measured covariances at a given exposure time (along with
    other quantities such as the mean) are stored in a PTC dataset
    object (`~lsst.ip.isr.PhotonTransferCurveDataset`), which gets
    partially filled at this stage (the remainder of the attributes
    of the dataset will be filled after running the second task of
    the PTC-measurement pipeline, `~PhotonTransferCurveSolveTask`).

    The number of partially-filled
    `~lsst.ip.isr.PhotonTransferCurveDataset` objects will be less
    than the number of input exposures because the task combines
    input flats in pairs. However, it is currently required that the
    number of input dimensions matches bijectively the number of
    output dimensions. Therefore, a number of "dummy" PTC datasets
    are inserted in the output list. This output list will then be
    used as input of the next task in the PTC-measurement pipeline,
    `PhotonTransferCurveSolveTask`, which will assemble the multiple
    `PhotonTransferCurveDataset` objects into a single one in order
    to fit the measured covariances as a function of flux to one of
    three models (see `PhotonTransferCurveSolveTask` for details).

    Reference: Astier+19: "The Shape of the Photon Transfer Curve of CCD
    sensors", arXiv:1905.08677.
    """


    ConfigClass = PhotonTransferCurveExtractConfig
    _DefaultName = 'cpPtcExtract'

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        """Ensure that the input and output dimensions are passed along.

        Parameters
        ----------
        butlerQC : `~lsst.daf.butler.butlerQuantumContext.ButlerQuantumContext`
            Butler to operate on.
        inputRefs : `~lsst.pipe.base.connections.InputQuantizedConnection`
            Input data refs to load.
        outputRefs : `~lsst.pipe.base.connections.OutputQuantizedConnection`
            Output data refs to persist.
        """
        inputs = butlerQC.get(inputRefs)
        # IDs of the input list of exposure references
        # (deferLoad=True in the input connections).
        inputs['inputDims'] = [expRef.datasetRef.dataId['exposure'] for expRef in inputRefs.inputExp]

        # Dictionary, keyed by expTime (or expFlux or expId), with tuples
        # containing flat exposures and their IDs.
        matchType = self.config.matchExposuresType
        if matchType == 'TIME':
            inputs['inputExp'] = arrangeFlatsByExpTime(inputs['inputExp'], inputs['inputDims'])
        elif matchType == 'FLUX':
            inputs['inputExp'] = arrangeFlatsByExpFlux(inputs['inputExp'], inputs['inputDims'],
                                                       self.config.matchExposuresByFluxKeyword)
        else:
            inputs['inputExp'] = arrangeFlatsByExpId(inputs['inputExp'], inputs['inputDims'])
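        # A hypothetical example of the arrangement produced above when
        # matching by TIME (keys and IDs are illustrative only):
        #
        #     {0.5: [(ref_a, 123), (ref_b, 124)],
        #      1.0: [(ref_c, 125), (ref_d, 126)]}
        #
        # i.e. a dict keyed by exposure time whose values are lists of
        # (deferred exposure reference, exposure ID) tuples; this is the
        # structure that `run` unpacks below.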


        outputs = self.run(**inputs)
        butlerQC.put(outputs, outputRefs)

    def run(self, inputExp, inputDims, taskMetadata):
        """Measure covariances from the difference of flat pairs.

        Parameters
        ----------
        inputExp : `dict` [`float`, `list`
                   [`~lsst.pipe.base.connections.DeferredDatasetRef`]]
            Dictionary that groups references to flat-field exposures that
            have the same exposure time (seconds), or that groups them
            sequentially by their exposure id.
        inputDims : `list`
            List of exposure IDs.
        taskMetadata : `list` [`lsst.pipe.base.TaskMetadata`]
            List of exposure metadata from ISR.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The resulting Struct contains:

            ``outputCovariances``
                A list containing the per-pair PTC measurements (`list`
                [`lsst.ip.isr.PhotonTransferCurveDataset`]).
        """
        # inputExp.values() returns a view, which we turn into a list. We then
        # access the first (exposure reference, exposure ID) tuple to get the
        # detector; get(component='detector') reads only that component from
        # the deferred exposure reference.
        detector = list(inputExp.values())[0][0][0].get(component='detector')
        detNum = detector.getId()
        amps = detector.getAmplifiers()
        ampNames = [amp.getName() for amp in amps]

        # Each amp may have a different min and max ADU signal
        # specified in the config.
        maxMeanSignalDict = {ampName: 1e6 for ampName in ampNames}
        minMeanSignalDict = {ampName: 0.0 for ampName in ampNames}
        for ampName in ampNames:
            if 'ALL_AMPS' in self.config.maxMeanSignal:
                maxMeanSignalDict[ampName] = self.config.maxMeanSignal['ALL_AMPS']
            elif ampName in self.config.maxMeanSignal:
                maxMeanSignalDict[ampName] = self.config.maxMeanSignal[ampName]

            if 'ALL_AMPS' in self.config.minMeanSignal:
                minMeanSignalDict[ampName] = self.config.minMeanSignal['ALL_AMPS']
            elif ampName in self.config.minMeanSignal:
                minMeanSignalDict[ampName] = self.config.minMeanSignal[ampName]
        # These are the column names for `tupleRows` below.
        tags = [('mu', '<f8'), ('afwVar', '<f8'), ('i', '<i8'), ('j', '<i8'), ('var', '<f8'),
                ('cov', '<f8'), ('npix', '<i8'), ('ext', '<i8'), ('expTime', '<f8'), ('ampName', '<U3')]
        # Create a dummy ptcDataset. Dummy datasets will be
        # used to ensure that the number of output and input
        # dimensions match.
        dummyPtcDataset = PhotonTransferCurveDataset(ampNames, 'DUMMY',
                                                     self.config.maximumRangeCovariancesAstier)

        readNoiseDict = {ampName: 0.0 for ampName in ampNames}
        for ampName in ampNames:
            # Initialize amps of `dummyPtcDataset`.
            dummyPtcDataset.setAmpValues(ampName)
            # Overscan readnoise from post-ISR exposure metadata.
            # It will be used to estimate the gain from a pair of flats.
            readNoiseDict[ampName] = self.getReadNoiseFromMetadata(taskMetadata, ampName)

        # Output list with PTC datasets.
        partialPtcDatasetList = []
        # The number of output references needs to match that of input
        # references: initialize the output list with dummy PTC datasets.
        for i in range(len(inputDims)):
            partialPtcDatasetList.append(dummyPtcDataset)

        if self.config.numEdgeSuspect > 0:
            isrTask = IsrTask()
            self.log.info("Masking %d pixels from the edges of all %ss as SUSPECT.",
                          self.config.numEdgeSuspect, self.config.edgeMaskLevel)

        # Depending on the value of config.matchExposuresType,
        # 'expTime' can stand for exposure time, flux, or ID.
        for expTime in inputExp:
            exposures = inputExp[expTime]
            if len(exposures) == 1:
                self.log.warning("Only one exposure found at %s %f. Dropping exposure %d.",
                                 self.config.matchExposuresType, expTime, exposures[0][1])
                continue
            else:
                # Only use the first two exposures at expTime. Each
                # element is a tuple of (exposure reference, exposure ID).
                expRef1, expId1 = exposures[0]
                expRef2, expId2 = exposures[1]
                # Use get() to obtain `lsst.afw.image.Exposure`.
                exp1, exp2 = expRef1.get(), expRef2.get()

                if len(exposures) > 2:
                    self.log.warning("Already found 2 exposures at %s %f. Ignoring exposures: %s",
                                     self.config.matchExposuresType, expTime,
                                     ", ".join(str(i[1]) for i in exposures[2:]))
            # Mask pixels at the edge of the detector or of each amp.
            if self.config.numEdgeSuspect > 0:
                isrTask.maskEdges(exp1, numEdgePixels=self.config.numEdgeSuspect,
                                  maskPlane="SUSPECT", level=self.config.edgeMaskLevel)
                isrTask.maskEdges(exp2, numEdgePixels=self.config.numEdgeSuspect,
                                  maskPlane="SUSPECT", level=self.config.edgeMaskLevel)

            nAmpsNan = 0
            partialPtcDataset = PhotonTransferCurveDataset(ampNames, 'PARTIAL',
                                                           self.config.maximumRangeCovariancesAstier)
            for ampNumber, amp in enumerate(detector):
                ampName = amp.getName()
                if self.config.detectorMeasurementRegion == 'AMP':
                    region = amp.getBBox()
                elif self.config.detectorMeasurementRegion == 'FULL':
                    region = None

                # Get masked image regions, masking planes, statistic control
                # objects, and clipped means. Calculate once to reuse in
                # `measureMeanVarCov` and `getGainFromFlatPair`.
                im1Area, im2Area, imStatsCtrl, mu1, mu2 = self.getImageAreasMasksStats(exp1, exp2,
                                                                                       region=region)

                # `measureMeanVarCov` is the function that measures
                # the variance and covariances from a region of
                # the difference image of two flats at the same
                # exposure time. The variable `covAstier` that is
                # returned is of the form:
                # [(i, j, var (cov[0,0]), cov, npix) for (i,j) in
                # {maxLag, maxLag}^2].
                muDiff, varDiff, covAstier = self.measureMeanVarCov(im1Area, im2Area, imStatsCtrl, mu1, mu2)
                # Estimate the gain from the flat pair.
                if self.config.doGain:
                    gain = self.getGainFromFlatPair(im1Area, im2Area, imStatsCtrl, mu1, mu2,
                                                    correctionType=self.config.gainCorrectionType,
                                                    readNoise=readNoiseDict[ampName])
                else:
                    gain = np.nan

                # Correction factor for bias introduced by sigma
                # clipping.
                # Function returns 1/sqrt(varFactor), so it needs
                # to be squared. varDiff is calculated via
                # afwMath.VARIANCECLIP.
                varFactor = sigmaClipCorrection(self.config.nSigmaClipPtc)**2
                varDiff *= varFactor

                expIdMask = True
                # Mask data point at this mean signal level if
                # the signal, variance, or covariance calculations
                # from `measureMeanVarCov` resulted in NaNs.
                if np.isnan(muDiff) or np.isnan(varDiff) or (covAstier is None):
                    self.log.warning("NaN mean or var, or None cov in amp %s in exposure pair %d, %d of "
                                     "detector %d.", ampName, expId1, expId2, detNum)
                    nAmpsNan += 1
                    expIdMask = False
                    covArray = np.full((1, self.config.maximumRangeCovariancesAstier,
                                        self.config.maximumRangeCovariancesAstier), np.nan)
                    covSqrtWeights = np.full_like(covArray, np.nan)

                # Mask data point if it is outside of the
                # specified mean signal range.
                if (muDiff <= minMeanSignalDict[ampName]) or (muDiff >= maxMeanSignalDict[ampName]):
                    expIdMask = False

                if covAstier is not None:
                    # Turn the tuples with the measured information
                    # into covariance arrays.
                    # covRow: (i, j, var (cov[0,0]), cov, npix)
                    tupleRows = [(muDiff, varDiff) + covRow + (ampNumber, expTime,
                                                               ampName) for covRow in covAstier]
                    tempStructArray = np.array(tupleRows, dtype=tags)
                    covArray, vcov, _ = self.makeCovArray(tempStructArray,
                                                          self.config.maximumRangeCovariancesAstier)
                    covSqrtWeights = np.nan_to_num(1./np.sqrt(vcov))

                    # Correct covArray for sigma clipping:
                    # 1) Apply varFactor twice for the whole covariance matrix.
                    covArray *= varFactor**2
                    # 2) But, only once for the variance element of the
                    # matrix, covArray[0, 0] (so divide one factor out).
                    covArray[0, 0] /= varFactor
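                    # Net effect of the two steps above, written in terms of
                    # the single-variance correction varFactor applied to
                    # varDiff earlier:
                    #
                    #     covArray[0, 0] -> covArray[0, 0]*varFactor
                    #     covArray[i, j] -> covArray[i, j]*varFactor**2, (i, j) != (0, 0)
                    #
                    # so the variance element stays consistent with the
                    # sigma-clip-corrected varDiff.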


                partialPtcDataset.setAmpValues(ampName, rawExpTime=[expTime], rawMean=[muDiff],
                                               rawVar=[varDiff], inputExpIdPair=[(expId1, expId2)],
                                               expIdMask=[expIdMask], covArray=covArray,
                                               covSqrtWeights=covSqrtWeights, gain=gain,
                                               noise=readNoiseDict[ampName])
            # Use the location of exp1 to save the PTC dataset from the
            # (exp1, exp2) pair. Below, np.where(expId1 == np.array(inputDims))
            # returns a tuple with a single-element array, so [0][0]
            # is necessary to extract the required index.
            datasetIndex = np.where(expId1 == np.array(inputDims))[0][0]
            # `partialPtcDatasetList` is a list of
            # `PhotonTransferCurveDataset` objects. Some of them
            # will be dummy datasets (to match the length of the
            # input and output references), and the rest will have
            # datasets with the mean signal, variance, and
            # covariance measurements at a given exposure
            # time. The next part of the PTC-measurement
            # pipeline, `solve`, will take this list as input,
            # and assemble the measurements in the datasets
            # in an adequate manner for fitting a PTC
            # model.
            partialPtcDataset.updateMetadataFromExposures([exp1, exp2])
            partialPtcDataset.updateMetadata(setDate=True, detector=detector)
            partialPtcDatasetList[datasetIndex] = partialPtcDataset

            if nAmpsNan == len(ampNames):
                msg = f"NaN mean in all amps of exposure pair {expId1}, {expId2} of detector {detNum}."
                self.log.warning(msg)

        return pipeBase.Struct(
            outputCovariances=partialPtcDatasetList,
        )

    def makeCovArray(self, inputTuple, maxRangeFromTuple):
        """Make covariances array from tuple.

        Parameters
        ----------
        inputTuple : `numpy.ndarray`
            Structured array with rows with at least
            (mu, afwVar, cov, var, i, j, npix), where:
            mu : `float`
                0.5*(mu1 + mu2), where mu1 is the mean value of flat1
                and mu2 is the mean value of flat2.
            afwVar : `float`
                Variance of difference flat, calculated with afw.
            cov : `float`
                Covariance value at lag (i, j).
            var : `float`
                Variance (covariance value at lag (0, 0)).
            i : `int`
                Lag in dimension "x".
            j : `int`
                Lag in dimension "y".
            npix : `int`
                Number of pixels used for covariance calculation.
        maxRangeFromTuple : `int`
            Maximum range to select from tuple.

        Returns
        -------
        cov : `numpy.array`
            Covariance arrays, indexed by mean signal mu.
        vCov : `numpy.array`
            Variance of the [co]variance arrays, indexed by mean signal mu.
        muVals : `numpy.array`
            List of mean signal values.
        """
        if maxRangeFromTuple is not None:
            cut = (inputTuple['i'] < maxRangeFromTuple) & (inputTuple['j'] < maxRangeFromTuple)
            cutTuple = inputTuple[cut]
        else:
            cutTuple = inputTuple
        # Sort in increasing mu order, so that we can group measurements
        # with the same mu.
        muTemp = cutTuple['mu']
        ind = np.argsort(muTemp)

        cutTuple = cutTuple[ind]
        # This should group measurements from the same image pair
        # (same average).
        mu = cutTuple['mu']
        xx = np.hstack(([mu[0]], mu))
        delta = xx[1:] - xx[:-1]
        steps, = np.where(delta > 0)
        ind = np.zeros_like(mu, dtype=int)
        ind[steps] = 1
        ind = np.cumsum(ind)  # this acts as an image pair index.
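        # Worked example of the pair-index construction above (illustrative
        # values only): for mu = [5., 5., 5., 12., 12.],
        #
        #     delta = [0., 0., 0., 7., 0.]  ->  steps = [3]
        #     ind   = [0, 0, 0, 1, 1]
        #
        # so rows sharing the same mean signal are assigned the same index
        # into the covariance cube filled below.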

        # Now fill the 3-d cov array (and variance).
        muVals = np.array(np.unique(mu))
        i = cutTuple['i'].astype(int)
        j = cutTuple['j'].astype(int)
        c = 0.5*cutTuple['cov']
        n = cutTuple['npix']
        v = 0.5*cutTuple['var']
        # Book and fill.
        cov = np.ndarray((len(muVals), np.max(i)+1, np.max(j)+1))
        var = np.zeros_like(cov)
        cov[ind, i, j] = c
        var[ind, i, j] = v**2/n
        var[:, 0, 0] *= 2  # var(v) = 2*v**2/N

        return cov, var, muVals

    def measureMeanVarCov(self, im1Area, im2Area, imStatsCtrl, mu1, mu2):
        """Calculate the mean of each of two exposures and the variance
        and covariance of their difference. The variance is calculated
        via afwMath, and the covariance via the methods in Astier+19
        (appendix A). In theory, var = covariance[0, 0]. This should
        be validated, and in the future, we may decide to just keep
        one (covariance).

        Parameters
        ----------
        im1Area : `lsst.afw.image.maskedImage.MaskedImageF`
            Masked image from exposure 1.
        im2Area : `lsst.afw.image.maskedImage.MaskedImageF`
            Masked image from exposure 2.
        imStatsCtrl : `lsst.afw.math.StatisticsControl`
            Statistics control object.
        mu1 : `float`
            Clipped mean of im1Area (ADU).
        mu2 : `float`
            Clipped mean of im2Area (ADU).

        Returns
        -------
        mu : `float` or `NaN`
            0.5*(mu1 + mu2), where mu1 and mu2 are the clipped means
            of the regions in both exposures. If either mu1 or mu2 is
            NaN, the returned value is NaN.
        varDiff : `float` or `NaN`
            Half of the clipped variance of the difference of the
            regions in the two input exposures. If either mu1 or mu2
            is NaN, the returned value is NaN.
        covDiffAstier : `list` or `NaN`
            List with tuples of the form (dx, dy, var, cov, npix), where:
            dx : `int`
                Lag in x.
            dy : `int`
                Lag in y.
            var : `float`
                Variance at (dx, dy).
            cov : `float`
                Covariance at (dx, dy).
            nPix : `int`
                Number of pixel pairs used to evaluate var and cov.

            If either mu1 or mu2 is NaN, the returned value is NaN.
        """
        if np.isnan(mu1) or np.isnan(mu2):
            self.log.warning("Mean of amp in image 1 or 2 is NaN: %f, %f.", mu1, mu2)
            return np.nan, np.nan, None
        mu = 0.5*(mu1 + mu2)

        # Take the difference of the pair using the symmetric formula:
        # diff = (mu2*im1 - mu1*im2)/(0.5*(mu1 + mu2))
        temp = im2Area.clone()
        temp *= mu1
        diffIm = im1Area.clone()
        diffIm *= mu2
        diffIm -= temp
        diffIm /= mu

        if self.config.binSize > 1:
            diffIm = afwMath.binImage(diffIm, self.config.binSize)

        # Variance calculation via afwMath.
        varDiff = 0.5*(afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, imStatsCtrl).getValue())

        # Covariance calculations.
        # Get the pixels that were not clipped.
        varClip = afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, imStatsCtrl).getValue()
        meanClip = afwMath.makeStatistics(diffIm, afwMath.MEANCLIP, imStatsCtrl).getValue()
        cut = meanClip + self.config.nSigmaClipPtc*np.sqrt(varClip)
        unmasked = np.where(np.fabs(diffIm.image.array) <= cut, 1, 0)

        # Get the pixels in the mask planes of the difference image
        # that were ignored by the clipping algorithm.
        wDiff = np.where(diffIm.getMask().getArray() == 0, 1, 0)
        # Combine the two sets of pixels ('1': use; '0': don't use)
        # into a final weight matrix to be used in the covariance
        # calculations below.
        w = unmasked*wDiff

        if np.sum(w) < self.config.minNumberGoodPixelsForCovariance/(self.config.binSize**2):
            self.log.warning("Number of good points for covariance calculation (%s) is less "
                             "than the threshold (%s).", np.sum(w),
                             self.config.minNumberGoodPixelsForCovariance/(self.config.binSize**2))
            return np.nan, np.nan, None

        maxRangeCov = self.config.maximumRangeCovariancesAstier

        # Calculate covariances via FFT.
        shapeDiff = np.array(diffIm.image.array.shape)
        # Calculate the sizes of the FFT dimensions.
        s = shapeDiff + maxRangeCov
        tempSize = np.array(np.log(s)/np.log(2.)).astype(int)
        fftSize = np.array(2**(tempSize+1)).astype(int)
        fftShape = (fftSize[0], fftSize[1])
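        # Worked example of the sizing above (shape is illustrative only):
        # for a 2048 x 512 amp region with maxRangeCov = 8, s = (2056, 520),
        # tempSize = (11, 9), and fftShape = (4096, 1024); each dimension is
        # rounded up to a power of two comfortably larger than the padded
        # image, which keeps the FFT-based covariances free of wrap-around
        # at lags up to maxRangeCov.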

        c = CovFastFourierTransform(diffIm.image.array, w, fftShape, maxRangeCov)
        # np.sum(w) is the same as npix[0][0] returned in covDiffAstier.
        covDiffAstier = c.reportCovFastFourierTransform(maxRangeCov)

        # Compare Cov[0, 0] and afwMath.VARIANCECLIP: covDiffAstier[0]
        # is the Cov[0, 0] element, [3] is the variance, and there is a
        # factor of 0.5 difference with afwMath.VARIANCECLIP.
        thresholdPercentage = self.config.thresholdDiffAfwVarVsCov00
        fractionalDiff = 100*np.fabs(1 - varDiff/(covDiffAstier[0][3]*0.5))
        if fractionalDiff >= thresholdPercentage:
            self.log.warning("Absolute fractional difference between afwMath.VARIANCECLIP and Cov[0,0] "
                             "is more than %f%%: %f", thresholdPercentage, fractionalDiff)

        return mu, varDiff, covDiffAstier

    def getImageAreasMasksStats(self, exposure1, exposure2, region=None):
        """Get image areas in a region as well as masks and statistic objects.

        Parameters
        ----------
        exposure1 : `lsst.afw.image.exposure.ExposureF`
            First exposure of flat field pair.
        exposure2 : `lsst.afw.image.exposure.ExposureF`
            Second exposure of flat field pair.
        region : `lsst.geom.Box2I`, optional
            Region of each exposure where to perform the calculations
            (e.g., an amplifier).

        Returns
        -------
        im1Area : `lsst.afw.image.maskedImage.MaskedImageF`
            Masked image from exposure 1.
        im2Area : `lsst.afw.image.maskedImage.MaskedImageF`
            Masked image from exposure 2.
        imStatsCtrl : `lsst.afw.math.StatisticsControl`
            Statistics control object.
        mu1 : `float`
            Clipped mean of im1Area (ADU).
        mu2 : `float`
            Clipped mean of im2Area (ADU).
        """
        if region is not None:
            im1Area = exposure1.maskedImage[region]
            im2Area = exposure2.maskedImage[region]
        else:
            im1Area = exposure1.maskedImage
            im2Area = exposure2.maskedImage

        # Get the mask planes and construct a statistics control object
        # from one of the exposures.
        imMaskVal = exposure1.getMask().getPlaneBitMask(self.config.maskNameList)
        imStatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
                                                self.config.nIterSigmaClipPtc,
                                                imMaskVal)
        imStatsCtrl.setNanSafe(True)
        imStatsCtrl.setAndMask(imMaskVal)

        mu1 = afwMath.makeStatistics(im1Area, afwMath.MEANCLIP, imStatsCtrl).getValue()
        mu2 = afwMath.makeStatistics(im2Area, afwMath.MEANCLIP, imStatsCtrl).getValue()

        return (im1Area, im2Area, imStatsCtrl, mu1, mu2)

    def getGainFromFlatPair(self, im1Area, im2Area, imStatsCtrl, mu1, mu2,
                            correctionType='NONE', readNoise=None):
        """Estimate the gain from a single pair of flats.

        The basic premise is 1/g = <(I1 - I2)^2/(I1 + I2)> = 1/const,
        where I1 and I2 correspond to flats 1 and 2, respectively.
        Corrections for the variable QE and the read-noise are then
        made following the derivation in Robert Lupton's forthcoming
        book, which gets

        1/g = <(I1 - I2)^2/(I1 + I2)> - 1/mu(sigma^2 - 1/2g^2).

        This is a quadratic equation, whose solutions are given by:

        g = (mu +/- sqrt(mu^2 - 2*const*mu + 2*sigma^2))
            / (2*const*mu - 2*sigma^2)

        where 'mu' is the average signal level and 'sigma' is the
        amplifier's readnoise. The positive solution will be used.
        The way the correction is applied depends on the value
        supplied for correctionType.

        correctionType is one of ['NONE', 'SIMPLE', 'FULL']:
        'NONE' : uses the 1/g = <(I1 - I2)^2/(I1 + I2)> formula.
        'SIMPLE' : uses the gain from the 'NONE' method for the
                   1/2g^2 term.
        'FULL' : solves the full equation for g, discarding the
                 non-physical solution to the resulting quadratic.

        Parameters
        ----------
        im1Area : `lsst.afw.image.maskedImage.MaskedImageF`
            Masked image from exposure 1.
        im2Area : `lsst.afw.image.maskedImage.MaskedImageF`
            Masked image from exposure 2.
        imStatsCtrl : `lsst.afw.math.StatisticsControl`
            Statistics control object.
        mu1 : `float`
            Clipped mean of im1Area (ADU).
        mu2 : `float`
            Clipped mean of im2Area (ADU).
        correctionType : `str`, optional
            The correction applied, one of ['NONE', 'SIMPLE', 'FULL'].
        readNoise : `float`, optional
            Amplifier readout noise (ADU).

        Returns
        -------
        gain : `float`
            Gain, in e/ADU.

        Raises
        ------
        RuntimeError
            Raised if `correctionType` is not one of 'NONE',
            'SIMPLE', or 'FULL'.
        """

        if correctionType not in ['NONE', 'SIMPLE', 'FULL']:
            raise RuntimeError("Unknown correction type: %s" % correctionType)

        if correctionType != 'NONE' and not np.isfinite(readNoise):
            self.log.warning("'correctionType' in 'getGainFromFlatPair' is %s, "
                             "but 'readNoise' is NaN. Setting 'correctionType' "
                             "to 'NONE', so a gain value will be estimated without "
                             "corrections." % correctionType)
            correctionType = 'NONE'

        mu = 0.5*(mu1 + mu2)

        # ratioIm = (I1 - I2)^2 / (I1 + I2)
        temp = im2Area.clone()
        ratioIm = im1Area.clone()
        ratioIm -= temp
        ratioIm *= ratioIm

        # Sum of the pair.
        sumIm = im1Area.clone()
        sumIm += temp

        ratioIm /= sumIm

        const = afwMath.makeStatistics(ratioIm, afwMath.MEAN, imStatsCtrl).getValue()
        gain = 1. / const

        if correctionType == 'SIMPLE':
            gain = 1/(const - (1/mu)*(readNoise**2 - (1/2*gain**2)))
        elif correctionType == 'FULL':
            root = np.sqrt(mu**2 - 2*mu*const + 2*readNoise**2)
            denom = (2*const*mu - 2*readNoise**2)
            positiveSolution = (root + mu)/denom
            gain = positiveSolution

        return gain

    def getReadNoiseFromMetadata(self, taskMetadata, ampName):
        """Get readout noise for an amp from ISR metadata.

        Parameters
        ----------
        taskMetadata : `list` [`lsst.pipe.base.TaskMetadata`]
            List of exposure metadata from ISR.
        ampName : `str`
            Amplifier name.

        Returns
        -------
        readNoise : `float`
            Median of the overscan readnoise in the
            post-ISR metadata of the input exposures (ADU).
            Returns NaN if the median could not be calculated.
        """
        # Empirical readout noise [ADU] measured from an
        # overscan-subtracted overscan during ISR.
        expectedKey = f"RESIDUAL STDEV {ampName}"
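        # For example, for a hypothetical amplifier named "C00" this looks up
        # "RESIDUAL STDEV C00" in the per-exposure 'isr' metadata written
        # during instrument signature removal.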


        readNoises = []
        for expMetadata in taskMetadata:
            if 'isr' in expMetadata:
                overscanNoise = expMetadata['isr'][expectedKey]
            else:
                continue
            readNoises.append(overscanNoise)

        if len(readNoises):
            readNoise = np.nanmedian(np.array(readNoises))
        else:
            self.log.warning("Median readout noise from ISR metadata for amp %s "
                             "could not be calculated." % ampName)
            readNoise = np.nan

        return readNoise