Coverage for python/lsst/cp/pipe/ptc/cpExtractPtcTask.py: 14%

234 statements  

« prev     ^ index     » next       coverage.py v6.4.4, created at 2022-08-18 12:43 -0700

1# This file is part of cp_pipe. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (https://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# This program is free software: you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation, either version 3 of the License, or 

12# (at your option) any later version. 

13# 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18# 

19# You should have received a copy of the GNU General Public License 

20# along with this program. If not, see <https://www.gnu.org/licenses/>. 

21# 

22import numpy as np 

23 

24import lsst.afw.math as afwMath 

25import lsst.pex.config as pexConfig 

26import lsst.pipe.base as pipeBase 

27from lsst.cp.pipe.utils import (arrangeFlatsByExpTime, arrangeFlatsByExpId, 

28 sigmaClipCorrection, CovFastFourierTransform) 

29 

30import lsst.pipe.base.connectionTypes as cT 

31 

32from lsst.ip.isr import PhotonTransferCurveDataset 

33from lsst.ip.isr import IsrTask 

34 

35__all__ = ['PhotonTransferCurveExtractConfig', 'PhotonTransferCurveExtractTask'] 

36 

37 

38class PhotonTransferCurveExtractConnections(pipeBase.PipelineTaskConnections, 

39 dimensions=("instrument", "detector")): 

40 

41 inputExp = cT.Input( 

42 name="ptcInputExposurePairs", 

43 doc="Input post-ISR processed exposure pairs (flats) to" 

44 "measure covariances from.", 

45 storageClass="Exposure", 

46 dimensions=("instrument", "exposure", "detector"), 

47 multiple=True, 

48 deferLoad=True, 

49 ) 

50 taskMetadata = cT.Input( 

51 name="isrTask_metadata", 

52 doc="Input task metadata to extract statistics from.", 

53 storageClass="TaskMetadata", 

54 dimensions=("instrument", "exposure", "detector"), 

55 multiple=True, 

56 ) 

57 outputCovariances = cT.Output( 

58 name="ptcCovariances", 

59 doc="Extracted flat (co)variances.", 

60 storageClass="PhotonTransferCurveDataset", 

61 dimensions=("instrument", "exposure", "detector"), 

62 multiple=True, 

63 ) 

64 

65 

66class PhotonTransferCurveExtractConfig(pipeBase.PipelineTaskConfig, 

67 pipelineConnections=PhotonTransferCurveExtractConnections): 

68 """Configuration for the measurement of covariances from flats. 

69 """ 

70 

71 matchByExposureId = pexConfig.Field( 

72 dtype=bool, 

73 doc="Should exposures be matched by ID rather than exposure time?", 

74 default=False, 

75 ) 

76 maximumRangeCovariancesAstier = pexConfig.Field( 

77 dtype=int, 

78 doc="Maximum range of covariances as in Astier+19", 

79 default=8, 

80 ) 

81 binSize = pexConfig.Field( 

82 dtype=int, 

83 doc="Bin the image by this factor in both dimensions.", 

84 default=1, 

85 ) 

86 minMeanSignal = pexConfig.DictField( 

87 keytype=str, 

88 itemtype=float, 

89 doc="Minimum values (inclusive) of mean signal (in ADU) per amp to use." 

90 " The same cut is applied to all amps if this parameter [`dict`] is passed as " 

91 " {'ALL_AMPS': value}", 

92 default={'ALL_AMPS': 0.0}, 

93 ) 

94 maxMeanSignal = pexConfig.DictField( 

95 keytype=str, 

96 itemtype=float, 

97 doc="Maximum values (inclusive) of mean signal (in ADU) below which to consider, per amp." 

98 " The same cut is applied to all amps if this dictionary is of the form" 

99 " {'ALL_AMPS': value}", 

100 default={'ALL_AMPS': 1e6}, 

101 ) 

102 maskNameList = pexConfig.ListField( 

103 dtype=str, 

104 doc="Mask list to exclude from statistics calculations.", 

105 default=['SUSPECT', 'BAD', 'NO_DATA', 'SAT'], 

106 ) 

107 nSigmaClipPtc = pexConfig.Field( 

108 dtype=float, 

109 doc="Sigma cut for afwMath.StatisticsControl()", 

110 default=5.5, 

111 ) 

112 nIterSigmaClipPtc = pexConfig.Field( 

113 dtype=int, 

114 doc="Number of sigma-clipping iterations for afwMath.StatisticsControl()", 

115 default=3, 

116 ) 

117 minNumberGoodPixelsForCovariance = pexConfig.Field( 

118 dtype=int, 

119 doc="Minimum number of acceptable good pixels per amp to calculate the covariances (via FFT or" 

120 " direclty).", 

121 default=10000, 

122 ) 

123 thresholdDiffAfwVarVsCov00 = pexConfig.Field( 

124 dtype=float, 

125 doc="If the absolute fractional differece between afwMath.VARIANCECLIP and Cov00 " 

126 "for a region of a difference image is greater than this threshold (percentage), " 

127 "a warning will be issued.", 

128 default=1., 

129 ) 

130 detectorMeasurementRegion = pexConfig.ChoiceField( 

131 dtype=str, 

132 doc="Region of each exposure where to perform the calculations (amplifier or full image).", 

133 default='AMP', 

134 allowed={ 

135 "AMP": "Amplifier of the detector.", 

136 "FULL": "Full image." 

137 } 

138 ) 

139 numEdgeSuspect = pexConfig.Field( 

140 dtype=int, 

141 doc="Number of edge pixels to be flagged as untrustworthy.", 

142 default=0, 

143 ) 

144 edgeMaskLevel = pexConfig.ChoiceField( 

145 dtype=str, 

146 doc="Mask edge pixels in which coordinate frame: DETECTOR or AMP?", 

147 default="DETECTOR", 

148 allowed={ 

149 'DETECTOR': 'Mask only the edges of the full detector.', 

150 'AMP': 'Mask edges of each amplifier.', 

151 }, 

152 ) 

153 doGain = pexConfig.Field( 

154 dtype=bool, 

155 doc="Calculate a gain per input flat pair.", 

156 default=True, 

157 ) 

158 gainCorrectionType = pexConfig.ChoiceField( 

159 dtype=str, 

160 doc="Correction type for the gain.", 

161 default='FULL', 

162 allowed={ 

163 'NONE': 'No correction.', 

164 'SIMPLE': 'First order correction.', 

165 'FULL': 'Second order correction.' 

166 } 

167 ) 

168 

169 

170class PhotonTransferCurveExtractTask(pipeBase.PipelineTask): 

171 """Task to measure covariances from flat fields. 

172 

173 This task receives as input a list of flat-field images 

174 (flats), and sorts these flats in pairs taken at the 

175 same time (the task will raise if there is one one flat 

176 at a given exposure time, and it will discard extra flats if 

177 there are more than two per exposure time). This task measures 

178 the mean, variance, and covariances from a region (e.g., 

179 an amplifier) of the difference image of the two flats with 

180 the same exposure time. 

181 

182 The variance is calculated via afwMath, and the covariance 

183 via the methods in Astier+19 (appendix A). In theory, 

184 var = covariance[0,0]. This should be validated, and in the 

185 future, we may decide to just keep one (covariance). 

186 At this moment, if the two values differ by more than the value 

187 of `thresholdDiffAfwVarVsCov00` (default: 1%), a warning will 

188 be issued. 

189 

190 The measured covariances at a given exposure time (along with 

191 other quantities such as the mean) are stored in a PTC dataset 

192 object (`~lsst.ip.isr.PhotonTransferCurveDataset`), which gets 

193 partially filled at this stage (the remainder of the attributes 

194 of the dataset will be filled after running the second task of 

195 the PTC-measurement pipeline, `~PhotonTransferCurveSolveTask`). 

196 

197 The number of partially-filled 

198 `~lsst.ip.isr.PhotonTransferCurveDataset` objects will be less 

199 than the number of input exposures because the task combines 

200 input flats in pairs. However, it is required at this moment 

201 that the number of input dimensions matches 

202 bijectively the number of output dimensions. Therefore, a number 

203 of "dummy" PTC datasets are inserted in the output list. This 

204 output list will then be used as input of the next task in the 

205 PTC-measurement pipeline, `PhotonTransferCurveSolveTask`, 

206 which will assemble the multiple `PhotonTransferCurveDataset` 

207 objects into a single one in order to fit the measured covariances 

208 as a function of flux to one of three models 

209 (see `PhotonTransferCurveSolveTask` for details). 

210 

211 Reference: Astier+19: "The Shape of the Photon Transfer Curve of CCD 

212 sensors", arXiv:1905.08677. 

213 """ 

214 

215 ConfigClass = PhotonTransferCurveExtractConfig 

216 _DefaultName = 'cpPtcExtract' 

217 

218 def runQuantum(self, butlerQC, inputRefs, outputRefs): 

219 """Ensure that the input and output dimensions are passed along. 

220 

221 Parameters 

222 ---------- 

223 butlerQC : `~lsst.daf.butler.butlerQuantumContext.ButlerQuantumContext` 

224 Butler to operate on. 

225 inputRefs : `~lsst.pipe.base.connections.InputQuantizedConnection` 

226 Input data refs to load. 

227 ouptutRefs : `~lsst.pipe.base.connections.OutputQuantizedConnection` 

228 Output data refs to persist. 

229 """ 

230 inputs = butlerQC.get(inputRefs) 

231 # Ids of input list of exposure references 

232 # (deferLoad=True in the input connections) 

233 inputs['inputDims'] = [expRef.datasetRef.dataId['exposure'] for expRef in inputRefs.inputExp] 

234 

235 # Dictionary, keyed by expTime, with tuples containing flat 

236 # exposures and their IDs. 

237 if self.config.matchByExposureId: 

238 inputs['inputExp'] = arrangeFlatsByExpId(inputs['inputExp'], inputs['inputDims']) 

239 else: 

240 inputs['inputExp'] = arrangeFlatsByExpTime(inputs['inputExp'], inputs['inputDims']) 

241 

242 outputs = self.run(**inputs) 

243 butlerQC.put(outputs, outputRefs) 

244 

245 def run(self, inputExp, inputDims, taskMetadata): 

246 """Measure covariances from difference of flat pairs 

247 

248 Parameters 

249 ---------- 

250 inputExp : `dict` [`float`, `list` 

251 [`~lsst.pipe.base.connections.DeferredDatasetRef`]] 

252 Dictionary that groups references to flat-field exposures that 

253 have the same exposure time (seconds), or that groups them 

254 sequentially by their exposure id. 

255 inputDims : `list` 

256 List of exposure IDs. 

257 taskMetadata : `list` [`lsst.pipe.base.TaskMetadata`] 

258 List of exposures metadata from ISR. 

259 

260 Returns 

261 ------- 

262 results : `lsst.pipe.base.Struct` 

263 The resulting Struct contains: 

264 ``outputCovariances`` 

265 A list containing the per-pair PTC measurements (`list` 

266 [`lsst.ip.isr.PhotonTransferCurveDataset`]) 

267 """ 

268 # inputExp.values() returns a view, which we turn into a list. We then 

269 # access the first exposure-ID tuple to get the detector. 

270 # The first "get()" retrieves the exposure from the exposure reference. 

271 detector = list(inputExp.values())[0][0][0].get(component='detector') 

272 detNum = detector.getId() 

273 amps = detector.getAmplifiers() 

274 ampNames = [amp.getName() for amp in amps] 

275 

276 # Each amp may have a different min and max ADU signal 

277 # specified in the config. 

278 maxMeanSignalDict = {ampName: 1e6 for ampName in ampNames} 

279 minMeanSignalDict = {ampName: 0.0 for ampName in ampNames} 

280 for ampName in ampNames: 

281 if 'ALL_AMPS' in self.config.maxMeanSignal: 

282 maxMeanSignalDict[ampName] = self.config.maxMeanSignal['ALL_AMPS'] 

283 elif ampName in self.config.maxMeanSignal: 

284 maxMeanSignalDict[ampName] = self.config.maxMeanSignal[ampName] 

285 

286 if 'ALL_AMPS' in self.config.minMeanSignal: 

287 minMeanSignalDict[ampName] = self.config.minMeanSignal['ALL_AMPS'] 

288 elif ampName in self.config.minMeanSignal: 

289 minMeanSignalDict[ampName] = self.config.minMeanSignal[ampName] 

290 # These are the column names for `tupleRows` below. 

291 tags = [('mu', '<f8'), ('afwVar', '<f8'), ('i', '<i8'), ('j', '<i8'), ('var', '<f8'), 

292 ('cov', '<f8'), ('npix', '<i8'), ('ext', '<i8'), ('expTime', '<f8'), ('ampName', '<U3')] 

293 # Create a dummy ptcDataset. Dummy datasets will be 

294 # used to ensure that the number of output and input 

295 # dimensions match. 

296 dummyPtcDataset = PhotonTransferCurveDataset(ampNames, 'DUMMY', 

297 self.config.maximumRangeCovariancesAstier) 

298 

299 readNoiseDict = {ampName: 0.0 for ampName in ampNames} 

300 for ampName in ampNames: 

301 # Initialize amps of `dummyPtcDatset`. 

302 dummyPtcDataset.setAmpValues(ampName) 

303 # Overscan readnoise from post-ISR exposure metadata. 

304 # It will be used to estimate the gain from a pair of flats. 

305 readNoiseDict[ampName] = self.getReadNoiseFromMetadata(taskMetadata, ampName) 

306 

307 # Output list with PTC datasets. 

308 partialPtcDatasetList = [] 

309 # The number of output references needs to match that of input 

310 # references: initialize outputlist with dummy PTC datasets. 

311 for i in range(len(inputDims)): 

312 partialPtcDatasetList.append(dummyPtcDataset) 

313 

314 if self.config.numEdgeSuspect > 0: 

315 isrTask = IsrTask() 

316 self.log.info("Masking %d pixels from the edges of all exposures as SUSPECT.", 

317 self.config.numEdgeSuspect) 

318 

319 for expTime in inputExp: 

320 exposures = inputExp[expTime] 

321 if len(exposures) == 1: 

322 self.log.warning("Only one exposure found at expTime %f. Dropping exposure %d.", 

323 expTime, exposures[0][1]) 

324 continue 

325 else: 

326 # Only use the first two exposures at expTime. Each 

327 # element is a tuple (exposure, expId) 

328 expRef1, expId1 = exposures[0] 

329 expRef2, expId2 = exposures[1] 

330 # use get() to obtain `lsst.afw.image.Exposure` 

331 exp1, exp2 = expRef1.get(), expRef2.get() 

332 

333 if len(exposures) > 2: 

334 self.log.warning("Already found 2 exposures at expTime %f. Ignoring exposures: %s", 

335 expTime, ", ".join(str(i[1]) for i in exposures[2:])) 

336 # Mask pixels at the edge of the detector or of each amp 

337 if self.config.numEdgeSuspect > 0: 

338 isrTask.maskEdges(exp1, numEdgePixels=self.config.numEdgeSuspect, 

339 maskPlane="SUSPECT", level=self.config.edgeMaskLevel) 

340 isrTask.maskEdges(exp2, numEdgePixels=self.config.numEdgeSuspect, 

341 maskPlane="SUSPECT", level=self.config.edgeMaskLevel) 

342 

343 nAmpsNan = 0 

344 partialPtcDataset = PhotonTransferCurveDataset(ampNames, 'PARTIAL', 

345 self.config.maximumRangeCovariancesAstier) 

346 for ampNumber, amp in enumerate(detector): 

347 ampName = amp.getName() 

348 if self.config.detectorMeasurementRegion == 'AMP': 

349 region = amp.getBBox() 

350 elif self.config.detectorMeasurementRegion == 'FULL': 

351 region = None 

352 

353 # Get masked image regions, masking planes, statistic control 

354 # objects, and clipped means. Calculate once to reuse in 

355 # `measureMeanVarCov` and `getGainFromFlatPair`. 

356 im1Area, im2Area, imStatsCtrl, mu1, mu2 = self.getImageAreasMasksStats(exp1, exp2, 

357 region=region) 

358 

359 # `measureMeanVarCov` is the function that measures 

360 # the variance and covariances from a region of 

361 # the difference image of two flats at the same 

362 # exposure time. The variable `covAstier` that is 

363 # returned is of the form: 

364 # [(i, j, var (cov[0,0]), cov, npix) for (i,j) in 

365 # {maxLag, maxLag}^2]. 

366 muDiff, varDiff, covAstier = self.measureMeanVarCov(im1Area, im2Area, imStatsCtrl, mu1, mu2) 

367 # Estimate the gain from the flat pair 

368 if self.config.doGain: 

369 gain = self.getGainFromFlatPair(im1Area, im2Area, imStatsCtrl, mu1, mu2, 

370 correctionType=self.config.gainCorrectionType, 

371 readNoise=readNoiseDict[ampName]) 

372 else: 

373 gain = np.nan 

374 

375 # Correction factor for bias introduced by sigma 

376 # clipping. 

377 # Function returns 1/sqrt(varFactor), so it needs 

378 # to be squared. varDiff is calculated via 

379 # afwMath.VARIANCECLIP. 

380 varFactor = sigmaClipCorrection(self.config.nSigmaClipPtc)**2 

381 varDiff *= varFactor 

382 

383 expIdMask = True 

384 # Mask data point at this mean signal level if 

385 # the signal, variance, or covariance calculations 

386 # from `measureMeanVarCov` resulted in NaNs. 

387 if np.isnan(muDiff) or np.isnan(varDiff) or (covAstier is None): 

388 self.log.warning("NaN mean or var, or None cov in amp %s in exposure pair %d, %d of " 

389 "detector %d.", ampName, expId1, expId2, detNum) 

390 nAmpsNan += 1 

391 expIdMask = False 

392 covArray = np.full((1, self.config.maximumRangeCovariancesAstier, 

393 self.config.maximumRangeCovariancesAstier), np.nan) 

394 covSqrtWeights = np.full_like(covArray, np.nan) 

395 

396 # Mask data point if it is outside of the 

397 # specified mean signal range. 

398 if (muDiff <= minMeanSignalDict[ampName]) or (muDiff >= maxMeanSignalDict[ampName]): 

399 expIdMask = False 

400 

401 if covAstier is not None: 

402 # Turn the tuples with the measured information 

403 # into covariance arrays. 

404 # covrow: (i, j, var (cov[0,0]), cov, npix) 

405 tupleRows = [(muDiff, varDiff) + covRow + (ampNumber, expTime, 

406 ampName) for covRow in covAstier] 

407 tempStructArray = np.array(tupleRows, dtype=tags) 

408 covArray, vcov, _ = self.makeCovArray(tempStructArray, 

409 self.config.maximumRangeCovariancesAstier) 

410 covSqrtWeights = np.nan_to_num(1./np.sqrt(vcov)) 

411 

412 # Correct covArray for sigma clipping: 

413 # 1) Apply varFactor twice for the whole covariance matrix 

414 covArray *= varFactor**2 

415 # 2) But, only once for the variance element of the 

416 # matrix, covArray[0,0] (so divide one factor out). 

417 covArray[0, 0] /= varFactor 

418 

419 partialPtcDataset.setAmpValues(ampName, rawExpTime=[expTime], rawMean=[muDiff], 

420 rawVar=[varDiff], inputExpIdPair=[(expId1, expId2)], 

421 expIdMask=[expIdMask], covArray=covArray, 

422 covSqrtWeights=covSqrtWeights, gain=gain, 

423 noise=readNoiseDict[ampName]) 

424 # Use location of exp1 to save PTC dataset from (exp1, exp2) pair. 

425 # Below, np.where(expId1 == np.array(inputDims)) returns a tuple 

426 # with a single-element array, so [0][0] 

427 # is necessary to extract the required index. 

428 datasetIndex = np.where(expId1 == np.array(inputDims))[0][0] 

429 # `partialPtcDatasetList` is a list of 

430 # `PhotonTransferCurveDataset` objects. Some of them 

431 # will be dummy datasets (to match length of input 

432 # and output references), and the rest will have 

433 # datasets with the mean signal, variance, and 

434 # covariance measurements at a given exposure 

435 # time. The next ppart of the PTC-measurement 

436 # pipeline, `solve`, will take this list as input, 

437 # and assemble the measurements in the datasets 

438 # in an addecuate manner for fitting a PTC 

439 # model. 

440 partialPtcDataset.updateMetadata(setDate=True, detector=detector) 

441 partialPtcDatasetList[datasetIndex] = partialPtcDataset 

442 

443 if nAmpsNan == len(ampNames): 

444 msg = f"NaN mean in all amps of exposure pair {expId1}, {expId2} of detector {detNum}." 

445 self.log.warning(msg) 

446 return pipeBase.Struct( 

447 outputCovariances=partialPtcDatasetList, 

448 ) 

449 

450 def makeCovArray(self, inputTuple, maxRangeFromTuple): 

451 """Make covariances array from tuple. 

452 

453 Parameters 

454 ---------- 

455 inputTuple : `numpy.ndarray` 

456 Structured array with rows with at least 

457 (mu, afwVar, cov, var, i, j, npix), where: 

458 mu : `float` 

459 0.5*(m1 + m2), where mu1 is the mean value of flat1 

460 and mu2 is the mean value of flat2. 

461 afwVar : `float` 

462 Variance of difference flat, calculated with afw. 

463 cov : `float` 

464 Covariance value at lag(i, j) 

465 var : `float` 

466 Variance(covariance value at lag(0, 0)) 

467 i : `int` 

468 Lag in dimension "x". 

469 j : `int` 

470 Lag in dimension "y". 

471 npix : `int` 

472 Number of pixels used for covariance calculation. 

473 maxRangeFromTuple : `int` 

474 Maximum range to select from tuple. 

475 

476 Returns 

477 ------- 

478 cov : `numpy.array` 

479 Covariance arrays, indexed by mean signal mu. 

480 vCov : `numpy.array` 

481 Variance of the [co]variance arrays, indexed by mean signal mu. 

482 muVals : `numpy.array` 

483 List of mean signal values. 

484 """ 

485 if maxRangeFromTuple is not None: 

486 cut = (inputTuple['i'] < maxRangeFromTuple) & (inputTuple['j'] < maxRangeFromTuple) 

487 cutTuple = inputTuple[cut] 

488 else: 

489 cutTuple = inputTuple 

490 # increasing mu order, so that we can group measurements with the 

491 # same mu 

492 muTemp = cutTuple['mu'] 

493 ind = np.argsort(muTemp) 

494 

495 cutTuple = cutTuple[ind] 

496 # should group measurements on the same image pairs(same average) 

497 mu = cutTuple['mu'] 

498 xx = np.hstack(([mu[0]], mu)) 

499 delta = xx[1:] - xx[:-1] 

500 steps, = np.where(delta > 0) 

501 ind = np.zeros_like(mu, dtype=int) 

502 ind[steps] = 1 

503 ind = np.cumsum(ind) # this acts as an image pair index. 

504 # now fill the 3-d cov array(and variance) 

505 muVals = np.array(np.unique(mu)) 

506 i = cutTuple['i'].astype(int) 

507 j = cutTuple['j'].astype(int) 

508 c = 0.5*cutTuple['cov'] 

509 n = cutTuple['npix'] 

510 v = 0.5*cutTuple['var'] 

511 # book and fill 

512 cov = np.ndarray((len(muVals), np.max(i)+1, np.max(j)+1)) 

513 var = np.zeros_like(cov) 

514 cov[ind, i, j] = c 

515 var[ind, i, j] = v**2/n 

516 var[:, 0, 0] *= 2 # var(v) = 2*v**2/N 

517 

518 return cov, var, muVals 

519 

520 def measureMeanVarCov(self, im1Area, im2Area, imStatsCtrl, mu1, mu2): 

521 """Calculate the mean of each of two exposures and the variance 

522 and covariance of their difference. The variance is calculated 

523 via afwMath, and the covariance via the methods in Astier+19 

524 (appendix A). In theory, var = covariance[0,0]. This should 

525 be validated, and in the future, we may decide to just keep 

526 one (covariance). 

527 

528 Parameters 

529 ---------- 

530 im1Area : `lsst.afw.image.maskedImage.MaskedImageF` 

531 Masked image from exposure 1. 

532 im2Area : `lsst.afw.image.maskedImage.MaskedImageF` 

533 Masked image from exposure 2. 

534 imStatsCtrl : `lsst.afw.math.StatisticsControl` 

535 Statistics control object. 

536 mu1: `float` 

537 Clipped mean of im1Area (ADU). 

538 mu2: `float` 

539 Clipped mean of im2Area (ADU). 

540 

541 Returns 

542 ------- 

543 mu : `float` or `NaN` 

544 0.5*(mu1 + mu2), where mu1, and mu2 are the clipped means 

545 of the regions in both exposures. If either mu1 or m2 are 

546 NaN's, the returned value is NaN. 

547 varDiff : `float` or `NaN` 

548 Half of the clipped variance of the difference of the 

549 regions inthe two input exposures. If either mu1 or m2 are 

550 NaN's, the returned value is NaN. 

551 covDiffAstier : `list` or `NaN` 

552 List with tuples of the form (dx, dy, var, cov, npix), where: 

553 dx : `int` 

554 Lag in x 

555 dy : `int` 

556 Lag in y 

557 var : `float` 

558 Variance at (dx, dy). 

559 cov : `float` 

560 Covariance at (dx, dy). 

561 nPix : `int` 

562 Number of pixel pairs used to evaluate var and cov. 

563 

564 If either mu1 or m2 are NaN's, the returned value is NaN. 

565 """ 

566 if np.isnan(mu1) or np.isnan(mu2): 

567 self.log.warning("Mean of amp in image 1 or 2 is NaN: %f, %f.", mu1, mu2) 

568 return np.nan, np.nan, None 

569 mu = 0.5*(mu1 + mu2) 

570 

571 # Take difference of pairs 

572 # symmetric formula: diff = (mu2*im1-mu1*im2)/(0.5*(mu1+mu2)) 

573 temp = im2Area.clone() 

574 temp *= mu1 

575 diffIm = im1Area.clone() 

576 diffIm *= mu2 

577 diffIm -= temp 

578 diffIm /= mu 

579 

580 # Variance calculation via afwMath 

581 varDiff = 0.5*(afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, imStatsCtrl).getValue()) 

582 

583 # Covariances calculations 

584 # Get the pixels that were not clipped 

585 varClip = afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, imStatsCtrl).getValue() 

586 meanClip = afwMath.makeStatistics(diffIm, afwMath.MEANCLIP, imStatsCtrl).getValue() 

587 cut = meanClip + self.config.nSigmaClipPtc*np.sqrt(varClip) 

588 unmasked = np.where(np.fabs(diffIm.image.array) <= cut, 1, 0) 

589 

590 # Get the pixels in the mask planes of the difference image 

591 # that were ignored by the clipping algorithm 

592 wDiff = np.where(diffIm.getMask().getArray() == 0, 1, 0) 

593 # Combine the two sets of pixels ('1': use; '0': don't use) 

594 # into a final weight matrix to be used in the covariance 

595 # calculations below. 

596 w = unmasked*wDiff 

597 

598 if np.sum(w) < self.config.minNumberGoodPixelsForCovariance: 

599 self.log.warning("Number of good points for covariance calculation (%s) is less " 

600 "(than threshold %s)", np.sum(w), self.config.minNumberGoodPixelsForCovariance) 

601 return np.nan, np.nan, None 

602 

603 maxRangeCov = self.config.maximumRangeCovariancesAstier 

604 

605 # Calculate covariances via FFT. 

606 shapeDiff = np.array(diffIm.image.array.shape) 

607 # Calculate the sizes of FFT dimensions. 

608 s = shapeDiff + maxRangeCov 

609 tempSize = np.array(np.log(s)/np.log(2.)).astype(int) 

610 fftSize = np.array(2**(tempSize+1)).astype(int) 

611 fftShape = (fftSize[0], fftSize[1]) 

612 c = CovFastFourierTransform(diffIm.image.array, w, fftShape, maxRangeCov) 

613 # np.sum(w) is the same as npix[0][0] returned in covDiffAstier 

614 covDiffAstier = c.reportCovFastFourierTransform(maxRangeCov) 

615 

616 # Compare Cov[0,0] and afwMath.VARIANCECLIP covDiffAstier[0] 

617 # is the Cov[0,0] element, [3] is the variance, and there's a 

618 # factor of 0.5 difference with afwMath.VARIANCECLIP. 

619 thresholdPercentage = self.config.thresholdDiffAfwVarVsCov00 

620 fractionalDiff = 100*np.fabs(1 - varDiff/(covDiffAstier[0][3]*0.5)) 

621 if fractionalDiff >= thresholdPercentage: 

622 self.log.warning("Absolute fractional difference between afwMatch.VARIANCECLIP and Cov[0,0] " 

623 "is more than %f%%: %f", thresholdPercentage, fractionalDiff) 

624 

625 return mu, varDiff, covDiffAstier 

626 

627 def getImageAreasMasksStats(self, exposure1, exposure2, region=None): 

628 """Get image areas in a region as well as masks and statistic objects. 

629 

630 Parameters 

631 ---------- 

632 exposure1 : `lsst.afw.image.exposure.ExposureF` 

633 First exposure of flat field pair. 

634 exposure2 : `lsst.afw.image.exposure.ExposureF` 

635 Second exposure of flat field pair. 

636 region : `lsst.geom.Box2I`, optional 

637 Region of each exposure where to perform the calculations 

638 (e.g, an amplifier). 

639 

640 Returns 

641 ------- 

642 im1Area : `lsst.afw.image.maskedImage.MaskedImageF` 

643 Masked image from exposure 1. 

644 im2Area : `lsst.afw.image.maskedImage.MaskedImageF` 

645 Masked image from exposure 2. 

646 imStatsCtrl : `lsst.afw.math.StatisticsControl` 

647 Statistics control object. 

648 mu1: `float` 

649 Clipped mean of im1Area (ADU). 

650 mu2: `float` 

651 Clipped mean of im2Area (ADU). 

652 """ 

653 if region is not None: 

654 im1Area = exposure1.maskedImage[region] 

655 im2Area = exposure2.maskedImage[region] 

656 else: 

657 im1Area = exposure1.maskedImage 

658 im2Area = exposure2.maskedImage 

659 

660 if self.config.binSize > 1: 

661 im1Area = afwMath.binImage(im1Area, self.config.binSize) 

662 im2Area = afwMath.binImage(im2Area, self.config.binSize) 

663 

664 # Get mask planes and construct statistics control object from one 

665 # of the exposures 

666 imMaskVal = exposure1.getMask().getPlaneBitMask(self.config.maskNameList) 

667 imStatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc, 

668 self.config.nIterSigmaClipPtc, 

669 imMaskVal) 

670 imStatsCtrl.setNanSafe(True) 

671 imStatsCtrl.setAndMask(imMaskVal) 

672 

673 mu1 = afwMath.makeStatistics(im1Area, afwMath.MEANCLIP, imStatsCtrl).getValue() 

674 mu2 = afwMath.makeStatistics(im2Area, afwMath.MEANCLIP, imStatsCtrl).getValue() 

675 

676 return (im1Area, im2Area, imStatsCtrl, mu1, mu2) 

677 

678 def getGainFromFlatPair(self, im1Area, im2Area, imStatsCtrl, mu1, mu2, 

679 correctionType='NONE', readNoise=None): 

680 """Estimate the gain from a single pair of flats. 

681 

682 The basic premise is 1/g = <(I1 - I2)^2/(I1 + I2)> = 1/const, 

683 where I1 and I2 correspond to flats 1 and 2, respectively. 

684 Corrections for the variable QE and the read-noise are then 

685 made following the derivation in Robert Lupton's forthcoming 

686 book, which gets 

687 

688 1/g = <(I1 - I2)^2/(I1 + I2)> - 1/mu(sigma^2 - 1/2g^2). 

689 

690 This is a quadratic equation, whose solutions are given by: 

691 

692 g = mu +/- sqrt(2*sigma^2 - 2*const*mu + mu^2)/(2*const*mu*2 

693 - 2*sigma^2) 

694 

695 where 'mu' is the average signal level and 'sigma' is the 

696 amplifier's readnoise. The positive solution will be used. 

697 The way the correction is applied depends on the value 

698 supplied for correctionType. 

699 

700 correctionType is one of ['NONE', 'SIMPLE' or 'FULL'] 

701 'NONE' : uses the 1/g = <(I1 - I2)^2/(I1 + I2)> formula. 

702 'SIMPLE' : uses the gain from the 'NONE' method for the 

703 1/2g^2 term. 

704 'FULL' : solves the full equation for g, discarding the 

705 non-physical solution to the resulting quadratic. 

706 

707 Parameters 

708 ---------- 

709 im1Area : `lsst.afw.image.maskedImage.MaskedImageF` 

710 Masked image from exposure 1. 

711 im2Area : `lsst.afw.image.maskedImage.MaskedImageF` 

712 Masked image from exposure 2. 

713 imStatsCtrl : `lsst.afw.math.StatisticsControl` 

714 Statistics control object. 

715 mu1: `float` 

716 Clipped mean of im1Area (ADU). 

717 mu2: `float` 

718 Clipped mean of im2Area (ADU). 

719 correctionType : `str`, optional 

720 The correction applied, one of ['NONE', 'SIMPLE', 'FULL'] 

721 readNoise : `float`, optional 

722 Amplifier readout noise (ADU). 

723 

724 Returns 

725 ------- 

726 gain : `float` 

727 Gain, in e/ADU. 

728 

729 Raises 

730 ------ 

731 RuntimeError: if `correctionType` is not one of 'NONE', 

732 'SIMPLE', or 'FULL'. 

733 """ 

734 if correctionType not in ['NONE', 'SIMPLE', 'FULL']: 

735 raise RuntimeError("Unknown correction type: %s" % correctionType) 

736 

737 if correctionType != 'NONE' and readNoise is None: 

738 self.log.warning("'correctionType' in 'getGainFromFlatPair' is %s, " 

739 "but 'readNoise' is 'None'. Setting 'correctionType' " 

740 "to 'NONE', so a gain value will be estimated without " 

741 "corrections." % correctionType) 

742 correctionType = 'NONE' 

743 

744 mu = 0.5*(mu1 + mu2) 

745 

746 # ratioIm = (I1 - I2)^2 / (I1 + I2) 

747 temp = im2Area.clone() 

748 ratioIm = im1Area.clone() 

749 ratioIm -= temp 

750 ratioIm *= ratioIm 

751 

752 # Sum of pairs 

753 sumIm = im1Area.clone() 

754 sumIm += temp 

755 

756 ratioIm /= sumIm 

757 

758 const = afwMath.makeStatistics(ratioIm, afwMath.MEAN, imStatsCtrl).getValue() 

759 gain = 1. / const 

760 

761 if correctionType == 'SIMPLE': 

762 gain = 1/(const - (1/mu)*(readNoise**2 - (1/2*gain**2))) 

763 elif correctionType == 'FULL': 

764 root = np.sqrt(mu**2 - 2*mu*const + 2*readNoise**2) 

765 denom = (2*const*mu - 2*readNoise**2) 

766 positiveSolution = (root + mu)/denom 

767 gain = positiveSolution 

768 

769 return gain 

770 

771 def getReadNoiseFromMetadata(self, taskMetadata, ampName): 

772 """Gets readout noise for an amp from ISR metadata. 

773 

774 Parameters 

775 ---------- 

776 taskMetadata : `list` [`lsst.pipe.base.TaskMetadata`] 

777 List of exposures metadata from ISR. 

778 ampName : `str` 

779 Amplifier name. 

780 

781 Returns 

782 ------- 

783 readNoise : `float` 

784 Median of the overscan readnoise in the 

785 post-ISR metadata of the input exposures (ADU). 

786 Returns 'None' if the median could not be calculated. 

787 """ 

788 # Empirical readout noise [ADU] measured from an 

789 # overscan-subtracted overscan during ISR. 

790 expectedKey = f"RESIDUAL STDEV {ampName}" 

791 

792 readNoises = [] 

793 for expMetadata in taskMetadata: 

794 if 'isr' in expMetadata: 

795 overscanNoise = expMetadata['isr'][expectedKey] 

796 else: 

797 continue 

798 readNoises.append(overscanNoise) 

799 

800 if len(readNoises): 

801 readNoise = np.median(np.array(readNoises)) 

802 else: 

803 self.log.warning("Median readout noise from ISR metadata for amp %s " 

804 "could not be calculated." % ampName) 

805 readNoise = None 

806 

807 return readNoise