Coverage for python/lsst/cp/pipe/ptc/cpExtractPtcTask.py: 13%

236 statements  

# This file is part of cp_pipe.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
import numpy as np

import lsst.afw.math as afwMath
import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
from lsst.cp.pipe.utils import (arrangeFlatsByExpTime, arrangeFlatsByExpId,
                                sigmaClipCorrection, CovFastFourierTransform)

import lsst.pipe.base.connectionTypes as cT

from lsst.ip.isr import PhotonTransferCurveDataset
from lsst.ip.isr import IsrTask

__all__ = ['PhotonTransferCurveExtractConfig', 'PhotonTransferCurveExtractTask']


class PhotonTransferCurveExtractConnections(pipeBase.PipelineTaskConnections,
                                            dimensions=("instrument", "detector")):

    inputExp = cT.Input(
        name="ptcInputExposurePairs",
        doc="Input post-ISR processed exposure pairs (flats) to "
            "measure covariances from.",
        storageClass="Exposure",
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
        deferLoad=True,
    )
    taskMetadata = cT.Input(
        name="isrTask_metadata",
        doc="Input task metadata to extract statistics from.",
        storageClass="TaskMetadata",
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
    )
    outputCovariances = cT.Output(
        name="ptcCovariances",
        doc="Extracted flat (co)variances.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
    )


class PhotonTransferCurveExtractConfig(pipeBase.PipelineTaskConfig,
                                       pipelineConnections=PhotonTransferCurveExtractConnections):
    """Configuration for the measurement of covariances from flats.
    """

    matchByExposureId = pexConfig.Field(
        dtype=bool,
        doc="Should exposures be matched by ID rather than exposure time?",
        default=False,
    )
    maximumRangeCovariancesAstier = pexConfig.Field(
        dtype=int,
        doc="Maximum range of covariances as in Astier+19.",
        default=8,
    )
    binSize = pexConfig.Field(
        dtype=int,
        doc="Bin the image by this factor in both dimensions.",
        default=1,
    )
    minMeanSignal = pexConfig.DictField(
        keytype=str,
        itemtype=float,
        doc="Minimum values (inclusive) of mean signal (in ADU) per amp to use."
            " The same cut is applied to all amps if this parameter [`dict`] is passed as"
            " {'ALL_AMPS': value}.",
        default={'ALL_AMPS': 0.0},
    )
    maxMeanSignal = pexConfig.DictField(
        keytype=str,
        itemtype=float,
        doc="Maximum values (inclusive) of mean signal (in ADU) per amp to use."
            " The same cut is applied to all amps if this dictionary is of the form"
            " {'ALL_AMPS': value}.",
        default={'ALL_AMPS': 1e6},
    )
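    # Illustrative note (the amplifier names below are hypothetical): both
    # signal cuts accept either a single cut applied to every amplifier, e.g.
    #     {'ALL_AMPS': 1.2e5},
    # or per-amplifier cuts keyed by amplifier name, e.g.
    #     {'C00': 1.2e5, 'C01': 1.1e5}.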

    maskNameList = pexConfig.ListField(
        dtype=str,
        doc="Mask list to exclude from statistics calculations.",
        default=['SUSPECT', 'BAD', 'NO_DATA', 'SAT'],
    )
    nSigmaClipPtc = pexConfig.Field(
        dtype=float,
        doc="Sigma cut for afwMath.StatisticsControl().",
        default=5.5,
    )
    nIterSigmaClipPtc = pexConfig.Field(
        dtype=int,
        doc="Number of sigma-clipping iterations for afwMath.StatisticsControl().",
        default=3,
    )
    minNumberGoodPixelsForCovariance = pexConfig.Field(
        dtype=int,
        doc="Minimum number of acceptable good pixels per amp to calculate the covariances"
            " (via FFT or directly).",
        default=10000,
    )
    thresholdDiffAfwVarVsCov00 = pexConfig.Field(
        dtype=float,
        doc="If the absolute fractional difference between afwMath.VARIANCECLIP and Cov00 "
            "for a region of a difference image is greater than this threshold (percentage), "
            "a warning will be issued.",
        default=1.,
    )
    detectorMeasurementRegion = pexConfig.ChoiceField(
        dtype=str,
        doc="Region of each exposure where to perform the calculations (amplifier or full image).",
        default='AMP',
        allowed={
            "AMP": "Amplifier of the detector.",
            "FULL": "Full image."
        }
    )
    numEdgeSuspect = pexConfig.Field(
        dtype=int,
        doc="Number of edge pixels to be flagged as untrustworthy.",
        default=0,
    )
    edgeMaskLevel = pexConfig.ChoiceField(
        dtype=str,
        doc="Mask edge pixels in which coordinate frame: DETECTOR or AMP?",
        default="DETECTOR",
        allowed={
            'DETECTOR': 'Mask only the edges of the full detector.',
            'AMP': 'Mask edges of each amplifier.',
        },
    )
    doGain = pexConfig.Field(
        dtype=bool,
        doc="Calculate a gain per input flat pair.",
        default=True,
    )
    gainCorrectionType = pexConfig.ChoiceField(
        dtype=str,
        doc="Correction type for the gain.",
        default='FULL',
        allowed={
            'NONE': 'No correction.',
            'SIMPLE': 'First order correction.',
            'FULL': 'Second order correction.'
        }
    )


class PhotonTransferCurveExtractTask(pipeBase.PipelineTask,
                                     pipeBase.CmdLineTask):
    """Task to measure covariances from flat fields.

    This task receives as input a list of flat-field images
    (flats) and sorts them into pairs taken at the same
    exposure time (the task warns and drops the exposure if
    only one flat is found at a given exposure time, and it
    discards extra flats if there are more than two per
    exposure time). This task measures the mean, variance,
    and covariances from a region (e.g., an amplifier) of
    the difference image of the two flats with the same
    exposure time.

    The variance is calculated via afwMath, and the covariance
    via the methods in Astier+19 (appendix A). In theory,
    var = covariance[0,0]. This should be validated, and in the
    future, we may decide to just keep one (covariance).
    At this moment, if the two values differ by more than the value
    of `thresholdDiffAfwVarVsCov00` (default: 1%), a warning will
    be issued.

    The measured covariances at a given exposure time (along with
    other quantities such as the mean) are stored in a PTC dataset
    object (`~lsst.ip.isr.PhotonTransferCurveDataset`), which gets
    partially filled at this stage (the remainder of the attributes
    of the dataset will be filled after running the second task of
    the PTC-measurement pipeline, `~PhotonTransferCurveSolveTask`).

    The number of partially-filled
    `~lsst.ip.isr.PhotonTransferCurveDataset` objects will be less
    than the number of input exposures because the task combines
    input flats in pairs. However, it is currently required that
    the number of input dimensions matches bijectively the number
    of output dimensions. Therefore, a number of "dummy" PTC
    datasets are inserted in the output list. This output list
    will then be used as input of the next task in the
    PTC-measurement pipeline, `PhotonTransferCurveSolveTask`,
    which will assemble the multiple `PhotonTransferCurveDataset`
    objects into a single one in order to fit the measured
    covariances as a function of flux to one of three models
    (see `PhotonTransferCurveSolveTask` for details).

    Reference: Astier+19, "The Shape of the Photon Transfer Curve of CCD
    sensors", arXiv:1905.08677.
    """
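    # For example (numbers are illustrative, not from the source): 10 input
    # flats taken at 5 distinct exposure times produce 10 output datasets, of
    # which 5 carry measurements (stored at the index of the first exposure of
    # each pair) and 5 remain dummies.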

    ConfigClass = PhotonTransferCurveExtractConfig
    _DefaultName = 'cpPtcExtract'

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        """Ensure that the input and output dimensions are passed along.

        Parameters
        ----------
        butlerQC : `~lsst.daf.butler.butlerQuantumContext.ButlerQuantumContext`
            Butler to operate on.
        inputRefs : `~lsst.pipe.base.connections.InputQuantizedConnection`
            Input data refs to load.
        outputRefs : `~lsst.pipe.base.connections.OutputQuantizedConnection`
            Output data refs to persist.
        """
        inputs = butlerQC.get(inputRefs)
        # IDs of the input list of exposure references
        # (deferLoad=True in the input connections).
        inputs['inputDims'] = [expRef.datasetRef.dataId['exposure'] for expRef in inputRefs.inputExp]

        # Dictionary, keyed by expTime, with tuples containing flat
        # exposures and their IDs.
        if self.config.matchByExposureId:
            inputs['inputExp'] = arrangeFlatsByExpId(inputs['inputExp'], inputs['inputDims'])
        else:
            inputs['inputExp'] = arrangeFlatsByExpTime(inputs['inputExp'], inputs['inputDims'])

        outputs = self.run(**inputs)
        butlerQC.put(outputs, outputRefs)

    def run(self, inputExp, inputDims, taskMetadata):
        """Measure covariances from differences of flat pairs.

        Parameters
        ----------
        inputExp : `dict` [`float`,
                   `list` [`~lsst.pipe.base.connections.DeferredDatasetRef`]]
            Dictionary that groups references to flat-field exposures that
            have the same exposure time (seconds), or that groups them
            sequentially by their exposure id.
        inputDims : `list`
            List of exposure IDs.
        taskMetadata : `list` [`lsst.pipe.base.TaskMetadata`]
            List of exposure metadata from ISR.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The resulting Struct contains:

            ``outputCovariances``
                A list containing the per-pair PTC measurements (`list`
                [`lsst.ip.isr.PhotonTransferCurveDataset`]).
        """
        # inputExp.values() returns a view, which we turn into a list. We then
        # access the first (exposure reference, exposure ID) tuple, and get()
        # on the deferred reference retrieves its detector component.
        detector = list(inputExp.values())[0][0][0].get(component='detector')
        detNum = detector.getId()
        amps = detector.getAmplifiers()
        ampNames = [amp.getName() for amp in amps]

        # Each amp may have a different min and max ADU signal
        # specified in the config.
        maxMeanSignalDict = {ampName: 1e6 for ampName in ampNames}
        minMeanSignalDict = {ampName: 0.0 for ampName in ampNames}
        for ampName in ampNames:
            if 'ALL_AMPS' in self.config.maxMeanSignal:
                maxMeanSignalDict[ampName] = self.config.maxMeanSignal['ALL_AMPS']
            elif ampName in self.config.maxMeanSignal:
                maxMeanSignalDict[ampName] = self.config.maxMeanSignal[ampName]

            if 'ALL_AMPS' in self.config.minMeanSignal:
                minMeanSignalDict[ampName] = self.config.minMeanSignal['ALL_AMPS']
            elif ampName in self.config.minMeanSignal:
                minMeanSignalDict[ampName] = self.config.minMeanSignal[ampName]
        # These are the column names for `tupleRows` below.
        tags = [('mu', '<f8'), ('afwVar', '<f8'), ('i', '<i8'), ('j', '<i8'), ('var', '<f8'),
                ('cov', '<f8'), ('npix', '<i8'), ('ext', '<i8'), ('expTime', '<f8'), ('ampName', '<U3')]
        # Create a dummy ptcDataset. Dummy datasets will be
        # used to ensure that the number of output and input
        # dimensions match.
        dummyPtcDataset = PhotonTransferCurveDataset(ampNames, 'DUMMY',
                                                     self.config.maximumRangeCovariancesAstier)

        readNoiseDict = {ampName: 0.0 for ampName in ampNames}
        for ampName in ampNames:
            # Initialize amps of `dummyPtcDataset`.
            dummyPtcDataset.setAmpValues(ampName)
            # Overscan readnoise from the post-ISR exposure metadata.
            # It will be used to estimate the gain from a pair of flats.
            readNoiseDict[ampName] = self.getReadNoiseFromMetadata(taskMetadata, ampName)

        # Output list with PTC datasets.
        partialPtcDatasetList = []
        # The number of output references needs to match that of the input
        # references: initialize the output list with dummy PTC datasets.
        for i in range(len(inputDims)):
            partialPtcDatasetList.append(dummyPtcDataset)

        if self.config.numEdgeSuspect > 0:
            isrTask = IsrTask()
            self.log.info("Masking %d pixels from the edges of all exposures as SUSPECT.",
                          self.config.numEdgeSuspect)

        for expTime in inputExp:
            exposures = inputExp[expTime]
            if len(exposures) == 1:
                self.log.warning("Only one exposure found at expTime %f. Dropping exposure %d.",
                                 expTime, exposures[0][1])
                continue
            else:
                # Only use the first two exposures at expTime. Each
                # element is a tuple (exposure, expId).
                expRef1, expId1 = exposures[0]
                expRef2, expId2 = exposures[1]
                # Use get() to obtain `lsst.afw.image.Exposure`.
                exp1, exp2 = expRef1.get(), expRef2.get()

                if len(exposures) > 2:
                    self.log.warning("Already found 2 exposures at expTime %f. Ignoring exposures: %s",
                                     expTime, ", ".join(str(i[1]) for i in exposures[2:]))
            # Mask pixels at the edge of the detector or of each amp.
            if self.config.numEdgeSuspect > 0:
                isrTask.maskEdges(exp1, numEdgePixels=self.config.numEdgeSuspect,
                                  maskPlane="SUSPECT", level=self.config.edgeMaskLevel)
                isrTask.maskEdges(exp2, numEdgePixels=self.config.numEdgeSuspect,
                                  maskPlane="SUSPECT", level=self.config.edgeMaskLevel)

            nAmpsNan = 0
            partialPtcDataset = PhotonTransferCurveDataset(ampNames, 'PARTIAL',
                                                           self.config.maximumRangeCovariancesAstier)
            for ampNumber, amp in enumerate(detector):
                ampName = amp.getName()
                # covAstier: [(i, j, var (cov[0,0]), cov, npix) for
                # (i, j) in {maxLag, maxLag}^2]
                if self.config.detectorMeasurementRegion == 'AMP':
                    region = amp.getBBox()
                elif self.config.detectorMeasurementRegion == 'FULL':
                    region = None
                # `measureMeanVarCov` is the function that measures
                # the variance and covariances from a region of
                # the difference image of two flats at the same
                # exposure time. The variable `covAstier` that is
                # returned is of the form:
                # [(i, j, var (cov[0,0]), cov, npix) for (i, j) in
                # {maxLag, maxLag}^2].
                muDiff, varDiff, covAstier = self.measureMeanVarCov(exp1, exp2, region=region)

                # Estimate the gain from the flat pair.
                if self.config.doGain:
                    gain = self.getGainFromFlatPair(exp1, exp2,
                                                    correctionType=self.config.gainCorrectionType,
                                                    readNoise=readNoiseDict[ampName], region=region)
                else:
                    gain = np.nan
                # Correction factor for the bias introduced by sigma
                # clipping.
                # The function returns 1/sqrt(varFactor), so it needs
                # to be squared. varDiff is calculated via
                # afwMath.VARIANCECLIP.
                varFactor = sigmaClipCorrection(self.config.nSigmaClipPtc)**2
                varDiff *= varFactor

                expIdMask = True
                # Mask the data point at this mean signal level if
                # the signal, variance, or covariance calculations
                # from `measureMeanVarCov` resulted in NaNs.
                if np.isnan(muDiff) or np.isnan(varDiff) or (covAstier is None):
                    self.log.warning("NaN mean or var, or None cov in amp %s in exposure pair %d, %d of "
                                     "detector %d.", ampName, expId1, expId2, detNum)
                    nAmpsNan += 1
                    expIdMask = False
                    covArray = np.full((1, self.config.maximumRangeCovariancesAstier,
                                        self.config.maximumRangeCovariancesAstier), np.nan)
                    covSqrtWeights = np.full_like(covArray, np.nan)

                # Mask the data point if it is outside of the
                # specified mean signal range.
                if (muDiff <= minMeanSignalDict[ampName]) or (muDiff >= maxMeanSignalDict[ampName]):
                    expIdMask = False

                if covAstier is not None:
                    # Turn the tuples with the measured information
                    # into covariance arrays.
                    tupleRows = [(muDiff, varDiff) + covRow + (ampNumber, expTime,
                                                               ampName) for covRow in covAstier]
                    tempStructArray = np.array(tupleRows, dtype=tags)
                    covArray, vcov, _ = self.makeCovArray(tempStructArray,
                                                          self.config.maximumRangeCovariancesAstier)
                    covSqrtWeights = np.nan_to_num(1./np.sqrt(vcov))

                    # Correct covArray for sigma clipping:
                    # 1) Apply varFactor twice for the whole covariance matrix,
                    covArray *= varFactor**2
                    # 2) but only once for the variance element of the
                    # matrix, covArray[0, 0] (so divide one factor out).
                    covArray[0, 0] /= varFactor

                partialPtcDataset.setAmpValues(ampName, rawExpTime=[expTime], rawMean=[muDiff],
                                               rawVar=[varDiff], inputExpIdPair=[(expId1, expId2)],
                                               expIdMask=[expIdMask], covArray=covArray,
                                               covSqrtWeights=covSqrtWeights, gain=gain,
                                               noise=readNoiseDict[ampName])
            # Use the location of exp1 to save the PTC dataset from the
            # (exp1, exp2) pair. Below, np.where(expId1 == np.array(inputDims))
            # returns a tuple with a single-element array, so [0][0]
            # is necessary to extract the required index.
            datasetIndex = np.where(expId1 == np.array(inputDims))[0][0]
            # `partialPtcDatasetList` is a list of
            # `PhotonTransferCurveDataset` objects. Some of them
            # will be dummy datasets (to match the length of the input
            # and output references), and the rest will be
            # datasets with the mean signal, variance, and
            # covariance measurements at a given exposure
            # time. The next part of the PTC-measurement
            # pipeline, `solve`, will take this list as input,
            # and assemble the measurements in the datasets
            # in an adequate manner for fitting a PTC
            # model.
            partialPtcDatasetList[datasetIndex] = partialPtcDataset

            if nAmpsNan == len(ampNames):
                msg = f"NaN mean in all amps of exposure pair {expId1}, {expId2} of detector {detNum}."
                self.log.warning(msg)
        return pipeBase.Struct(
            outputCovariances=partialPtcDatasetList,
        )

    def makeCovArray(self, inputTuple, maxRangeFromTuple):
        """Make covariance arrays from the input tuple.

        Parameters
        ----------
        inputTuple : `numpy.ndarray`
            Structured array with rows with at least
            (mu, afwVar, cov, var, i, j, npix), where:
            mu : `float`
                0.5*(mu1 + mu2), where mu1 is the mean value of flat1
                and mu2 is the mean value of flat2.
            afwVar : `float`
                Variance of the difference flat, calculated with afw.
            cov : `float`
                Covariance value at lag (i, j).
            var : `float`
                Variance (covariance value at lag (0, 0)).
            i : `int`
                Lag in dimension "x".
            j : `int`
                Lag in dimension "y".
            npix : `int`
                Number of pixels used for the covariance calculation.
        maxRangeFromTuple : `int`
            Maximum range to select from the tuple.

        Returns
        -------
        cov : `numpy.array`
            Covariance arrays, indexed by mean signal mu.
        vCov : `numpy.array`
            Variance arrays, indexed by mean signal mu.
        muVals : `numpy.array`
            List of mean signal values.
        """
        if maxRangeFromTuple is not None:
            cut = (inputTuple['i'] < maxRangeFromTuple) & (inputTuple['j'] < maxRangeFromTuple)
            cutTuple = inputTuple[cut]
        else:
            cutTuple = inputTuple
        # Sort by increasing mu, so that we can group measurements with the
        # same mu.
        muTemp = cutTuple['mu']
        ind = np.argsort(muTemp)

        cutTuple = cutTuple[ind]
        # This should group measurements on the same image pairs (same average).
        mu = cutTuple['mu']
        xx = np.hstack(([mu[0]], mu))
        delta = xx[1:] - xx[:-1]
        steps, = np.where(delta > 0)
        ind = np.zeros_like(mu, dtype=int)
        ind[steps] = 1
        ind = np.cumsum(ind)  # This acts as an image-pair index.
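        # Illustrative example (values made up, not from the pipeline): if the
        # sorted means are mu = [100., 100., 250., 250.], then delta > 0 only
        # at the first row of the second pair, so ind = [0, 0, 1, 1] and the
        # two pairs fill cov[0, :, :] and cov[1, :, :] below.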

        # Now fill the 3-d cov array (and variance).
        muVals = np.array(np.unique(mu))
        i = cutTuple['i'].astype(int)
        j = cutTuple['j'].astype(int)
        c = 0.5*cutTuple['cov']
        n = cutTuple['npix']
        v = 0.5*cutTuple['var']
        # Book and fill.
        cov = np.ndarray((len(muVals), np.max(i)+1, np.max(j)+1))
        var = np.zeros_like(cov)
        cov[ind, i, j] = c
        var[ind, i, j] = v**2/n
        var[:, 0, 0] *= 2  # var(v) = 2*v**2/N

        return cov, var, muVals

    def measureMeanVarCov(self, exposure1, exposure2, region=None):
        """Calculate the mean of each of two exposures and the variance
        and covariance of their difference. The variance is calculated
        via afwMath, and the covariance via the methods in Astier+19
        (appendix A). In theory, var = covariance[0,0]. This should
        be validated, and in the future, we may decide to just keep
        one (covariance).

        Parameters
        ----------
        exposure1 : `lsst.afw.image.exposure.ExposureF`
            First exposure of flat field pair.
        exposure2 : `lsst.afw.image.exposure.ExposureF`
            Second exposure of flat field pair.
        region : `lsst.geom.Box2I`, optional
            Region of each exposure where to perform the calculations
            (e.g., an amplifier).

        Returns
        -------
        mu : `float` or `NaN`
            0.5*(mu1 + mu2), where mu1 and mu2 are the clipped means
            of the regions in both exposures. If either mu1 or mu2 is
            NaN, the returned value is NaN.
        varDiff : `float` or `NaN`
            Half of the clipped variance of the difference of the
            regions in the two input exposures. If either mu1 or mu2
            is NaN, the returned value is NaN.
        covDiffAstier : `list` or `None`
            List with tuples of the form (dx, dy, var, cov, npix), where:
                dx : `int`
                    Lag in x.
                dy : `int`
                    Lag in y.
                var : `float`
                    Variance at (dx, dy).
                cov : `float`
                    Covariance at (dx, dy).
                nPix : `int`
                    Number of pixel pairs used to evaluate var and cov.

            If either mu1 or mu2 is NaN, `None` is returned.
        """
        if region is not None:
            im1Area = exposure1.maskedImage[region]
            im2Area = exposure2.maskedImage[region]
        else:
            im1Area = exposure1.maskedImage
            im2Area = exposure2.maskedImage

        if self.config.binSize > 1:
            im1Area = afwMath.binImage(im1Area, self.config.binSize)
            im2Area = afwMath.binImage(im2Area, self.config.binSize)

        im1MaskVal = exposure1.getMask().getPlaneBitMask(self.config.maskNameList)
        im1StatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
                                                 self.config.nIterSigmaClipPtc,
                                                 im1MaskVal)
        im1StatsCtrl.setNanSafe(True)
        im1StatsCtrl.setAndMask(im1MaskVal)

        im2MaskVal = exposure2.getMask().getPlaneBitMask(self.config.maskNameList)
        im2StatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
                                                 self.config.nIterSigmaClipPtc,
                                                 im2MaskVal)
        im2StatsCtrl.setNanSafe(True)
        im2StatsCtrl.setAndMask(im2MaskVal)

        # Clipped mean of each image; then average of the means.
        mu1 = afwMath.makeStatistics(im1Area, afwMath.MEANCLIP, im1StatsCtrl).getValue()
        mu2 = afwMath.makeStatistics(im2Area, afwMath.MEANCLIP, im2StatsCtrl).getValue()
        if np.isnan(mu1) or np.isnan(mu2):
            self.log.warning("Mean of amp in image 1 or 2 is NaN: %f, %f.", mu1, mu2)
            return np.nan, np.nan, None
        mu = 0.5*(mu1 + mu2)

        # Take the difference of the pair.
        # Symmetric formula: diff = (mu2*im1 - mu1*im2)/(0.5*(mu1 + mu2))
        temp = im2Area.clone()
        temp *= mu1
        diffIm = im1Area.clone()
        diffIm *= mu2
        diffIm -= temp
        diffIm /= mu

        diffImMaskVal = diffIm.getMask().getPlaneBitMask(self.config.maskNameList)
        diffImStatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
                                                    self.config.nIterSigmaClipPtc,
                                                    diffImMaskVal)
        diffImStatsCtrl.setNanSafe(True)
        diffImStatsCtrl.setAndMask(diffImMaskVal)

        # Variance calculation via afwMath.
        varDiff = 0.5*(afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, diffImStatsCtrl).getValue())

        # Covariance calculations.
        # Get the pixels that were not clipped.
        varClip = afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, diffImStatsCtrl).getValue()
        meanClip = afwMath.makeStatistics(diffIm, afwMath.MEANCLIP, diffImStatsCtrl).getValue()
        cut = meanClip + self.config.nSigmaClipPtc*np.sqrt(varClip)
        unmasked = np.where(np.fabs(diffIm.image.array) <= cut, 1, 0)

        # Get the pixels of the difference image that are not in any
        # mask plane (pixels in the mask planes were ignored by the
        # clipping algorithm).
        wDiff = np.where(diffIm.getMask().getArray() == 0, 1, 0)
        # Combine the two sets of pixels ('1': use; '0': don't use)
        # into a final weight matrix to be used in the covariance
        # calculations below.
        w = unmasked*wDiff

        if np.sum(w) < self.config.minNumberGoodPixelsForCovariance:
            self.log.warning("Number of good points for covariance calculation (%s) is less "
                             "than the threshold (%s).", np.sum(w),
                             self.config.minNumberGoodPixelsForCovariance)
            return np.nan, np.nan, None

        maxRangeCov = self.config.maximumRangeCovariancesAstier

        # Calculate covariances via FFT.
        shapeDiff = np.array(diffIm.image.array.shape)
        # Calculate the sizes of the FFT dimensions.
        s = shapeDiff + maxRangeCov
        tempSize = np.array(np.log(s)/np.log(2.)).astype(int)
        fftSize = np.array(2**(tempSize+1)).astype(int)
        fftShape = (fftSize[0], fftSize[1])
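        # Note: padding the FFT to at least (image size + maximum lag), rounded
        # up to the next power of two, keeps the transform fast and avoids
        # circular-correlation wrap-around at the lags of interest.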

        c = CovFastFourierTransform(diffIm.image.array, w, fftShape, maxRangeCov)
        covDiffAstier = c.reportCovFastFourierTransform(maxRangeCov)

        # Compare Cov[0,0] and afwMath.VARIANCECLIP: covDiffAstier[0]
        # is the Cov[0,0] element, [3] is the variance, and there is a
        # factor of 0.5 difference with afwMath.VARIANCECLIP.
        thresholdPercentage = self.config.thresholdDiffAfwVarVsCov00
        fractionalDiff = 100*np.fabs(1 - varDiff/(covDiffAstier[0][3]*0.5))
        if fractionalDiff >= thresholdPercentage:
            self.log.warning("Absolute fractional difference between afwMath.VARIANCECLIP and Cov[0,0] "
                             "is more than %f%%: %f", thresholdPercentage, fractionalDiff)

        return mu, varDiff, covDiffAstier

    def getGainFromFlatPair(self, exposure1, exposure2, correctionType='NONE',
                            readNoise=None, region=None):
        """Estimate the gain from a single pair of flats.

        The basic premise is 1/g = <(I1 - I2)^2/(I1 + I2)> = const,
        where I1 and I2 correspond to flats 1 and 2, respectively.
        Corrections for the variable QE and the read noise are then
        made following the derivation in Robert Lupton's forthcoming
        book, which gets

        1/g = <(I1 - I2)^2/(I1 + I2)> - (1/mu)*(sigma^2 - 1/(2*g^2)),

        where 'mu' is the average signal level and 'sigma' is the
        amplifier's read noise. This is a quadratic equation in g,
        whose solutions are given by:

        g = (mu +/- sqrt(mu^2 - 2*const*mu + 2*sigma^2))
            / (2*const*mu - 2*sigma^2).

        The positive solution is used. The way the correction is
        applied depends on the value supplied for correctionType.

        correctionType is one of ['NONE', 'SIMPLE', 'FULL']:
        'NONE' : uses the 1/g = <(I1 - I2)^2/(I1 + I2)> formula.
        'SIMPLE' : uses the gain from the 'NONE' method for the
            1/(2*g^2) term.
        'FULL' : solves the full equation for g, discarding the
            non-physical solution to the resulting quadratic.

        If `readNoise` is `None` and `correctionType` is not 'NONE',
        the task logs a warning and falls back to 'NONE'.

        Parameters
        ----------
        exposure1 : `lsst.afw.image.exposure.ExposureF`
            First exposure of flat field pair.
        exposure2 : `lsst.afw.image.exposure.ExposureF`
            Second exposure of flat field pair.
        correctionType : `str`, optional
            The correction applied, one of ['NONE', 'SIMPLE', 'FULL'].
        readNoise : `float`, optional
            Amplifier readout noise (ADU).
        region : `lsst.geom.Box2I`, optional
            Region of each exposure where to perform the calculations
            (e.g., an amplifier).

        Returns
        -------
        gain : `float`
            Gain, in e/ADU.

        Raises
        ------
        RuntimeError
            Raised if `correctionType` is not one of 'NONE',
            'SIMPLE', or 'FULL'.
        """
        if correctionType not in ['NONE', 'SIMPLE', 'FULL']:
            raise RuntimeError("Unknown correction type: %s" % correctionType)

        if correctionType != 'NONE' and readNoise is None:
            self.log.warning("'correctionType' in 'getGainFromFlatPair' is %s, "
                             "but 'readNoise' is 'None'. Setting 'correctionType' "
                             "to 'NONE', so a gain value will be estimated without "
                             "corrections." % correctionType)
            correctionType = 'NONE'
        if region is not None:
            im1Area = exposure1.getImage()[region].getArray()
            im2Area = exposure2.getImage()[region].getArray()
        else:
            im1Area = exposure1.getImage().getArray()
            im2Area = exposure2.getImage().getArray()

        const = np.mean((im1Area - im2Area)**2 / (im1Area + im2Area))
        gain = 1. / const

        mu = 0.5*(np.mean(im1Area) + np.mean(im2Area))

        if correctionType == 'SIMPLE':
            # Use the uncorrected gain estimate for the 1/(2*g^2) term.
            gain = 1/(const - (1/mu)*(readNoise**2 - 1/(2*gain**2)))
        elif correctionType == 'FULL':
            root = np.sqrt(mu**2 - 2*mu*const + 2*readNoise**2)
            denom = (2*const*mu - 2*readNoise**2)
            positiveSolution = (root + mu)/denom
            gain = positiveSolution

        return gain

    def getReadNoiseFromMetadata(self, taskMetadata, ampName):
        """Get the readout noise for an amp from ISR metadata.

        Parameters
        ----------
        taskMetadata : `list` [`lsst.pipe.base.TaskMetadata`]
            List of exposure metadata from ISR.
        ampName : `str`
            Amplifier name.

        Returns
        -------
        readNoise : `float`
            Median of the overscan readnoise in the
            post-ISR metadata of the input exposures (ADU).
            Returns `None` if the median could not be calculated.
        """
        # Empirical readout noise [ADU] measured from an
        # overscan-subtracted overscan during ISR.
        expectedKey = f"RESIDUAL STDEV {ampName}"

        readNoises = []
        for expMetadata in taskMetadata:
            if 'isr' in expMetadata:
                overscanNoise = expMetadata['isr'][expectedKey]
            else:
                continue
            readNoises.append(overscanNoise)

        if len(readNoises):
            readNoise = np.median(np.array(readNoises))
        else:
            self.log.warning("Median readout noise from ISR metadata for amp %s "
                             "could not be calculated." % ampName)
            readNoise = None

        return readNoise
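

# ---------------------------------------------------------------------------
# Illustrative, standalone check (not part of the original module): a quick
# numerical sanity test of the uncorrected ('NONE') gain estimator documented
# in `getGainFromFlatPair`, using synthetic Poisson flats instead of butler
# exposures. All values below are made up for demonstration purposes.
if __name__ == "__main__":
    rng = np.random.default_rng(seed=42)
    trueGain = 1.5          # assumed gain, in e-/ADU
    meanElectrons = 50000.  # assumed mean illumination level, in electrons
    # Two independent flats with the same illumination, converted to ADU.
    flat1 = rng.poisson(meanElectrons, size=(500, 500)) / trueGain
    flat2 = rng.poisson(meanElectrons, size=(500, 500)) / trueGain
    # 1/g = <(I1 - I2)^2 / (I1 + I2)>, as in the 'NONE' correction type.
    const = np.mean((flat1 - flat2)**2 / (flat1 + flat2))
    print(f"True gain: {trueGain:.3f} e-/ADU; recovered: {1./const:.3f} e-/ADU")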