Coverage for python/lsst/cp/pipe/ptc/cpExtractPtcTask.py: 14%

234 statements  

coverage.py v6.4.4, created at 2022-08-23 03:20 -0700

# This file is part of cp_pipe.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
import numpy as np

import lsst.afw.math as afwMath
import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
from lsst.cp.pipe.utils import (arrangeFlatsByExpTime, arrangeFlatsByExpId,
                                sigmaClipCorrection, CovFastFourierTransform)

import lsst.pipe.base.connectionTypes as cT

from lsst.ip.isr import PhotonTransferCurveDataset
from lsst.ip.isr import IsrTask

__all__ = ['PhotonTransferCurveExtractConfig', 'PhotonTransferCurveExtractTask']


class PhotonTransferCurveExtractConnections(pipeBase.PipelineTaskConnections,
                                            dimensions=("instrument", "detector")):

    inputExp = cT.Input(
        name="ptcInputExposurePairs",
        doc="Input post-ISR processed exposure pairs (flats) to "
            "measure covariances from.",
        storageClass="Exposure",
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
        deferLoad=True,
    )
    taskMetadata = cT.Input(
        name="isr_metadata",
        doc="Input task metadata to extract statistics from.",
        storageClass="TaskMetadata",
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
    )
    outputCovariances = cT.Output(
        name="ptcCovariances",
        doc="Extracted flat (co)variances.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "exposure", "detector"),
        isCalibration=True,
        multiple=True,
    )


class PhotonTransferCurveExtractConfig(pipeBase.PipelineTaskConfig,
                                       pipelineConnections=PhotonTransferCurveExtractConnections):
    """Configuration for the measurement of covariances from flats.
    """

    matchByExposureId = pexConfig.Field(
        dtype=bool,
        doc="Should exposures be matched by ID rather than exposure time?",
        default=False,
    )
    maximumRangeCovariancesAstier = pexConfig.Field(
        dtype=int,
        doc="Maximum range of covariances as in Astier+19",
        default=8,
    )
    binSize = pexConfig.Field(
        dtype=int,
        doc="Bin the image by this factor in both dimensions.",
        default=1,
    )
    minMeanSignal = pexConfig.DictField(
        keytype=str,
        itemtype=float,
        doc="Minimum values (inclusive) of mean signal (in ADU) per amp to use."
            " The same cut is applied to all amps if this parameter [`dict`] is passed as"
            " {'ALL_AMPS': value}",
        default={'ALL_AMPS': 0.0},
    )
    maxMeanSignal = pexConfig.DictField(
        keytype=str,
        itemtype=float,
        doc="Maximum values (inclusive) of mean signal (in ADU) per amp to use."
            " The same cut is applied to all amps if this dictionary is of the form"
            " {'ALL_AMPS': value}",
        default={'ALL_AMPS': 1e6},
    )
    maskNameList = pexConfig.ListField(
        dtype=str,
        doc="Mask list to exclude from statistics calculations.",
        default=['SUSPECT', 'BAD', 'NO_DATA', 'SAT'],
    )
    nSigmaClipPtc = pexConfig.Field(
        dtype=float,
        doc="Sigma cut for afwMath.StatisticsControl()",
        default=5.5,
    )
    nIterSigmaClipPtc = pexConfig.Field(
        dtype=int,
        doc="Number of sigma-clipping iterations for afwMath.StatisticsControl()",
        default=3,
    )
    minNumberGoodPixelsForCovariance = pexConfig.Field(
        dtype=int,
        doc="Minimum number of acceptable good pixels per amp to calculate the covariances (via FFT or"
            " directly).",
        default=10000,
    )
    thresholdDiffAfwVarVsCov00 = pexConfig.Field(
        dtype=float,
        doc="If the absolute fractional difference between afwMath.VARIANCECLIP and Cov00 "
            "for a region of a difference image is greater than this threshold (percentage), "
            "a warning will be issued.",
        default=1.,
    )
    detectorMeasurementRegion = pexConfig.ChoiceField(
        dtype=str,
        doc="Region of each exposure in which to perform the calculations (amplifier or full image).",
        default='AMP',
        allowed={
            "AMP": "Amplifier of the detector.",
            "FULL": "Full image."
        }
    )
    numEdgeSuspect = pexConfig.Field(
        dtype=int,
        doc="Number of edge pixels to be flagged as untrustworthy.",
        default=0,
    )
    edgeMaskLevel = pexConfig.ChoiceField(
        dtype=str,
        doc="Mask edge pixels in which coordinate frame: DETECTOR or AMP?",
        default="DETECTOR",
        allowed={
            'DETECTOR': 'Mask only the edges of the full detector.',
            'AMP': 'Mask edges of each amplifier.',
        },
    )
    doGain = pexConfig.Field(
        dtype=bool,
        doc="Calculate a gain per input flat pair.",
        default=True,
    )
    gainCorrectionType = pexConfig.ChoiceField(
        dtype=str,
        doc="Correction type for the gain.",
        default='FULL',
        allowed={
            'NONE': 'No correction.',
            'SIMPLE': 'First order correction.',
            'FULL': 'Second order correction.'
        }
    )
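
# Illustrative sketch (not part of the original module): in a pipeline config
# override, the signal cuts and the gain correction can be set either for all
# amplifiers at once or per amplifier; the amplifier names below are
# hypothetical.
#
#     config.maxMeanSignal = {'ALL_AMPS': 9.0e4}
#     config.minMeanSignal = {'C00': 50.0, 'C01': 100.0}
#     config.gainCorrectionType = 'SIMPLE'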


class PhotonTransferCurveExtractTask(pipeBase.PipelineTask):
    """Task to measure covariances from flat fields.

    This task receives as input a list of flat-field images
    (flats), and sorts these flats into pairs taken at the
    same exposure time (the task will raise if there is only
    one flat at a given exposure time, and it will discard extra
    flats if there are more than two per exposure time). This task
    measures the mean, variance, and covariances from a region
    (e.g., an amplifier) of the difference image of the two flats
    with the same exposure time.

    The variance is calculated via afwMath, and the covariance
    via the methods in Astier+19 (appendix A). In theory,
    var = covariance[0,0]. This should be validated, and in the
    future, we may decide to just keep one (covariance).
    At this moment, if the two values differ by more than the value
    of `thresholdDiffAfwVarVsCov00` (default: 1%), a warning will
    be issued.

    The measured covariances at a given exposure time (along with
    other quantities such as the mean) are stored in a PTC dataset
    object (`~lsst.ip.isr.PhotonTransferCurveDataset`), which gets
    partially filled at this stage (the remainder of the attributes
    of the dataset will be filled after running the second task of
    the PTC-measurement pipeline, `~PhotonTransferCurveSolveTask`).

    The number of partially-filled
    `~lsst.ip.isr.PhotonTransferCurveDataset` objects will be less
    than the number of input exposures because the task combines
    input flats in pairs. However, it is required at this moment
    that the number of input dimensions matches
    bijectively the number of output dimensions. Therefore, a number
    of "dummy" PTC datasets are inserted in the output list. This
    output list will then be used as input of the next task in the
    PTC-measurement pipeline, `PhotonTransferCurveSolveTask`,
    which will assemble the multiple `PhotonTransferCurveDataset`
    objects into a single one in order to fit the measured covariances
    as a function of flux to one of three models
    (see `PhotonTransferCurveSolveTask` for details).

    Reference: Astier+19: "The Shape of the Photon Transfer Curve of CCD
    sensors", arXiv:1905.08677.
    """

    ConfigClass = PhotonTransferCurveExtractConfig
    _DefaultName = 'cpPtcExtract'

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        """Ensure that the input and output dimensions are passed along.

        Parameters
        ----------
        butlerQC : `~lsst.daf.butler.butlerQuantumContext.ButlerQuantumContext`
            Butler to operate on.
        inputRefs : `~lsst.pipe.base.connections.InputQuantizedConnection`
            Input data refs to load.
        outputRefs : `~lsst.pipe.base.connections.OutputQuantizedConnection`
            Output data refs to persist.
        """
        inputs = butlerQC.get(inputRefs)
        # IDs of the input exposure references
        # (deferLoad=True in the input connections).
        inputs['inputDims'] = [expRef.datasetRef.dataId['exposure'] for expRef in inputRefs.inputExp]

        # Dictionary, keyed by expTime, with tuples containing flat
        # exposures and their IDs.
        if self.config.matchByExposureId:
            inputs['inputExp'] = arrangeFlatsByExpId(inputs['inputExp'], inputs['inputDims'])
        else:
            inputs['inputExp'] = arrangeFlatsByExpTime(inputs['inputExp'], inputs['inputDims'])

        outputs = self.run(**inputs)
        butlerQC.put(outputs, outputRefs)

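    # Illustrative sketch (not part of the original task): after
    # `arrangeFlatsByExpTime` (or `arrangeFlatsByExpId`), `inputExp` is a
    # dict whose values are lists of (deferred exposure reference,
    # exposure ID) tuples, as described in the `run` docstring, e.g.
    #
    #     {0.5: [(expRef101, 101), (expRef102, 102)],
    #      1.0: [(expRef103, 103), (expRef104, 104)]}
    #
    # (the exposure IDs shown are hypothetical placeholders). In `run`
    # below, `list(inputExp.values())[0][0][0]` is therefore the first
    # deferred reference, from which the detector is read.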

    def run(self, inputExp, inputDims, taskMetadata):
        """Measure covariances from difference of flat pairs.

        Parameters
        ----------
        inputExp : `dict` [`float`, `list`
                [`~lsst.pipe.base.connections.DeferredDatasetRef`]]
            Dictionary that groups references to flat-field exposures that
            have the same exposure time (seconds), or that groups them
            sequentially by their exposure id.
        inputDims : `list`
            List of exposure IDs.
        taskMetadata : `list` [`lsst.pipe.base.TaskMetadata`]
            List of exposures metadata from ISR.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The resulting Struct contains:
            ``outputCovariances``
                A list containing the per-pair PTC measurements (`list`
                [`lsst.ip.isr.PhotonTransferCurveDataset`]).
        """
        # inputExp.values() returns a view, which we turn into a list. We then
        # access the first exposure-ID tuple to get the detector.
        # The first "get()" retrieves the exposure from the exposure reference.
        detector = list(inputExp.values())[0][0][0].get(component='detector')
        detNum = detector.getId()
        amps = detector.getAmplifiers()
        ampNames = [amp.getName() for amp in amps]

        # Each amp may have a different min and max ADU signal
        # specified in the config.
        maxMeanSignalDict = {ampName: 1e6 for ampName in ampNames}
        minMeanSignalDict = {ampName: 0.0 for ampName in ampNames}
        for ampName in ampNames:
            if 'ALL_AMPS' in self.config.maxMeanSignal:
                maxMeanSignalDict[ampName] = self.config.maxMeanSignal['ALL_AMPS']
            elif ampName in self.config.maxMeanSignal:
                maxMeanSignalDict[ampName] = self.config.maxMeanSignal[ampName]

            if 'ALL_AMPS' in self.config.minMeanSignal:
                minMeanSignalDict[ampName] = self.config.minMeanSignal['ALL_AMPS']
            elif ampName in self.config.minMeanSignal:
                minMeanSignalDict[ampName] = self.config.minMeanSignal[ampName]
        # These are the column names for `tupleRows` below.
        tags = [('mu', '<f8'), ('afwVar', '<f8'), ('i', '<i8'), ('j', '<i8'), ('var', '<f8'),
                ('cov', '<f8'), ('npix', '<i8'), ('ext', '<i8'), ('expTime', '<f8'), ('ampName', '<U3')]
        # Create a dummy ptcDataset. Dummy datasets will be
        # used to ensure that the number of output and input
        # dimensions match.
        dummyPtcDataset = PhotonTransferCurveDataset(ampNames, 'DUMMY',
                                                     self.config.maximumRangeCovariancesAstier)

        readNoiseDict = {ampName: 0.0 for ampName in ampNames}
        for ampName in ampNames:
            # Initialize amps of `dummyPtcDataset`.
            dummyPtcDataset.setAmpValues(ampName)
            # Overscan readnoise from post-ISR exposure metadata.
            # It will be used to estimate the gain from a pair of flats.
            readNoiseDict[ampName] = self.getReadNoiseFromMetadata(taskMetadata, ampName)

        # Output list with PTC datasets.
        partialPtcDatasetList = []
        # The number of output references needs to match that of input
        # references: initialize the output list with dummy PTC datasets.
        for i in range(len(inputDims)):
            partialPtcDatasetList.append(dummyPtcDataset)

        if self.config.numEdgeSuspect > 0:
            isrTask = IsrTask()
            self.log.info("Masking %d pixels from the edges of all exposures as SUSPECT.",
                          self.config.numEdgeSuspect)

        for expTime in inputExp:
            exposures = inputExp[expTime]
            if len(exposures) == 1:
                self.log.warning("Only one exposure found at expTime %f. Dropping exposure %d.",
                                 expTime, exposures[0][1])
                continue
            else:
                # Only use the first two exposures at expTime. Each
                # element is a tuple (exposure, expId).
                expRef1, expId1 = exposures[0]
                expRef2, expId2 = exposures[1]
                # Use get() to obtain `lsst.afw.image.Exposure`.
                exp1, exp2 = expRef1.get(), expRef2.get()

                if len(exposures) > 2:
                    self.log.warning("Already found 2 exposures at expTime %f. Ignoring exposures: %s",
                                     expTime, ", ".join(str(i[1]) for i in exposures[2:]))
            # Mask pixels at the edge of the detector or of each amp.
            if self.config.numEdgeSuspect > 0:
                isrTask.maskEdges(exp1, numEdgePixels=self.config.numEdgeSuspect,
                                  maskPlane="SUSPECT", level=self.config.edgeMaskLevel)
                isrTask.maskEdges(exp2, numEdgePixels=self.config.numEdgeSuspect,
                                  maskPlane="SUSPECT", level=self.config.edgeMaskLevel)

            nAmpsNan = 0
            partialPtcDataset = PhotonTransferCurveDataset(ampNames, 'PARTIAL',
                                                           self.config.maximumRangeCovariancesAstier)
            for ampNumber, amp in enumerate(detector):
                ampName = amp.getName()
                if self.config.detectorMeasurementRegion == 'AMP':
                    region = amp.getBBox()
                elif self.config.detectorMeasurementRegion == 'FULL':
                    region = None

                # Get masked image regions, masking planes, statistic control
                # objects, and clipped means. Calculate once to reuse in
                # `measureMeanVarCov` and `getGainFromFlatPair`.
                im1Area, im2Area, imStatsCtrl, mu1, mu2 = self.getImageAreasMasksStats(exp1, exp2,
                                                                                       region=region)

                # `measureMeanVarCov` is the function that measures
                # the variance and covariances from a region of
                # the difference image of two flats at the same
                # exposure time. The variable `covAstier` that is
                # returned is of the form:
                # [(i, j, var (cov[0,0]), cov, npix) for (i,j) in
                # {maxLag, maxLag}^2].
                muDiff, varDiff, covAstier = self.measureMeanVarCov(im1Area, im2Area, imStatsCtrl, mu1, mu2)
                # Estimate the gain from the flat pair.
                if self.config.doGain:
                    gain = self.getGainFromFlatPair(im1Area, im2Area, imStatsCtrl, mu1, mu2,
                                                    correctionType=self.config.gainCorrectionType,
                                                    readNoise=readNoiseDict[ampName])
                else:
                    gain = np.nan

                # Correction factor for bias introduced by sigma
                # clipping.
                # Function returns 1/sqrt(varFactor), so it needs
                # to be squared. varDiff is calculated via
                # afwMath.VARIANCECLIP.
                varFactor = sigmaClipCorrection(self.config.nSigmaClipPtc)**2
                varDiff *= varFactor

                expIdMask = True
                # Mask data point at this mean signal level if
                # the signal, variance, or covariance calculations
                # from `measureMeanVarCov` resulted in NaNs.
                if np.isnan(muDiff) or np.isnan(varDiff) or (covAstier is None):
                    self.log.warning("NaN mean or var, or None cov in amp %s in exposure pair %d, %d of "
                                     "detector %d.", ampName, expId1, expId2, detNum)
                    nAmpsNan += 1
                    expIdMask = False
                    covArray = np.full((1, self.config.maximumRangeCovariancesAstier,
                                        self.config.maximumRangeCovariancesAstier), np.nan)
                    covSqrtWeights = np.full_like(covArray, np.nan)

                # Mask data point if it is outside of the
                # specified mean signal range.
                if (muDiff <= minMeanSignalDict[ampName]) or (muDiff >= maxMeanSignalDict[ampName]):
                    expIdMask = False

                if covAstier is not None:
                    # Turn the tuples with the measured information
                    # into covariance arrays.
                    # covRow: (i, j, var (cov[0,0]), cov, npix)
                    tupleRows = [(muDiff, varDiff) + covRow + (ampNumber, expTime,
                                                               ampName) for covRow in covAstier]
                    tempStructArray = np.array(tupleRows, dtype=tags)
                    covArray, vcov, _ = self.makeCovArray(tempStructArray,
                                                          self.config.maximumRangeCovariancesAstier)
                    covSqrtWeights = np.nan_to_num(1./np.sqrt(vcov))

                    # Correct covArray for sigma clipping:
                    # 1) Apply varFactor twice for the whole covariance matrix,
                    covArray *= varFactor**2
                    # 2) but only once for the variance element of the
                    # matrix, covArray[0, 0] (so divide one factor out).
                    covArray[0, 0] /= varFactor

                partialPtcDataset.setAmpValues(ampName, rawExpTime=[expTime], rawMean=[muDiff],
                                               rawVar=[varDiff], inputExpIdPair=[(expId1, expId2)],
                                               expIdMask=[expIdMask], covArray=covArray,
                                               covSqrtWeights=covSqrtWeights, gain=gain,
                                               noise=readNoiseDict[ampName])
            # Use the location of exp1 to save the PTC dataset from the
            # (exp1, exp2) pair. Below, np.where(expId1 == np.array(inputDims))
            # returns a tuple with a single-element array, so [0][0]
            # is necessary to extract the required index.
            datasetIndex = np.where(expId1 == np.array(inputDims))[0][0]
            # `partialPtcDatasetList` is a list of
            # `PhotonTransferCurveDataset` objects. Some of them
            # will be dummy datasets (to match the length of the input
            # and output references), and the rest will hold
            # the mean signal, variance, and
            # covariance measurements at a given exposure
            # time. The next part of the PTC-measurement
            # pipeline, `solve`, will take this list as input,
            # and assemble the measurements in the datasets
            # in an adequate manner for fitting a PTC
            # model.
            partialPtcDataset.updateMetadata(setDate=True, detector=detector)
            partialPtcDatasetList[datasetIndex] = partialPtcDataset

            if nAmpsNan == len(ampNames):
                msg = f"NaN mean in all amps of exposure pair {expId1}, {expId2} of detector {detNum}."
                self.log.warning(msg)
        return pipeBase.Struct(
            outputCovariances=partialPtcDatasetList,
        )

    def makeCovArray(self, inputTuple, maxRangeFromTuple):
        """Make covariances array from tuple.

        Parameters
        ----------
        inputTuple : `numpy.ndarray`
            Structured array with rows with at least
            (mu, afwVar, cov, var, i, j, npix), where:
            mu : `float`
                0.5*(mu1 + mu2), where mu1 is the mean value of flat1
                and mu2 is the mean value of flat2.
            afwVar : `float`
                Variance of difference flat, calculated with afw.
            cov : `float`
                Covariance value at lag (i, j).
            var : `float`
                Variance (covariance value at lag (0, 0)).
            i : `int`
                Lag in dimension "x".
            j : `int`
                Lag in dimension "y".
            npix : `int`
                Number of pixels used for covariance calculation.
        maxRangeFromTuple : `int`
            Maximum range to select from tuple.

        Returns
        -------
        cov : `numpy.ndarray`
            Covariance arrays, indexed by mean signal mu.
        vCov : `numpy.ndarray`
            Variance of the [co]variance arrays, indexed by mean signal mu.
        muVals : `numpy.ndarray`
            List of mean signal values.
        """
        if maxRangeFromTuple is not None:
            cut = (inputTuple['i'] < maxRangeFromTuple) & (inputTuple['j'] < maxRangeFromTuple)
            cutTuple = inputTuple[cut]
        else:
            cutTuple = inputTuple
        # Increasing mu order, so that we can group measurements with the
        # same mu.
        muTemp = cutTuple['mu']
        ind = np.argsort(muTemp)

        cutTuple = cutTuple[ind]
        # Should group measurements on the same image pairs (same average).
        mu = cutTuple['mu']
        xx = np.hstack(([mu[0]], mu))
        delta = xx[1:] - xx[:-1]
        steps, = np.where(delta > 0)
        ind = np.zeros_like(mu, dtype=int)
        ind[steps] = 1
        ind = np.cumsum(ind)  # This acts as an image pair index.
        # Now fill the 3-d cov array (and variance).
        muVals = np.array(np.unique(mu))
        i = cutTuple['i'].astype(int)
        j = cutTuple['j'].astype(int)
        c = 0.5*cutTuple['cov']
        n = cutTuple['npix']
        v = 0.5*cutTuple['var']
        # Book and fill.
        cov = np.ndarray((len(muVals), np.max(i)+1, np.max(j)+1))
        var = np.zeros_like(cov)
        cov[ind, i, j] = c
        var[ind, i, j] = v**2/n
        var[:, 0, 0] *= 2  # var(v) = 2*v**2/N

        return cov, var, muVals
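
    # Illustrative sketch (not part of the original task): the pair-index
    # trick in `makeCovArray` can be exercised with plain NumPy, condensing
    # the zeros/steps/cumsum lines above. Assuming four rows that belong to
    # two image pairs (mu = 10 and mu = 20, already sorted):
    #
    #     import numpy as np
    #     mu = np.array([10., 10., 20., 20.])
    #     xx = np.hstack(([mu[0]], mu))
    #     ind = np.cumsum(np.where(xx[1:] - xx[:-1] > 0, 1, 0))
    #     # ind -> array([0, 0, 1, 1]): rows that share a mean signal get
    #     # the same index, i.e. the same plane of the 3-d `cov` array.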

    def measureMeanVarCov(self, im1Area, im2Area, imStatsCtrl, mu1, mu2):
        """Calculate the mean of each of two exposures and the variance
        and covariance of their difference. The variance is calculated
        via afwMath, and the covariance via the methods in Astier+19
        (appendix A). In theory, var = covariance[0,0]. This should
        be validated, and in the future, we may decide to just keep
        one (covariance).

        Parameters
        ----------
        im1Area : `lsst.afw.image.maskedImage.MaskedImageF`
            Masked image from exposure 1.
        im2Area : `lsst.afw.image.maskedImage.MaskedImageF`
            Masked image from exposure 2.
        imStatsCtrl : `lsst.afw.math.StatisticsControl`
            Statistics control object.
        mu1 : `float`
            Clipped mean of im1Area (ADU).
        mu2 : `float`
            Clipped mean of im2Area (ADU).

        Returns
        -------
        mu : `float` or `NaN`
            0.5*(mu1 + mu2), where mu1 and mu2 are the clipped means
            of the regions in both exposures. If either mu1 or mu2 are
            NaN's, the returned value is NaN.
        varDiff : `float` or `NaN`
            Half of the clipped variance of the difference of the
            regions in the two input exposures. If either mu1 or mu2 are
            NaN's, the returned value is NaN.
        covDiffAstier : `list` or `NaN`
            List with tuples of the form (dx, dy, var, cov, npix), where:
                dx : `int`
                    Lag in x.
                dy : `int`
                    Lag in y.
                var : `float`
                    Variance at (dx, dy).
                cov : `float`
                    Covariance at (dx, dy).
                nPix : `int`
                    Number of pixel pairs used to evaluate var and cov.

            If either mu1 or mu2 are NaN's, the returned value is NaN.
        """
        if np.isnan(mu1) or np.isnan(mu2):
            self.log.warning("Mean of amp in image 1 or 2 is NaN: %f, %f.", mu1, mu2)
            return np.nan, np.nan, None
        mu = 0.5*(mu1 + mu2)

        # Take difference of pairs.
        # Symmetric formula: diff = (mu2*im1 - mu1*im2)/(0.5*(mu1 + mu2))
        temp = im2Area.clone()
        temp *= mu1
        diffIm = im1Area.clone()
        diffIm *= mu2
        diffIm -= temp
        diffIm /= mu

        # Variance calculation via afwMath.
        varDiff = 0.5*(afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, imStatsCtrl).getValue())

        # Covariances calculations.
        # Get the pixels that were not clipped.
        varClip = afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, imStatsCtrl).getValue()
        meanClip = afwMath.makeStatistics(diffIm, afwMath.MEANCLIP, imStatsCtrl).getValue()
        cut = meanClip + self.config.nSigmaClipPtc*np.sqrt(varClip)
        unmasked = np.where(np.fabs(diffIm.image.array) <= cut, 1, 0)

        # Get the pixels in the mask planes of the difference image
        # that were ignored by the clipping algorithm.
        wDiff = np.where(diffIm.getMask().getArray() == 0, 1, 0)
        # Combine the two sets of pixels ('1': use; '0': don't use)
        # into a final weight matrix to be used in the covariance
        # calculations below.
        w = unmasked*wDiff

        if np.sum(w) < self.config.minNumberGoodPixelsForCovariance:
            self.log.warning("Number of good points for covariance calculation (%s) is less "
                             "than the threshold (%s).", np.sum(w),
                             self.config.minNumberGoodPixelsForCovariance)
            return np.nan, np.nan, None

        maxRangeCov = self.config.maximumRangeCovariancesAstier

        # Calculate covariances via FFT.
        shapeDiff = np.array(diffIm.image.array.shape)
        # Calculate the sizes of the FFT dimensions.
        s = shapeDiff + maxRangeCov
        tempSize = np.array(np.log(s)/np.log(2.)).astype(int)
        fftSize = np.array(2**(tempSize+1)).astype(int)
        fftShape = (fftSize[0], fftSize[1])
        c = CovFastFourierTransform(diffIm.image.array, w, fftShape, maxRangeCov)
        # np.sum(w) is the same as npix[0][0] returned in covDiffAstier.
        covDiffAstier = c.reportCovFastFourierTransform(maxRangeCov)

        # Compare Cov[0,0] and afwMath.VARIANCECLIP: covDiffAstier[0]
        # is the Cov[0,0] element, [3] is the variance, and there's a
        # factor of 0.5 difference with afwMath.VARIANCECLIP.
        thresholdPercentage = self.config.thresholdDiffAfwVarVsCov00
        fractionalDiff = 100*np.fabs(1 - varDiff/(covDiffAstier[0][3]*0.5))
        if fractionalDiff >= thresholdPercentage:
            self.log.warning("Absolute fractional difference between afwMath.VARIANCECLIP and Cov[0,0] "
                             "is more than %f%%: %f", thresholdPercentage, fractionalDiff)

        return mu, varDiff, covDiffAstier
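
    # Illustrative sketch (not part of the original task): the symmetric
    # difference used above, diff = (mu2*im1 - mu1*im2)/(0.5*(mu1 + mu2)),
    # cancels the common flat pattern to first order even when the two
    # flats sit at slightly different levels, and half of its variance
    # approximates the Poisson variance at the mean signal. A quick NumPy
    # check with made-up Poisson images:
    #
    #     import numpy as np
    #     rng = np.random.default_rng(0)
    #     im1 = rng.poisson(10000., size=1_000_000).astype(float)
    #     im2 = rng.poisson(10500., size=1_000_000).astype(float)
    #     mu1, mu2 = im1.mean(), im2.mean()
    #     diff = (mu2*im1 - mu1*im2)/(0.5*(mu1 + mu2))
    #     # abs(diff.mean()) is ~0 (compared to the ~1e4 signal), and
    #     # 0.5*diff.var() is close to 0.5*(mu1 + mu2) (within ~1%).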

    def getImageAreasMasksStats(self, exposure1, exposure2, region=None):
        """Get image areas in a region as well as masks and statistic objects.

        Parameters
        ----------
        exposure1 : `lsst.afw.image.exposure.ExposureF`
            First exposure of flat field pair.
        exposure2 : `lsst.afw.image.exposure.ExposureF`
            Second exposure of flat field pair.
        region : `lsst.geom.Box2I`, optional
            Region of each exposure in which to perform the calculations
            (e.g., an amplifier).

        Returns
        -------
        im1Area : `lsst.afw.image.maskedImage.MaskedImageF`
            Masked image from exposure 1.
        im2Area : `lsst.afw.image.maskedImage.MaskedImageF`
            Masked image from exposure 2.
        imStatsCtrl : `lsst.afw.math.StatisticsControl`
            Statistics control object.
        mu1 : `float`
            Clipped mean of im1Area (ADU).
        mu2 : `float`
            Clipped mean of im2Area (ADU).
        """
        if region is not None:
            im1Area = exposure1.maskedImage[region]
            im2Area = exposure2.maskedImage[region]
        else:
            im1Area = exposure1.maskedImage
            im2Area = exposure2.maskedImage

        if self.config.binSize > 1:
            im1Area = afwMath.binImage(im1Area, self.config.binSize)
            im2Area = afwMath.binImage(im2Area, self.config.binSize)

        # Get mask planes and construct statistics control object from one
        # of the exposures.
        imMaskVal = exposure1.getMask().getPlaneBitMask(self.config.maskNameList)
        imStatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
                                                self.config.nIterSigmaClipPtc,
                                                imMaskVal)
        imStatsCtrl.setNanSafe(True)
        imStatsCtrl.setAndMask(imMaskVal)

        mu1 = afwMath.makeStatistics(im1Area, afwMath.MEANCLIP, imStatsCtrl).getValue()
        mu2 = afwMath.makeStatistics(im2Area, afwMath.MEANCLIP, imStatsCtrl).getValue()

        return (im1Area, im2Area, imStatsCtrl, mu1, mu2)

    def getGainFromFlatPair(self, im1Area, im2Area, imStatsCtrl, mu1, mu2,
                            correctionType='NONE', readNoise=None):
        """Estimate the gain from a single pair of flats.

        The basic premise is 1/g = <(I1 - I2)^2/(I1 + I2)> = 1/const,
        where I1 and I2 correspond to flats 1 and 2, respectively.
        Corrections for the variable QE and the read-noise are then
        made following the derivation in Robert Lupton's forthcoming
        book, which gets

        1/g = <(I1 - I2)^2/(I1 + I2)> - (1/mu)*(sigma^2 - 1/(2*g^2)).

        This is a quadratic equation, whose solutions are given by:

        g = (mu +/- sqrt(2*sigma^2 - 2*const*mu + mu^2))/(2*const*mu
            - 2*sigma^2)

        where 'mu' is the average signal level and 'sigma' is the
        amplifier's readnoise. The positive solution will be used.
        The way the correction is applied depends on the value
        supplied for correctionType.

        correctionType is one of ['NONE', 'SIMPLE', 'FULL']:
            'NONE' : uses the 1/g = <(I1 - I2)^2/(I1 + I2)> formula.
            'SIMPLE' : uses the gain from the 'NONE' method for the
                1/(2*g^2) term.
            'FULL' : solves the full equation for g, discarding the
                non-physical solution to the resulting quadratic.

        Parameters
        ----------
        im1Area : `lsst.afw.image.maskedImage.MaskedImageF`
            Masked image from exposure 1.
        im2Area : `lsst.afw.image.maskedImage.MaskedImageF`
            Masked image from exposure 2.
        imStatsCtrl : `lsst.afw.math.StatisticsControl`
            Statistics control object.
        mu1 : `float`
            Clipped mean of im1Area (ADU).
        mu2 : `float`
            Clipped mean of im2Area (ADU).
        correctionType : `str`, optional
            The correction applied, one of ['NONE', 'SIMPLE', 'FULL'].
        readNoise : `float`, optional
            Amplifier readout noise (ADU).

        Returns
        -------
        gain : `float`
            Gain, in e/ADU.

        Raises
        ------
        RuntimeError
            Raised if `correctionType` is not one of 'NONE',
            'SIMPLE', or 'FULL'.
        """
        if correctionType not in ['NONE', 'SIMPLE', 'FULL']:
            raise RuntimeError("Unknown correction type: %s" % correctionType)

        if correctionType != 'NONE' and readNoise is None:
            self.log.warning("'correctionType' in 'getGainFromFlatPair' is %s, "
                             "but 'readNoise' is 'None'. Setting 'correctionType' "
                             "to 'NONE', so a gain value will be estimated without "
                             "corrections." % correctionType)
            correctionType = 'NONE'

        mu = 0.5*(mu1 + mu2)

        # ratioIm = (I1 - I2)^2 / (I1 + I2)
        temp = im2Area.clone()
        ratioIm = im1Area.clone()
        ratioIm -= temp
        ratioIm *= ratioIm

        # Sum of pairs.
        sumIm = im1Area.clone()
        sumIm += temp

        ratioIm /= sumIm

        const = afwMath.makeStatistics(ratioIm, afwMath.MEAN, imStatsCtrl).getValue()
        gain = 1. / const

        if correctionType == 'SIMPLE':
            gain = 1/(const - (1/mu)*(readNoise**2 - (1/2*gain**2)))
        elif correctionType == 'FULL':
            root = np.sqrt(mu**2 - 2*mu*const + 2*readNoise**2)
            denom = (2*const*mu - 2*readNoise**2)
            positiveSolution = (root + mu)/denom
            gain = positiveSolution

        return gain
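
    # Illustrative sketch (not part of the original task): the 'FULL'
    # branch above returns the positive root of the quadratic quoted in
    # the docstring. With made-up numbers (mu and sigma in ADU) one can
    # check that the root satisfies
    # 1/g = const - (1/mu)*(sigma^2 - 1/(2*g^2)):
    #
    #     import numpy as np
    #     mu, sigma, const = 20000., 5., 1./1.5
    #     root = np.sqrt(mu**2 - 2.*mu*const + 2.*sigma**2)
    #     g = (root + mu)/(2.*const*mu - 2.*sigma**2)
    #     # np.isclose(1./g, const - (1./mu)*(sigma**2 - 1./(2.*g**2)))
    #     # -> True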

    def getReadNoiseFromMetadata(self, taskMetadata, ampName):
        """Get the readout noise for an amp from ISR metadata.

        Parameters
        ----------
        taskMetadata : `list` [`lsst.pipe.base.TaskMetadata`]
            List of exposures metadata from ISR.
        ampName : `str`
            Amplifier name.

        Returns
        -------
        readNoise : `float` or `None`
            Median of the overscan readnoise in the
            post-ISR metadata of the input exposures (ADU).
            Returns `None` if the median could not be calculated.
        """
        # Empirical readout noise [ADU] measured from an
        # overscan-subtracted overscan during ISR.
        expectedKey = f"RESIDUAL STDEV {ampName}"

        readNoises = []
        for expMetadata in taskMetadata:
            if 'isr' in expMetadata:
                overscanNoise = expMetadata['isr'][expectedKey]
            else:
                continue
            readNoises.append(overscanNoise)

        if len(readNoises):
            readNoise = np.median(np.array(readNoises))
        else:
            self.log.warning("Median readout noise from ISR metadata for amp %s "
                             "could not be calculated." % ampName)
            readNoise = None

        return readNoise