Coverage for python/lsst/cp/pipe/ptc/cpExtractPtcTask.py: 12%

234 statements  

coverage.py v6.5.0, created at 2022-10-20 03:07 -0700

1# This file is part of cp_pipe. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (https://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# This program is free software: you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation, either version 3 of the License, or 

12# (at your option) any later version. 

13# 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18# 

19# You should have received a copy of the GNU General Public License 

20# along with this program. If not, see <https://www.gnu.org/licenses/>. 

21# 

22import numpy as np 

23 

24import lsst.afw.math as afwMath 

25import lsst.pex.config as pexConfig 

26import lsst.pipe.base as pipeBase 

27from lsst.cp.pipe.utils import (arrangeFlatsByExpTime, arrangeFlatsByExpId, 

28 sigmaClipCorrection, CovFastFourierTransform) 

29 

30import lsst.pipe.base.connectionTypes as cT 

31 

32from lsst.ip.isr import PhotonTransferCurveDataset 

33from lsst.ip.isr import IsrTask 

34 

35__all__ = ['PhotonTransferCurveExtractConfig', 'PhotonTransferCurveExtractTask'] 

36 

37 

38class PhotonTransferCurveExtractConnections(pipeBase.PipelineTaskConnections, 

39 dimensions=("instrument", "detector")): 

40 

41 inputExp = cT.Input( 

42 name="ptcInputExposurePairs", 

43 doc="Input post-ISR processed exposure pairs (flats) to "

44 "measure covariances from.", 

45 storageClass="Exposure", 

46 dimensions=("instrument", "exposure", "detector"), 

47 multiple=True, 

48 deferLoad=True, 

49 ) 

50 taskMetadata = cT.Input( 

51 name="isr_metadata", 

52 doc="Input task metadata to extract statistics from.", 

53 storageClass="TaskMetadata", 

54 dimensions=("instrument", "exposure", "detector"), 

55 multiple=True, 

56 ) 

57 outputCovariances = cT.Output( 

58 name="ptcCovariances", 

59 doc="Extracted flat (co)variances.", 

60 storageClass="PhotonTransferCurveDataset", 

61 dimensions=("instrument", "exposure", "detector"), 

62 isCalibration=True, 

63 multiple=True, 

64 ) 

65 

66 

67class PhotonTransferCurveExtractConfig(pipeBase.PipelineTaskConfig, 

68 pipelineConnections=PhotonTransferCurveExtractConnections): 

69 """Configuration for the measurement of covariances from flats. 

70 """ 

71 

72 matchByExposureId = pexConfig.Field( 

73 dtype=bool, 

74 doc="Should exposures be matched by ID rather than exposure time?", 

75 default=False, 

76 ) 

77 maximumRangeCovariancesAstier = pexConfig.Field( 

78 dtype=int, 

79 doc="Maximum range of covariances as in Astier+19", 

80 default=8, 

81 ) 

82 binSize = pexConfig.Field( 

83 dtype=int, 

84 doc="Bin the image by this factor in both dimensions.", 

85 default=1, 

86 ) 

87 minMeanSignal = pexConfig.DictField( 

88 keytype=str, 

89 itemtype=float, 

90 doc="Minimum values (inclusive) of mean signal (in ADU) per amp to use." 

91 " The same cut is applied to all amps if this parameter [`dict`] is passed as " 

92 " {'ALL_AMPS': value}", 

93 default={'ALL_AMPS': 0.0}, 

94 ) 

95 maxMeanSignal = pexConfig.DictField( 

96 keytype=str, 

97 itemtype=float, 

98 doc="Maximum values (inclusive) of mean signal (in ADU) per amp to use."

99 " The same cut is applied to all amps if this dictionary is of the form" 

100 " {'ALL_AMPS': value}", 

101 default={'ALL_AMPS': 1e6}, 

102 ) 

103 maskNameList = pexConfig.ListField( 

104 dtype=str, 

105 doc="Mask list to exclude from statistics calculations.", 

106 default=['SUSPECT', 'BAD', 'NO_DATA', 'SAT'], 

107 ) 

108 nSigmaClipPtc = pexConfig.Field( 

109 dtype=float, 

110 doc="Sigma cut for afwMath.StatisticsControl()", 

111 default=5.5, 

112 ) 

113 nIterSigmaClipPtc = pexConfig.Field( 

114 dtype=int, 

115 doc="Number of sigma-clipping iterations for afwMath.StatisticsControl()", 

116 default=3, 

117 ) 

118 minNumberGoodPixelsForCovariance = pexConfig.Field( 

119 dtype=int, 

120 doc="Minimum number of acceptable good pixels per amp to calculate the covariances (via FFT or" 

121 " direclty).", 

122 default=10000, 

123 ) 

124 thresholdDiffAfwVarVsCov00 = pexConfig.Field( 

125 dtype=float, 

126 doc="If the absolute fractional differece between afwMath.VARIANCECLIP and Cov00 " 

127 "for a region of a difference image is greater than this threshold (percentage), " 

128 "a warning will be issued.", 

129 default=1., 

130 ) 

131 detectorMeasurementRegion = pexConfig.ChoiceField( 

132 dtype=str, 

133 doc="Region of each exposure where to perform the calculations (amplifier or full image).", 

134 default='AMP', 

135 allowed={ 

136 "AMP": "Amplifier of the detector.", 

137 "FULL": "Full image." 

138 } 

139 ) 

140 numEdgeSuspect = pexConfig.Field( 

141 dtype=int, 

142 doc="Number of edge pixels to be flagged as untrustworthy.", 

143 default=0, 

144 ) 

145 edgeMaskLevel = pexConfig.ChoiceField( 

146 dtype=str, 

147 doc="Mask edge pixels in which coordinate frame: DETECTOR or AMP?", 

148 default="DETECTOR", 

149 allowed={ 

150 'DETECTOR': 'Mask only the edges of the full detector.', 

151 'AMP': 'Mask edges of each amplifier.', 

152 }, 

153 ) 

154 doGain = pexConfig.Field( 

155 dtype=bool, 

156 doc="Calculate a gain per input flat pair.", 

157 default=True, 

158 ) 

159 gainCorrectionType = pexConfig.ChoiceField( 

160 dtype=str, 

161 doc="Correction type for the gain.", 

162 default='FULL', 

163 allowed={ 

164 'NONE': 'No correction.', 

165 'SIMPLE': 'First order correction.', 

166 'FULL': 'Second order correction.' 

167 } 

168 ) 
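
The fields above can be overridden like any other pex_config settings. A minimal, illustrative sketch (the amplifier name 'C00' and the numeric values are placeholders, not defaults):

config = PhotonTransferCurveExtractConfig()
config.maximumRangeCovariancesAstier = 8        # measure covariance lags 0..7 in x and y
config.minMeanSignal = {'ALL_AMPS': 100.0}      # same lower signal cut (ADU) for every amp
config.maxMeanSignal = {'C00': 9.0e4}           # upper signal cut (ADU) for one amp only
config.matchByExposureId = True                 # pair flats by exposure ID instead of exposure time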

169 

170 

171class PhotonTransferCurveExtractTask(pipeBase.PipelineTask): 

172 """Task to measure covariances from flat fields. 

173 

174 This task receives as input a list of flat-field images 

175 (flats), and sorts these flats in pairs taken at the 

176 same time (the task will warn and drop the exposure if there is only one flat

177 at a given exposure time, and it will discard extra flats if 

178 there are more than two per exposure time). This task measures 

179 the mean, variance, and covariances from a region (e.g., 

180 an amplifier) of the difference image of the two flats with 

181 the same exposure time. 

182 

183 The variance is calculated via afwMath, and the covariance 

184 via the methods in Astier+19 (appendix A). In theory, 

185 var = covariance[0,0]. This should be validated, and in the 

186 future, we may decide to just keep one (covariance). 

187 At this moment, if the two values differ by more than the value 

188 of `thresholdDiffAfwVarVsCov00` (default: 1%), a warning will 

189 be issued. 

190 

191 The measured covariances at a given exposure time (along with 

192 other quantities such as the mean) are stored in a PTC dataset 

193 object (`~lsst.ip.isr.PhotonTransferCurveDataset`), which gets 

194 partially filled at this stage (the remainder of the attributes 

195 of the dataset will be filled after running the second task of 

196 the PTC-measurement pipeline, `~PhotonTransferCurveSolveTask`). 

197 

198 The number of partially-filled 

199 `~lsst.ip.isr.PhotonTransferCurveDataset` objects will be less 

200 than the number of input exposures because the task combines 

201 input flats in pairs. However, it is required at this moment 

202 that the number of input dimensions matches 

203 bijectively the number of output dimensions. Therefore, a number 

204 of "dummy" PTC datasets are inserted in the output list. This 

205 output list will then be used as input of the next task in the 

206 PTC-measurement pipeline, `PhotonTransferCurveSolveTask`, 

207 which will assemble the multiple `PhotonTransferCurveDataset` 

208 objects into a single one in order to fit the measured covariances 

209 as a function of flux to one of three models 

210 (see `PhotonTransferCurveSolveTask` for details). 

211 

212 Reference: Astier+19: "The Shape of the Photon Transfer Curve of CCD 

213 sensors", arXiv:1905.08677. 

214 """ 

215 

216 ConfigClass = PhotonTransferCurveExtractConfig 

217 _DefaultName = 'cpPtcExtract' 

218 

219 def runQuantum(self, butlerQC, inputRefs, outputRefs): 

220 """Ensure that the input and output dimensions are passed along. 

221 

222 Parameters 

223 ---------- 

224 butlerQC : `~lsst.daf.butler.butlerQuantumContext.ButlerQuantumContext` 

225 Butler to operate on. 

226 inputRefs : `~lsst.pipe.base.connections.InputQuantizedConnection` 

227 Input data refs to load. 

228 outputRefs : `~lsst.pipe.base.connections.OutputQuantizedConnection`

229 Output data refs to persist. 

230 """ 

231 inputs = butlerQC.get(inputRefs) 

232 # Ids of input list of exposure references 

233 # (deferLoad=True in the input connections) 

234 inputs['inputDims'] = [expRef.datasetRef.dataId['exposure'] for expRef in inputRefs.inputExp] 

235 

236 # Dictionary, keyed by expTime, with tuples containing flat 

237 # exposures and their IDs. 

238 if self.config.matchByExposureId: 

239 inputs['inputExp'] = arrangeFlatsByExpId(inputs['inputExp'], inputs['inputDims']) 

240 else: 

241 inputs['inputExp'] = arrangeFlatsByExpTime(inputs['inputExp'], inputs['inputDims']) 

242 

243 outputs = self.run(**inputs) 

244 butlerQC.put(outputs, outputRefs) 

245 

246 def run(self, inputExp, inputDims, taskMetadata): 

247 """Measure covariances from difference of flat pairs 

248 

249 Parameters 

250 ---------- 

251 inputExp : `dict` [`float`, `list` 

252 [`~lsst.pipe.base.connections.DeferredDatasetRef`]] 

253 Dictionary that groups references to flat-field exposures that 

254 have the same exposure time (seconds), or that groups them 

255 sequentially by their exposure id. 

256 inputDims : `list` 

257 List of exposure IDs. 

258 taskMetadata : `list` [`lsst.pipe.base.TaskMetadata`] 

259 List of exposures metadata from ISR. 

260 

261 Returns 

262 ------- 

263 results : `lsst.pipe.base.Struct` 

264 The resulting Struct contains: 

265 

266 ``outputCovariances`` 

267 A list containing the per-pair PTC measurements (`list` 

268 [`lsst.ip.isr.PhotonTransferCurveDataset`]) 

269 """ 

270 # inputExp.values() returns a view, which we turn into a list. We then 

271 # access the first exposure-ID tuple to get the detector. 

272 # The first "get()" retrieves the exposure from the exposure reference. 

273 detector = list(inputExp.values())[0][0][0].get(component='detector') 

274 detNum = detector.getId() 

275 amps = detector.getAmplifiers() 

276 ampNames = [amp.getName() for amp in amps] 

277 

278 # Each amp may have a different min and max ADU signal 

279 # specified in the config. 

280 maxMeanSignalDict = {ampName: 1e6 for ampName in ampNames} 

281 minMeanSignalDict = {ampName: 0.0 for ampName in ampNames} 

282 for ampName in ampNames: 

283 if 'ALL_AMPS' in self.config.maxMeanSignal: 

284 maxMeanSignalDict[ampName] = self.config.maxMeanSignal['ALL_AMPS'] 

285 elif ampName in self.config.maxMeanSignal: 

286 maxMeanSignalDict[ampName] = self.config.maxMeanSignal[ampName] 

287 

288 if 'ALL_AMPS' in self.config.minMeanSignal: 

289 minMeanSignalDict[ampName] = self.config.minMeanSignal['ALL_AMPS'] 

290 elif ampName in self.config.minMeanSignal: 

291 minMeanSignalDict[ampName] = self.config.minMeanSignal[ampName] 

292 # These are the column names for `tupleRows` below. 

293 tags = [('mu', '<f8'), ('afwVar', '<f8'), ('i', '<i8'), ('j', '<i8'), ('var', '<f8'), 

294 ('cov', '<f8'), ('npix', '<i8'), ('ext', '<i8'), ('expTime', '<f8'), ('ampName', '<U3')] 

295 # Create a dummy ptcDataset. Dummy datasets will be 

296 # used to ensure that the number of output and input 

297 # dimensions match. 

298 dummyPtcDataset = PhotonTransferCurveDataset(ampNames, 'DUMMY', 

299 self.config.maximumRangeCovariancesAstier) 

300 

301 readNoiseDict = {ampName: 0.0 for ampName in ampNames} 

302 for ampName in ampNames: 

303 # Initialize amps of `dummyPtcDataset`.

304 dummyPtcDataset.setAmpValues(ampName) 

305 # Overscan readnoise from post-ISR exposure metadata. 

306 # It will be used to estimate the gain from a pair of flats. 

307 readNoiseDict[ampName] = self.getReadNoiseFromMetadata(taskMetadata, ampName) 

308 

309 # Output list with PTC datasets. 

310 partialPtcDatasetList = [] 

311 # The number of output references needs to match that of input 

312 # references: initialize output list with dummy PTC datasets.

313 for i in range(len(inputDims)): 

314 partialPtcDatasetList.append(dummyPtcDataset) 

315 

316 if self.config.numEdgeSuspect > 0: 

317 isrTask = IsrTask() 

318 self.log.info("Masking %d pixels from the edges of all exposures as SUSPECT.", 

319 self.config.numEdgeSuspect) 

320 

321 for expTime in inputExp: 

322 exposures = inputExp[expTime] 

323 if len(exposures) == 1: 

324 self.log.warning("Only one exposure found at expTime %f. Dropping exposure %d.", 

325 expTime, exposures[0][1]) 

326 continue 

327 else: 

328 # Only use the first two exposures at expTime. Each 

329 # element is a tuple (exposure, expId) 

330 expRef1, expId1 = exposures[0] 

331 expRef2, expId2 = exposures[1] 

332 # use get() to obtain `lsst.afw.image.Exposure` 

333 exp1, exp2 = expRef1.get(), expRef2.get() 

334 

335 if len(exposures) > 2: 

336 self.log.warning("Already found 2 exposures at expTime %f. Ignoring exposures: %s", 

337 expTime, ", ".join(str(i[1]) for i in exposures[2:])) 

338 # Mask pixels at the edge of the detector or of each amp 

339 if self.config.numEdgeSuspect > 0: 

340 isrTask.maskEdges(exp1, numEdgePixels=self.config.numEdgeSuspect, 

341 maskPlane="SUSPECT", level=self.config.edgeMaskLevel) 

342 isrTask.maskEdges(exp2, numEdgePixels=self.config.numEdgeSuspect, 

343 maskPlane="SUSPECT", level=self.config.edgeMaskLevel) 

344 

345 nAmpsNan = 0 

346 partialPtcDataset = PhotonTransferCurveDataset(ampNames, 'PARTIAL', 

347 self.config.maximumRangeCovariancesAstier) 

348 for ampNumber, amp in enumerate(detector): 

349 ampName = amp.getName() 

350 if self.config.detectorMeasurementRegion == 'AMP': 

351 region = amp.getBBox() 

352 elif self.config.detectorMeasurementRegion == 'FULL': 

353 region = None 

354 

355 # Get masked image regions, masking planes, statistic control 

356 # objects, and clipped means. Calculate once to reuse in 

357 # `measureMeanVarCov` and `getGainFromFlatPair`. 

358 im1Area, im2Area, imStatsCtrl, mu1, mu2 = self.getImageAreasMasksStats(exp1, exp2, 

359 region=region) 

360 

361 # `measureMeanVarCov` is the function that measures 

362 # the variance and covariances from a region of 

363 # the difference image of two flats at the same 

364 # exposure time. The variable `covAstier` that is 

365 # returned is of the form: 

366 # [(i, j, var (cov[0,0]), cov, npix) for (i,j) in 

367 # {maxLag, maxLag}^2]. 

368 muDiff, varDiff, covAstier = self.measureMeanVarCov(im1Area, im2Area, imStatsCtrl, mu1, mu2) 

369 # Estimate the gain from the flat pair 

370 if self.config.doGain: 

371 gain = self.getGainFromFlatPair(im1Area, im2Area, imStatsCtrl, mu1, mu2, 

372 correctionType=self.config.gainCorrectionType, 

373 readNoise=readNoiseDict[ampName]) 

374 else: 

375 gain = np.nan 

376 

377 # Correction factor for bias introduced by sigma 

378 # clipping. 

379 # Function returns 1/sqrt(varFactor), so it needs 

380 # to be squared. varDiff is calculated via 

381 # afwMath.VARIANCECLIP. 

382 varFactor = sigmaClipCorrection(self.config.nSigmaClipPtc)**2 

383 varDiff *= varFactor 

384 

385 expIdMask = True 

386 # Mask data point at this mean signal level if 

387 # the signal, variance, or covariance calculations 

388 # from `measureMeanVarCov` resulted in NaNs. 

389 if np.isnan(muDiff) or np.isnan(varDiff) or (covAstier is None): 

390 self.log.warning("NaN mean or var, or None cov in amp %s in exposure pair %d, %d of " 

391 "detector %d.", ampName, expId1, expId2, detNum) 

392 nAmpsNan += 1 

393 expIdMask = False 

394 covArray = np.full((1, self.config.maximumRangeCovariancesAstier, 

395 self.config.maximumRangeCovariancesAstier), np.nan) 

396 covSqrtWeights = np.full_like(covArray, np.nan) 

397 

398 # Mask data point if it is outside of the 

399 # specified mean signal range. 

400 if (muDiff <= minMeanSignalDict[ampName]) or (muDiff >= maxMeanSignalDict[ampName]): 

401 expIdMask = False 

402 

403 if covAstier is not None: 

404 # Turn the tuples with the measured information 

405 # into covariance arrays. 

406 # covrow: (i, j, var (cov[0,0]), cov, npix) 

407 tupleRows = [(muDiff, varDiff) + covRow + (ampNumber, expTime, 

408 ampName) for covRow in covAstier] 

409 tempStructArray = np.array(tupleRows, dtype=tags) 

410 covArray, vcov, _ = self.makeCovArray(tempStructArray, 

411 self.config.maximumRangeCovariancesAstier) 

412 covSqrtWeights = np.nan_to_num(1./np.sqrt(vcov)) 

413 

414 # Correct covArray for sigma clipping: 

415 # 1) Apply varFactor twice for the whole covariance matrix 

416 covArray *= varFactor**2 

417 # 2) But, only once for the variance element of the 

418 # matrix, covArray[0,0] (so divide one factor out). 

419 covArray[0, 0] /= varFactor 

420 

421 partialPtcDataset.setAmpValues(ampName, rawExpTime=[expTime], rawMean=[muDiff], 

422 rawVar=[varDiff], inputExpIdPair=[(expId1, expId2)], 

423 expIdMask=[expIdMask], covArray=covArray, 

424 covSqrtWeights=covSqrtWeights, gain=gain, 

425 noise=readNoiseDict[ampName]) 

426 # Use location of exp1 to save PTC dataset from (exp1, exp2) pair. 

427 # Below, np.where(expId1 == np.array(inputDims)) returns a tuple 

428 # with a single-element array, so [0][0] 

429 # is necessary to extract the required index. 

430 datasetIndex = np.where(expId1 == np.array(inputDims))[0][0] 

431 # `partialPtcDatasetList` is a list of 

432 # `PhotonTransferCurveDataset` objects. Some of them 

433 # will be dummy datasets (to match length of input 

434 # and output references), and the rest will have 

435 # datasets with the mean signal, variance, and 

436 # covariance measurements at a given exposure 

437 # time. The next part of the PTC-measurement

438 # pipeline, `solve`, will take this list as input, 

439 # and assemble the measurements in the datasets 

440 # in an adequate manner for fitting a PTC

441 # model. 

442 partialPtcDataset.updateMetadata(setDate=True, detector=detector) 

443 partialPtcDatasetList[datasetIndex] = partialPtcDataset 

444 

445 if nAmpsNan == len(ampNames): 

446 msg = f"NaN mean in all amps of exposure pair {expId1}, {expId2} of detector {detNum}." 

447 self.log.warning(msg) 

448 return pipeBase.Struct( 

449 outputCovariances=partialPtcDatasetList, 

450 ) 

451 

452 def makeCovArray(self, inputTuple, maxRangeFromTuple): 

453 """Make covariances array from tuple. 

454 

455 Parameters 

456 ---------- 

457 inputTuple : `numpy.ndarray` 

458 Structured array with rows with at least 

459 (mu, afwVar, cov, var, i, j, npix), where: 

460 mu : `float` 

461 0.5*(mu1 + mu2), where mu1 is the mean value of flat1

462 and mu2 is the mean value of flat2. 

463 afwVar : `float` 

464 Variance of difference flat, calculated with afw. 

465 cov : `float` 

466 Covariance value at lag(i, j) 

467 var : `float` 

468 Variance(covariance value at lag(0, 0)) 

469 i : `int` 

470 Lag in dimension "x". 

471 j : `int` 

472 Lag in dimension "y". 

473 npix : `int` 

474 Number of pixels used for covariance calculation. 

475 maxRangeFromTuple : `int` 

476 Maximum range to select from tuple. 

477 

478 Returns 

479 ------- 

480 cov : `numpy.array` 

481 Covariance arrays, indexed by mean signal mu. 

482 vCov : `numpy.array` 

483 Variance of the [co]variance arrays, indexed by mean signal mu. 

484 muVals : `numpy.array` 

485 List of mean signal values. 

486 """ 

487 if maxRangeFromTuple is not None: 

488 cut = (inputTuple['i'] < maxRangeFromTuple) & (inputTuple['j'] < maxRangeFromTuple) 

489 cutTuple = inputTuple[cut] 

490 else: 

491 cutTuple = inputTuple 

492 # increasing mu order, so that we can group measurements with the 

493 # same mu 

494 muTemp = cutTuple['mu'] 

495 ind = np.argsort(muTemp) 

496 

497 cutTuple = cutTuple[ind] 

498 # should group measurements on the same image pairs (same average)

499 mu = cutTuple['mu'] 

500 xx = np.hstack(([mu[0]], mu)) 

501 delta = xx[1:] - xx[:-1] 

502 steps, = np.where(delta > 0) 

503 ind = np.zeros_like(mu, dtype=int) 

504 ind[steps] = 1 

505 ind = np.cumsum(ind) # this acts as an image pair index. 

506 # now fill the 3-d cov array (and variance)

507 muVals = np.array(np.unique(mu)) 

508 i = cutTuple['i'].astype(int) 

509 j = cutTuple['j'].astype(int) 

510 c = 0.5*cutTuple['cov'] 

511 n = cutTuple['npix'] 

512 v = 0.5*cutTuple['var'] 

513 # book and fill 

514 cov = np.ndarray((len(muVals), np.max(i)+1, np.max(j)+1)) 

515 var = np.zeros_like(cov) 

516 cov[ind, i, j] = c 

517 var[ind, i, j] = v**2/n 

518 var[:, 0, 0] *= 2 # var(v) = 2*v**2/N 

519 

520 return cov, var, muVals 
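
The pair-index bookkeeping above is terse; this standalone sketch (toy values, independent of the task) shows what the cumulative-sum trick produces for measurements already sorted by mean signal:

mu = np.array([10., 10., 10., 25., 25., 40.])   # three image pairs, sorted by mean signal
xx = np.hstack(([mu[0]], mu))
delta = xx[1:] - xx[:-1]
steps, = np.where(delta > 0)
ind = np.zeros_like(mu, dtype=int)
ind[steps] = 1
ind = np.cumsum(ind)   # -> [0, 0, 0, 1, 1, 2]: one index per distinct mu (image pair)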

521 

522 def measureMeanVarCov(self, im1Area, im2Area, imStatsCtrl, mu1, mu2): 

523 """Calculate the mean of each of two exposures and the variance 

524 and covariance of their difference. The variance is calculated 

525 via afwMath, and the covariance via the methods in Astier+19 

526 (appendix A). In theory, var = covariance[0,0]. This should 

527 be validated, and in the future, we may decide to just keep 

528 one (covariance). 

529 

530 Parameters 

531 ---------- 

532 im1Area : `lsst.afw.image.maskedImage.MaskedImageF` 

533 Masked image from exposure 1. 

534 im2Area : `lsst.afw.image.maskedImage.MaskedImageF` 

535 Masked image from exposure 2. 

536 imStatsCtrl : `lsst.afw.math.StatisticsControl` 

537 Statistics control object. 

538 mu1 : `float`

539 Clipped mean of im1Area (ADU).

540 mu2 : `float`

541 Clipped mean of im2Area (ADU). 

542 

543 Returns 

544 ------- 

545 mu : `float` or `NaN` 

546 0.5*(mu1 + mu2), where mu1 and mu2 are the clipped means

547 of the regions in both exposures. If either mu1 or mu2 are

548 NaN's, the returned value is NaN. 

549 varDiff : `float` or `NaN` 

550 Half of the clipped variance of the difference of the 

551 regions in the two input exposures. If either mu1 or mu2 are

552 NaN's, the returned value is NaN. 

553 covDiffAstier : `list` or `NaN` 

554 List with tuples of the form (dx, dy, var, cov, npix), where: 

555 dx : `int` 

556 Lag in x 

557 dy : `int` 

558 Lag in y 

559 var : `float` 

560 Variance at (dx, dy). 

561 cov : `float` 

562 Covariance at (dx, dy). 

563 nPix : `int` 

564 Number of pixel pairs used to evaluate var and cov. 

565 

566 If either mu1 or mu2 are NaN's, the returned value is NaN.

567 """ 

568 if np.isnan(mu1) or np.isnan(mu2): 

569 self.log.warning("Mean of amp in image 1 or 2 is NaN: %f, %f.", mu1, mu2) 

570 return np.nan, np.nan, None 

571 mu = 0.5*(mu1 + mu2) 

572 

573 # Take difference of pairs 

574 # symmetric formula: diff = (mu2*im1-mu1*im2)/(0.5*(mu1+mu2)) 

575 temp = im2Area.clone() 

576 temp *= mu1 

577 diffIm = im1Area.clone() 

578 diffIm *= mu2 

579 diffIm -= temp 

580 diffIm /= mu 

581 

582 # Variance calculation via afwMath 

583 varDiff = 0.5*(afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, imStatsCtrl).getValue()) 

584 

585 # Covariances calculations 

586 # Get the pixels that were not clipped 

587 varClip = afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, imStatsCtrl).getValue() 

588 meanClip = afwMath.makeStatistics(diffIm, afwMath.MEANCLIP, imStatsCtrl).getValue() 

589 cut = meanClip + self.config.nSigmaClipPtc*np.sqrt(varClip) 

590 unmasked = np.where(np.fabs(diffIm.image.array) <= cut, 1, 0) 

591 

592 # Get the pixels in the mask planes of the difference image 

593 # that were ignored by the clipping algorithm 

594 wDiff = np.where(diffIm.getMask().getArray() == 0, 1, 0) 

595 # Combine the two sets of pixels ('1': use; '0': don't use) 

596 # into a final weight matrix to be used in the covariance 

597 # calculations below. 

598 w = unmasked*wDiff 

599 

600 if np.sum(w) < self.config.minNumberGoodPixelsForCovariance: 

601 self.log.warning("Number of good points for covariance calculation (%s) is less " 

602 "(than threshold %s)", np.sum(w), self.config.minNumberGoodPixelsForCovariance) 

603 return np.nan, np.nan, None 

604 

605 maxRangeCov = self.config.maximumRangeCovariancesAstier 

606 

607 # Calculate covariances via FFT. 

608 shapeDiff = np.array(diffIm.image.array.shape) 

609 # Calculate the sizes of FFT dimensions. 

610 s = shapeDiff + maxRangeCov 

611 tempSize = np.array(np.log(s)/np.log(2.)).astype(int) 

612 fftSize = np.array(2**(tempSize+1)).astype(int) 

613 fftShape = (fftSize[0], fftSize[1]) 

614 c = CovFastFourierTransform(diffIm.image.array, w, fftShape, maxRangeCov) 

615 # np.sum(w) is the same as npix[0][0] returned in covDiffAstier 

616 covDiffAstier = c.reportCovFastFourierTransform(maxRangeCov) 

617 

618 # Compare Cov[0,0] and afwMath.VARIANCECLIP. covDiffAstier[0]

619 # is the Cov[0,0] element, [3] is the variance, and there's a 

620 # factor of 0.5 difference with afwMath.VARIANCECLIP. 

621 thresholdPercentage = self.config.thresholdDiffAfwVarVsCov00 

622 fractionalDiff = 100*np.fabs(1 - varDiff/(covDiffAstier[0][3]*0.5)) 

623 if fractionalDiff >= thresholdPercentage: 

624 self.log.warning("Absolute fractional difference between afwMatch.VARIANCECLIP and Cov[0,0] " 

625 "is more than %f%%: %f", thresholdPercentage, fractionalDiff) 

626 

627 return mu, varDiff, covDiffAstier 
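
A standalone toy check of the symmetric difference used above, with pure numpy, Poisson flats of unit gain, and no masking (not part of the task):

rng = np.random.default_rng(1234)
mu1, mu2 = 1000.0, 1010.0
im1 = rng.poisson(mu1, size=(200, 200)).astype(float)
im2 = rng.poisson(mu2, size=(200, 200)).astype(float)
mu = 0.5*(mu1 + mu2)
diff = (mu2*im1 - mu1*im2)/mu   # symmetric difference; zero mean by construction
varDiff = 0.5*np.var(diff)      # ~ mu (ADU) for unit gain, i.e. var ~ cov[0, 0]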

628 

629 def getImageAreasMasksStats(self, exposure1, exposure2, region=None): 

630 """Get image areas in a region as well as masks and statistic objects. 

631 

632 Parameters 

633 ---------- 

634 exposure1 : `lsst.afw.image.exposure.ExposureF` 

635 First exposure of flat field pair. 

636 exposure2 : `lsst.afw.image.exposure.ExposureF` 

637 Second exposure of flat field pair. 

638 region : `lsst.geom.Box2I`, optional 

639 Region of each exposure where to perform the calculations 

640 (e.g., an amplifier).

641 

642 Returns 

643 ------- 

644 im1Area : `lsst.afw.image.maskedImage.MaskedImageF` 

645 Masked image from exposure 1. 

646 im2Area : `lsst.afw.image.maskedImage.MaskedImageF` 

647 Masked image from exposure 2. 

648 imStatsCtrl : `lsst.afw.math.StatisticsControl` 

649 Statistics control object. 

650 mu1 : `float`

651 Clipped mean of im1Area (ADU).

652 mu2 : `float`

653 Clipped mean of im2Area (ADU). 

654 """ 

655 if region is not None: 

656 im1Area = exposure1.maskedImage[region] 

657 im2Area = exposure2.maskedImage[region] 

658 else: 

659 im1Area = exposure1.maskedImage 

660 im2Area = exposure2.maskedImage 

661 

662 if self.config.binSize > 1: 

663 im1Area = afwMath.binImage(im1Area, self.config.binSize) 

664 im2Area = afwMath.binImage(im2Area, self.config.binSize) 

665 

666 # Get mask planes and construct statistics control object from one 

667 # of the exposures 

668 imMaskVal = exposure1.getMask().getPlaneBitMask(self.config.maskNameList) 

669 imStatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc, 

670 self.config.nIterSigmaClipPtc, 

671 imMaskVal) 

672 imStatsCtrl.setNanSafe(True) 

673 imStatsCtrl.setAndMask(imMaskVal) 

674 

675 mu1 = afwMath.makeStatistics(im1Area, afwMath.MEANCLIP, imStatsCtrl).getValue() 

676 mu2 = afwMath.makeStatistics(im2Area, afwMath.MEANCLIP, imStatsCtrl).getValue() 

677 

678 return (im1Area, im2Area, imStatsCtrl, mu1, mu2) 

679 

680 def getGainFromFlatPair(self, im1Area, im2Area, imStatsCtrl, mu1, mu2, 

681 correctionType='NONE', readNoise=None): 

682 """Estimate the gain from a single pair of flats. 

683 

684 The basic premise is 1/g = <(I1 - I2)^2/(I1 + I2)> = 1/const, 

685 where I1 and I2 correspond to flats 1 and 2, respectively. 

686 Corrections for the variable QE and the read-noise are then 

687 made following the derivation in Robert Lupton's forthcoming 

688 book, which gets 

689 

690 1/g = <(I1 - I2)^2/(I1 + I2)> - 1/mu(sigma^2 - 1/2g^2). 

691 

692 This is a quadratic equation, whose solutions are given by: 

693 

694 g = mu +/- sqrt(2*sigma^2 - 2*const*mu + mu^2)/(2*const*mu

695 - 2*sigma^2) 

696 

697 where 'mu' is the average signal level and 'sigma' is the 

698 amplifier's readnoise. The positive solution will be used. 

699 The way the correction is applied depends on the value 

700 supplied for correctionType. 

701 

702 correctionType is one of ['NONE', 'SIMPLE' or 'FULL'] 

703 'NONE' : uses the 1/g = <(I1 - I2)^2/(I1 + I2)> formula. 

704 'SIMPLE' : uses the gain from the 'NONE' method for the 

705 1/2g^2 term. 

706 'FULL' : solves the full equation for g, discarding the 

707 non-physical solution to the resulting quadratic. 

708 

709 Parameters 

710 ---------- 

711 im1Area : `lsst.afw.image.maskedImage.MaskedImageF` 

712 Masked image from exposure 1. 

713 im2Area : `lsst.afw.image.maskedImage.MaskedImageF` 

714 Masked image from exposure 2. 

715 imStatsCtrl : `lsst.afw.math.StatisticsControl` 

716 Statistics control object. 

717 mu1 : `float`

718 Clipped mean of im1Area (ADU).

719 mu2 : `float`

720 Clipped mean of im2Area (ADU). 

721 correctionType : `str`, optional 

722 The correction applied, one of ['NONE', 'SIMPLE', 'FULL'] 

723 readNoise : `float`, optional 

724 Amplifier readout noise (ADU). 

725 

726 Returns 

727 ------- 

728 gain : `float` 

729 Gain, in e/ADU. 

730 

731 Raises 

732 ------ 

733 RuntimeError 

734 Raise if `correctionType` is not one of 'NONE', 

735 'SIMPLE', or 'FULL'. 

736 """ 

737 if correctionType not in ['NONE', 'SIMPLE', 'FULL']: 

738 raise RuntimeError("Unknown correction type: %s" % correctionType) 

739 

740 if correctionType != 'NONE' and readNoise is None: 

741 self.log.warning("'correctionType' in 'getGainFromFlatPair' is %s, " 

742 "but 'readNoise' is 'None'. Setting 'correctionType' " 

743 "to 'NONE', so a gain value will be estimated without " 

744 "corrections." % correctionType) 

745 correctionType = 'NONE' 

746 

747 mu = 0.5*(mu1 + mu2) 

748 

749 # ratioIm = (I1 - I2)^2 / (I1 + I2) 

750 temp = im2Area.clone() 

751 ratioIm = im1Area.clone() 

752 ratioIm -= temp 

753 ratioIm *= ratioIm 

754 

755 # Sum of pairs 

756 sumIm = im1Area.clone() 

757 sumIm += temp 

758 

759 ratioIm /= sumIm 

760 

761 const = afwMath.makeStatistics(ratioIm, afwMath.MEAN, imStatsCtrl).getValue() 

762 gain = 1. / const 

763 

764 if correctionType == 'SIMPLE': 

765 gain = 1/(const - (1/mu)*(readNoise**2 - (1/2*gain**2))) 

766 elif correctionType == 'FULL': 

767 root = np.sqrt(mu**2 - 2*mu*const + 2*readNoise**2) 

768 denom = (2*const*mu - 2*readNoise**2) 

769 positiveSolution = (root + mu)/denom 

770 gain = positiveSolution 

771 

772 return gain 
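
A toy numerical check of the uncorrected ('NONE') estimator described in the docstring, using simulated Poisson flats (standalone sketch; the gain and signal values are arbitrary):

rng = np.random.default_rng(42)
gainTrue = 1.5                                               # e-/ADU
nElectrons = 50000.0                                         # mean signal per pixel (electrons)
flat1 = rng.poisson(nElectrons, size=(500, 500))/gainTrue    # flats in ADU
flat2 = rng.poisson(nElectrons, size=(500, 500))/gainTrue
const = np.mean((flat1 - flat2)**2/(flat1 + flat2))
gainEstimate = 1.0/const    # ~ 1.5 e-/ADU, before read-noise/QE corrections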

773 

774 def getReadNoiseFromMetadata(self, taskMetadata, ampName): 

775 """Gets readout noise for an amp from ISR metadata. 

776 

777 Parameters 

778 ---------- 

779 taskMetadata : `list` [`lsst.pipe.base.TaskMetadata`] 

780 List of exposures metadata from ISR. 

781 ampName : `str` 

782 Amplifier name. 

783 

784 Returns 

785 ------- 

786 readNoise : `float` 

787 Median of the overscan readnoise in the 

788 post-ISR metadata of the input exposures (ADU). 

789 Returns 'None' if the median could not be calculated. 

790 """ 

791 # Empirical readout noise [ADU] measured from an 

792 # overscan-subtracted overscan during ISR. 

793 expectedKey = f"RESIDUAL STDEV {ampName}" 

794 

795 readNoises = [] 

796 for expMetadata in taskMetadata: 

797 if 'isr' in expMetadata: 

798 overscanNoise = expMetadata['isr'][expectedKey] 

799 else: 

800 continue 

801 readNoises.append(overscanNoise) 

802 

803 if len(readNoises): 

804 readNoise = np.median(np.array(readNoises)) 

805 else: 

806 self.log.warning("Median readout noise from ISR metadata for amp %s " 

807 "could not be calculated." % ampName) 

808 readNoise = None 

809 

810 return readNoise