Coverage for python/lsst/cp/pipe/ptc/cpExtractPtcTask.py: 11%

260 statements  

coverage.py v7.2.3, created at 2023-04-28 04:59 -0700

1# This file is part of cp_pipe. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (https://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# This program is free software: you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation, either version 3 of the License, or 

12# (at your option) any later version. 

13# 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18# 

19# You should have received a copy of the GNU General Public License 

20# along with this program. If not, see <https://www.gnu.org/licenses/>. 

21# 

22import numpy as np 

23 

24import lsst.afw.math as afwMath 

25import lsst.pex.config as pexConfig 

26import lsst.pipe.base as pipeBase 

27from lsst.cp.pipe.utils import (arrangeFlatsByExpTime, arrangeFlatsByExpId, 

28 arrangeFlatsByExpFlux, sigmaClipCorrection, 

29 CovFastFourierTransform) 

30 

31import lsst.pipe.base.connectionTypes as cT 

32 

33from lsst.ip.isr import PhotonTransferCurveDataset 

34from lsst.ip.isr import IsrTask 

35 

36__all__ = ['PhotonTransferCurveExtractConfig', 'PhotonTransferCurveExtractTask'] 

37 

38 

39class PhotonTransferCurveExtractConnections(pipeBase.PipelineTaskConnections, 

40 dimensions=("instrument", "detector")): 

41 

42 inputExp = cT.Input( 

43 name="ptcInputExposurePairs", 

44 doc="Input post-ISR processed exposure pairs (flats) to" 

45 "measure covariances from.", 

46 storageClass="Exposure", 

47 dimensions=("instrument", "exposure", "detector"), 

48 multiple=True, 

49 deferLoad=True, 

50 ) 

51 taskMetadata = cT.Input( 

52 name="isr_metadata", 

53 doc="Input task metadata to extract statistics from.", 

54 storageClass="TaskMetadata", 

55 dimensions=("instrument", "exposure", "detector"), 

56 multiple=True, 

57 ) 

58 outputCovariances = cT.Output( 

59 name="ptcCovariances", 

60 doc="Extracted flat (co)variances.", 

61 storageClass="PhotonTransferCurveDataset", 

62 dimensions=("instrument", "exposure", "detector"), 

63 isCalibration=True, 

64 multiple=True, 

65 ) 

66 

67 

68class PhotonTransferCurveExtractConfig(pipeBase.PipelineTaskConfig, 

69 pipelineConnections=PhotonTransferCurveExtractConnections): 

70 """Configuration for the measurement of covariances from flats. 

71 """ 

72 matchExposuresType = pexConfig.ChoiceField( 

73 dtype=str, 

74 doc="Match input exposures by time, flux, or expId", 

75 default='TIME', 

76 allowed={ 

77 "TIME": "Match exposures by exposure time.", 

78 "FLUX": "Match exposures by target flux. Use header keyword" 

79 " in matchExposuresByFluxKeyword to find the flux.", 

80 "EXPID": "Match exposures by exposure ID." 

81 } 

82 ) 

83 matchExposuresByFluxKeyword = pexConfig.Field( 

84 dtype=str, 

85 doc="Header keyword for flux if matchExposuresType is FLUX.", 

86 default='CCOBFLUX', 

87 ) 

88 maximumRangeCovariancesAstier = pexConfig.Field( 

89 dtype=int, 

90 doc="Maximum range of covariances as in Astier+19", 

91 default=8, 

92 ) 

93 binSize = pexConfig.Field( 

94 dtype=int, 

95 doc="Bin the image by this factor in both dimensions.", 

96 default=1, 

97 ) 

98 minMeanSignal = pexConfig.DictField( 

99 keytype=str, 

100 itemtype=float, 

101 doc="Minimum values (inclusive) of mean signal (in ADU) per amp to use." 

102 " The same cut is applied to all amps if this parameter [`dict`] is passed as " 

103 " {'ALL_AMPS': value}", 

104 default={'ALL_AMPS': 0.0}, 

105 ) 

106 maxMeanSignal = pexConfig.DictField( 

107 keytype=str, 

108 itemtype=float, 

109 doc="Maximum values (inclusive) of mean signal (in ADU) below which to consider, per amp." 

110 " The same cut is applied to all amps if this dictionary is of the form" 

111 " {'ALL_AMPS': value}", 

112 default={'ALL_AMPS': 1e6}, 

113 ) 

114 maskNameList = pexConfig.ListField( 

115 dtype=str, 

116 doc="Mask list to exclude from statistics calculations.", 

117 default=['SUSPECT', 'BAD', 'NO_DATA', 'SAT'], 

118 ) 

119 nSigmaClipPtc = pexConfig.Field( 

120 dtype=float, 

121 doc="Sigma cut for afwMath.StatisticsControl()", 

122 default=5.5, 

123 ) 

124 nIterSigmaClipPtc = pexConfig.Field( 

125 dtype=int, 

126 doc="Number of sigma-clipping iterations for afwMath.StatisticsControl()", 

127 default=3, 

128 ) 

129 minNumberGoodPixelsForCovariance = pexConfig.Field( 

130 dtype=int, 

131 doc="Minimum number of acceptable good pixels per amp to calculate the covariances (via FFT or" 

132 " direclty).", 

133 default=10000, 

134 ) 

135 thresholdDiffAfwVarVsCov00 = pexConfig.Field( 

136 dtype=float, 

137 doc="If the absolute fractional differece between afwMath.VARIANCECLIP and Cov00 " 

138 "for a region of a difference image is greater than this threshold (percentage), " 

139 "a warning will be issued.", 

140 default=1., 

141 ) 

142 detectorMeasurementRegion = pexConfig.ChoiceField( 

143 dtype=str, 

144 doc="Region of each exposure where to perform the calculations (amplifier or full image).", 

145 default='AMP', 

146 allowed={ 

147 "AMP": "Amplifier of the detector.", 

148 "FULL": "Full image." 

149 } 

150 ) 

151 numEdgeSuspect = pexConfig.Field( 

152 dtype=int, 

153 doc="Number of edge pixels to be flagged as untrustworthy.", 

154 default=0, 

155 ) 

156 edgeMaskLevel = pexConfig.ChoiceField( 

157 dtype=str, 

158 doc="Mask edge pixels in which coordinate frame: DETECTOR or AMP?", 

159 default="DETECTOR", 

160 allowed={ 

161 'DETECTOR': 'Mask only the edges of the full detector.', 

162 'AMP': 'Mask edges of each amplifier.', 

163 }, 

164 ) 

165 doGain = pexConfig.Field( 

166 dtype=bool, 

167 doc="Calculate a gain per input flat pair.", 

168 default=True, 

169 ) 

170 gainCorrectionType = pexConfig.ChoiceField( 

171 dtype=str, 

172 doc="Correction type for the gain.", 

173 default='FULL', 

174 allowed={ 

175 'NONE': 'No correction.', 

176 'SIMPLE': 'First order correction.', 

177 'FULL': 'Second order correction.' 

178 } 

179 ) 

180 

181 

182class PhotonTransferCurveExtractTask(pipeBase.PipelineTask): 

183 """Task to measure covariances from flat fields. 

184 

185 This task receives as input a list of flat-field images 

186 (flats), and sorts these flats into pairs taken at the 

187 same time (the task will warn and drop the flat if there is only one flat 

188 at a given exposure time, and it will discard extra flats if 

189 there are more than two per exposure time). This task measures 

190 the mean, variance, and covariances from a region (e.g., 

191 an amplifier) of the difference image of the two flats with 

192 the same exposure time (alternatively, all input images could have 

193 the same exposure time but different flux levels). 

194 

195 The variance is calculated via afwMath, and the covariance 

196 via the methods in Astier+19 (appendix A). In theory, 

197 var = covariance[0,0]. This should be validated, and in the 

198 future, we may decide to just keep one (covariance). 

199 At this moment, if the two values differ by more than the value 

200 of `thresholdDiffAfwVarVsCov00` (default: 1%), a warning will 

201 be issued. 

202 

203 The measured covariances at a given exposure time (along with 

204 other quantities such as the mean) are stored in a PTC dataset 

205 object (`~lsst.ip.isr.PhotonTransferCurveDataset`), which gets 

206 partially filled at this stage (the remainder of the attributes 

207 of the dataset will be filled after running the second task of 

208 the PTC-measurement pipeline, `~PhotonTransferCurveSolveTask`). 

209 

210 The number of partially-filled 

211 `~lsst.ip.isr.PhotonTransferCurveDataset` objects will be less 

212 than the number of input exposures because the task combines 

213 input flats in pairs. However, it is required at this moment 

214 that the number of input dimensions matches 

215 bijectively the number of output dimensions. Therefore, a number 

216 of "dummy" PTC datasets are inserted in the output list. This 

217 output list will then be used as input of the next task in the 

218 PTC-measurement pipeline, `PhotonTransferCurveSolveTask`, 

219 which will assemble the multiple `PhotonTransferCurveDataset` 

220 objects into a single one in order to fit the measured covariances 

221 as a function of flux to one of three models 

222 (see `PhotonTransferCurveSolveTask` for details). 

223 

224 Reference: Astier+19: "The Shape of the Photon Transfer Curve of CCD 

225 sensors", arXiv:1905.08677. 

226 """ 

227 

228 ConfigClass = PhotonTransferCurveExtractConfig 

229 _DefaultName = 'cpPtcExtract' 

230 

231 def runQuantum(self, butlerQC, inputRefs, outputRefs): 

232 """Ensure that the input and output dimensions are passed along. 

233 

234 Parameters 

235 ---------- 

236 butlerQC : `~lsst.daf.butler.butlerQuantumContext.ButlerQuantumContext` 

237 Butler to operate on. 

238 inputRefs : `~lsst.pipe.base.connections.InputQuantizedConnection` 

239 Input data refs to load. 

240 outputRefs : `~lsst.pipe.base.connections.OutputQuantizedConnection` 

241 Output data refs to persist. 

242 """ 

243 inputs = butlerQC.get(inputRefs) 

244 # IDs of the input exposure references 

245 # (deferLoad=True in the input connections) 

246 inputs['inputDims'] = [expRef.datasetRef.dataId['exposure'] for expRef in inputRefs.inputExp] 

247 

248 # Dictionary, keyed by expTime (or expFlux or expId), with tuples 

249 # containing flat exposures and their IDs. 

250 matchType = self.config.matchExposuresType 

251 if matchType == 'TIME': 

252 inputs['inputExp'] = arrangeFlatsByExpTime(inputs['inputExp'], inputs['inputDims']) 

253 elif matchType == 'FLUX': 

254 inputs['inputExp'] = arrangeFlatsByExpFlux(inputs['inputExp'], inputs['inputDims'], 

255 self.config.matchExposuresByFluxKeyword) 

256 else: 

257 inputs['inputExp'] = arrangeFlatsByExpId(inputs['inputExp'], inputs['inputDims']) 
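# Illustrative sketch (added, not part of the original source): after this
# grouping step, inputs['inputExp'] is expected to look roughly like
#   {15.0: [(deferredRefA, 2022051700123), (deferredRefB, 2022051700124)],
#    30.0: [(deferredRefC, 2022051700125), (deferredRefD, 2022051700126)]}
# i.e. a dict keyed by exposure time (or flux, or exposure ID) whose values
# are lists of (deferred exposure reference, exposure ID) tuples. The keys
# and exposure IDs shown here are hypothetical.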

258 

259 outputs = self.run(**inputs) 

260 outputs = self._guaranteeOutputs(inputs['inputDims'], outputs, outputRefs) 

261 butlerQC.put(outputs, outputRefs) 

262 

263 def _guaranteeOutputs(self, inputDims, outputs, outputRefs): 

264 """Ensure that all outputRefs have a matching output, and if they do 

265 not, fill the output with dummy PTC datasets. 

266 

267 Parameters 

268 ---------- 

269 inputDims : `list` [`int`] 

270 List of input exposure IDs. 

271 outputs : `lsst.pipe.base.Struct` 

272 Outputs from the ``run`` method. Contains the entry: 

273 

274 ``outputCovariances`` 

275 Output PTC datasets (`list` [`lsst.ip.isr.IsrCalib`]) 

276 outputRefs : `~lsst.pipe.base.connections.OutputQuantizedConnection` 

277 Container with all of the outputs expected to be generated. 

278 

279 Returns 

280 ------- 

281 outputs : `lsst.pipe.base.Struct` 

282 The input ``outputs``, padded with dummy PTC datasets so that 

283 every output reference has a matching entry. 

284 """ 

285 newCovariances = [] 

286 for ref in outputRefs.outputCovariances: 

287 outputExpId = ref.dataId['exposure'] 

288 if outputExpId in inputDims: 

289 entry = inputDims.index(outputExpId) 

290 newCovariances.append(outputs.outputCovariances[entry]) 

291 else: 

292 newPtc = PhotonTransferCurveDataset(['no amp'], 'DUMMY', 1) 

293 newPtc.setAmpValuesPartialDataset('no amp') 

294 newCovariances.append(newPtc) 

295 return pipeBase.Struct(outputCovariances=newCovariances) 

296 

297 def run(self, inputExp, inputDims, taskMetadata): 

298 

299 """Measure covariances from difference of flat pairs 

300 

301 Parameters 

302 ---------- 

303 inputExp : `dict` [`float`, `list` 

304 [`~lsst.pipe.base.connections.DeferredDatasetRef`]] 

305 Dictionary that groups references to flat-field exposures that 

306 have the same exposure time (seconds), or that groups them 

307 sequentially by their exposure id. 

308 inputDims : `list` 

309 List of exposure IDs. 

310 taskMetadata : `list` [`lsst.pipe.base.TaskMetadata`] 

311 List of exposure metadata from ISR. 

312 

313 Returns 

314 ------- 

315 results : `lsst.pipe.base.Struct` 

316 The resulting Struct contains: 

317 

318 ``outputCovariances`` 

319 A list containing the per-pair PTC measurements (`list` 

320 [`lsst.ip.isr.PhotonTransferCurveDataset`]) 

321 """ 

322 # inputExp.values() returns a view, which we turn into a list. We then 

323 # take the first (exposure reference, exposure ID) tuple of the first 

324 # group and read the detector component from that exposure reference. 

325 detector = list(inputExp.values())[0][0][0].get(component='detector') 

326 detNum = detector.getId() 

327 amps = detector.getAmplifiers() 

328 ampNames = [amp.getName() for amp in amps] 

329 

330 # Each amp may have a different min and max ADU signal 

331 # specified in the config. 

332 maxMeanSignalDict = {ampName: 1e6 for ampName in ampNames} 

333 minMeanSignalDict = {ampName: 0.0 for ampName in ampNames} 

334 for ampName in ampNames: 

335 if 'ALL_AMPS' in self.config.maxMeanSignal: 

336 maxMeanSignalDict[ampName] = self.config.maxMeanSignal['ALL_AMPS'] 

337 elif ampName in self.config.maxMeanSignal: 

338 maxMeanSignalDict[ampName] = self.config.maxMeanSignal[ampName] 

339 

340 if 'ALL_AMPS' in self.config.minMeanSignal: 

341 minMeanSignalDict[ampName] = self.config.minMeanSignal['ALL_AMPS'] 

342 elif ampName in self.config.minMeanSignal: 

343 minMeanSignalDict[ampName] = self.config.minMeanSignal[ampName] 

344 # These are the column names for `tupleRows` below. 

345 tags = [('mu', '<f8'), ('afwVar', '<f8'), ('i', '<i8'), ('j', '<i8'), ('var', '<f8'), 

346 ('cov', '<f8'), ('npix', '<i8'), ('ext', '<i8'), ('expTime', '<f8'), ('ampName', '<U3')] 

347 # Create a dummy ptcDataset. Dummy datasets will be 

348 # used to ensure that the number of output and input 

349 # dimensions match. 

350 dummyPtcDataset = PhotonTransferCurveDataset(ampNames, 'DUMMY', 

351 self.config.maximumRangeCovariancesAstier) 

352 for ampName in ampNames: 

353 dummyPtcDataset.setAmpValuesPartialDataset(ampName) 

354 # Get read noise. Try from the exposure, then try 

355 # taskMetadata. This adds a get() for the exposures. 

356 readNoiseLists = {} 

357 for pairIndex, expRefs in inputExp.items(): 

358 # This yields an index (exposure_time, seq_num, or flux) 

359 # and a pair of references at that index. 

360 for expRef, expId in expRefs: 

361 # This yields an exposure ref and an exposureId. 

362 exposureMetadata = expRef.get(component="metadata") 

363 metadataIndex = inputDims.index(expId) 

364 thisTaskMetadata = taskMetadata[metadataIndex] 

365 

366 for ampName in ampNames: 

367 if ampName not in readNoiseLists: 

368 readNoiseLists[ampName] = [self.getReadNoise(exposureMetadata, 

369 thisTaskMetadata, ampName)] 

370 else: 

371 readNoiseLists[ampName].append(self.getReadNoise(exposureMetadata, 

372 thisTaskMetadata, ampName)) 

373 

374 readNoiseDict = {ampName: 0.0 for ampName in ampNames} 

375 for ampName in ampNames: 

376 # Take median read noise value 

377 readNoiseDict[ampName] = np.nanmedian(readNoiseLists[ampName]) 
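# Note (added): np.nanmedian ignores NaN entries, so exposures for which
# getReadNoise found no value in either the exposure header or the ISR task
# metadata do not bias the per-amp read-noise estimate. If every entry is
# NaN, the median is NaN and the gain correction later falls back to 'NONE'
# in getGainFromFlatPair.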

378 

379 # Output list with PTC datasets. 

380 partialPtcDatasetList = [] 

381 # The number of output references needs to match that of input 

382 # references: initialize the output list with dummy PTC datasets. 

383 for i in range(len(inputDims)): 

384 partialPtcDatasetList.append(dummyPtcDataset) 

385 

386 if self.config.numEdgeSuspect > 0: 

387 isrTask = IsrTask() 

388 self.log.info("Masking %d pixels from the edges of all %ss as SUSPECT.", 

389 self.config.numEdgeSuspect, self.config.edgeMaskLevel) 

390 

391 # Depending on the value of config.matchExposuresType 

392 # 'expTime' can stand for exposure time, flux, or ID. 

393 for expTime in inputExp: 

394 exposures = inputExp[expTime] 

395 if len(exposures) == 1: 

396 self.log.warning("Only one exposure found at %s %f. Dropping exposure %d.", 

397 self.config.matchExposuresType, expTime, exposures[0][1]) 

398 continue 

399 else: 

400 # Only use the first two exposures at expTime. Each 

401 # element is a tuple (exposure, expId) 

402 expRef1, expId1 = exposures[0] 

403 expRef2, expId2 = exposures[1] 

404 # use get() to obtain `lsst.afw.image.Exposure` 

405 exp1, exp2 = expRef1.get(), expRef2.get() 

406 

407 if len(exposures) > 2: 

408 self.log.warning("Already found 2 exposures at %s %f. Ignoring exposures: %s", 

409 self.config.matchExposuresType, expTime, 

410 ", ".join(str(i[1]) for i in exposures[2:])) 

411 # Mask pixels at the edge of the detector or of each amp 

412 if self.config.numEdgeSuspect > 0: 

413 isrTask.maskEdges(exp1, numEdgePixels=self.config.numEdgeSuspect, 

414 maskPlane="SUSPECT", level=self.config.edgeMaskLevel) 

415 isrTask.maskEdges(exp2, numEdgePixels=self.config.numEdgeSuspect, 

416 maskPlane="SUSPECT", level=self.config.edgeMaskLevel) 

417 

418 nAmpsNan = 0 

419 partialPtcDataset = PhotonTransferCurveDataset(ampNames, 'PARTIAL', 

420 self.config.maximumRangeCovariancesAstier) 

421 for ampNumber, amp in enumerate(detector): 

422 ampName = amp.getName() 

423 if self.config.detectorMeasurementRegion == 'AMP': 

424 region = amp.getBBox() 

425 elif self.config.detectorMeasurementRegion == 'FULL': 

426 region = None 

427 

428 # Get masked image regions, masking planes, statistic control 

429 # objects, and clipped means. Calculate once to reuse in 

430 # `measureMeanVarCov` and `getGainFromFlatPair`. 

431 im1Area, im2Area, imStatsCtrl, mu1, mu2 = self.getImageAreasMasksStats(exp1, exp2, 

432 region=region) 

433 

434 # `measureMeanVarCov` is the function that measures 

435 # the variance and covariances from a region of 

436 # the difference image of two flats at the same 

437 # exposure time. The variable `covAstier` that is 

438 # returned is of the form: 

439 # [(i, j, var (cov[0,0]), cov, npix) for (i,j) in 

440 # {maxLag, maxLag}^2]. 

441 muDiff, varDiff, covAstier = self.measureMeanVarCov(im1Area, im2Area, imStatsCtrl, mu1, mu2) 

442 # Estimate the gain from the flat pair 

443 if self.config.doGain: 

444 gain = self.getGainFromFlatPair(im1Area, im2Area, imStatsCtrl, mu1, mu2, 

445 correctionType=self.config.gainCorrectionType, 

446 readNoise=readNoiseDict[ampName]) 

447 else: 

448 gain = np.nan 

449 

450 # Correction factor for bias introduced by sigma 

451 # clipping. 

452 # Function returns 1/sqrt(varFactor), so it needs 

453 # to be squared. varDiff is calculated via 

454 # afwMath.VARIANCECLIP. 

455 varFactor = sigmaClipCorrection(self.config.nSigmaClipPtc)**2 

456 varDiff *= varFactor 
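# Note (added): for the default nSigmaClipPtc of 5.5, clipping removes only
# a tiny fraction of a Gaussian distribution's variance, so this correction
# factor is very close to 1; it matters more for tighter clipping thresholds.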

457 

458 expIdMask = True 

459 # Mask data point at this mean signal level if 

460 # the signal, variance, or covariance calculations 

461 # from `measureMeanVarCov` resulted in NaNs. 

462 if np.isnan(muDiff) or np.isnan(varDiff) or (covAstier is None): 

463 self.log.warning("NaN mean or var, or None cov in amp %s in exposure pair %d, %d of " 

464 "detector %d.", ampName, expId1, expId2, detNum) 

465 nAmpsNan += 1 

466 expIdMask = False 

467 covArray = np.full((1, self.config.maximumRangeCovariancesAstier, 

468 self.config.maximumRangeCovariancesAstier), np.nan) 

469 covSqrtWeights = np.full_like(covArray, np.nan) 

470 

471 # Mask data point if it is outside of the 

472 # specified mean signal range. 

473 if (muDiff <= minMeanSignalDict[ampName]) or (muDiff >= maxMeanSignalDict[ampName]): 

474 expIdMask = False 

475 

476 if covAstier is not None: 

477 # Turn the tuples with the measured information 

478 # into covariance arrays. 

479 # covrow: (i, j, var (cov[0,0]), cov, npix) 

480 tupleRows = [(muDiff, varDiff) + covRow + (ampNumber, expTime, 

481 ampName) for covRow in covAstier] 

482 tempStructArray = np.array(tupleRows, dtype=tags) 

483 

484 covArray, vcov, _ = self.makeCovArray(tempStructArray, 

485 self.config.maximumRangeCovariancesAstier) 

486 

487 # The returned covArray should only have 1 entry; 

488 # raise if this is not the case. 

489 if covArray.shape[0] != 1: 

490 raise RuntimeError("Serious programming error in covArray shape.") 

491 

492 covSqrtWeights = np.nan_to_num(1./np.sqrt(vcov)) 

493 

494 # Correct covArray for sigma clipping: 

495 # 1) Apply varFactor twice for the whole covariance matrix 

496 covArray *= varFactor**2 

497 # 2) But, only once for the variance element of the 

498 # matrix, covArray[0, 0, 0] (so divide one factor out). 

499 # (the first 0 is because this is a 3D array for insertion into 

500 # the combined dataset). 

501 covArray[0, 0, 0] /= varFactor 

502 

503 partialPtcDataset.setAmpValuesPartialDataset( 

504 ampName, 

505 inputExpIdPair=(expId1, expId2), 

506 rawExpTime=expTime, 

507 rawMean=muDiff, 

508 rawVar=varDiff, 

509 expIdMask=expIdMask, 

510 covariance=covArray[0, :, :], 

511 covSqrtWeights=covSqrtWeights[0, :, :], 

512 gain=gain, 

513 noise=readNoiseDict[ampName], 

514 ) 

515 

516 # Use location of exp1 to save PTC dataset from (exp1, exp2) pair. 

517 # Below, np.where(expId1 == np.array(inputDims)) returns a tuple 

518 # with a single-element array, so [0][0] 

519 # is necessary to extract the required index. 

520 datasetIndex = np.where(expId1 == np.array(inputDims))[0][0] 

521 # `partialPtcDatasetList` is a list of 

522 # `PhotonTransferCurveDataset` objects. Some of them 

523 # will be dummy datasets (to match length of input 

524 # and output references), and the rest will hold 

525 # the mean signal, variance, and 

526 # covariance measurements at a given exposure 

527 # time. The next part of the PTC-measurement 

528 # pipeline, `solve`, will take this list as input, 

529 # and assemble the measurements in the datasets 

530 # in an adequate manner for fitting a PTC 

531 # model. 

532 partialPtcDataset.updateMetadataFromExposures([exp1, exp2]) 

533 partialPtcDataset.updateMetadata(setDate=True, detector=detector) 

534 partialPtcDatasetList[datasetIndex] = partialPtcDataset 

535 

536 if nAmpsNan == len(ampNames): 

537 msg = f"NaN mean in all amps of exposure pair {expId1}, {expId2} of detector {detNum}." 

538 self.log.warning(msg) 

539 

540 return pipeBase.Struct( 

541 outputCovariances=partialPtcDatasetList, 

542 ) 

543 

544 def makeCovArray(self, inputTuple, maxRangeFromTuple): 

545 """Make covariances array from tuple. 

546 

547 Parameters 

548 ---------- 

549 inputTuple : `numpy.ndarray` 

550 Structured array with rows with at least 

551 (mu, afwVar, cov, var, i, j, npix), where: 

552 mu : `float` 

553 0.5*(mu1 + mu2), where mu1 is the mean value of flat1 

554 and mu2 is the mean value of flat2. 

555 afwVar : `float` 

556 Variance of difference flat, calculated with afw. 

557 cov : `float` 

558 Covariance value at lag(i, j) 

559 var : `float` 

560 Variance(covariance value at lag(0, 0)) 

561 i : `int` 

562 Lag in dimension "x". 

563 j : `int` 

564 Lag in dimension "y". 

565 npix : `int` 

566 Number of pixels used for covariance calculation. 

567 maxRangeFromTuple : `int` 

568 Maximum range to select from tuple. 

569 

570 Returns 

571 ------- 

572 cov : `numpy.array` 

573 Covariance arrays, indexed by mean signal mu. 

574 vCov : `numpy.array` 

575 Variance of the [co]variance arrays, indexed by mean signal mu. 

576 muVals : `numpy.array` 

577 List of mean signal values. 

578 """ 

579 if maxRangeFromTuple is not None: 

580 cut = (inputTuple['i'] < maxRangeFromTuple) & (inputTuple['j'] < maxRangeFromTuple) 

581 cutTuple = inputTuple[cut] 

582 else: 

583 cutTuple = inputTuple 

584 # increasing mu order, so that we can group measurements with the 

585 # same mu 

586 muTemp = cutTuple['mu'] 

587 ind = np.argsort(muTemp) 

588 

589 cutTuple = cutTuple[ind] 

590 # should group measurements on the same image pairs (same average) 

591 mu = cutTuple['mu'] 

592 xx = np.hstack(([mu[0]], mu)) 

593 delta = xx[1:] - xx[:-1] 

594 steps, = np.where(delta > 0) 

595 ind = np.zeros_like(mu, dtype=int) 

596 ind[steps] = 1 

597 ind = np.cumsum(ind) # this acts as an image pair index. 
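# Illustrative example (added): if mu were [10., 10., 20., 20.], then
# delta = [0., 0., 10., 0.], steps = [2], and ind = [0, 0, 1, 1], i.e. rows
# sharing the same mean get the same pair index. In this task each call
# receives rows from a single flat pair, so ind is all zeros and the returned
# cov array has a single entry along its first axis (checked by the caller).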

598 # now fill the 3-d cov array (and variance) 

599 muVals = np.array(np.unique(mu)) 

600 i = cutTuple['i'].astype(int) 

601 j = cutTuple['j'].astype(int) 

602 c = 0.5*cutTuple['cov'] 

603 n = cutTuple['npix'] 

604 v = 0.5*cutTuple['var'] 

605 # allocate and fill 

606 cov = np.ndarray((len(muVals), np.max(i)+1, np.max(j)+1)) 

607 var = np.zeros_like(cov) 

608 cov[ind, i, j] = c 

609 var[ind, i, j] = v**2/n 

610 var[:, 0, 0] *= 2 # var(v) = 2*v**2/N 

611 

612 return cov, var, muVals 

613 

614 def measureMeanVarCov(self, im1Area, im2Area, imStatsCtrl, mu1, mu2): 

615 """Calculate the mean of each of two exposures and the variance 

616 and covariance of their difference. The variance is calculated 

617 via afwMath, and the covariance via the methods in Astier+19 

618 (appendix A). In theory, var = covariance[0,0]. This should 

619 be validated, and in the future, we may decide to just keep 

620 one (covariance). 

621 

622 Parameters 

623 ---------- 

624 im1Area : `lsst.afw.image.maskedImage.MaskedImageF` 

625 Masked image from exposure 1. 

626 im2Area : `lsst.afw.image.maskedImage.MaskedImageF` 

627 Masked image from exposure 2. 

628 imStatsCtrl : `lsst.afw.math.StatisticsControl` 

629 Statistics control object. 

630 mu1 : `float` 

631 Clipped mean of im1Area (ADU). 

632 mu2 : `float` 

633 Clipped mean of im2Area (ADU). 

634 

635 Returns 

636 ------- 

637 mu : `float` or `NaN` 

638 0.5*(mu1 + mu2), where mu1, and mu2 are the clipped means 

639 of the regions in both exposures. If either mu1 or mu2 is 

640 NaN, the returned value is NaN. 

641 varDiff : `float` or `NaN` 

642 Half of the clipped variance of the difference of the 

643 regions in the two input exposures. If either mu1 or mu2 is 

644 NaN, the returned value is NaN. 

645 covDiffAstier : `list` or `NaN` 

646 List with tuples of the form (dx, dy, var, cov, npix), where: 

647 dx : `int` 

648 Lag in x 

649 dy : `int` 

650 Lag in y 

651 var : `float` 

652 Variance at (dx, dy). 

653 cov : `float` 

654 Covariance at (dx, dy). 

655 nPix : `int` 

656 Number of pixel pairs used to evaluate var and cov. 

657 

658 If either mu1 or mu2 is NaN, the returned value is NaN. 

659 """ 

660 if np.isnan(mu1) or np.isnan(mu2): 

661 self.log.warning("Mean of amp in image 1 or 2 is NaN: %f, %f.", mu1, mu2) 

662 return np.nan, np.nan, None 

663 mu = 0.5*(mu1 + mu2) 

664 

665 # Take difference of pairs 

666 # symmetric formula: diff = (mu2*im1-mu1*im2)/(0.5*(mu1+mu2)) 

667 temp = im2Area.clone() 

668 temp *= mu1 

669 diffIm = im1Area.clone() 

670 diffIm *= mu2 

671 diffIm -= temp 

672 diffIm /= mu 

673 

674 if self.config.binSize > 1: 

675 diffIm = afwMath.binImage(diffIm, self.config.binSize) 

676 

677 # Variance calculation via afwMath 

678 varDiff = 0.5*(afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, imStatsCtrl).getValue()) 
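# Note (added): the difference of two statistically similar flats has roughly
# twice the per-image variance (the fixed pattern cancels while the noise of
# the two images adds), hence the factor of 0.5 to recover the single-image
# variance.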

679 

680 # Covariances calculations 

681 # Get the pixels that were not clipped 

682 varClip = afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, imStatsCtrl).getValue() 

683 meanClip = afwMath.makeStatistics(diffIm, afwMath.MEANCLIP, imStatsCtrl).getValue() 

684 cut = meanClip + self.config.nSigmaClipPtc*np.sqrt(varClip) 

685 unmasked = np.where(np.fabs(diffIm.image.array) <= cut, 1, 0) 
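# Note (added): because the scaled difference image has a clipped mean close
# to zero, this absolute-value cut approximately reproduces the nSigmaClipPtc
# clipping used by VARIANCECLIP above, so the FFT-based covariances are
# computed from (nearly) the same pixel set.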

686 

687 # Get the pixels in the mask planes of the difference image 

688 # that were ignored by the clipping algorithm 

689 wDiff = np.where(diffIm.getMask().getArray() == 0, 1, 0) 

690 # Combine the two sets of pixels ('1': use; '0': don't use) 

691 # into a final weight matrix to be used in the covariance 

692 # calculations below. 

693 w = unmasked*wDiff 

694 

695 if np.sum(w) < self.config.minNumberGoodPixelsForCovariance/(self.config.binSize**2): 

696 self.log.warning("Number of good points for covariance calculation (%s) is less " 

697 "(than threshold %s)", np.sum(w), 

698 self.config.minNumberGoodPixelsForCovariance/(self.config.binSize**2)) 

699 return np.nan, np.nan, None 

700 

701 maxRangeCov = self.config.maximumRangeCovariancesAstier 

702 

703 # Calculate covariances via FFT. 

704 shapeDiff = np.array(diffIm.image.array.shape) 

705 # Calculate the sizes of FFT dimensions. 

706 s = shapeDiff + maxRangeCov 

707 tempSize = np.array(np.log(s)/np.log(2.)).astype(int) 

708 fftSize = np.array(2**(tempSize+1)).astype(int) 

709 fftShape = (fftSize[0], fftSize[1]) 
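# Illustrative example (added), assuming a hypothetical 2048 x 512 amp region
# and the default maximumRangeCovariancesAstier of 8: s = [2056, 520],
# tempSize = [11, 9], and fftShape = (4096, 1024), i.e. each padded dimension
# is the next power of two strictly greater than the padded image size, which
# avoids wrap-around in the FFT-based covariance estimate.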

710 c = CovFastFourierTransform(diffIm.image.array, w, fftShape, maxRangeCov) 

711 # np.sum(w) is the same as npix[0][0] returned in covDiffAstier 

712 covDiffAstier = c.reportCovFastFourierTransform(maxRangeCov) 

713 

714 # Compare Cov[0,0] and afwMath.VARIANCECLIP: covDiffAstier[0] 

715 # is the lag (0, 0) entry, its element [3] is the variance, and 

716 # there is a factor of 0.5 difference with afwMath.VARIANCECLIP. 

717 thresholdPercentage = self.config.thresholdDiffAfwVarVsCov00 

718 fractionalDiff = 100*np.fabs(1 - varDiff/(covDiffAstier[0][3]*0.5)) 

719 if fractionalDiff >= thresholdPercentage: 

720 self.log.warning("Absolute fractional difference between afwMatch.VARIANCECLIP and Cov[0,0] " 

721 "is more than %f%%: %f", thresholdPercentage, fractionalDiff) 

722 

723 return mu, varDiff, covDiffAstier 

724 

725 def getImageAreasMasksStats(self, exposure1, exposure2, region=None): 

726 """Get image areas in a region as well as masks and statistic objects. 

727 

728 Parameters 

729 ---------- 

730 exposure1 : `lsst.afw.image.exposure.ExposureF` 

731 First exposure of flat field pair. 

732 exposure2 : `lsst.afw.image.exposure.ExposureF` 

733 Second exposure of flat field pair. 

734 region : `lsst.geom.Box2I`, optional 

735 Region of each exposure in which to perform the calculations 

736 (e.g., an amplifier). 

737 

738 Returns 

739 ------- 

740 im1Area : `lsst.afw.image.maskedImage.MaskedImageF` 

741 Masked image from exposure 1. 

742 im2Area : `lsst.afw.image.maskedImage.MaskedImageF` 

743 Masked image from exposure 2. 

744 imStatsCtrl : `lsst.afw.math.StatisticsControl` 

745 Statistics control object. 

746 mu1 : `float` 

747 Clipped mean of im1Area (ADU). 

748 mu2 : `float` 

749 Clipped mean of im2Area (ADU). 

750 """ 

751 if region is not None: 

752 im1Area = exposure1.maskedImage[region] 

753 im2Area = exposure2.maskedImage[region] 

754 else: 

755 im1Area = exposure1.maskedImage 

756 im2Area = exposure2.maskedImage 

757 

758 # Get mask planes and construct statistics control object from one 

759 # of the exposures 

760 imMaskVal = exposure1.getMask().getPlaneBitMask(self.config.maskNameList) 

761 imStatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc, 

762 self.config.nIterSigmaClipPtc, 

763 imMaskVal) 

764 imStatsCtrl.setNanSafe(True) 

765 imStatsCtrl.setAndMask(imMaskVal) 

766 

767 mu1 = afwMath.makeStatistics(im1Area, afwMath.MEANCLIP, imStatsCtrl).getValue() 

768 mu2 = afwMath.makeStatistics(im2Area, afwMath.MEANCLIP, imStatsCtrl).getValue() 

769 

770 return (im1Area, im2Area, imStatsCtrl, mu1, mu2) 

771 

772 def getGainFromFlatPair(self, im1Area, im2Area, imStatsCtrl, mu1, mu2, 

773 correctionType='NONE', readNoise=None): 

774 """Estimate the gain from a single pair of flats. 

775 

776 The basic premise is 1/g = <(I1 - I2)^2/(I1 + I2)> = const, 

777 where I1 and I2 correspond to flats 1 and 2, respectively. 

778 Corrections for the variable QE and the read-noise are then 

779 made following the derivation in Robert Lupton's forthcoming 

780 book, which gets 

781 

782 1/g = <(I1 - I2)^2/(I1 + I2)> - (1/mu)*(sigma^2 - 1/(2*g^2)). 

783 

784 This is a quadratic equation, whose solutions are given by: 

785 

786 g = (mu +/- sqrt(2*sigma^2 - 2*const*mu + mu^2))/(2*const*mu 

787 - 2*sigma^2) 

788 

789 where 'mu' is the average signal level and 'sigma' is the 

790 amplifier's readnoise. The positive solution will be used. 

791 The way the correction is applied depends on the value 

792 supplied for correctionType. 

793 

794 correctionType is one of ['NONE', 'SIMPLE' or 'FULL'] 

795 'NONE' : uses the 1/g = <(I1 - I2)^2/(I1 + I2)> formula. 

796 'SIMPLE' : uses the gain from the 'NONE' method for the 

797 1/2g^2 term. 

798 'FULL' : solves the full equation for g, discarding the 

799 non-physical solution to the resulting quadratic. 

800 

801 Parameters 

802 ---------- 

803 im1Area : `lsst.afw.image.maskedImage.MaskedImageF` 

804 Masked image from exposure 1. 

805 im2Area : `lsst.afw.image.maskedImage.MaskedImageF` 

806 Masked image from exposure 2. 

807 imStatsCtrl : `lsst.afw.math.StatisticsControl` 

808 Statistics control object. 

809 mu1 : `float` 

810 Clipped mean of im1Area (ADU). 

811 mu2 : `float` 

812 Clipped mean of im2Area (ADU). 

813 correctionType : `str`, optional 

814 The correction applied, one of ['NONE', 'SIMPLE', 'FULL'] 

815 readNoise : `float`, optional 

816 Amplifier readout noise (ADU). 

817 

818 Returns 

819 ------- 

820 gain : `float` 

821 Gain, in e/ADU. 

822 

823 Raises 

824 ------ 

825 RuntimeError 

826 Raise if `correctionType` is not one of 'NONE', 

827 'SIMPLE', or 'FULL'. 

828 """ 

829 if correctionType not in ['NONE', 'SIMPLE', 'FULL']: 

830 raise RuntimeError("Unknown correction type: %s" % correctionType) 

831 

832 if correctionType != 'NONE' and not np.isfinite(readNoise): 

833 self.log.warning("'correctionType' in 'getGainFromFlatPair' is %s, " 

834 "but 'readNoise' is NaN. Setting 'correctionType' " 

835 "to 'NONE', so a gain value will be estimated without " 

836 "corrections." % correctionType) 

837 correctionType = 'NONE' 

838 

839 mu = 0.5*(mu1 + mu2) 

840 

841 # ratioIm = (I1 - I2)^2 / (I1 + I2) 

842 temp = im2Area.clone() 

843 ratioIm = im1Area.clone() 

844 ratioIm -= temp 

845 ratioIm *= ratioIm 

846 

847 # Sum of pairs 

848 sumIm = im1Area.clone() 

849 sumIm += temp 

850 

851 ratioIm /= sumIm 

852 

853 const = afwMath.makeStatistics(ratioIm, afwMath.MEAN, imStatsCtrl).getValue() 

854 gain = 1. / const 

855 

856 if correctionType == 'SIMPLE': 

857 gain = 1/(const - (1/mu)*(readNoise**2 - (1/2*gain**2))) 

858 elif correctionType == 'FULL': 

859 root = np.sqrt(mu**2 - 2*mu*const + 2*readNoise**2) 

860 denom = (2*const*mu - 2*readNoise**2) 

861 positiveSolution = (root + mu)/denom 

862 gain = positiveSolution 
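# Illustrative check (added), with hypothetical values: for a true gain of
# 1.5 e/ADU, mu = 10000 ADU, and readNoise = 5 ADU, the measured const would
# be about 1/1.5 + (5**2 - 1/(2*1.5**2))/10000 ~= 0.66914, and the positive
# root above gives
#     (10000 + sqrt(10000**2 - 2*10000*0.66914 + 2*5**2))
#         / (2*0.66914*10000 - 2*5**2) ~= 1.5,
# recovering the input gain.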

863 

864 return gain 

865 

866 def getReadNoise(self, exposureMetadata, taskMetadata, ampName): 

867 """Gets readout noise for an amp from ISR metadata. 

868 

869 If possible, this attempts to get the now-standard headers 

870 added to the exposure itself. If not found there, the ISR 

871 TaskMetadata is searched. If neither of these has the value, 

872 warn and set the read noise to NaN. 

873 

874 Parameters 

875 ---------- 

876 exposureMetadata : `lsst.daf.base.PropertySet` 

877 Metadata to check for read noise first. 

878 taskMetadata : `lsst.pipe.base.TaskMetadata` 

879 ISR task metadata for this exposure. 

880 ampName : `str` 

881 Amplifier name. 

882 

883 Returns 

884 ------- 

885 readNoise : `float` 

886 The read noise for this set of exposure/amplifier. 

887 """ 

888 # Try from the exposure first. 

889 expectedKey = f"LSST ISR OVERSCAN RESIDUAL SERIAL STDEV {ampName}" 

890 if expectedKey in exposureMetadata: 

891 return exposureMetadata[expectedKey] 

892 

893 # If not, try getting it from the task metadata. 

894 expectedKey = f"RESIDUAL STDEV {ampName}" 

895 if "isr" in taskMetadata: 

896 if expectedKey in taskMetadata["isr"]: 

897 return taskMetadata["isr"][expectedKey] 

898 

899 self.log.warning("Median readout noise from ISR metadata for amp %s " 

900 "could not be calculated." % ampName) 

901 return np.nan
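# Illustrative usage sketch (added, not part of the original module): the
# statistics helpers can be exercised directly on a pair of post-ISR flat
# exposures, bypassing the butler plumbing in runQuantum. `exp1` and `exp2`
# below are hypothetical `lsst.afw.image.ExposureF` objects.
#
#     config = PhotonTransferCurveExtractConfig()
#     task = PhotonTransferCurveExtractTask(config=config)
#     amp = exp1.getDetector().getAmplifiers()[0]
#     im1, im2, ctrl, mu1, mu2 = task.getImageAreasMasksStats(
#         exp1, exp2, region=amp.getBBox())
#     mu, var, covs = task.measureMeanVarCov(im1, im2, ctrl, mu1, mu2)
#     gain = task.getGainFromFlatPair(im1, im2, ctrl, mu1, mu2,
#                                     correctionType='FULL', readNoise=5.0)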