Coverage for python/lsst/cp/pipe/ptc/cpExtractPtcTask.py: 11%

349 statements  

« prev     ^ index     » next       coverage.py v7.4.0, created at 2024-01-18 13:23 +0000

1# This file is part of cp_pipe. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (https://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# This program is free software: you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation, either version 3 of the License, or 

12# (at your option) any later version. 

13# 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18# 

19# You should have received a copy of the GNU General Public License 

20# along with this program. If not, see <https://www.gnu.org/licenses/>. 

21# 

22import numpy as np 

23from lmfit.models import GaussianModel 

24import scipy.stats 

25import warnings 

26 

27import lsst.afw.math as afwMath 

28import lsst.pex.config as pexConfig 

29import lsst.pipe.base as pipeBase 

30from lsst.cp.pipe.utils import (arrangeFlatsByExpTime, arrangeFlatsByExpId, 

31 arrangeFlatsByExpFlux, sigmaClipCorrection, 

32 CovFastFourierTransform) 

33 

34import lsst.pipe.base.connectionTypes as cT 

35 

36from lsst.ip.isr import PhotonTransferCurveDataset 

37from lsst.ip.isr import IsrTask 

38 

39__all__ = ['PhotonTransferCurveExtractConfig', 'PhotonTransferCurveExtractTask'] 

40 

41 

class PhotonTransferCurveExtractConnections(pipeBase.PipelineTaskConnections,
                                            dimensions=("instrument", "detector")):
    """Connections for PhotonTransferCurveExtractTask.

    Inputs are post-ISR flat exposures (deferred-loaded), optional
    photodiode readings, and the ISR task metadata; the output is one
    (possibly dummy) partial PTC dataset per input exposure.
    """

    # Post-ISR flats; deferLoad=True so `run` can fetch pixels lazily,
    # pair by pair, instead of holding every exposure in memory.
    inputExp = cT.Input(
        name="ptcInputExposurePairs",
        doc="Input post-ISR processed exposure pairs (flats) to"
            "measure covariances from.",
        storageClass="Exposure",
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
        deferLoad=True,
    )
    # Optional per-exposure photodiode monitoring data; this connection
    # is removed in __init__ unless config.doExtractPhotodiodeData is set.
    inputPhotodiodeData = cT.Input(
        name="photodiode",
        doc="Photodiode readings data.",
        storageClass="IsrCalib",
        dimensions=("instrument", "exposure"),
        multiple=True,
        deferLoad=True,
    )
    # ISR task metadata, used as a fallback source for read-noise values.
    taskMetadata = cT.Input(
        name="isr_metadata",
        doc="Input task metadata to extract statistics from.",
        storageClass="TaskMetadata",
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
    )
    # One partial PTC dataset per input exposure (dummies pad the list so
    # input and output dimensions match bijectively).
    outputCovariances = cT.Output(
        name="ptcCovariances",
        doc="Extracted flat (co)variances.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "exposure", "detector"),
        isCalibration=True,
        multiple=True,
    )

    def __init__(self, *, config=None):
        # Drop the photodiode input connection entirely when photodiode
        # extraction is not requested, so the quantum graph does not
        # require photodiode datasets to exist.
        if not config.doExtractPhotodiodeData:
            del self.inputPhotodiodeData

81 

82 

class PhotonTransferCurveExtractConfig(pipeBase.PipelineTaskConfig,
                                       pipelineConnections=PhotonTransferCurveExtractConnections):
    """Configuration for the measurement of covariances from flats.

    Only spelling corrections were applied to user-visible ``doc``
    strings ("direclty" -> "directly", "differece" -> "difference");
    all field names, types, and defaults are unchanged.
    """
    matchExposuresType = pexConfig.ChoiceField(
        dtype=str,
        doc="Match input exposures by time, flux, or expId",
        default='TIME',
        allowed={
            "TIME": "Match exposures by exposure time.",
            "FLUX": "Match exposures by target flux. Use header keyword"
                    " in matchExposuresByFluxKeyword to find the flux.",
            "EXPID": "Match exposures by exposure ID."
        }
    )
    matchExposuresByFluxKeyword = pexConfig.Field(
        dtype=str,
        doc="Header keyword for flux if matchExposuresType is FLUX.",
        default='CCOBFLUX',
    )
    maximumRangeCovariancesAstier = pexConfig.Field(
        dtype=int,
        doc="Maximum range of covariances as in Astier+19",
        default=8,
    )
    binSize = pexConfig.Field(
        dtype=int,
        doc="Bin the image by this factor in both dimensions.",
        default=1,
    )
    # Deprecated: the signal-range cuts now belong to the solve task.
    minMeanSignal = pexConfig.DictField(
        keytype=str,
        itemtype=float,
        doc="Minimum values (inclusive) of mean signal (in ADU) per amp to use."
            " The same cut is applied to all amps if this parameter [`dict`] is passed as "
            " {'ALL_AMPS': value}",
        default={'ALL_AMPS': 0.0},
        deprecated="This config has been moved to cpSolvePtcTask, and will be removed after v26.",
    )
    maxMeanSignal = pexConfig.DictField(
        keytype=str,
        itemtype=float,
        doc="Maximum values (inclusive) of mean signal (in ADU) below which to consider, per amp."
            " The same cut is applied to all amps if this dictionary is of the form"
            " {'ALL_AMPS': value}",
        default={'ALL_AMPS': 1e6},
        deprecated="This config has been moved to cpSolvePtcTask, and will be removed after v26.",
    )
    maskNameList = pexConfig.ListField(
        dtype=str,
        doc="Mask list to exclude from statistics calculations.",
        default=['SUSPECT', 'BAD', 'NO_DATA', 'SAT'],
    )
    nSigmaClipPtc = pexConfig.Field(
        dtype=float,
        doc="Sigma cut for afwMath.StatisticsControl()",
        default=5.5,
    )
    nIterSigmaClipPtc = pexConfig.Field(
        dtype=int,
        doc="Number of sigma-clipping iterations for afwMath.StatisticsControl()",
        default=3,
    )
    minNumberGoodPixelsForCovariance = pexConfig.Field(
        dtype=int,
        doc="Minimum number of acceptable good pixels per amp to calculate the covariances (via FFT or"
            " directly).",
        default=10000,
    )
    thresholdDiffAfwVarVsCov00 = pexConfig.Field(
        dtype=float,
        doc="If the absolute fractional difference between afwMath.VARIANCECLIP and Cov00 "
            "for a region of a difference image is greater than this threshold (percentage), "
            "a warning will be issued.",
        default=1.,
    )
    detectorMeasurementRegion = pexConfig.ChoiceField(
        dtype=str,
        doc="Region of each exposure where to perform the calculations (amplifier or full image).",
        default='AMP',
        allowed={
            "AMP": "Amplifier of the detector.",
            "FULL": "Full image."
        }
    )
    numEdgeSuspect = pexConfig.Field(
        dtype=int,
        doc="Number of edge pixels to be flagged as untrustworthy.",
        default=0,
    )
    edgeMaskLevel = pexConfig.ChoiceField(
        dtype=str,
        doc="Mask edge pixels in which coordinate frame: DETECTOR or AMP?",
        default="DETECTOR",
        allowed={
            'DETECTOR': 'Mask only the edges of the full detector.',
            'AMP': 'Mask edges of each amplifier.',
        },
    )
    doGain = pexConfig.Field(
        dtype=bool,
        doc="Calculate a gain per input flat pair.",
        default=True,
    )
    gainCorrectionType = pexConfig.ChoiceField(
        dtype=str,
        doc="Correction type for the gain.",
        default='FULL',
        allowed={
            'NONE': 'No correction.',
            'SIMPLE': 'First order correction.',
            'FULL': 'Second order correction.'
        }
    )
    ksHistNBins = pexConfig.Field(
        dtype=int,
        doc="Number of bins for the KS test histogram.",
        default=100,
    )
    ksHistLimitMultiplier = pexConfig.Field(
        dtype=float,
        doc="Number of sigma (as predicted from the mean value) to compute KS test histogram.",
        default=8.0,
    )
    ksHistMinDataValues = pexConfig.Field(
        dtype=int,
        doc="Minimum number of good data values to compute KS test histogram.",
        default=100,
    )
    auxiliaryHeaderKeys = pexConfig.ListField(
        dtype=str,
        doc="Auxiliary header keys to store with the PTC dataset.",
        default=[],
    )
    doExtractPhotodiodeData = pexConfig.Field(
        dtype=bool,
        doc="Extract photodiode data?",
        default=False,
    )
    photodiodeIntegrationMethod = pexConfig.ChoiceField(
        dtype=str,
        doc="Integration method for photodiode monitoring data.",
        default="CHARGE_SUM",
        allowed={
            "DIRECT_SUM": ("Use numpy's trapz integrator on all photodiode "
                           "readout entries"),
            "TRIMMED_SUM": ("Use numpy's trapz integrator, clipping the "
                            "leading and trailing entries, which are "
                            "nominally at zero baseline level."),
            "CHARGE_SUM": ("Treat the current values as integrated charge "
                           "over the sampling interval and simply sum "
                           "the values, after subtracting a baseline level."),
        },
    )
    photodiodeCurrentScale = pexConfig.Field(
        dtype=float,
        doc="Scale factor to apply to photodiode current values for the "
            "``CHARGE_SUM`` integration method.",
        default=-1.0,
    )

243 

244 

245class PhotonTransferCurveExtractTask(pipeBase.PipelineTask): 

246 """Task to measure covariances from flat fields. 

247 

248 This task receives as input a list of flat-field images 

249 (flats), and sorts these flats in pairs taken at the 

250 same time (the task will raise if there is only one flat 

251 at a given exposure time, and it will discard extra flats if 

252 there are more than two per exposure time). This task measures 

253 the mean, variance, and covariances from a region (e.g., 

254 an amplifier) of the difference image of the two flats with 

255 the same exposure time (alternatively, all input images could have 

256 the same exposure time but their flux changed). 

257 

258 The variance is calculated via afwMath, and the covariance 

259 via the methods in Astier+19 (appendix A). In theory, 

260 var = covariance[0,0]. This should be validated, and in the 

261 future, we may decide to just keep one (covariance). 

262 At this moment, if the two values differ by more than the value 

263 of `thresholdDiffAfwVarVsCov00` (default: 1%), a warning will 

264 be issued. 

265 

266 The measured covariances at a given exposure time (along with 

267 other quantities such as the mean) are stored in a PTC dataset 

268 object (`~lsst.ip.isr.PhotonTransferCurveDataset`), which gets 

269 partially filled at this stage (the remainder of the attributes 

270 of the dataset will be filled after running the second task of 

271 the PTC-measurement pipeline, `~PhotonTransferCurveSolveTask`). 

272 

273 The number of partially-filled 

274 `~lsst.ip.isr.PhotonTransferCurveDataset` objects will be less 

275 than the number of input exposures because the task combines 

276 input flats in pairs. However, it is required at this moment 

277 that the number of input dimensions matches 

278 bijectively the number of output dimensions. Therefore, a number 

279 of "dummy" PTC datasets are inserted in the output list. This 

280 output list will then be used as input of the next task in the 

281 PTC-measurement pipeline, `PhotonTransferCurveSolveTask`, 

282 which will assemble the multiple `PhotonTransferCurveDataset` 

283 objects into a single one in order to fit the measured covariances 

284 as a function of flux to one of three models 

285 (see `PhotonTransferCurveSolveTask` for details). 

286 

287 Reference: Astier+19: "The Shape of the Photon Transfer Curve of CCD 

288 sensors", arXiv:1905.08677. 

289 """ 

290 

291 ConfigClass = PhotonTransferCurveExtractConfig 

292 _DefaultName = 'cpPtcExtract' 

293 

def runQuantum(self, butlerQC, inputRefs, outputRefs):
    """Ensure that the input and output dimensions are passed along.

    Parameters
    ----------
    butlerQC : `~lsst.daf.butler.QuantumContext`
        Butler to operate on.
    inputRefs : `~lsst.pipe.base.InputQuantizedConnection`
        Input data refs to load.
    outputRefs : `~lsst.pipe.base.OutputQuantizedConnection`
        Output data refs to persist.
    """
    inputs = butlerQC.get(inputRefs)

    # Exposure IDs of the deferred-load input references, in order.
    inputs['inputDims'] = [ref.datasetRef.dataId['exposure'] for ref in inputRefs.inputExp]

    # Group the flats into a dictionary keyed by exposure time, target
    # flux, or exposure ID, depending on the configured match type.
    matchType = self.config.matchExposuresType
    if matchType == 'TIME':
        grouped = arrangeFlatsByExpTime(inputs['inputExp'], inputs['inputDims'], log=self.log)
    elif matchType == 'FLUX':
        grouped = arrangeFlatsByExpFlux(
            inputs['inputExp'],
            inputs['inputDims'],
            self.config.matchExposuresByFluxKeyword,
            log=self.log,
        )
    else:
        grouped = arrangeFlatsByExpId(inputs['inputExp'], inputs['inputDims'])
    inputs['inputExp'] = grouped

    outputs = self.run(**inputs)
    # Pad with dummy datasets so outputs match outputRefs one-to-one.
    outputs = self._guaranteeOutputs(inputs['inputDims'], outputs, outputRefs)
    butlerQC.put(outputs, outputRefs)

329 

def _guaranteeOutputs(self, inputDims, outputs, outputRefs):
    """Ensure that all outputRefs have a matching output, and if they do
    not, fill the output with dummy PTC datasets.

    Parameters
    ----------
    inputDims : `list` [`int`]
        Input exposure IDs, in the order the ``run`` outputs were built.
    outputs : `lsst.pipe.base.Struct`
        Outputs from the ``run`` method. Contains the entry:

        ``outputCovariances``
            Output PTC datasets (`list` [`lsst.ip.isr.IsrCalib`])
    outputRefs : `~lsst.pipe.base.OutputQuantizedConnection`
        Container with all of the outputs expected to be generated.

    Returns
    -------
    outputs : `lsst.pipe.base.Struct`
        Dummy dataset padded version of the input ``outputs`` with
        the same entries.
    """
    padded = []
    for outputRef in outputRefs.outputCovariances:
        expId = outputRef.dataId['exposure']
        try:
            position = inputDims.index(expId)
        except ValueError:
            # No run() output for this exposure: insert a minimal
            # placeholder dataset so the butler put succeeds.
            placeholder = PhotonTransferCurveDataset(['no amp'], 'DUMMY', covMatrixSide=1)
            placeholder.setAmpValuesPartialDataset('no amp')
            padded.append(placeholder)
        else:
            padded.append(outputs.outputCovariances[position])
    return pipeBase.Struct(outputCovariances=padded)

363 

def run(self, inputExp, inputDims, taskMetadata, inputPhotodiodeData=None):
    """Measure covariances from difference of flat pairs.

    Parameters
    ----------
    inputExp : `dict` [`float`, `list`
            [`~lsst.pipe.base.connections.DeferredDatasetRef`]]
        Dictionary that groups references to flat-field exposures that
        have the same exposure time (seconds), or that groups them
        sequentially by their exposure id.
    inputDims : `list`
        List of exposure IDs.
    taskMetadata : `list` [`lsst.pipe.base.TaskMetadata`]
        List of exposures metadata from ISR.
    inputPhotodiodeData : `dict` [`str`, `lsst.ip.isr.PhotodiodeCalib`]
        Photodiode readings data (optional).

    Returns
    -------
    results : `lsst.pipe.base.Struct`
        The resulting Struct contains:

        ``outputCovariances``
            A list containing the per-pair PTC measurements (`list`
            [`lsst.ip.isr.PhotonTransferCurveDataset`])

    Notes
    -----
    Bug fix relative to the previous version: the positivity check on
    the pair means read ``mu1 + mu2/2 <= 0`` due to a misplaced
    parenthesis; it now tests the average ``(mu1 + mu2)/2 <= 0`` as the
    accompanying comment always intended.
    """
    # inputExp.values() returns a view, which we turn into a list. We then
    # access the first exposure-ID tuple to get the detector.
    # The first "get()" retrieves the exposure from the exposure reference.
    detector = list(inputExp.values())[0][0][0].get(component='detector')
    detNum = detector.getId()
    amps = detector.getAmplifiers()
    ampNames = [amp.getName() for amp in amps]

    # Each amp may have a different min and max ADU signal
    # specified in the config.
    maxMeanSignalDict = {ampName: 1e6 for ampName in ampNames}
    minMeanSignalDict = {ampName: 0.0 for ampName in ampNames}
    for ampName in ampNames:
        if 'ALL_AMPS' in self.config.maxMeanSignal:
            maxMeanSignalDict[ampName] = self.config.maxMeanSignal['ALL_AMPS']
        elif ampName in self.config.maxMeanSignal:
            maxMeanSignalDict[ampName] = self.config.maxMeanSignal[ampName]

        if 'ALL_AMPS' in self.config.minMeanSignal:
            minMeanSignalDict[ampName] = self.config.minMeanSignal['ALL_AMPS']
        elif ampName in self.config.minMeanSignal:
            minMeanSignalDict[ampName] = self.config.minMeanSignal[ampName]

    # These are the column names for `tupleRows` below.
    tags = [('mu', '<f8'), ('afwVar', '<f8'), ('i', '<i8'), ('j', '<i8'), ('var', '<f8'),
            ('cov', '<f8'), ('npix', '<i8'), ('ext', '<i8'), ('expTime', '<f8'), ('ampName', '<U3')]

    # Create a dummy ptcDataset. Dummy datasets will be
    # used to ensure that the number of output and input
    # dimensions match.
    dummyPtcDataset = PhotonTransferCurveDataset(
        ampNames, 'DUMMY',
        covMatrixSide=self.config.maximumRangeCovariancesAstier)
    for ampName in ampNames:
        dummyPtcDataset.setAmpValuesPartialDataset(ampName)

    # Extract the photodiode data if requested.
    if self.config.doExtractPhotodiodeData:
        # Compute the photodiode integrals once, at the start.
        monitorDiodeCharge = {}
        for handle in inputPhotodiodeData:
            expId = handle.dataId['exposure']
            pdCalib = handle.get()
            pdCalib.integrationMethod = self.config.photodiodeIntegrationMethod
            pdCalib.currentScale = self.config.photodiodeCurrentScale
            monitorDiodeCharge[expId] = pdCalib.integrate()

    # Get read noise. Try from the exposure, then try
    # taskMetadata. This adds a get() for the exposures.
    readNoiseLists = {}
    for pairIndex, expRefs in inputExp.items():
        # This yields an index (exposure_time, seq_num, or flux)
        # and a pair of references at that index.
        for expRef, expId in expRefs:
            # This yields an exposure ref and an exposureId.
            exposureMetadata = expRef.get(component="metadata")
            metadataIndex = inputDims.index(expId)
            thisTaskMetadata = taskMetadata[metadataIndex]

            for ampName in ampNames:
                readNoiseLists.setdefault(ampName, []).append(
                    self.getReadNoise(exposureMetadata, thisTaskMetadata, ampName))

    # Take the median read noise value per amp.
    readNoiseDict = {ampName: np.nanmedian(readNoiseLists[ampName]) for ampName in ampNames}

    # Output list with PTC datasets. The number of output references
    # needs to match that of input references, so initialize with dummy
    # PTC datasets (the shared dummy object is only ever replaced, never
    # mutated, so one instance may fill every slot).
    partialPtcDatasetList = [dummyPtcDataset for _ in range(len(inputDims))]

    if self.config.numEdgeSuspect > 0:
        isrTask = IsrTask()
        self.log.info("Masking %d pixels from the edges of all %ss as SUSPECT.",
                      self.config.numEdgeSuspect, self.config.edgeMaskLevel)

    # Depending on the value of config.matchExposuresType
    # 'expTime' can stand for exposure time, flux, or ID.
    for expTime in inputExp:
        exposures = inputExp[expTime]
        if not np.isfinite(expTime):
            self.log.warning("Illegal/missing %s found (%s). Dropping exposure %d",
                             self.config.matchExposuresType, expTime, exposures[0][1])
            continue
        elif len(exposures) == 1:
            self.log.warning("Only one exposure found at %s %f. Dropping exposure %d.",
                             self.config.matchExposuresType, expTime, exposures[0][1])
            continue
        else:
            # Only use the first two exposures at expTime. Each
            # element is a tuple (exposure, expId)
            expRef1, expId1 = exposures[0]
            expRef2, expId2 = exposures[1]
            # use get() to obtain `lsst.afw.image.Exposure`
            exp1, exp2 = expRef1.get(), expRef2.get()

            if len(exposures) > 2:
                self.log.warning("Already found 2 exposures at %s %f. Ignoring exposures: %s",
                                 self.config.matchExposuresType, expTime,
                                 ", ".join(str(i[1]) for i in exposures[2:]))

        # Mask pixels at the edge of the detector or of each amp
        if self.config.numEdgeSuspect > 0:
            isrTask.maskEdges(exp1, numEdgePixels=self.config.numEdgeSuspect,
                              maskPlane="SUSPECT", level=self.config.edgeMaskLevel)
            isrTask.maskEdges(exp2, numEdgePixels=self.config.numEdgeSuspect,
                              maskPlane="SUSPECT", level=self.config.edgeMaskLevel)

        # Extract any metadata keys from the headers.
        auxDict = {}
        metadata = exp1.getMetadata()
        for key in self.config.auxiliaryHeaderKeys:
            if key not in metadata:
                self.log.warning(
                    "Requested auxiliary keyword %s not found in exposure metadata for %d",
                    key,
                    expId1,
                )
                value = np.nan
            else:
                value = metadata[key]

            auxDict[key] = value

        nAmpsNan = 0
        partialPtcDataset = PhotonTransferCurveDataset(
            ampNames, 'PARTIAL',
            covMatrixSide=self.config.maximumRangeCovariancesAstier)
        for ampNumber, amp in enumerate(detector):
            ampName = amp.getName()
            if self.config.detectorMeasurementRegion == 'AMP':
                region = amp.getBBox()
            elif self.config.detectorMeasurementRegion == 'FULL':
                region = None

            # Get masked image regions, masking planes, statistic control
            # objects, and clipped means. Calculate once to reuse in
            # `measureMeanVarCov` and `getGainFromFlatPair`.
            im1Area, im2Area, imStatsCtrl, mu1, mu2 = self.getImageAreasMasksStats(exp1, exp2,
                                                                                  region=region)

            # We demand that both mu1 and mu2 be finite and that their
            # average be greater than 0. (Bug fix: the previous code
            # computed mu1 + mu2/2 because of a misplaced parenthesis.)
            if not np.isfinite(mu1) or not np.isfinite(mu2) \
                    or ((np.nan_to_num(mu1) + np.nan_to_num(mu2))/2. <= 0.0):
                self.log.warning(
                    "Illegal mean value(s) detected for amp %s on exposure pair %d/%d",
                    ampName,
                    expId1,
                    expId2,
                )
                partialPtcDataset.setAmpValuesPartialDataset(
                    ampName,
                    inputExpIdPair=(expId1, expId2),
                    rawExpTime=expTime,
                    expIdMask=False,
                )
                continue

            # `measureMeanVarCov` is the function that measures
            # the variance and covariances from a region of
            # the difference image of two flats at the same
            # exposure time. The variable `covAstier` that is
            # returned is of the form:
            # [(i, j, var (cov[0,0]), cov, npix) for (i,j) in
            # {maxLag, maxLag}^2].
            muDiff, varDiff, covAstier = self.measureMeanVarCov(im1Area, im2Area, imStatsCtrl, mu1, mu2)
            # Estimate the gain from the flat pair
            if self.config.doGain:
                gain = self.getGainFromFlatPair(im1Area, im2Area, imStatsCtrl, mu1, mu2,
                                                correctionType=self.config.gainCorrectionType,
                                                readNoise=readNoiseDict[ampName])
            else:
                gain = np.nan

            # Correction factor for bias introduced by sigma
            # clipping.
            # Function returns 1/sqrt(varFactor), so it needs
            # to be squared. varDiff is calculated via
            # afwMath.VARIANCECLIP.
            varFactor = sigmaClipCorrection(self.config.nSigmaClipPtc)**2
            varDiff *= varFactor

            expIdMask = True
            # Mask data point at this mean signal level if
            # the signal, variance, or covariance calculations
            # from `measureMeanVarCov` resulted in NaNs.
            if np.isnan(muDiff) or np.isnan(varDiff) or (covAstier is None):
                self.log.warning("NaN mean or var, or None cov in amp %s in exposure pair %d, %d of "
                                 "detector %d.", ampName, expId1, expId2, detNum)
                nAmpsNan += 1
                expIdMask = False
                covArray = np.full((1, self.config.maximumRangeCovariancesAstier,
                                    self.config.maximumRangeCovariancesAstier), np.nan)
                covSqrtWeights = np.full_like(covArray, np.nan)

            # Mask data point if it is outside of the
            # specified mean signal range.
            if (muDiff <= minMeanSignalDict[ampName]) or (muDiff >= maxMeanSignalDict[ampName]):
                expIdMask = False

            if covAstier is not None:
                # Turn the tuples with the measured information
                # into covariance arrays.
                # covrow: (i, j, var (cov[0,0]), cov, npix)
                tupleRows = [(muDiff, varDiff) + covRow + (ampNumber, expTime,
                                                           ampName) for covRow in covAstier]
                tempStructArray = np.array(tupleRows, dtype=tags)

                covArray, vcov, _ = self.makeCovArray(tempStructArray,
                                                      self.config.maximumRangeCovariancesAstier)

                # The returned covArray should only have 1 entry;
                # raise if this is not the case.
                if covArray.shape[0] != 1:
                    raise RuntimeError("Serious programming error in covArray shape.")

                covSqrtWeights = np.nan_to_num(1./np.sqrt(vcov))

                # Correct covArray for sigma clipping:
                # 1) Apply varFactor twice for the whole covariance matrix
                covArray *= varFactor**2
                # 2) But, only once for the variance element of the
                # matrix, covArray[0, 0, 0] (so divide one factor out).
                # (the first 0 is because this is a 3D array for insertion into
                # the combined dataset).
                covArray[0, 0, 0] /= varFactor

            if expIdMask:
                # Run the Gaussian histogram only if this is a legal
                # amplifier.
                histVar, histChi2Dof, kspValue = self.computeGaussianHistogramParameters(
                    im1Area,
                    im2Area,
                    imStatsCtrl,
                    mu1,
                    mu2,
                )
            else:
                histVar = np.nan
                histChi2Dof = np.nan
                kspValue = 0.0

            if self.config.doExtractPhotodiodeData:
                nExps = 0
                photoCharge = 0.0
                for expId in [expId1, expId2]:
                    if expId in monitorDiodeCharge:
                        photoCharge += monitorDiodeCharge[expId]
                        nExps += 1
                if nExps > 0:
                    photoCharge /= nExps
                else:
                    photoCharge = np.nan
            else:
                photoCharge = np.nan

            partialPtcDataset.setAmpValuesPartialDataset(
                ampName,
                inputExpIdPair=(expId1, expId2),
                rawExpTime=expTime,
                rawMean=muDiff,
                rawVar=varDiff,
                photoCharge=photoCharge,
                expIdMask=expIdMask,
                covariance=covArray[0, :, :],
                covSqrtWeights=covSqrtWeights[0, :, :],
                gain=gain,
                noise=readNoiseDict[ampName],
                histVar=histVar,
                histChi2Dof=histChi2Dof,
                kspValue=kspValue,
            )

        partialPtcDataset.setAuxValuesPartialDataset(auxDict)

        # Use location of exp1 to save PTC dataset from (exp1, exp2) pair.
        # Below, np.where(expId1 == np.array(inputDims)) returns a tuple
        # with a single-element array, so [0][0]
        # is necessary to extract the required index.
        datasetIndex = np.where(expId1 == np.array(inputDims))[0][0]
        # `partialPtcDatasetList` is a list of
        # `PhotonTransferCurveDataset` objects. Some of them
        # will be dummy datasets (to match length of input
        # and output references), and the rest will have
        # datasets with the mean signal, variance, and
        # covariance measurements at a given exposure
        # time. The next part of the PTC-measurement
        # pipeline, `solve`, will take this list as input,
        # and assemble the measurements in the datasets
        # in an adequate manner for fitting a PTC
        # model.
        partialPtcDataset.updateMetadataFromExposures([exp1, exp2])
        partialPtcDataset.updateMetadata(setDate=True, detector=detector)
        partialPtcDatasetList[datasetIndex] = partialPtcDataset

        if nAmpsNan == len(ampNames):
            msg = f"NaN mean in all amps of exposure pair {expId1}, {expId2} of detector {detNum}."
            self.log.warning(msg)

    return pipeBase.Struct(
        outputCovariances=partialPtcDatasetList,
    )

698 

def makeCovArray(self, inputTuple, maxRangeFromTuple):
    """Make covariances array from tuple.

    Parameters
    ----------
    inputTuple : `numpy.ndarray`
        Structured array with rows with at least
        (mu, afwVar, cov, var, i, j, npix), where:
        mu : `float`
            0.5*(m1 + m2), where mu1 is the mean value of flat1
            and mu2 is the mean value of flat2.
        afwVar : `float`
            Variance of difference flat, calculated with afw.
        cov : `float`
            Covariance value at lag(i, j)
        var : `float`
            Variance(covariance value at lag(0, 0))
        i : `int`
            Lag in dimension "x".
        j : `int`
            Lag in dimension "y".
        npix : `int`
            Number of pixels used for covariance calculation.
    maxRangeFromTuple : `int`
        Maximum range to select from tuple.

    Returns
    -------
    cov : `numpy.array`
        Covariance arrays, indexed by mean signal mu. Lags absent from
        ``inputTuple`` are NaN (previously they were uninitialized
        memory from `np.ndarray`; that is the bug fixed here).
    vCov : `numpy.array`
        Variance of the [co]variance arrays, indexed by mean signal mu.
        Unfilled lags are 0.
    muVals : `numpy.array`
        List of mean signal values.
    """
    if maxRangeFromTuple is not None:
        cut = (inputTuple['i'] < maxRangeFromTuple) & (inputTuple['j'] < maxRangeFromTuple)
        cutTuple = inputTuple[cut]
    else:
        cutTuple = inputTuple
    # increasing mu order, so that we can group measurements with the
    # same mu
    muTemp = cutTuple['mu']
    ind = np.argsort(muTemp)

    cutTuple = cutTuple[ind]
    # should group measurements on the same image pairs(same average)
    mu = cutTuple['mu']
    xx = np.hstack(([mu[0]], mu))
    delta = xx[1:] - xx[:-1]
    steps, = np.where(delta > 0)
    ind = np.zeros_like(mu, dtype=int)
    ind[steps] = 1
    ind = np.cumsum(ind)  # this acts as an image pair index.
    # now fill the 3-d cov array(and variance)
    muVals = np.array(np.unique(mu))
    i = cutTuple['i'].astype(int)
    j = cutTuple['j'].astype(int)
    c = 0.5*cutTuple['cov']
    n = cutTuple['npix']
    v = 0.5*cutTuple['var']
    # book and fill. Use np.full with NaN rather than the uninitialized
    # np.ndarray so that any (i, j) lag missing from the input yields a
    # recognizable NaN instead of arbitrary memory contents.
    cov = np.full((len(muVals), np.max(i)+1, np.max(j)+1), np.nan)
    var = np.zeros_like(cov)
    cov[ind, i, j] = c
    var[ind, i, j] = v**2/n
    var[:, 0, 0] *= 2  # var(v) = 2*v**2/N

    return cov, var, muVals

768 

769 def measureMeanVarCov(self, im1Area, im2Area, imStatsCtrl, mu1, mu2): 

770 """Calculate the mean of each of two exposures and the variance 

771 and covariance of their difference. The variance is calculated 

772 via afwMath, and the covariance via the methods in Astier+19 

773 (appendix A). In theory, var = covariance[0,0]. This should 

774 be validated, and in the future, we may decide to just keep 

775 one (covariance). 

776 

777 Parameters 

778 ---------- 

779 im1Area : `lsst.afw.image.maskedImage.MaskedImageF` 

780 Masked image from exposure 1. 

781 im2Area : `lsst.afw.image.maskedImage.MaskedImageF` 

782 Masked image from exposure 2. 

783 imStatsCtrl : `lsst.afw.math.StatisticsControl` 

784 Statistics control object. 

785 mu1: `float` 

786 Clipped mean of im1Area (ADU). 

787 mu2: `float` 

788 Clipped mean of im2Area (ADU). 

789 

790 Returns 

791 ------- 

792 mu : `float` or `NaN` 

793 0.5*(mu1 + mu2), where mu1, and mu2 are the clipped means 

794 of the regions in both exposures. If either mu1 or m2 are 

795 NaN's, the returned value is NaN. 

796 varDiff : `float` or `NaN` 

797 Half of the clipped variance of the difference of the 

798 regions in the two input exposures. If either mu1 or mu2 are 

799 NaN's, the returned value is NaN. 

800 covDiffAstier : `list` or `NaN` 

801 List with tuples of the form (dx, dy, var, cov, npix), where: 

802 dx : `int` 

803 Lag in x 

804 dy : `int` 

805 Lag in y 

806 var : `float` 

807 Variance at (dx, dy). 

808 cov : `float` 

809 Covariance at (dx, dy). 

810 nPix : `int` 

811 Number of pixel pairs used to evaluate var and cov. 

812 

813 If either mu1 or mu2 are NaN's, the returned value is NaN. 

814 """ 

815 if np.isnan(mu1) or np.isnan(mu2): 

816 self.log.warning("Mean of amp in image 1 or 2 is NaN: %f, %f.", mu1, mu2) 

817 return np.nan, np.nan, None 

818 mu = 0.5*(mu1 + mu2) 

819 

820 # Take difference of pairs 

821 # symmetric formula: diff = (mu2*im1-mu1*im2)/(0.5*(mu1+mu2)) 

822 temp = im2Area.clone() 

823 temp *= mu1 

824 diffIm = im1Area.clone() 

825 diffIm *= mu2 

826 diffIm -= temp 

827 diffIm /= mu 

828 

829 if self.config.binSize > 1: 

830 diffIm = afwMath.binImage(diffIm, self.config.binSize) 

831 

832 # Variance calculation via afwMath 

833 varDiff = 0.5*(afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, imStatsCtrl).getValue()) 

834 

835 # Covariances calculations 

836 # Get the pixels that were not clipped 

837 varClip = afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, imStatsCtrl).getValue() 

838 meanClip = afwMath.makeStatistics(diffIm, afwMath.MEANCLIP, imStatsCtrl).getValue() 

839 cut = meanClip + self.config.nSigmaClipPtc*np.sqrt(varClip) 

840 unmasked = np.where(np.fabs(diffIm.image.array) <= cut, 1, 0) 

841 

842 # Get the pixels in the mask planes of the difference image 

843 # that were ignored by the clipping algorithm 

844 wDiff = np.where(diffIm.getMask().getArray() == 0, 1, 0) 

845 # Combine the two sets of pixels ('1': use; '0': don't use) 

846 # into a final weight matrix to be used in the covariance 

847 # calculations below. 

848 w = unmasked*wDiff 

849 

850 if np.sum(w) < self.config.minNumberGoodPixelsForCovariance/(self.config.binSize**2): 

851 self.log.warning("Number of good points for covariance calculation (%s) is less " 

852 "(than threshold %s)", np.sum(w), 

853 self.config.minNumberGoodPixelsForCovariance/(self.config.binSize**2)) 

854 return np.nan, np.nan, None 

855 

856 maxRangeCov = self.config.maximumRangeCovariancesAstier 

857 

858 # Calculate covariances via FFT. 

859 shapeDiff = np.array(diffIm.image.array.shape) 

860 # Calculate the sizes of FFT dimensions. 

861 s = shapeDiff + maxRangeCov 

862 tempSize = np.array(np.log(s)/np.log(2.)).astype(int) 

863 fftSize = np.array(2**(tempSize+1)).astype(int) 

864 fftShape = (fftSize[0], fftSize[1]) 

865 c = CovFastFourierTransform(diffIm.image.array, w, fftShape, maxRangeCov) 

866 # np.sum(w) is the same as npix[0][0] returned in covDiffAstier 

867 try: 

868 covDiffAstier = c.reportCovFastFourierTransform(maxRangeCov) 

869 except ValueError: 

870 # This is raised if there are not enough pixels. 

871 self.log.warning("Not enough pixels covering the requested covariance range in x/y (%d)", 

872 self.config.maximumRangeCovariancesAstier) 

873 return np.nan, np.nan, None 

874 

875 # Compare Cov[0,0] and afwMath.VARIANCECLIP covDiffAstier[0] 

876 # is the Cov[0,0] element, [3] is the variance, and there's a 

877 # factor of 0.5 difference with afwMath.VARIANCECLIP. 

878 thresholdPercentage = self.config.thresholdDiffAfwVarVsCov00 

879 fractionalDiff = 100*np.fabs(1 - varDiff/(covDiffAstier[0][3]*0.5)) 

880 if fractionalDiff >= thresholdPercentage: 

881 self.log.warning("Absolute fractional difference between afwMatch.VARIANCECLIP and Cov[0,0] " 

882 "is more than %f%%: %f", thresholdPercentage, fractionalDiff) 

883 

884 return mu, varDiff, covDiffAstier 

885 

def getImageAreasMasksStats(self, exposure1, exposure2, region=None):
    """Get image areas in a region as well as masks and statistic objects.

    Parameters
    ----------
    exposure1 : `lsst.afw.image.ExposureF`
        First exposure of flat field pair.
    exposure2 : `lsst.afw.image.ExposureF`
        Second exposure of flat field pair.
    region : `lsst.geom.Box2I`, optional
        Region of each exposure where to perform the calculations
        (e.g, an amplifier).

    Returns
    -------
    im1Area : `lsst.afw.image.MaskedImageF`
        Masked image from exposure 1.
    im2Area : `lsst.afw.image.MaskedImageF`
        Masked image from exposure 2.
    imStatsCtrl : `lsst.afw.math.StatisticsControl`
        Statistics control object.
    mu1 : `float`
        Clipped mean of im1Area (ADU).
    mu2 : `float`
        Clipped mean of im2Area (ADU).
    """
    # Use the full masked images unless a sub-region (e.g. a single
    # amplifier) was requested.
    if region is None:
        im1Area = exposure1.maskedImage
        im2Area = exposure2.maskedImage
    else:
        im1Area = exposure1.maskedImage[region]
        im2Area = exposure2.maskedImage[region]

    # Build the statistics control object from the mask planes of
    # the first exposure; both exposures share the configuration.
    maskVal = exposure1.getMask().getPlaneBitMask(self.config.maskNameList)
    imStatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
                                            self.config.nIterSigmaClipPtc,
                                            maskVal)
    imStatsCtrl.setNanSafe(True)
    imStatsCtrl.setAndMask(maskVal)

    # Sigma-clipped mean of each area, in ADU.
    mu1, mu2 = (afwMath.makeStatistics(area, afwMath.MEANCLIP, imStatsCtrl).getValue()
                for area in (im1Area, im2Area))

    return (im1Area, im2Area, imStatsCtrl, mu1, mu2)

def getGainFromFlatPair(self, im1Area, im2Area, imStatsCtrl, mu1, mu2,
                        correctionType='NONE', readNoise=None):
    """Estimate the gain from a single pair of flats.

    The basic premise is 1/g = <(I1 - I2)^2/(I1 + I2)> = 1/const,
    where I1 and I2 correspond to flats 1 and 2, respectively.
    Corrections for the variable QE and the read-noise are then
    made following the derivation in Robert Lupton's forthcoming
    book, which gets

    1/g = <(I1 - I2)^2/(I1 + I2)> - (1/mu)*(sigma^2 - 1/(2*g^2)).

    This is a quadratic equation, whose solutions are given by:

    g = (mu +/- sqrt(2*sigma^2 - 2*const*mu + mu^2))/(2*const*mu
    - 2*sigma^2)

    where 'mu' is the average signal level and 'sigma' is the
    amplifier's readnoise. The positive solution will be used.
    The way the correction is applied depends on the value
    supplied for correctionType.

    correctionType is one of ['NONE', 'SIMPLE' or 'FULL']
    'NONE' : uses the 1/g = <(I1 - I2)^2/(I1 + I2)> formula.
    'SIMPLE' : uses the gain from the 'NONE' method for the
        1/(2*g^2) term.
    'FULL' : solves the full equation for g, discarding the
        non-physical solution to the resulting quadratic.

    Parameters
    ----------
    im1Area : `lsst.afw.image.maskedImage.MaskedImageF`
        Masked image from exposure 1.
    im2Area : `lsst.afw.image.maskedImage.MaskedImageF`
        Masked image from exposure 2.
    imStatsCtrl : `lsst.afw.math.StatisticsControl`
        Statistics control object.
    mu1 : `float`
        Clipped mean of im1Area (ADU).
    mu2 : `float`
        Clipped mean of im2Area (ADU).
    correctionType : `str`, optional
        The correction applied, one of ['NONE', 'SIMPLE', 'FULL']
    readNoise : `float`, optional
        Amplifier readout noise (ADU).

    Returns
    -------
    gain : `float`
        Gain, in e/ADU.

    Raises
    ------
    RuntimeError
        Raise if `correctionType` is not one of 'NONE',
        'SIMPLE', or 'FULL'.
    """
    if correctionType not in ['NONE', 'SIMPLE', 'FULL']:
        raise RuntimeError("Unknown correction type: %s" % correctionType)

    # Corrections need a usable read noise; fall back to the
    # uncorrected estimate otherwise. The explicit None check avoids
    # a TypeError from np.isfinite(None) when the default is used.
    if correctionType != 'NONE' and (readNoise is None or not np.isfinite(readNoise)):
        self.log.warning("'correctionType' in 'getGainFromFlatPair' is %s, "
                         "but 'readNoise' is NaN. Setting 'correctionType' "
                         "to 'NONE', so a gain value will be estimated without "
                         "corrections.", correctionType)
        correctionType = 'NONE'

    mu = 0.5*(mu1 + mu2)

    # ratioIm = (I1 - I2)^2 / (I1 + I2)
    temp = im2Area.clone()
    ratioIm = im1Area.clone()
    ratioIm -= temp
    ratioIm *= ratioIm

    # Sum of pairs
    sumIm = im1Area.clone()
    sumIm += temp

    ratioIm /= sumIm

    const = afwMath.makeStatistics(ratioIm, afwMath.MEAN, imStatsCtrl).getValue()
    gain = 1. / const

    if correctionType == 'SIMPLE':
        # Plug the uncorrected gain into the 1/(2*g^2) term of
        # 1/g = const - (1/mu)*(sigma^2 - 1/(2*g^2)). The previous
        # expression, (1/2*gain**2), evaluated to gain^2/2 rather than
        # the 1/(2*g^2) required by the formula above (and implied by
        # the 'FULL' quadratic solution).
        gain = 1/(const - (1/mu)*(readNoise**2 - 1/(2*gain**2)))
    elif correctionType == 'FULL':
        # Positive root of the quadratic (2*const*mu - 2*sigma^2)*g^2
        # - 2*mu*g + 1 = 0; the negative root is non-physical.
        root = np.sqrt(mu**2 - 2*mu*const + 2*readNoise**2)
        denom = (2*const*mu - 2*readNoise**2)
        positiveSolution = (root + mu)/denom
        gain = positiveSolution

    return gain

def getReadNoise(self, exposureMetadata, taskMetadata, ampName):
    """Gets readout noise for an amp from ISR metadata.

    If possible, this attempts to get the now-standard headers
    added to the exposure itself. If not found there, the ISR
    TaskMetadata is searched. If neither of these has the value,
    warn and set the read noise to NaN.

    Parameters
    ----------
    exposureMetadata : `lsst.daf.base.PropertySet`
        Metadata to check for read noise first.
    taskMetadata : `lsst.pipe.base.TaskMetadata`
        List of exposures metadata from ISR for this exposure.
    ampName : `str`
        Amplifier name.

    Returns
    -------
    readNoise : `float`
        The read noise for this set of exposure/amplifier.
    """
    # Try from the exposure first.
    expectedKey = f"LSST ISR OVERSCAN RESIDUAL SERIAL STDEV {ampName}"
    if expectedKey in exposureMetadata:
        return exposureMetadata[expectedKey]

    # If not, try getting it from the task metadata.
    expectedKey = f"RESIDUAL STDEV {ampName}"
    if "isr" in taskMetadata:
        if expectedKey in taskMetadata["isr"]:
            return taskMetadata["isr"][expectedKey]

    # Use lazy %-args (not eager string interpolation) per logging
    # best practice; the rendered message is unchanged.
    self.log.warning("Median readout noise from ISR metadata for amp %s "
                     "could not be calculated.", ampName)
    return np.nan

def computeGaussianHistogramParameters(self, im1Area, im2Area, imStatsCtrl, mu1, mu2):
    """Compute KS test for a Gaussian model fit to a histogram of the
    difference image.

    Parameters
    ----------
    im1Area : `lsst.afw.image.MaskedImageF`
        Masked image from exposure 1.
    im2Area : `lsst.afw.image.MaskedImageF`
        Masked image from exposure 2.
    imStatsCtrl : `lsst.afw.math.StatisticsControl`
        Statistics control object.
    mu1 : `float`
        Clipped mean of im1Area (ADU).
    mu2 : `float`
        Clipped mean of im2Area (ADU).

    Returns
    -------
    varFit : `float`
        Variance from the Gaussian fit.
    chi2Dof : `float`
        Chi-squared per degree of freedom of Gaussian fit.
    kspValue : `float`
        The KS test p-value for the Gaussian fit.

    Notes
    -----
    The algorithm here was originally developed by Aaron Roodman.
    Tests on the full focal plane of LSSTCam during testing has shown
    that a KS test p-value cut of 0.01 is a good discriminant for
    well-behaved flat pairs (p>0.01) and poorly behaved non-Gaussian
    flat pairs (p<0.01).
    """
    diffExp = im1Area.clone()
    diffExp -= im2Area

    # Select unmasked, finite difference pixels. The finiteness check
    # must be on the image array: the mask plane is an integer array,
    # so np.isfinite(diffExp.mask.array) was vacuously True and let
    # NaN/inf image pixels through to the histogram.
    sel = (((diffExp.mask.array & imStatsCtrl.getAndMask()) == 0)
           & np.isfinite(diffExp.image.array))
    diffArr = diffExp.image.array[sel]

    numOk = len(diffArr)

    if numOk >= self.config.ksHistMinDataValues and np.isfinite(mu1) and np.isfinite(mu2):
        # Create a histogram symmetric around zero, with a bin size
        # determined from the expected variance given by the average of
        # the input signal levels.
        lim = self.config.ksHistLimitMultiplier * np.sqrt((mu1 + mu2)/2.)
        yVals, binEdges = np.histogram(diffArr, bins=self.config.ksHistNBins, range=[-lim, lim])

        # Fit the histogram with a Gaussian model.
        model = GaussianModel()
        yVals = yVals.astype(np.float64)
        xVals = ((binEdges[0: -1] + binEdges[1:])/2.).astype(np.float64)
        errVals = np.sqrt(yVals)
        errVals[(errVals == 0.0)] = 1.0
        pars = model.guess(yVals, x=xVals)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # The least-squares fitter sometimes spouts (spurious) warnings
            # when the model is very bad. Swallow these warnings now and
            # let the KS test check the model below.
            out = model.fit(
                yVals,
                pars,
                x=xVals,
                weights=1./errVals,
                calc_covar=True,
                method="least_squares",
            )

        # Calculate chi2; the Gaussian model has 3 free parameters
        # (amplitude, center, sigma).
        chiArr = out.residual
        nDof = len(yVals) - 3
        chi2Dof = np.sum(chiArr**2.)/nDof
        sigmaFit = out.params["sigma"].value

        # Calculate KS test p-value for the fit against a normal
        # distribution with the fitted center and sigma.
        ksResult = scipy.stats.ks_1samp(
            diffArr,
            scipy.stats.norm.cdf,
            (out.params["center"].value, sigmaFit),
        )

        kspValue = ksResult.pvalue
        # Clamp vanishingly small p-values to exactly zero.
        if kspValue < 1e-15:
            kspValue = 0.0

        varFit = sigmaFit**2.

    else:
        # Too few good pixels (or non-finite means): no fit possible.
        varFit = np.nan
        chi2Dof = np.nan
        kspValue = 0.0

    return varFit, chi2Dof, kspValue