Coverage for python/lsst/cp/pipe/ptc/cpExtractPtcTask.py: 11%

349 statements  

« prev     ^ index     » next       coverage.py v7.3.3, created at 2023-12-15 13:14 +0000

1# This file is part of cp_pipe. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (https://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# This program is free software: you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation, either version 3 of the License, or 

12# (at your option) any later version. 

13# 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18# 

19# You should have received a copy of the GNU General Public License 

20# along with this program. If not, see <https://www.gnu.org/licenses/>. 

21# 

22import numpy as np 

23from lmfit.models import GaussianModel 

24import scipy.stats 

25import warnings 

26 

27import lsst.afw.math as afwMath 

28import lsst.pex.config as pexConfig 

29import lsst.pipe.base as pipeBase 

30from lsst.cp.pipe.utils import (arrangeFlatsByExpTime, arrangeFlatsByExpId, 

31 arrangeFlatsByExpFlux, sigmaClipCorrection, 

32 CovFastFourierTransform) 

33 

34import lsst.pipe.base.connectionTypes as cT 

35 

36from lsst.ip.isr import PhotonTransferCurveDataset 

37from lsst.ip.isr import IsrTask 

38 

39__all__ = ['PhotonTransferCurveExtractConfig', 'PhotonTransferCurveExtractTask'] 

40 

41 

class PhotonTransferCurveExtractConnections(pipeBase.PipelineTaskConnections,
                                            dimensions=("instrument", "detector")):
    """Connections for PhotonTransferCurveExtractTask.

    Declares the per-exposure flat inputs, optional photodiode readings,
    ISR task metadata, and the per-exposure partial PTC dataset outputs.
    """
    inputExp = cT.Input(
        name="ptcInputExposurePairs",
        # NOTE: the two fragments are concatenated by the parser; the
        # trailing space after "to" is required to avoid "tomeasure".
        doc="Input post-ISR processed exposure pairs (flats) to "
            "measure covariances from.",
        storageClass="Exposure",
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
        deferLoad=True,
    )
    inputPhotodiodeData = cT.Input(
        name="photodiode",
        doc="Photodiode readings data.",
        storageClass="IsrCalib",
        dimensions=("instrument", "exposure"),
        multiple=True,
        deferLoad=True,
    )
    taskMetadata = cT.Input(
        name="isr_metadata",
        doc="Input task metadata to extract statistics from.",
        storageClass="TaskMetadata",
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
    )
    outputCovariances = cT.Output(
        name="ptcCovariances",
        doc="Extracted flat (co)variances.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "exposure", "detector"),
        isCalibration=True,
        multiple=True,
    )

    def __init__(self, *, config=None):
        # Remove the photodiode input connection entirely when photodiode
        # extraction is disabled, so the quantum graph does not require
        # (or try to load) that dataset type.
        if not config.doExtractPhotodiodeData:
            del self.inputPhotodiodeData

81 

82 

class PhotonTransferCurveExtractConfig(pipeBase.PipelineTaskConfig,
                                       pipelineConnections=PhotonTransferCurveExtractConnections):
    """Configuration for the measurement of covariances from flats.
    """
    matchExposuresType = pexConfig.ChoiceField(
        dtype=str,
        doc="Match input exposures by time, flux, or expId",
        default='TIME',
        allowed={
            "TIME": "Match exposures by exposure time.",
            "FLUX": "Match exposures by target flux. Use header keyword"
                    " in matchExposuresByFluxKeyword to find the flux.",
            "EXPID": "Match exposures by exposure ID."
        }
    )
    matchExposuresByFluxKeyword = pexConfig.Field(
        dtype=str,
        doc="Header keyword for flux if matchExposuresType is FLUX.",
        default='CCOBFLUX',
    )
    maximumRangeCovariancesAstier = pexConfig.Field(
        dtype=int,
        doc="Maximum range of covariances as in Astier+19",
        default=8,
    )
    binSize = pexConfig.Field(
        dtype=int,
        doc="Bin the image by this factor in both dimensions.",
        default=1,
    )
    minMeanSignal = pexConfig.DictField(
        keytype=str,
        itemtype=float,
        doc="Minimum values (inclusive) of mean signal (in ADU) per amp to use."
            " The same cut is applied to all amps if this parameter [`dict`] is passed as "
            " {'ALL_AMPS': value}",
        default={'ALL_AMPS': 0.0},
        deprecated="This config has been moved to cpSolvePtcTask, and will be removed after v26.",
    )
    maxMeanSignal = pexConfig.DictField(
        keytype=str,
        itemtype=float,
        doc="Maximum values (inclusive) of mean signal (in ADU) below which to consider, per amp."
            " The same cut is applied to all amps if this dictionary is of the form"
            " {'ALL_AMPS': value}",
        default={'ALL_AMPS': 1e6},
        deprecated="This config has been moved to cpSolvePtcTask, and will be removed after v26.",
    )
    maskNameList = pexConfig.ListField(
        dtype=str,
        doc="Mask list to exclude from statistics calculations.",
        default=['SUSPECT', 'BAD', 'NO_DATA', 'SAT'],
    )
    nSigmaClipPtc = pexConfig.Field(
        dtype=float,
        doc="Sigma cut for afwMath.StatisticsControl()",
        default=5.5,
    )
    nIterSigmaClipPtc = pexConfig.Field(
        dtype=int,
        doc="Number of sigma-clipping iterations for afwMath.StatisticsControl()",
        default=3,
    )
    minNumberGoodPixelsForCovariance = pexConfig.Field(
        dtype=int,
        doc="Minimum number of acceptable good pixels per amp to calculate the covariances (via FFT or"
            " directly).",
        default=10000,
    )
    thresholdDiffAfwVarVsCov00 = pexConfig.Field(
        dtype=float,
        doc="If the absolute fractional difference between afwMath.VARIANCECLIP and Cov00 "
            "for a region of a difference image is greater than this threshold (percentage), "
            "a warning will be issued.",
        default=1.,
    )
    detectorMeasurementRegion = pexConfig.ChoiceField(
        dtype=str,
        doc="Region of each exposure where to perform the calculations (amplifier or full image).",
        default='AMP',
        allowed={
            "AMP": "Amplifier of the detector.",
            "FULL": "Full image."
        }
    )
    numEdgeSuspect = pexConfig.Field(
        dtype=int,
        doc="Number of edge pixels to be flagged as untrustworthy.",
        default=0,
    )
    edgeMaskLevel = pexConfig.ChoiceField(
        dtype=str,
        doc="Mask edge pixels in which coordinate frame: DETECTOR or AMP?",
        default="DETECTOR",
        allowed={
            'DETECTOR': 'Mask only the edges of the full detector.',
            'AMP': 'Mask edges of each amplifier.',
        },
    )
    doGain = pexConfig.Field(
        dtype=bool,
        doc="Calculate a gain per input flat pair.",
        default=True,
    )
    gainCorrectionType = pexConfig.ChoiceField(
        dtype=str,
        doc="Correction type for the gain.",
        default='FULL',
        allowed={
            'NONE': 'No correction.',
            'SIMPLE': 'First order correction.',
            'FULL': 'Second order correction.'
        }
    )
    ksHistNBins = pexConfig.Field(
        dtype=int,
        doc="Number of bins for the KS test histogram.",
        default=100,
    )
    ksHistLimitMultiplier = pexConfig.Field(
        dtype=float,
        doc="Number of sigma (as predicted from the mean value) to compute KS test histogram.",
        default=8.0,
    )
    ksHistMinDataValues = pexConfig.Field(
        dtype=int,
        doc="Minimum number of good data values to compute KS test histogram.",
        default=100,
    )
    auxiliaryHeaderKeys = pexConfig.ListField(
        dtype=str,
        doc="Auxiliary header keys to store with the PTC dataset.",
        default=[],
    )
    doExtractPhotodiodeData = pexConfig.Field(
        dtype=bool,
        doc="Extract photodiode data?",
        default=False,
    )
    photodiodeIntegrationMethod = pexConfig.ChoiceField(
        dtype=str,
        doc="Integration method for photodiode monitoring data.",
        default="CHARGE_SUM",
        allowed={
            "DIRECT_SUM": ("Use numpy's trapz integrator on all photodiode "
                           "readout entries"),
            "TRIMMED_SUM": ("Use numpy's trapz integrator, clipping the "
                            "leading and trailing entries, which are "
                            "nominally at zero baseline level."),
            "CHARGE_SUM": ("Treat the current values as integrated charge "
                           "over the sampling interval and simply sum "
                           "the values, after subtracting a baseline level."),
        },
    )
    photodiodeCurrentScale = pexConfig.Field(
        dtype=float,
        doc="Scale factor to apply to photodiode current values for the "
            "``CHARGE_SUM`` integration method.",
        default=-1.0,
    )

243 

244 

245class PhotonTransferCurveExtractTask(pipeBase.PipelineTask): 

246 """Task to measure covariances from flat fields. 

247 

248 This task receives as input a list of flat-field images 

249 (flats), and sorts these flats in pairs taken at the 

250 same time (the task will raise if there is only one flat 

251 at a given exposure time, and it will discard extra flats if 

252 there are more than two per exposure time). This task measures 

253 the mean, variance, and covariances from a region (e.g., 

254 an amplifier) of the difference image of the two flats with 

255 the same exposure time (alternatively, all input images could have 

256 the same exposure time but their flux changed). 

257 

258 The variance is calculated via afwMath, and the covariance 

259 via the methods in Astier+19 (appendix A). In theory, 

260 var = covariance[0,0]. This should be validated, and in the 

261 future, we may decide to just keep one (covariance). 

262 At this moment, if the two values differ by more than the value 

263 of `thresholdDiffAfwVarVsCov00` (default: 1%), a warning will 

264 be issued. 

265 

266 The measured covariances at a given exposure time (along with 

267 other quantities such as the mean) are stored in a PTC dataset 

268 object (`~lsst.ip.isr.PhotonTransferCurveDataset`), which gets 

269 partially filled at this stage (the remainder of the attributes 

270 of the dataset will be filled after running the second task of 

271 the PTC-measurement pipeline, `~PhotonTransferCurveSolveTask`). 

272 

273 The number of partially-filled 

274 `~lsst.ip.isr.PhotonTransferCurveDataset` objects will be less 

275 than the number of input exposures because the task combines 

276 input flats in pairs. However, it is required at this moment 

277 that the number of input dimensions matches 

278 bijectively the number of output dimensions. Therefore, a number 

279 of "dummy" PTC datasets are inserted in the output list. This 

280 output list will then be used as input of the next task in the 

281 PTC-measurement pipeline, `PhotonTransferCurveSolveTask`, 

282 which will assemble the multiple `PhotonTransferCurveDataset` 

283 objects into a single one in order to fit the measured covariances 

284 as a function of flux to one of three models 

285 (see `PhotonTransferCurveSolveTask` for details). 

286 

287 Reference: Astier+19: "The Shape of the Photon Transfer Curve of CCD 

288 sensors", arXiv:1905.08677. 

289 """ 

290 

291 ConfigClass = PhotonTransferCurveExtractConfig 

292 _DefaultName = 'cpPtcExtract' 

293 

294 def runQuantum(self, butlerQC, inputRefs, outputRefs): 

295 """Ensure that the input and output dimensions are passed along. 

296 

297 Parameters 

298 ---------- 

299 butlerQC : `~lsst.daf.butler.butlerQuantumContext.ButlerQuantumContext` 

300 Butler to operate on. 

301 inputRefs : `~lsst.pipe.base.connections.InputQuantizedConnection` 

302 Input data refs to load. 

303 ouptutRefs : `~lsst.pipe.base.connections.OutputQuantizedConnection` 

304 Output data refs to persist. 

305 """ 

306 inputs = butlerQC.get(inputRefs) 

307 # Ids of input list of exposure references 

308 # (deferLoad=True in the input connections) 

309 inputs['inputDims'] = [expRef.datasetRef.dataId['exposure'] for expRef in inputRefs.inputExp] 

310 

311 # Dictionary, keyed by expTime (or expFlux or expId), with tuples 

312 # containing flat exposures and their IDs. 

313 matchType = self.config.matchExposuresType 

314 if matchType == 'TIME': 

315 inputs['inputExp'] = arrangeFlatsByExpTime(inputs['inputExp'], inputs['inputDims'], log=self.log) 

316 elif matchType == 'FLUX': 

317 inputs['inputExp'] = arrangeFlatsByExpFlux( 

318 inputs['inputExp'], 

319 inputs['inputDims'], 

320 self.config.matchExposuresByFluxKeyword, 

321 log=self.log, 

322 ) 

323 else: 

324 inputs['inputExp'] = arrangeFlatsByExpId(inputs['inputExp'], inputs['inputDims']) 

325 

326 outputs = self.run(**inputs) 

327 outputs = self._guaranteeOutputs(inputs['inputDims'], outputs, outputRefs) 

328 butlerQC.put(outputs, outputRefs) 

329 

330 def _guaranteeOutputs(self, inputDims, outputs, outputRefs): 

331 """Ensure that all outputRefs have a matching output, and if they do 

332 not, fill the output with dummy PTC datasets. 

333 

334 Parameters 

335 ---------- 

336 inputDims : `dict` [`str`, `int`] 

337 Input exposure dimensions. 

338 outputs : `lsst.pipe.base.Struct` 

339 Outputs from the ``run`` method. Contains the entry: 

340 

341 ``outputCovariances`` 

342 Output PTC datasets (`list` [`lsst.ip.isr.IsrCalib`]) 

343 outputRefs : `~lsst.pipe.base.connections.OutputQuantizedConnection` 

344 Container with all of the outputs expected to be generated. 

345 

346 Returns 

347 ------- 

348 outputs : `lsst.pipe.base.Struct` 

349 Dummy dataset padded version of the input ``outputs`` with 

350 the same entries. 

351 """ 

352 newCovariances = [] 

353 for ref in outputRefs.outputCovariances: 

354 outputExpId = ref.dataId['exposure'] 

355 if outputExpId in inputDims: 

356 entry = inputDims.index(outputExpId) 

357 newCovariances.append(outputs.outputCovariances[entry]) 

358 else: 

359 newPtc = PhotonTransferCurveDataset(['no amp'], 'DUMMY', 1) 

360 newPtc.setAmpValuesPartialDataset('no amp') 

361 newCovariances.append(newPtc) 

362 return pipeBase.Struct(outputCovariances=newCovariances) 

363 

364 def run(self, inputExp, inputDims, taskMetadata, inputPhotodiodeData=None): 

365 

366 """Measure covariances from difference of flat pairs 

367 

368 Parameters 

369 ---------- 

370 inputExp : `dict` [`float`, `list` 

371 [`~lsst.pipe.base.connections.DeferredDatasetRef`]] 

372 Dictionary that groups references to flat-field exposures that 

373 have the same exposure time (seconds), or that groups them 

374 sequentially by their exposure id. 

375 inputDims : `list` 

376 List of exposure IDs. 

377 taskMetadata : `list` [`lsst.pipe.base.TaskMetadata`] 

378 List of exposures metadata from ISR. 

379 inputPhotodiodeData : `dict` [`str`, `lsst.ip.isr.PhotodiodeCalib`] 

380 Photodiode readings data (optional). 

381 

382 Returns 

383 ------- 

384 results : `lsst.pipe.base.Struct` 

385 The resulting Struct contains: 

386 

387 ``outputCovariances`` 

388 A list containing the per-pair PTC measurements (`list` 

389 [`lsst.ip.isr.PhotonTransferCurveDataset`]) 

390 """ 

391 # inputExp.values() returns a view, which we turn into a list. We then 

392 # access the first exposure-ID tuple to get the detector. 

393 # The first "get()" retrieves the exposure from the exposure reference. 

394 detector = list(inputExp.values())[0][0][0].get(component='detector') 

395 detNum = detector.getId() 

396 amps = detector.getAmplifiers() 

397 ampNames = [amp.getName() for amp in amps] 

398 

399 # Each amp may have a different min and max ADU signal 

400 # specified in the config. 

401 maxMeanSignalDict = {ampName: 1e6 for ampName in ampNames} 

402 minMeanSignalDict = {ampName: 0.0 for ampName in ampNames} 

403 for ampName in ampNames: 

404 if 'ALL_AMPS' in self.config.maxMeanSignal: 

405 maxMeanSignalDict[ampName] = self.config.maxMeanSignal['ALL_AMPS'] 

406 elif ampName in self.config.maxMeanSignal: 

407 maxMeanSignalDict[ampName] = self.config.maxMeanSignal[ampName] 

408 

409 if 'ALL_AMPS' in self.config.minMeanSignal: 

410 minMeanSignalDict[ampName] = self.config.minMeanSignal['ALL_AMPS'] 

411 elif ampName in self.config.minMeanSignal: 

412 minMeanSignalDict[ampName] = self.config.minMeanSignal[ampName] 

413 # These are the column names for `tupleRows` below. 

414 tags = [('mu', '<f8'), ('afwVar', '<f8'), ('i', '<i8'), ('j', '<i8'), ('var', '<f8'), 

415 ('cov', '<f8'), ('npix', '<i8'), ('ext', '<i8'), ('expTime', '<f8'), ('ampName', '<U3')] 

416 # Create a dummy ptcDataset. Dummy datasets will be 

417 # used to ensure that the number of output and input 

418 # dimensions match. 

419 dummyPtcDataset = PhotonTransferCurveDataset(ampNames, 'DUMMY', 

420 self.config.maximumRangeCovariancesAstier) 

421 for ampName in ampNames: 

422 dummyPtcDataset.setAmpValuesPartialDataset(ampName) 

423 

424 # Extract the photodiode data if requested. 

425 if self.config.doExtractPhotodiodeData: 

426 # Compute the photodiode integrals once, at the start. 

427 monitorDiodeCharge = {} 

428 for handle in inputPhotodiodeData: 

429 expId = handle.dataId['exposure'] 

430 pdCalib = handle.get() 

431 pdCalib.integrationMethod = self.config.photodiodeIntegrationMethod 

432 pdCalib.currentScale = self.config.photodiodeCurrentScale 

433 monitorDiodeCharge[expId] = pdCalib.integrate() 

434 

435 # Get read noise. Try from the exposure, then try 

436 # taskMetadata. This adds a get() for the exposures. 

437 readNoiseLists = {} 

438 for pairIndex, expRefs in inputExp.items(): 

439 # This yields an index (exposure_time, seq_num, or flux) 

440 # and a pair of references at that index. 

441 for expRef, expId in expRefs: 

442 # This yields an exposure ref and an exposureId. 

443 exposureMetadata = expRef.get(component="metadata") 

444 metadataIndex = inputDims.index(expId) 

445 thisTaskMetadata = taskMetadata[metadataIndex] 

446 

447 for ampName in ampNames: 

448 if ampName not in readNoiseLists: 

449 readNoiseLists[ampName] = [self.getReadNoise(exposureMetadata, 

450 thisTaskMetadata, ampName)] 

451 else: 

452 readNoiseLists[ampName].append(self.getReadNoise(exposureMetadata, 

453 thisTaskMetadata, ampName)) 

454 

455 readNoiseDict = {ampName: 0.0 for ampName in ampNames} 

456 for ampName in ampNames: 

457 # Take median read noise value 

458 readNoiseDict[ampName] = np.nanmedian(readNoiseLists[ampName]) 

459 

460 # Output list with PTC datasets. 

461 partialPtcDatasetList = [] 

462 # The number of output references needs to match that of input 

463 # references: initialize outputlist with dummy PTC datasets. 

464 for i in range(len(inputDims)): 

465 partialPtcDatasetList.append(dummyPtcDataset) 

466 

467 if self.config.numEdgeSuspect > 0: 

468 isrTask = IsrTask() 

469 self.log.info("Masking %d pixels from the edges of all %ss as SUSPECT.", 

470 self.config.numEdgeSuspect, self.config.edgeMaskLevel) 

471 

472 # Depending on the value of config.matchExposuresType 

473 # 'expTime' can stand for exposure time, flux, or ID. 

474 for expTime in inputExp: 

475 exposures = inputExp[expTime] 

476 if not np.isfinite(expTime): 

477 self.log.warning("Illegal/missing %s found (%s). Dropping exposure %d", 

478 self.config.matchExposuresType, expTime, exposures[0][1]) 

479 continue 

480 elif len(exposures) == 1: 

481 self.log.warning("Only one exposure found at %s %f. Dropping exposure %d.", 

482 self.config.matchExposuresType, expTime, exposures[0][1]) 

483 continue 

484 else: 

485 # Only use the first two exposures at expTime. Each 

486 # element is a tuple (exposure, expId) 

487 expRef1, expId1 = exposures[0] 

488 expRef2, expId2 = exposures[1] 

489 # use get() to obtain `lsst.afw.image.Exposure` 

490 exp1, exp2 = expRef1.get(), expRef2.get() 

491 

492 if len(exposures) > 2: 

493 self.log.warning("Already found 2 exposures at %s %f. Ignoring exposures: %s", 

494 self.config.matchExposuresType, expTime, 

495 ", ".join(str(i[1]) for i in exposures[2:])) 

496 # Mask pixels at the edge of the detector or of each amp 

497 if self.config.numEdgeSuspect > 0: 

498 isrTask.maskEdges(exp1, numEdgePixels=self.config.numEdgeSuspect, 

499 maskPlane="SUSPECT", level=self.config.edgeMaskLevel) 

500 isrTask.maskEdges(exp2, numEdgePixels=self.config.numEdgeSuspect, 

501 maskPlane="SUSPECT", level=self.config.edgeMaskLevel) 

502 

503 # Extract any metadata keys from the headers. 

504 auxDict = {} 

505 metadata = exp1.getMetadata() 

506 for key in self.config.auxiliaryHeaderKeys: 

507 if key not in metadata: 

508 self.log.warning( 

509 "Requested auxiliary keyword %s not found in exposure metadata for %d", 

510 key, 

511 expId1, 

512 ) 

513 value = np.nan 

514 else: 

515 value = metadata[key] 

516 

517 auxDict[key] = value 

518 

519 nAmpsNan = 0 

520 partialPtcDataset = PhotonTransferCurveDataset(ampNames, 'PARTIAL', 

521 self.config.maximumRangeCovariancesAstier) 

522 for ampNumber, amp in enumerate(detector): 

523 ampName = amp.getName() 

524 if self.config.detectorMeasurementRegion == 'AMP': 

525 region = amp.getBBox() 

526 elif self.config.detectorMeasurementRegion == 'FULL': 

527 region = None 

528 

529 # Get masked image regions, masking planes, statistic control 

530 # objects, and clipped means. Calculate once to reuse in 

531 # `measureMeanVarCov` and `getGainFromFlatPair`. 

532 im1Area, im2Area, imStatsCtrl, mu1, mu2 = self.getImageAreasMasksStats(exp1, exp2, 

533 region=region) 

534 

535 # We demand that both mu1 and mu2 be finite and greater than 0. 

536 if not np.isfinite(mu1) or not np.isfinite(mu2) \ 

537 or ((np.nan_to_num(mu1) + np.nan_to_num(mu2)/2.) <= 0.0): 

538 self.log.warning( 

539 "Illegal mean value(s) detected for amp %s on exposure pair %d/%d", 

540 ampName, 

541 expId1, 

542 expId2, 

543 ) 

544 partialPtcDataset.setAmpValuesPartialDataset( 

545 ampName, 

546 inputExpIdPair=(expId1, expId2), 

547 rawExpTime=expTime, 

548 expIdMask=False, 

549 ) 

550 continue 

551 

552 # `measureMeanVarCov` is the function that measures 

553 # the variance and covariances from a region of 

554 # the difference image of two flats at the same 

555 # exposure time. The variable `covAstier` that is 

556 # returned is of the form: 

557 # [(i, j, var (cov[0,0]), cov, npix) for (i,j) in 

558 # {maxLag, maxLag}^2]. 

559 muDiff, varDiff, covAstier = self.measureMeanVarCov(im1Area, im2Area, imStatsCtrl, mu1, mu2) 

560 # Estimate the gain from the flat pair 

561 if self.config.doGain: 

562 gain = self.getGainFromFlatPair(im1Area, im2Area, imStatsCtrl, mu1, mu2, 

563 correctionType=self.config.gainCorrectionType, 

564 readNoise=readNoiseDict[ampName]) 

565 else: 

566 gain = np.nan 

567 

568 # Correction factor for bias introduced by sigma 

569 # clipping. 

570 # Function returns 1/sqrt(varFactor), so it needs 

571 # to be squared. varDiff is calculated via 

572 # afwMath.VARIANCECLIP. 

573 varFactor = sigmaClipCorrection(self.config.nSigmaClipPtc)**2 

574 varDiff *= varFactor 

575 

576 expIdMask = True 

577 # Mask data point at this mean signal level if 

578 # the signal, variance, or covariance calculations 

579 # from `measureMeanVarCov` resulted in NaNs. 

580 if np.isnan(muDiff) or np.isnan(varDiff) or (covAstier is None): 

581 self.log.warning("NaN mean or var, or None cov in amp %s in exposure pair %d, %d of " 

582 "detector %d.", ampName, expId1, expId2, detNum) 

583 nAmpsNan += 1 

584 expIdMask = False 

585 covArray = np.full((1, self.config.maximumRangeCovariancesAstier, 

586 self.config.maximumRangeCovariancesAstier), np.nan) 

587 covSqrtWeights = np.full_like(covArray, np.nan) 

588 

589 # Mask data point if it is outside of the 

590 # specified mean signal range. 

591 if (muDiff <= minMeanSignalDict[ampName]) or (muDiff >= maxMeanSignalDict[ampName]): 

592 expIdMask = False 

593 

594 if covAstier is not None: 

595 # Turn the tuples with the measured information 

596 # into covariance arrays. 

597 # covrow: (i, j, var (cov[0,0]), cov, npix) 

598 tupleRows = [(muDiff, varDiff) + covRow + (ampNumber, expTime, 

599 ampName) for covRow in covAstier] 

600 tempStructArray = np.array(tupleRows, dtype=tags) 

601 

602 covArray, vcov, _ = self.makeCovArray(tempStructArray, 

603 self.config.maximumRangeCovariancesAstier) 

604 

605 # The returned covArray should only have 1 entry; 

606 # raise if this is not the case. 

607 if covArray.shape[0] != 1: 

608 raise RuntimeError("Serious programming error in covArray shape.") 

609 

610 covSqrtWeights = np.nan_to_num(1./np.sqrt(vcov)) 

611 

612 # Correct covArray for sigma clipping: 

613 # 1) Apply varFactor twice for the whole covariance matrix 

614 covArray *= varFactor**2 

615 # 2) But, only once for the variance element of the 

616 # matrix, covArray[0, 0, 0] (so divide one factor out). 

617 # (the first 0 is because this is a 3D array for insertion into 

618 # the combined dataset). 

619 covArray[0, 0, 0] /= varFactor 

620 

621 if expIdMask: 

622 # Run the Gaussian histogram only if this is a legal 

623 # amplifier. 

624 histVar, histChi2Dof, kspValue = self.computeGaussianHistogramParameters( 

625 im1Area, 

626 im2Area, 

627 imStatsCtrl, 

628 mu1, 

629 mu2, 

630 ) 

631 else: 

632 histVar = np.nan 

633 histChi2Dof = np.nan 

634 kspValue = 0.0 

635 

636 if self.config.doExtractPhotodiodeData: 

637 nExps = 0 

638 photoCharge = 0.0 

639 for expId in [expId1, expId2]: 

640 if expId in monitorDiodeCharge: 

641 photoCharge += monitorDiodeCharge[expId] 

642 nExps += 1 

643 if nExps > 0: 

644 photoCharge /= nExps 

645 else: 

646 photoCharge = np.nan 

647 else: 

648 photoCharge = np.nan 

649 

650 partialPtcDataset.setAmpValuesPartialDataset( 

651 ampName, 

652 inputExpIdPair=(expId1, expId2), 

653 rawExpTime=expTime, 

654 rawMean=muDiff, 

655 rawVar=varDiff, 

656 photoCharge=photoCharge, 

657 expIdMask=expIdMask, 

658 covariance=covArray[0, :, :], 

659 covSqrtWeights=covSqrtWeights[0, :, :], 

660 gain=gain, 

661 noise=readNoiseDict[ampName], 

662 histVar=histVar, 

663 histChi2Dof=histChi2Dof, 

664 kspValue=kspValue, 

665 ) 

666 

667 partialPtcDataset.setAuxValuesPartialDataset(auxDict) 

668 

669 # Use location of exp1 to save PTC dataset from (exp1, exp2) pair. 

670 # Below, np.where(expId1 == np.array(inputDims)) returns a tuple 

671 # with a single-element array, so [0][0] 

672 # is necessary to extract the required index. 

673 datasetIndex = np.where(expId1 == np.array(inputDims))[0][0] 

674 # `partialPtcDatasetList` is a list of 

675 # `PhotonTransferCurveDataset` objects. Some of them 

676 # will be dummy datasets (to match length of input 

677 # and output references), and the rest will have 

678 # datasets with the mean signal, variance, and 

679 # covariance measurements at a given exposure 

680 # time. The next ppart of the PTC-measurement 

681 # pipeline, `solve`, will take this list as input, 

682 # and assemble the measurements in the datasets 

683 # in an addecuate manner for fitting a PTC 

684 # model. 

685 partialPtcDataset.updateMetadataFromExposures([exp1, exp2]) 

686 partialPtcDataset.updateMetadata(setDate=True, detector=detector) 

687 partialPtcDatasetList[datasetIndex] = partialPtcDataset 

688 

689 if nAmpsNan == len(ampNames): 

690 msg = f"NaN mean in all amps of exposure pair {expId1}, {expId2} of detector {detNum}." 

691 self.log.warning(msg) 

692 

693 return pipeBase.Struct( 

694 outputCovariances=partialPtcDatasetList, 

695 ) 

696 

697 def makeCovArray(self, inputTuple, maxRangeFromTuple): 

698 """Make covariances array from tuple. 

699 

700 Parameters 

701 ---------- 

702 inputTuple : `numpy.ndarray` 

703 Structured array with rows with at least 

704 (mu, afwVar, cov, var, i, j, npix), where: 

705 mu : `float` 

706 0.5*(m1 + m2), where mu1 is the mean value of flat1 

707 and mu2 is the mean value of flat2. 

708 afwVar : `float` 

709 Variance of difference flat, calculated with afw. 

710 cov : `float` 

711 Covariance value at lag(i, j) 

712 var : `float` 

713 Variance(covariance value at lag(0, 0)) 

714 i : `int` 

715 Lag in dimension "x". 

716 j : `int` 

717 Lag in dimension "y". 

718 npix : `int` 

719 Number of pixels used for covariance calculation. 

720 maxRangeFromTuple : `int` 

721 Maximum range to select from tuple. 

722 

723 Returns 

724 ------- 

725 cov : `numpy.array` 

726 Covariance arrays, indexed by mean signal mu. 

727 vCov : `numpy.array` 

728 Variance of the [co]variance arrays, indexed by mean signal mu. 

729 muVals : `numpy.array` 

730 List of mean signal values. 

731 """ 

732 if maxRangeFromTuple is not None: 

733 cut = (inputTuple['i'] < maxRangeFromTuple) & (inputTuple['j'] < maxRangeFromTuple) 

734 cutTuple = inputTuple[cut] 

735 else: 

736 cutTuple = inputTuple 

737 # increasing mu order, so that we can group measurements with the 

738 # same mu 

739 muTemp = cutTuple['mu'] 

740 ind = np.argsort(muTemp) 

741 

742 cutTuple = cutTuple[ind] 

743 # should group measurements on the same image pairs(same average) 

744 mu = cutTuple['mu'] 

745 xx = np.hstack(([mu[0]], mu)) 

746 delta = xx[1:] - xx[:-1] 

747 steps, = np.where(delta > 0) 

748 ind = np.zeros_like(mu, dtype=int) 

749 ind[steps] = 1 

750 ind = np.cumsum(ind) # this acts as an image pair index. 

751 # now fill the 3-d cov array(and variance) 

752 muVals = np.array(np.unique(mu)) 

753 i = cutTuple['i'].astype(int) 

754 j = cutTuple['j'].astype(int) 

755 c = 0.5*cutTuple['cov'] 

756 n = cutTuple['npix'] 

757 v = 0.5*cutTuple['var'] 

758 # book and fill 

759 cov = np.ndarray((len(muVals), np.max(i)+1, np.max(j)+1)) 

760 var = np.zeros_like(cov) 

761 cov[ind, i, j] = c 

762 var[ind, i, j] = v**2/n 

763 var[:, 0, 0] *= 2 # var(v) = 2*v**2/N 

764 

765 return cov, var, muVals 

766 

767 def measureMeanVarCov(self, im1Area, im2Area, imStatsCtrl, mu1, mu2): 

768 """Calculate the mean of each of two exposures and the variance 

769 and covariance of their difference. The variance is calculated 

770 via afwMath, and the covariance via the methods in Astier+19 

771 (appendix A). In theory, var = covariance[0,0]. This should 

772 be validated, and in the future, we may decide to just keep 

773 one (covariance). 

774 

775 Parameters 

776 ---------- 

777 im1Area : `lsst.afw.image.maskedImage.MaskedImageF` 

778 Masked image from exposure 1. 

779 im2Area : `lsst.afw.image.maskedImage.MaskedImageF` 

780 Masked image from exposure 2. 

781 imStatsCtrl : `lsst.afw.math.StatisticsControl` 

782 Statistics control object. 

783 mu1: `float` 

784 Clipped mean of im1Area (ADU). 

785 mu2: `float` 

786 Clipped mean of im2Area (ADU). 

787 

788 Returns 

789 ------- 

790 mu : `float` or `NaN` 

791 0.5*(mu1 + mu2), where mu1, and mu2 are the clipped means 

792 of the regions in both exposures. If either mu1 or m2 are 

793 NaN's, the returned value is NaN. 

794 varDiff : `float` or `NaN` 

795 Half of the clipped variance of the difference of the 

796 regions inthe two input exposures. If either mu1 or m2 are 

797 NaN's, the returned value is NaN. 

798 covDiffAstier : `list` or `NaN` 

799 List with tuples of the form (dx, dy, var, cov, npix), where: 

800 dx : `int` 

801 Lag in x 

802 dy : `int` 

803 Lag in y 

804 var : `float` 

805 Variance at (dx, dy). 

806 cov : `float` 

807 Covariance at (dx, dy). 

808 nPix : `int` 

809 Number of pixel pairs used to evaluate var and cov. 

810 

811 If either mu1 or m2 are NaN's, the returned value is NaN. 

812 """ 

813 if np.isnan(mu1) or np.isnan(mu2): 

814 self.log.warning("Mean of amp in image 1 or 2 is NaN: %f, %f.", mu1, mu2) 

815 return np.nan, np.nan, None 

816 mu = 0.5*(mu1 + mu2) 

817 

818 # Take difference of pairs 

819 # symmetric formula: diff = (mu2*im1-mu1*im2)/(0.5*(mu1+mu2)) 

820 temp = im2Area.clone() 

821 temp *= mu1 

822 diffIm = im1Area.clone() 

823 diffIm *= mu2 

824 diffIm -= temp 

825 diffIm /= mu 

826 

827 if self.config.binSize > 1: 

828 diffIm = afwMath.binImage(diffIm, self.config.binSize) 

829 

830 # Variance calculation via afwMath 

831 varDiff = 0.5*(afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, imStatsCtrl).getValue()) 

832 

833 # Covariances calculations 

834 # Get the pixels that were not clipped 

835 varClip = afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, imStatsCtrl).getValue() 

836 meanClip = afwMath.makeStatistics(diffIm, afwMath.MEANCLIP, imStatsCtrl).getValue() 

837 cut = meanClip + self.config.nSigmaClipPtc*np.sqrt(varClip) 

838 unmasked = np.where(np.fabs(diffIm.image.array) <= cut, 1, 0) 

839 

840 # Get the pixels in the mask planes of the difference image 

841 # that were ignored by the clipping algorithm 

842 wDiff = np.where(diffIm.getMask().getArray() == 0, 1, 0) 

843 # Combine the two sets of pixels ('1': use; '0': don't use) 

844 # into a final weight matrix to be used in the covariance 

845 # calculations below. 

846 w = unmasked*wDiff 

847 

848 if np.sum(w) < self.config.minNumberGoodPixelsForCovariance/(self.config.binSize**2): 

849 self.log.warning("Number of good points for covariance calculation (%s) is less " 

850 "(than threshold %s)", np.sum(w), 

851 self.config.minNumberGoodPixelsForCovariance/(self.config.binSize**2)) 

852 return np.nan, np.nan, None 

853 

854 maxRangeCov = self.config.maximumRangeCovariancesAstier 

855 

856 # Calculate covariances via FFT. 

857 shapeDiff = np.array(diffIm.image.array.shape) 

858 # Calculate the sizes of FFT dimensions. 

859 s = shapeDiff + maxRangeCov 

860 tempSize = np.array(np.log(s)/np.log(2.)).astype(int) 

861 fftSize = np.array(2**(tempSize+1)).astype(int) 

862 fftShape = (fftSize[0], fftSize[1]) 

863 c = CovFastFourierTransform(diffIm.image.array, w, fftShape, maxRangeCov) 

864 # np.sum(w) is the same as npix[0][0] returned in covDiffAstier 

865 try: 

866 covDiffAstier = c.reportCovFastFourierTransform(maxRangeCov) 

867 except ValueError: 

868 # This is raised if there are not enough pixels. 

869 self.log.warning("Not enough pixels covering the requested covariance range in x/y (%d)", 

870 self.config.maximumRangeCovariancesAstier) 

871 return np.nan, np.nan, None 

872 

873 # Compare Cov[0,0] and afwMath.VARIANCECLIP covDiffAstier[0] 

874 # is the Cov[0,0] element, [3] is the variance, and there's a 

875 # factor of 0.5 difference with afwMath.VARIANCECLIP. 

876 thresholdPercentage = self.config.thresholdDiffAfwVarVsCov00 

877 fractionalDiff = 100*np.fabs(1 - varDiff/(covDiffAstier[0][3]*0.5)) 

878 if fractionalDiff >= thresholdPercentage: 

879 self.log.warning("Absolute fractional difference between afwMatch.VARIANCECLIP and Cov[0,0] " 

880 "is more than %f%%: %f", thresholdPercentage, fractionalDiff) 

881 

882 return mu, varDiff, covDiffAstier 

883 

884 def getImageAreasMasksStats(self, exposure1, exposure2, region=None): 

885 """Get image areas in a region as well as masks and statistic objects. 

886 

887 Parameters 

888 ---------- 

889 exposure1 : `lsst.afw.image.ExposureF` 

890 First exposure of flat field pair. 

891 exposure2 : `lsst.afw.image.ExposureF` 

892 Second exposure of flat field pair. 

893 region : `lsst.geom.Box2I`, optional 

894 Region of each exposure where to perform the calculations 

895 (e.g, an amplifier). 

896 

897 Returns 

898 ------- 

899 im1Area : `lsst.afw.image.MaskedImageF` 

900 Masked image from exposure 1. 

901 im2Area : `lsst.afw.image.MaskedImageF` 

902 Masked image from exposure 2. 

903 imStatsCtrl : `lsst.afw.math.StatisticsControl` 

904 Statistics control object. 

905 mu1 : `float` 

906 Clipped mean of im1Area (ADU). 

907 mu2 : `float` 

908 Clipped mean of im2Area (ADU). 

909 """ 

910 if region is not None: 

911 im1Area = exposure1.maskedImage[region] 

912 im2Area = exposure2.maskedImage[region] 

913 else: 

914 im1Area = exposure1.maskedImage 

915 im2Area = exposure2.maskedImage 

916 

917 # Get mask planes and construct statistics control object from one 

918 # of the exposures 

919 imMaskVal = exposure1.getMask().getPlaneBitMask(self.config.maskNameList) 

920 imStatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc, 

921 self.config.nIterSigmaClipPtc, 

922 imMaskVal) 

923 imStatsCtrl.setNanSafe(True) 

924 imStatsCtrl.setAndMask(imMaskVal) 

925 

926 mu1 = afwMath.makeStatistics(im1Area, afwMath.MEANCLIP, imStatsCtrl).getValue() 

927 mu2 = afwMath.makeStatistics(im2Area, afwMath.MEANCLIP, imStatsCtrl).getValue() 

928 

929 return (im1Area, im2Area, imStatsCtrl, mu1, mu2) 

930 

931 def getGainFromFlatPair(self, im1Area, im2Area, imStatsCtrl, mu1, mu2, 

932 correctionType='NONE', readNoise=None): 

933 """Estimate the gain from a single pair of flats. 

934 

935 The basic premise is 1/g = <(I1 - I2)^2/(I1 + I2)> = 1/const, 

936 where I1 and I2 correspond to flats 1 and 2, respectively. 

937 Corrections for the variable QE and the read-noise are then 

938 made following the derivation in Robert Lupton's forthcoming 

939 book, which gets 

940 

941 1/g = <(I1 - I2)^2/(I1 + I2)> - 1/mu(sigma^2 - 1/2g^2). 

942 

943 This is a quadratic equation, whose solutions are given by: 

944 

945 g = mu +/- sqrt(2*sigma^2 - 2*const*mu + mu^2)/(2*const*mu*2 

946 - 2*sigma^2) 

947 

948 where 'mu' is the average signal level and 'sigma' is the 

949 amplifier's readnoise. The positive solution will be used. 

950 The way the correction is applied depends on the value 

951 supplied for correctionType. 

952 

953 correctionType is one of ['NONE', 'SIMPLE' or 'FULL'] 

954 'NONE' : uses the 1/g = <(I1 - I2)^2/(I1 + I2)> formula. 

955 'SIMPLE' : uses the gain from the 'NONE' method for the 

956 1/2g^2 term. 

957 'FULL' : solves the full equation for g, discarding the 

958 non-physical solution to the resulting quadratic. 

959 

960 Parameters 

961 ---------- 

962 im1Area : `lsst.afw.image.maskedImage.MaskedImageF` 

963 Masked image from exposure 1. 

964 im2Area : `lsst.afw.image.maskedImage.MaskedImageF` 

965 Masked image from exposure 2. 

966 imStatsCtrl : `lsst.afw.math.StatisticsControl` 

967 Statistics control object. 

968 mu1: `float` 

969 Clipped mean of im1Area (ADU). 

970 mu2: `float` 

971 Clipped mean of im2Area (ADU). 

972 correctionType : `str`, optional 

973 The correction applied, one of ['NONE', 'SIMPLE', 'FULL'] 

974 readNoise : `float`, optional 

975 Amplifier readout noise (ADU). 

976 

977 Returns 

978 ------- 

979 gain : `float` 

980 Gain, in e/ADU. 

981 

982 Raises 

983 ------ 

984 RuntimeError 

985 Raise if `correctionType` is not one of 'NONE', 

986 'SIMPLE', or 'FULL'. 

987 """ 

988 if correctionType not in ['NONE', 'SIMPLE', 'FULL']: 

989 raise RuntimeError("Unknown correction type: %s" % correctionType) 

990 

991 if correctionType != 'NONE' and not np.isfinite(readNoise): 

992 self.log.warning("'correctionType' in 'getGainFromFlatPair' is %s, " 

993 "but 'readNoise' is NaN. Setting 'correctionType' " 

994 "to 'NONE', so a gain value will be estimated without " 

995 "corrections." % correctionType) 

996 correctionType = 'NONE' 

997 

998 mu = 0.5*(mu1 + mu2) 

999 

1000 # ratioIm = (I1 - I2)^2 / (I1 + I2) 

1001 temp = im2Area.clone() 

1002 ratioIm = im1Area.clone() 

1003 ratioIm -= temp 

1004 ratioIm *= ratioIm 

1005 

1006 # Sum of pairs 

1007 sumIm = im1Area.clone() 

1008 sumIm += temp 

1009 

1010 ratioIm /= sumIm 

1011 

1012 const = afwMath.makeStatistics(ratioIm, afwMath.MEAN, imStatsCtrl).getValue() 

1013 gain = 1. / const 

1014 

1015 if correctionType == 'SIMPLE': 

1016 gain = 1/(const - (1/mu)*(readNoise**2 - (1/2*gain**2))) 

1017 elif correctionType == 'FULL': 

1018 root = np.sqrt(mu**2 - 2*mu*const + 2*readNoise**2) 

1019 denom = (2*const*mu - 2*readNoise**2) 

1020 positiveSolution = (root + mu)/denom 

1021 gain = positiveSolution 

1022 

1023 return gain 

1024 

1025 def getReadNoise(self, exposureMetadata, taskMetadata, ampName): 

1026 """Gets readout noise for an amp from ISR metadata. 

1027 

1028 If possible, this attempts to get the now-standard headers 

1029 added to the exposure itself. If not found there, the ISR 

1030 TaskMetadata is searched. If neither of these has the value, 

1031 warn and set the read noise to NaN. 

1032 

1033 Parameters 

1034 ---------- 

1035 exposureMetadata : `lsst.daf.base.PropertySet` 

1036 Metadata to check for read noise first. 

1037 taskMetadata : `lsst.pipe.base.TaskMetadata` 

1038 List of exposures metadata from ISR for this exposure. 

1039 ampName : `str` 

1040 Amplifier name. 

1041 

1042 Returns 

1043 ------- 

1044 readNoise : `float` 

1045 The read noise for this set of exposure/amplifier. 

1046 """ 

1047 # Try from the exposure first. 

1048 expectedKey = f"LSST ISR OVERSCAN RESIDUAL SERIAL STDEV {ampName}" 

1049 if expectedKey in exposureMetadata: 

1050 return exposureMetadata[expectedKey] 

1051 

1052 # If not, try getting it from the task metadata. 

1053 expectedKey = f"RESIDUAL STDEV {ampName}" 

1054 if "isr" in taskMetadata: 

1055 if expectedKey in taskMetadata["isr"]: 

1056 return taskMetadata["isr"][expectedKey] 

1057 

1058 self.log.warning("Median readout noise from ISR metadata for amp %s " 

1059 "could not be calculated." % ampName) 

1060 return np.nan 

1061 

1062 def computeGaussianHistogramParameters(self, im1Area, im2Area, imStatsCtrl, mu1, mu2): 

1063 """Compute KS test for a Gaussian model fit to a histogram of the 

1064 difference image. 

1065 

1066 Parameters 

1067 ---------- 

1068 im1Area : `lsst.afw.image.MaskedImageF` 

1069 Masked image from exposure 1. 

1070 im2Area : `lsst.afw.image.MaskedImageF` 

1071 Masked image from exposure 2. 

1072 imStatsCtrl : `lsst.afw.math.StatisticsControl` 

1073 Statistics control object. 

1074 mu1 : `float` 

1075 Clipped mean of im1Area (ADU). 

1076 mu2 : `float` 

1077 Clipped mean of im2Area (ADU). 

1078 

1079 Returns 

1080 ------- 

1081 varFit : `float` 

1082 Variance from the Gaussian fit. 

1083 chi2Dof : `float` 

1084 Chi-squared per degree of freedom of Gaussian fit. 

1085 kspValue : `float` 

1086 The KS test p-value for the Gaussian fit. 

1087 

1088 Notes 

1089 ----- 

1090 The algorithm here was originally developed by Aaron Roodman. 

1091 Tests on the full focal plane of LSSTCam during testing has shown 

1092 that a KS test p-value cut of 0.01 is a good discriminant for 

1093 well-behaved flat pairs (p>0.01) and poorly behaved non-Gaussian 

1094 flat pairs (p<0.01). 

1095 """ 

1096 diffExp = im1Area.clone() 

1097 diffExp -= im2Area 

1098 

1099 sel = (((diffExp.mask.array & imStatsCtrl.getAndMask()) == 0) 

1100 & np.isfinite(diffExp.mask.array)) 

1101 diffArr = diffExp.image.array[sel] 

1102 

1103 numOk = len(diffArr) 

1104 

1105 if numOk >= self.config.ksHistMinDataValues and np.isfinite(mu1) and np.isfinite(mu2): 

1106 # Create a histogram symmetric around zero, with a bin size 

1107 # determined from the expected variance given by the average of 

1108 # the input signal levels. 

1109 lim = self.config.ksHistLimitMultiplier * np.sqrt((mu1 + mu2)/2.) 

1110 yVals, binEdges = np.histogram(diffArr, bins=self.config.ksHistNBins, range=[-lim, lim]) 

1111 

1112 # Fit the histogram with a Gaussian model. 

1113 model = GaussianModel() 

1114 yVals = yVals.astype(np.float64) 

1115 xVals = ((binEdges[0: -1] + binEdges[1:])/2.).astype(np.float64) 

1116 errVals = np.sqrt(yVals) 

1117 errVals[(errVals == 0.0)] = 1.0 

1118 pars = model.guess(yVals, x=xVals) 

1119 with warnings.catch_warnings(): 

1120 warnings.simplefilter("ignore") 

1121 # The least-squares fitter sometimes spouts (spurious) warnings 

1122 # when the model is very bad. Swallow these warnings now and 

1123 # let the KS test check the model below. 

1124 out = model.fit( 

1125 yVals, 

1126 pars, 

1127 x=xVals, 

1128 weights=1./errVals, 

1129 calc_covar=True, 

1130 method="least_squares", 

1131 ) 

1132 

1133 # Calculate chi2. 

1134 chiArr = out.residual 

1135 nDof = len(yVals) - 3 

1136 chi2Dof = np.sum(chiArr**2.)/nDof 

1137 sigmaFit = out.params["sigma"].value 

1138 

1139 # Calculate KS test p-value for the fit. 

1140 ksResult = scipy.stats.ks_1samp( 

1141 diffArr, 

1142 scipy.stats.norm.cdf, 

1143 (out.params["center"].value, sigmaFit), 

1144 ) 

1145 

1146 kspValue = ksResult.pvalue 

1147 if kspValue < 1e-15: 

1148 kspValue = 0.0 

1149 

1150 varFit = sigmaFit**2. 

1151 

1152 else: 

1153 varFit = np.nan 

1154 chi2Dof = np.nan 

1155 kspValue = 0.0 

1156 

1157 return varFit, chi2Dof, kspValue