Coverage for python/lsst/cp/pipe/ptc/cpSolvePtcTask.py: 13%

Shortcuts on this page

r m x p   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

297 statements  

1# This file is part of cp_pipe. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (https://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# This program is free software: you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation, either version 3 of the License, or 

12# (at your option) any later version. 

13# 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18# 

19# You should have received a copy of the GNU General Public License 

20# along with this program. If not, see <https://www.gnu.org/licenses/>. 

21# 

22import numpy as np 

23from collections import Counter 

24 

25import lsst.pex.config as pexConfig 

26import lsst.pipe.base as pipeBase 

27from lsst.cp.pipe.utils import (fitLeastSq, fitBootstrap, funcPolynomial, funcAstier) 

28 

29from scipy.optimize import least_squares 

30 

31import lsst.pipe.base.connectionTypes as cT 

32 

33from .astierCovPtcUtils import fitDataFullCovariance 

34 

35from lsst.ip.isr import PhotonTransferCurveDataset 

36 

37from lsst.cp.pipe._lookupStaticCalibration import lookupStaticCalibration 

38 

39import copy 

40 

41 

42__all__ = ['PhotonTransferCurveSolveConfig', 'PhotonTransferCurveSolveTask'] 

43 

44 

class PhotonTransferCurveSolveConnections(pipeBase.PipelineTaskConnections,
                                          dimensions=("instrument", "detector")):
    # Connections for PhotonTransferCurveSolveTask: per-exposure partial PTC
    # datasets in, the camera as a prerequisite, one assembled PTC dataset out.
    inputCovariances = cT.Input(
        name="ptcCovariances",
        doc="Tuple with measured covariances from flats.",
        storageClass="PhotonTransferCurveDataset",
        # Per-exposure inputs: one partial dataset per flat pair.
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
    )
    camera = cT.PrerequisiteInput(
        name="camera",
        doc="Camera the input data comes from.",
        storageClass="Camera",
        dimensions=("instrument",),
        isCalibration=True,
        # Static calibration: resolved through a lookup function rather
        # than a regular data-ID search.
        lookupFunction=lookupStaticCalibration,
    )
    outputPtcDataset = cT.Output(
        # NOTE(review): "ptcDatsetProposal" looks like a typo for
        # "ptcDatasetProposal", but this is a registered dataset type name;
        # renaming it would break existing repositories — confirm before fixing.
        name="ptcDatsetProposal",
        doc="Output proposed ptc dataset.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "detector"),
        multiple=False,
        isCalibration=True,
    )

70 

71 

class PhotonTransferCurveSolveConfig(pipeBase.PipelineTaskConfig,
                                     pipelineConnections=PhotonTransferCurveSolveConnections):
    """Configuration for fitting measured covariances.
    """

    # --- Model selection --------------------------------------------------
    ptcFitType = pexConfig.ChoiceField(
        dtype=str,
        doc="Fit PTC to Eq. 16, Eq. 20 in Astier+19, or to a polynomial.",
        default="POLYNOMIAL",
        allowed={
            "POLYNOMIAL": "n-degree polynomial (use 'polynomialFitDegree' to set 'n').",
            "EXPAPPROXIMATION": "Approximation in Astier+19 (Eq. 16).",
            "FULLCOVARIANCE": "Full covariances model in Astier+19 (Eq. 20)"
        }
    )
    # --- FULLCOVARIANCE (Astier+19 Eq. 20) fit controls -------------------
    maximumRangeCovariancesAstier = pexConfig.Field(
        dtype=int,
        doc="Maximum range of covariances as in Astier+19",
        default=8,
    )
    sigmaClipFullFitCovariancesAstier = pexConfig.Field(
        dtype=float,
        doc="sigma clip for full model fit for FULLCOVARIANCE ptcFitType ",
        default=5.0,
    )
    maxIterFullFitCovariancesAstier = pexConfig.Field(
        dtype=int,
        doc="Maximum number of iterations in full model fit for FULLCOVARIANCE ptcFitType",
        default=3,
    )
    # --- POLYNOMIAL / EXPAPPROXIMATION fit controls -----------------------
    polynomialFitDegree = pexConfig.Field(
        dtype=int,
        doc="Degree of polynomial to fit the PTC, when 'ptcFitType'=POLYNOMIAL.",
        default=3,
    )
    sigmaCutPtcOutliers = pexConfig.Field(
        dtype=float,
        doc="Sigma cut for outlier rejection in PTC.",
        default=5.0,
    )
    maxIterationsPtcOutliers = pexConfig.Field(
        dtype=int,
        doc="Maximum number of iterations for outlier rejection in PTC.",
        default=2,
    )
    # --- Initial good-point screening (see _getInitialGoodPoints) ---------
    initialNonLinearityExclusionThresholdPositive = pexConfig.RangeField(
        dtype=float,
        doc="Initially exclude data points with a variance that are more than a factor of this from being"
            " linear in the positive direction, from the PTC fit. Note that these points will also be"
            " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
            " to allow an accurate determination of the sigmas for said iterative fit.",
        default=0.05,
        min=0.0,
        max=1.0,
    )
    initialNonLinearityExclusionThresholdNegative = pexConfig.RangeField(
        dtype=float,
        doc="Initially exclude data points with a variance that are more than a factor of this from being"
            " linear in the negative direction, from the PTC fit. Note that these points will also be"
            " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
            " to allow an accurate determination of the sigmas for said iterative fit.",
        default=0.25,
        min=0.0,
        max=1.0,
    )
    minMeanRatioTest = pexConfig.Field(
        dtype=float,
        # NOTE(review): "inadvertantly" is a typo ("inadvertently") in this
        # runtime doc string; left as-is here since doc text is program data.
        doc="In the initial test to screen out bad points with a ratio test, points with low"
            " flux can get inadvertantly screened. This test only screens out points with flux"
            " above this value.",
        default=20000,
    )
    minVarPivotSearch = pexConfig.Field(
        dtype=float,
        # NOTE(review): "exclude then form" presumably means "exclude them
        # from"; left as-is (runtime doc string).
        doc="The code looks for a pivot signal point after which the variance starts decreasing at high-flux"
            " to exclude then form the PTC model fit. However, sometimes at low fluxes, the variance"
            " decreases slightly. Set this variable for the variance value, in ADU^2, after which the pivot "
            " should be sought.",
        default=10000,
    )
    # --- Fitting method ---------------------------------------------------
    doFitBootstrap = pexConfig.Field(
        dtype=bool,
        doc="Use bootstrap for the PTC fit parameters and errors?.",
        default=False,
    )

157 

158 

class PhotonTransferCurveSolveTask(pipeBase.PipelineTask,
                                   pipeBase.CmdLineTask):
    """Task to fit the PTC from flat covariances.

    This task assembles the list of individual partial PTC datasets
    (the ``ptcCovariances`` inputs) into one single final PTC dataset.
    The task fits the measured (co)variances to a polynomial model or
    to the models described in equations 16 and 20 of Astier+19
    (referred to as ``POLYNOMIAL``, ``EXPAPPROXIMATION``, and
    ``FULLCOVARIANCE`` in the configuration options of the task,
    respectively). Parameters of interest such as the gain and noise
    are derived from the fits.

    Astier+19: "The Shape of the Photon Transfer Curve
    of CCD sensors", arXiv:1905.08677
    """

    ConfigClass = PhotonTransferCurveSolveConfig
    _DefaultName = 'cpPhotonTransferCurveSolve'

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        """Ensure that the input and output dimensions are passed along.

        Parameters
        ----------
        butlerQC : `~lsst.daf.butler.butlerQuantumContext.ButlerQuantumContext`
            Butler to operate on.
        inputRefs : `~lsst.pipe.base.connections.InputQuantizedConnection`
            Input data refs to load.
        outputRefs : `~lsst.pipe.base.connections.OutputQuantizedConnection`
            Output data refs to persist.
        """
        inputs = butlerQC.get(inputRefs)
        outputs = self.run(inputCovariances=inputs['inputCovariances'], camera=inputs['camera'])
        butlerQC.put(outputs, outputRefs)

    def run(self, inputCovariances, camera=None, inputExpList=None):
        """Fit measured covariances to different models.

        Parameters
        ----------
        inputCovariances : `list` [`lsst.ip.isr.PhotonTransferCurveDataset`]
            List of lsst.ip.isr.PhotonTransferCurveDataset datasets.
        camera : `lsst.afw.cameraGeom.Camera`, optional
            Input camera.
        inputExpList : `list` [`~lsst.afw.image.ExposureF`], optional
            List of exposures.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The results struct containing:

            ``outputPtcDataset``
                Final PTC dataset, containing information such as the
                means, variances, and exposure times
                (`lsst.ip.isr.PhotonTransferCurveDataset`).
        """

        def firstIfList(value):
            # Partial datasets may store a per-pair value either bare or
            # wrapped in a one-element list; normalize to the bare value.
            return value[0] if type(value) is list else value

        # Assemble partial PTC datasets into a single dataset.
        ampNames = np.unique(inputCovariances[0].ampNames)
        datasetPtc = PhotonTransferCurveDataset(ampNames, self.config.ptcFitType,
                                                self.config.maximumRangeCovariancesAstier)
        for partialPtcDataset in inputCovariances:
            # 'DUMMY' datasets are placeholders with no measurements.
            if partialPtcDataset.ptcFitType == 'DUMMY':
                continue
            for ampName in ampNames:
                datasetPtc.inputExpIdPairs[ampName].append(partialPtcDataset.inputExpIdPairs[ampName])
                datasetPtc.rawExpTimes[ampName].append(firstIfList(partialPtcDataset.rawExpTimes[ampName]))
                datasetPtc.rawMeans[ampName].append(firstIfList(partialPtcDataset.rawMeans[ampName]))
                datasetPtc.rawVars[ampName].append(firstIfList(partialPtcDataset.rawVars[ampName]))
                datasetPtc.expIdMask[ampName].append(firstIfList(partialPtcDataset.expIdMask[ampName]))
                datasetPtc.covariances[ampName].append(np.array(partialPtcDataset.covariances[ampName][0]))
                datasetPtc.covariancesSqrtWeights[ampName].append(
                    np.array(partialPtcDataset.covariancesSqrtWeights[ampName][0]))

        # Sort arrays that are filled so far in the final dataset by
        # rawMeans index.
        for ampName in ampNames:
            index = np.argsort(np.ravel(np.array(datasetPtc.rawMeans[ampName])))
            datasetPtc.inputExpIdPairs[ampName] = np.array(datasetPtc.inputExpIdPairs[ampName])[index]
            datasetPtc.rawExpTimes[ampName] = np.array(datasetPtc.rawExpTimes[ampName])[index]
            datasetPtc.rawMeans[ampName] = np.array(datasetPtc.rawMeans[ampName])[index]
            datasetPtc.rawVars[ampName] = np.array(datasetPtc.rawVars[ampName])[index]
            datasetPtc.expIdMask[ampName] = np.array(datasetPtc.expIdMask[ampName])[index]
            datasetPtc.covariances[ampName] = np.array(datasetPtc.covariances[ampName])[index]
            datasetPtc.covariancesSqrtWeights[ampName] = np.array(
                datasetPtc.covariancesSqrtWeights[ampName])[index]

        if self.config.ptcFitType == "FULLCOVARIANCE":
            # Calculate covariances and fit them, including the PTC, to
            # the Astier+19 full model (Eq. 20). First, find the flat
            # pairs that are masked by fitting C_00 vs mu to the
            # EXPAPPROXIMATION model (Eq. 16 in Astier+19). The points
            # at these fluxes will also be masked when calculating the
            # other covariances, C_ij.
            tempDatasetPtc = copy.copy(datasetPtc)
            tempDatasetPtc.ptcFitType = "EXPAPPROXIMATION"
            tempDatasetPtc = self.fitPtc(tempDatasetPtc)
            for ampName in datasetPtc.ampNames:
                datasetPtc.expIdMask[ampName] = tempDatasetPtc.expIdMask[ampName]
            # NOTE(review): this sets an attribute named ``fitType``; the
            # dataset's fit-type attribute elsewhere is ``ptcFitType``
            # (already "FULLCOVARIANCE" from the constructor) — confirm
            # whether this line is needed at all.
            datasetPtc.fitType = "FULLCOVARIANCE"
            datasetPtc = self.fitCovariancesAstier(datasetPtc)
        else:
            # The remaining options ("EXPAPPROXIMATION", "POLYNOMIAL"):
            # fit the PTC to a polynomial or to the Astier+19 exponential
            # approximation (Eq. 16), filling the
            # PhotonTransferCurveDataset object.
            datasetPtc = self.fitPtc(datasetPtc)
        if inputExpList is not None:
            # It should be a list of exposures, to get the detector.
            detector = inputExpList[0].getDetector()
        else:
            detector = None
        datasetPtc.updateMetadata(setDate=True, camera=camera, detector=detector)

        return pipeBase.Struct(
            outputPtcDataset=datasetPtc,
        )

    def fitCovariancesAstier(self, dataset):
        """Fit measured flat covariances to full model in Astier+19.

        Parameters
        ----------
        dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
            The dataset containing information such as the means,
            (co)variances, and exposure times.

        Returns
        -------
        dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
            This is the same dataset as the input parameter, however,
            it has been modified to include information such as the
            fit vectors and the fit parameters. See the class
            `PhotonTransferCurveDataset`.
        """
        covFits, covFitsNoB = fitDataFullCovariance(dataset)
        dataset = self.getOutputPtcDataCovAstier(dataset, covFits, covFitsNoB)

        return dataset

    def getOutputPtcDataCovAstier(self, dataset, covFits, covFitsNoB):
        """Get output data for PhotonTransferCurveCovAstierDataset from CovFit
        objects.

        Parameters
        ----------
        dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
            The dataset containing information such as the means,
            variances and exposure times.
        covFits : `dict`
            Dictionary of CovFit objects, with amp names as keys.
        covFitsNoB : `dict`
            Dictionary of CovFit objects, with amp names as keys, and
            'b=0' in Eq. 20 of Astier+19.

        Returns
        -------
        dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
            This is the same dataset as the input parameter, however,
            it has been modified to include extra information such as
            the mask 1D array, gains, readout noise, measured signal,
            measured variance, modeled variance, a, and b coefficient
            matrices (see Astier+19) per amplifier. See the class
            `PhotonTransferCurveDataset`.
        """
        assert len(covFits) == len(covFitsNoB)

        for amp in dataset.ampNames:
            lenInputTimes = len(dataset.rawExpTimes[amp])
            # Not used when ptcFitType is 'FULLCOVARIANCE'.
            dataset.ptcFitPars[amp] = [np.nan]
            dataset.ptcFitParsError[amp] = [np.nan]
            dataset.ptcFitChiSq[amp] = np.nan
            if amp in covFits:
                fit = covFits[amp]
                fitNoB = covFitsNoB[amp]
                # Save full covariances, covariances models, and their
                # weights. dataset.expIdMask is already full.
                dataset.covariances[amp] = fit.cov
                dataset.covariancesModel[amp] = fit.evalCovModel()
                dataset.covariancesSqrtWeights[amp] = fit.sqrtW
                dataset.aMatrix[amp] = fit.getA()
                dataset.bMatrix[amp] = fit.getB()
                dataset.covariancesModelNoB[amp] = fitNoB.evalCovModel()
                dataset.aMatrixNoB[amp] = fitNoB.getA()

                # C_00 (i.e., the PTC) data, unmasked, in raw order.
                (meanVecFinal, varVecFinal, varVecModel,
                 wc, varMask) = fit.getFitData(0, 0, divideByMu=False)
                gain = fit.getGain()

                dataset.gain[amp] = gain
                dataset.gainErr[amp] = fit.getGainErr()
                dataset.noise[amp] = np.sqrt(fit.getRon())
                dataset.noiseErr[amp] = fit.getRonErr()
                dataset.finalVars[amp] = varVecFinal
                dataset.finalModelVars[amp] = varVecModel
                dataset.finalMeans[amp] = meanVecFinal
            else:
                # Bad amp.
                # Entries need to have proper dimensions so read/write
                # with astropy.Table works.
                matrixSide = self.config.maximumRangeCovariancesAstier
                nanMatrix = np.full((matrixSide, matrixSide), np.nan)
                listNanMatrix = np.full((lenInputTimes, matrixSide, matrixSide), np.nan)

                dataset.covariances[amp] = listNanMatrix
                dataset.covariancesModel[amp] = listNanMatrix
                dataset.covariancesSqrtWeights[amp] = listNanMatrix
                dataset.aMatrix[amp] = nanMatrix
                dataset.bMatrix[amp] = nanMatrix
                dataset.covariancesModelNoB[amp] = listNanMatrix
                dataset.aMatrixNoB[amp] = nanMatrix

                dataset.expIdMask[amp] = np.repeat(np.nan, lenInputTimes)
                dataset.gain[amp] = np.nan
                dataset.gainErr[amp] = np.nan
                dataset.noise[amp] = np.nan
                dataset.noiseErr[amp] = np.nan
                dataset.finalVars[amp] = np.repeat(np.nan, lenInputTimes)
                dataset.finalModelVars[amp] = np.repeat(np.nan, lenInputTimes)
                dataset.finalMeans[amp] = np.repeat(np.nan, lenInputTimes)

        return dataset

    @staticmethod
    def _initialParsForPolynomial(order):
        """Return starting parameters for an ``order``-coefficient
        polynomial PTC fit: [noise-ish, 1/gain-ish, small higher orders].
        """
        assert order >= 2
        pars = np.zeros(order, dtype=float)
        pars[0] = 10
        pars[1] = 1
        pars[2:] = 0.0001
        return pars

    @staticmethod
    def _boundsForPolynomial(initialPars, lowers=None, uppers=None):
        """Return ``(lowers, uppers)`` bounds for a polynomial PTC fit.

        Unbounded by default, except the linear coefficient is kept
        non-negative (no negative gains). Mutable default arguments are
        avoided, and a caller-supplied ``lowers`` is copied rather than
        mutated in place.
        """
        lowers = list(lowers) if lowers else [-np.inf for _ in initialPars]
        uppers = list(uppers) if uppers else [np.inf for _ in initialPars]
        lowers[1] = 0  # no negative gains
        return (lowers, uppers)

    @staticmethod
    def _boundsForAstier(initialPars, lowers=None, uppers=None):
        """Return ``(lowers, uppers)`` bounds for the EXPAPPROXIMATION
        fit; unbounded unless explicit bounds are supplied.
        """
        lowers = list(lowers) if lowers else [-np.inf for _ in initialPars]
        uppers = list(uppers) if uppers else [np.inf for _ in initialPars]
        return (lowers, uppers)

    @staticmethod
    def _getInitialGoodPoints(means, variances, maxDeviationPositive, maxDeviationNegative,
                              minMeanRatioTest, minVarPivotSearch):
        """Return a boolean array to mask bad points.

        Parameters
        ----------
        means : `numpy.array`
            Input array with mean signal values.
        variances : `numpy.array`
            Input array with variances at each mean value.
        maxDeviationPositive : `float`
            Maximum deviation from being constant for the variance/mean
            ratio, in the positive direction.
        maxDeviationNegative : `float`
            Maximum deviation from being constant for the variance/mean
            ratio, in the negative direction.
        minMeanRatioTest : `float`
            Minimum signal value (in ADU) after which to start examining
            the ratios var/mean.
        minVarPivotSearch : `float`
            Minimum variance point (in ADU^2) after which the pivot point
            where the variance starts decreasing should be sought.

        Returns
        -------
        goodPoints : `numpy.array` [`bool`]
            Boolean array to select good (`True`) and bad (`False`)
            points.

        Notes
        -----
        A linear function has a constant ratio, so find the median
        value of the ratios, and exclude the points that deviate from
        that by more than a factor of maxDeviationPositive/negative.
        Asymmetric deviations are supported as we expect the PTC to
        turn down as the flux increases, but sometimes it anomalously
        turns upwards just before turning over, which ruins the fits,
        so it is wise to be stricter about restricting positive
        outliers than negative ones.

        Too high and points that are so bad that fit will fail will be
        included. Too low and the non-linear points will be excluded,
        biasing the NL fit. This function also masks points after the
        variance starts decreasing.
        """
        assert len(means) == len(variances)
        ratios = [b/a for (a, b) in zip(means, variances)]
        medianRatio = np.nanmedian(ratios)
        # Points below minMeanRatioTest are exempt from the ratio test.
        ratioDeviations = [0.0 if a < minMeanRatioTest else (r/medianRatio)-1
                           for (a, r) in zip(means, ratios)]

        # So that it doesn't matter if the deviation is expressed as
        # positive or negative.
        maxDeviationPositive = abs(maxDeviationPositive)
        maxDeviationNegative = -1. * abs(maxDeviationNegative)

        goodPoints = np.array([maxDeviationNegative < r < maxDeviationPositive
                               for r in ratioDeviations])

        # Eliminate points beyond which the variance decreases.
        pivot = np.where(np.array(np.diff(variances)) < 0)[0]
        if len(pivot) > 0:
            # For small values, sometimes the variance decreases slightly.
            # Only look when var > minVarPivotSearch.
            pivot = [p for p in pivot if variances[p] > minVarPivotSearch]
            if len(pivot) > 0:
                pivot = np.min(pivot)
                goodPoints[pivot+1:] = False

        return goodPoints

    def _makeZeroSafe(self, array, substituteValue=1e-9):
        """Replace exact zeros in ``array`` with ``substituteValue``.

        Protects later divisions and square roots (e.g., by the
        variance vector) from zeros. A warning is logged when zeros
        are found.

        Parameters
        ----------
        array : array-like
            Input values; converted to `numpy.array`.
        substituteValue : `float`, optional
            Value written in place of each zero.

        Returns
        -------
        array : `numpy.array`
            Array with zeros replaced.
        """
        array = np.array(array)
        nBad = Counter(np.ravel(array))[0]
        if nBad == 0:
            return array

        index, = np.where(array == 0)
        if len(index):
            msg = f"Found {nBad} zeros in array at elements {index}"
            self.log.warning(msg)

        array[index] = substituteValue

        return array

    def fitPtc(self, dataset):
        """Fit the photon transfer curve to a polynomial or to Astier+19
        approximation.

        Fit the photon transfer curve with either a polynomial of the order
        specified in the task config, or using the exponential approximation
        in Astier+19 (Eq. 16).

        Sigma clipping is performed iteratively for the fit, as well as an
        initial clipping of data points that are more than
        config.initialNonLinearityExclusionThreshold away from lying on a
        straight line. This other step is necessary because the photon transfer
        curve turns over catastrophically at very high flux (because saturation
        drops the variance to ~0) and these far outliers cause the initial fit
        to fail, meaning the sigma cannot be calculated to perform the
        sigma-clipping.

        Parameters
        ----------
        dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
            The dataset containing the means, variances and exposure times.

        Returns
        -------
        dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
            This is the same dataset as the input parameter, however,
            it has been modified to include information such as the
            fit vectors and the fit parameters. See the class
            `PhotonTransferCurveDataset`.

        Raises
        ------
        RuntimeError
            Raised if dataset.ptcFitType is None or empty.
        """
        if dataset.ptcFitType:
            ptcFitType = dataset.ptcFitType
        else:
            raise RuntimeError("ptcFitType is None or empty in PTC dataset.")
        matrixSide = self.config.maximumRangeCovariancesAstier
        nanMatrix = np.empty((matrixSide, matrixSide))
        nanMatrix[:] = np.nan

        # Pre-fill the covariance-model entries (only meaningful for the
        # FULLCOVARIANCE fit) with NaNs of the proper shape.
        for amp in dataset.ampNames:
            lenInputTimes = len(dataset.rawExpTimes[amp])
            listNanMatrix = np.empty((lenInputTimes, matrixSide, matrixSide))
            listNanMatrix[:] = np.nan

            dataset.covariancesModel[amp] = listNanMatrix
            dataset.aMatrix[amp] = nanMatrix
            dataset.bMatrix[amp] = nanMatrix
            dataset.covariancesModelNoB[amp] = listNanMatrix
            dataset.aMatrixNoB[amp] = nanMatrix

        def errFunc(p, x, y):
            # Residuals for scipy.optimize.least_squares.
            return ptcFunc(p, x) - y

        sigmaCutPtcOutliers = self.config.sigmaCutPtcOutliers
        maxIterationsPtcOutliers = self.config.maxIterationsPtcOutliers

        for ampName in dataset.ampNames:
            timeVecOriginal = np.ravel(np.array(dataset.rawExpTimes[ampName]))
            meanVecOriginal = np.ravel(np.array(dataset.rawMeans[ampName]))
            varVecOriginal = np.ravel(np.array(dataset.rawVars[ampName]))
            varVecOriginal = self._makeZeroSafe(varVecOriginal)

            goodPoints = self._getInitialGoodPoints(meanVecOriginal, varVecOriginal,
                                                    self.config.initialNonLinearityExclusionThresholdPositive,
                                                    self.config.initialNonLinearityExclusionThresholdNegative,
                                                    self.config.minMeanRatioTest,
                                                    self.config.minVarPivotSearch)
            if not goodPoints.any():
                msg = (f"SERIOUS: All points in goodPoints: {goodPoints} are bad. "
                       f"Setting {ampName} to BAD.")
                self.log.warning(msg)
                # Fill entries with NaNs.
                self.fillBadAmp(dataset, ptcFitType, ampName)
                continue

            mask = goodPoints

            if ptcFitType == 'EXPAPPROXIMATION':
                ptcFunc = funcAstier
                parsIniPtc = [-1e-9, 1.0, 10.]  # a00, gain, noise^2
                # lowers and uppers obtained from BOT data studies by
                # C. Lage (UC Davis, 11/2020).
                bounds = self._boundsForAstier(parsIniPtc, lowers=[-1e-4, 0.5, -2000],
                                               uppers=[1e-4, 2.5, 2000])
            if ptcFitType == 'POLYNOMIAL':
                ptcFunc = funcPolynomial
                parsIniPtc = self._initialParsForPolynomial(self.config.polynomialFitDegree + 1)
                bounds = self._boundsForPolynomial(parsIniPtc)

            # Before bootstrap fit, do an iterative fit to get rid of
            # outliers. Initialize pars/newMask so that the code after the
            # loop is well-defined even if maxIterationsPtcOutliers == 0.
            count = 1
            pars = parsIniPtc
            newMask = mask
            while count <= maxIterationsPtcOutliers:
                # Note that application of the mask actually shrinks the array
                # to size rather than setting elements to zero (as we want) so
                # always update mask itself and re-apply to the original data.
                meanTempVec = meanVecOriginal[mask]
                varTempVec = varVecOriginal[mask]
                res = least_squares(errFunc, parsIniPtc, bounds=bounds, args=(meanTempVec, varTempVec))
                pars = res.x

                # Change this to the original from the temp because
                # the masks are ANDed meaning once a point is masked
                # it's always masked, and the masks must always be the
                # same length for broadcasting.
                sigResids = (varVecOriginal - ptcFunc(pars, meanVecOriginal))/np.sqrt(varVecOriginal)
                newMask = np.abs(sigResids) < sigmaCutPtcOutliers
                mask = mask & newMask
                if not (mask.any() and newMask.any()):
                    msg = (f"SERIOUS: All points in either mask: {mask} or newMask: {newMask} are bad. "
                           f"Setting {ampName} to BAD.")
                    self.log.warning(msg)
                    # Fill entries with NaNs.
                    self.fillBadAmp(dataset, ptcFitType, ampName)
                    break
                nDroppedTotal = Counter(mask)[False]
                self.log.debug("Iteration %d: discarded %d points in total for %s",
                               count, nDroppedTotal, ampName)
                count += 1
                # Objects should never shrink.
                assert (len(mask) == len(timeVecOriginal) == len(meanVecOriginal) == len(varVecOriginal))
            if not (mask.any() and newMask.any()):
                # Amp was declared BAD inside the loop; already filled.
                continue
            dataset.expIdMask[ampName] = np.array(dataset.expIdMask[ampName])
            # Store the final mask.
            if len(dataset.expIdMask[ampName]):
                dataset.expIdMask[ampName] &= mask  # bitwise_and if there is already a mask
            else:
                dataset.expIdMask[ampName] = mask
            parsIniPtc = pars
            meanVecFinal = meanVecOriginal[mask]
            varVecFinal = varVecOriginal[mask]

            if Counter(mask)[False] > 0:
                self.log.info("Number of points discarded in PTC of amplifier %s:"
                              " %d out of %d", ampName, Counter(mask)[False], len(meanVecOriginal))

            if (len(meanVecFinal) < len(parsIniPtc)):
                msg = (f"SERIOUS: Not enough data points ({len(meanVecFinal)}) compared to the number of "
                       f"parameters of the PTC model({len(parsIniPtc)}). Setting {ampName} to BAD.")
                self.log.warning(msg)
                # Fill entries with NaNs.
                self.fillBadAmp(dataset, ptcFitType, ampName)
                continue
            # Fit the PTC.
            if self.config.doFitBootstrap:
                parsFit, parsFitErr, reducedChiSqPtc = fitBootstrap(parsIniPtc, meanVecFinal,
                                                                    varVecFinal, ptcFunc,
                                                                    weightsY=1./np.sqrt(varVecFinal))
            else:
                parsFit, parsFitErr, reducedChiSqPtc = fitLeastSq(parsIniPtc, meanVecFinal,
                                                                  varVecFinal, ptcFunc,
                                                                  weightsY=1./np.sqrt(varVecFinal))
            dataset.ptcFitPars[ampName] = parsFit
            dataset.ptcFitParsError[ampName] = parsFitErr
            dataset.ptcFitChiSq[ampName] = reducedChiSqPtc
            # Masked variances (measured and modeled) and means. Need
            # to pad the array so astropy.Table does not crash (the
            # mask may vary per amp).
            padLength = len(dataset.rawExpTimes[ampName]) - len(varVecFinal)
            dataset.finalVars[ampName] = np.pad(varVecFinal, (0, padLength), 'constant',
                                                constant_values=np.nan)
            dataset.finalModelVars[ampName] = np.pad(ptcFunc(parsFit, meanVecFinal), (0, padLength),
                                                     'constant', constant_values=np.nan)
            dataset.finalMeans[ampName] = np.pad(meanVecFinal, (0, padLength), 'constant',
                                                 constant_values=np.nan)
            if ptcFitType == 'EXPAPPROXIMATION':
                # Eq. 16 parameters: [a00, gain, noise^2].
                ptcGain = parsFit[1]
                ptcGainErr = parsFitErr[1]
                ptcNoise = np.sqrt(np.fabs(parsFit[2]))
                ptcNoiseErr = 0.5*(parsFitErr[2]/np.fabs(parsFit[2]))*np.sqrt(np.fabs(parsFit[2]))
            if ptcFitType == 'POLYNOMIAL':
                # Linear coefficient is 1/gain; constant term is noise^2
                # in ADU^2, converted to electrons via the gain.
                ptcGain = 1./parsFit[1]
                ptcGainErr = np.fabs(1./parsFit[1])*(parsFitErr[1]/parsFit[1])
                ptcNoise = np.sqrt(np.fabs(parsFit[0]))*ptcGain
                ptcNoiseErr = (0.5*(parsFitErr[0]/np.fabs(parsFit[0]))*(np.sqrt(np.fabs(parsFit[0]))))*ptcGain
            dataset.gain[ampName] = ptcGain
            dataset.gainErr[ampName] = ptcGainErr
            dataset.noise[ampName] = ptcNoise
            dataset.noiseErr[ampName] = ptcNoiseErr

        if not len(dataset.ptcFitType) == 0:
            dataset.ptcFitType = ptcFitType
        if len(dataset.badAmps) == 0:
            dataset.badAmps = np.repeat(np.nan, len(list(dataset.rawExpTimes.values())[0]))

        return dataset

    def fillBadAmp(self, dataset, ptcFitType, ampName):
        """Fill the dataset with NaNs if there are not enough good points.

        Parameters
        ----------
        dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
            The dataset containing the means, variances and exposure times.
        ptcFitType : {'POLYNOMIAL', 'EXPAPPROXIMATION'}
            Fit a 'POLYNOMIAL' (degree: 'polynomialFitDegree') or
            'EXPAPPROXIMATION' (Eq. 16 of Astier+19) to the PTC.
        ampName : `str`
            Amplifier name.
        """
        dataset.badAmps.append(ampName)
        dataset.expIdMask[ampName] = np.repeat(False, len(dataset.rawExpTimes[ampName]))
        dataset.gain[ampName] = np.nan
        dataset.gainErr[ampName] = np.nan
        dataset.noise[ampName] = np.nan
        dataset.noiseErr[ampName] = np.nan
        # Parameter-vector length depends on the model that was being fit.
        dataset.ptcFitPars[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1) if
                                       ptcFitType in ["POLYNOMIAL", ] else np.repeat(np.nan, 3))
        dataset.ptcFitParsError[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1) if
                                            ptcFitType in ["POLYNOMIAL", ] else np.repeat(np.nan, 3))
        dataset.ptcFitChiSq[ampName] = np.nan
        dataset.finalVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
        dataset.finalModelVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
        dataset.finalMeans[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))