Hide keyboard shortcuts

Hot-keys on this page

r m x p   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

1# This file is part of cp_pipe. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (https://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# This program is free software: you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation, either version 3 of the License, or 

12# (at your option) any later version. 

13# 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18# 

19# You should have received a copy of the GNU General Public License 

20# along with this program. If not, see <https://www.gnu.org/licenses/>. 

21# 

22import numpy as np 

23from collections import Counter 

24 

25import lsst.pex.config as pexConfig 

26import lsst.pipe.base as pipeBase 

27from lsst.cp.pipe.utils import (fitLeastSq, fitBootstrap, funcPolynomial, funcAstier) 

28 

29from scipy.optimize import least_squares 

30 

31import lsst.pipe.base.connectionTypes as cT 

32 

33from .astierCovPtcUtils import fitDataFullCovariance 

34 

35from lsst.ip.isr import PhotonTransferCurveDataset 

36 

37from lsst.cp.pipe._lookupStaticCalibration import lookupStaticCalibration 

38 

39import copy 

40 

41 

42__all__ = ['PhotonTransferCurveSolveConfig', 'PhotonTransferCurveSolveTask'] 

43 

44 

class PhotonTransferCurveSolveConnections(pipeBase.PipelineTaskConnections,
                                          dimensions=("instrument", "detector")):
    """Butler connections for `PhotonTransferCurveSolveTask`.

    Gathers the per-exposure-pair partial PTC datasets for one detector and
    declares the single assembled/fitted PTC dataset produced per detector.
    """
    # Partial PTC datasets produced upstream, one per input exposure pair;
    # `multiple=True` so all pairs for this detector arrive as a list.
    inputCovariances = cT.Input(
        name="ptcCovariances",
        doc="Tuple with measured covariances from flats.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
    )
    # Static camera geometry, resolved via the static-calibration lookup
    # rather than a temporal search.
    camera = cT.PrerequisiteInput(
        name="camera",
        doc="Camera the input data comes from.",
        storageClass="Camera",
        dimensions=("instrument",),
        isCalibration=True,
        lookupFunction=lookupStaticCalibration,
    )
    # NOTE(review): "ptcDatsetProposal" looks like a typo for
    # "ptcDatasetProposal", but this is a registered butler dataset type
    # name -- confirm downstream consumers before renaming.
    outputPtcDataset = cT.Output(
        name="ptcDatsetProposal",
        doc="Output proposed ptc dataset.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "detector"),
        multiple=False,
        isCalibration=True,
    )

70 

71 

class PhotonTransferCurveSolveConfig(pipeBase.PipelineTaskConfig,
                                     pipelineConnections=PhotonTransferCurveSolveConnections):
    """Configuration for fitting measured covariances.

    Field defaults are unchanged from the original; only user-facing help
    strings with typos were corrected.
    """
    # Model selection for the PTC fit.
    ptcFitType = pexConfig.ChoiceField(
        dtype=str,
        doc="Fit PTC to Eq. 16, Eq. 20 in Astier+19, or to a polynomial.",
        default="POLYNOMIAL",
        allowed={
            "POLYNOMIAL": "n-degree polynomial (use 'polynomialFitDegree' to set 'n').",
            "EXPAPPROXIMATION": "Approximation in Astier+19 (Eq. 16).",
            "FULLCOVARIANCE": "Full covariances model in Astier+19 (Eq. 20)"
        }
    )
    # Parameters specific to the FULLCOVARIANCE (Astier+19 Eq. 20) fit.
    maximumRangeCovariancesAstier = pexConfig.Field(
        dtype=int,
        doc="Maximum range of covariances as in Astier+19",
        default=8,
    )
    sigmaClipFullFitCovariancesAstier = pexConfig.Field(
        dtype=float,
        doc="sigma clip for full model fit for FULLCOVARIANCE ptcFitType",
        default=5.0,
    )
    maxIterFullFitCovariancesAstier = pexConfig.Field(
        dtype=int,
        doc="Maximum number of iterations in full model fit for FULLCOVARIANCE ptcFitType",
        default=3,
    )
    # Parameters for the POLYNOMIAL fit.
    polynomialFitDegree = pexConfig.Field(
        dtype=int,
        doc="Degree of polynomial to fit the PTC, when 'ptcFitType'=POLYNOMIAL.",
        default=3,
    )
    # Iterative outlier rejection applied before the final fit.
    sigmaCutPtcOutliers = pexConfig.Field(
        dtype=float,
        doc="Sigma cut for outlier rejection in PTC.",
        default=5.0,
    )
    maxIterationsPtcOutliers = pexConfig.Field(
        dtype=int,
        doc="Maximum number of iterations for outlier rejection in PTC.",
        default=2,
    )
    # Initial (pre-sigma-clip) screening of grossly non-linear points.
    initialNonLinearityExclusionThresholdPositive = pexConfig.RangeField(
        dtype=float,
        doc="Initially exclude data points with a variance that are more than a factor of this from being"
            " linear in the positive direction, from the PTC fit. Note that these points will also be"
            " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
            " to allow an accurate determination of the sigmas for said iterative fit.",
        default=0.05,
        min=0.0,
        max=1.0,
    )
    initialNonLinearityExclusionThresholdNegative = pexConfig.RangeField(
        dtype=float,
        doc="Initially exclude data points with a variance that are more than a factor of this from being"
            " linear in the negative direction, from the PTC fit. Note that these points will also be"
            " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
            " to allow an accurate determination of the sigmas for said iterative fit.",
        default=0.25,
        min=0.0,
        max=1.0,
    )
    minMeanRatioTest = pexConfig.Field(
        dtype=float,
        doc="In the initial test to screen out bad points with a ratio test, points with low"
            " flux can get inadvertently screened. This test only screens out points with flux"
            " above this value.",
        default=20000,
    )
    minVarPivotSearch = pexConfig.Field(
        dtype=float,
        doc="The code looks for a pivot signal point after which the variance starts decreasing at high-flux"
            " to exclude them from the PTC model fit. However, sometimes at low fluxes, the variance"
            " decreases slightly. Set this variable for the variance value, in ADU^2, after which the pivot "
            " should be sought.",
        default=10000,
    )
    doFitBootstrap = pexConfig.Field(
        dtype=bool,
        doc="Use bootstrap for the PTC fit parameters and errors?",
        default=False,
    )

156 

157 

158class PhotonTransferCurveSolveTask(pipeBase.PipelineTask, 

159 pipeBase.CmdLineTask): 

160 """Task to fit the PTC from flat covariances. 

161 This task assembles the list of individual PTC datasets produced 

162 by `PhotonTransferCurveSolveTask` into one single final PTC dataset. 

163 The task fits the measured (co)variances to a polynomial model or to 

164 the models described in equations 16 and 20 of Astier+19 

165 (referred to as `POLYNOMIAL`, `EXPAPPROXIMATION`, and `FULLCOVARIANCE` 

166 in the configuration options of the task, respectively). Parameters 

    of interest such as the gain and noise are derived from the fits.

168 

169 Astier+19: "The Shape of the Photon Transfer Curve 

170 of CCD sensors", arXiv:1905.08677 

171 """ 

172 ConfigClass = PhotonTransferCurveSolveConfig 

173 _DefaultName = 'cpPhotonTransferCurveSolve' 

174 

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        """Ensure that the input and output dimensions are passed along.

        Parameters
        ----------
        butlerQC : `~lsst.daf.butler.butlerQuantumContext.ButlerQuantumContext`
            Butler to operate on.
        inputRefs : `~lsst.pipe.base.connections.InputQuantizedConnection`
            Input data refs to load.
        outputRefs : `~lsst.pipe.base.connections.OutputQuantizedConnection`
            Output data refs to persist.
        """
        inputs = butlerQC.get(inputRefs)
        # Delegate all the work to run(); the camera is used for metadata.
        outputs = self.run(inputCovariances=inputs['inputCovariances'], camera=inputs['camera'])
        butlerQC.put(outputs, outputRefs)

190 

191 def run(self, inputCovariances, camera=None, inputExpList=None): 

192 """Fit measure covariances to different models. 

193 

194 Parameters 

195 ---------- 

196 inputCovariances : `list` [`lsst.ip.isr.PhotonTransferCurveDataset`] 

197 List of lsst.ip.isr.PhotonTransferCurveDataset datasets. 

198 

199 camera : `lsst.afw.cameraGeom.Camera`, optional 

200 Input camera. 

201 

202 inputExpList : `list` [`~lsst.afw.image.exposure.exposure.ExposureF`], optional 

203 List of exposures. 

204 

205 Returns 

206 ------- 

207 results : `lsst.pipe.base.Struct` 

208 The results struct containing: 

209 ``outputPtcDatset`` : `lsst.ip.isr.PhotonTransferCurveDataset` 

210 Final PTC dataset, containing information such as the means, variances, 

211 and exposure times. 

212 """ 

213 # Assemble partial PTC datasets into a single dataset. 

214 ampNames = np.unique(inputCovariances[0].ampNames) 

215 datasetPtc = PhotonTransferCurveDataset(ampNames, self.config.ptcFitType, 

216 self.config.maximumRangeCovariancesAstier) 

217 for partialPtcDataset in inputCovariances: 

218 if partialPtcDataset.ptcFitType == 'DUMMY': 

219 continue 

220 for ampName in ampNames: 

221 datasetPtc.inputExpIdPairs[ampName].append(partialPtcDataset.inputExpIdPairs[ampName]) 

222 if type(partialPtcDataset.rawExpTimes[ampName]) is list: 

223 datasetPtc.rawExpTimes[ampName].append(partialPtcDataset.rawExpTimes[ampName][0]) 

224 else: 

225 datasetPtc.rawExpTimes[ampName].append(partialPtcDataset.rawExpTimes[ampName]) 

226 if type(partialPtcDataset.rawMeans[ampName]) is list: 

227 datasetPtc.rawMeans[ampName].append(partialPtcDataset.rawMeans[ampName][0]) 

228 else: 

229 datasetPtc.rawMeans[ampName].append(partialPtcDataset.rawMeans[ampName]) 

230 if type(partialPtcDataset.rawVars[ampName]) is list: 

231 datasetPtc.rawVars[ampName].append(partialPtcDataset.rawVars[ampName][0]) 

232 else: 

233 datasetPtc.rawVars[ampName].append(partialPtcDataset.rawVars[ampName]) 

234 if type(partialPtcDataset.expIdMask[ampName]) is list: 

235 datasetPtc.expIdMask[ampName].append(partialPtcDataset.expIdMask[ampName][0]) 

236 else: 

237 datasetPtc.expIdMask[ampName].append(partialPtcDataset.expIdMask[ampName]) 

238 datasetPtc.covariances[ampName].append(np.array(partialPtcDataset.covariances[ampName][0])) 

239 datasetPtc.covariancesSqrtWeights[ampName].append( 

240 np.array(partialPtcDataset.covariancesSqrtWeights[ampName][0])) 

241 # Sort arrays that are filled so far in the final dataset by rawMeans index 

242 for ampName in ampNames: 

243 index = np.argsort(np.ravel(np.array(datasetPtc.rawMeans[ampName]))) 

244 datasetPtc.inputExpIdPairs[ampName] = np.array(datasetPtc.inputExpIdPairs[ampName])[index] 

245 datasetPtc.rawExpTimes[ampName] = np.array(datasetPtc.rawExpTimes[ampName])[index] 

246 datasetPtc.rawMeans[ampName] = np.array(datasetPtc.rawMeans[ampName])[index] 

247 datasetPtc.rawVars[ampName] = np.array(datasetPtc.rawVars[ampName])[index] 

248 datasetPtc.expIdMask[ampName] = np.array(datasetPtc.expIdMask[ampName])[index] 

249 datasetPtc.covariances[ampName] = np.array(datasetPtc.covariances[ampName])[index] 

250 datasetPtc.covariancesSqrtWeights[ampName] = np.array( 

251 datasetPtc.covariancesSqrtWeights[ampName])[index] 

252 if self.config.ptcFitType == "FULLCOVARIANCE": 

253 # Calculate covariances and fit them, including the PTC, to Astier+19 full model (Eq. 20) 

254 # First, fit get the flat pairs that are masked, fitting C_00 vs mu to 

255 # the EXPAPPROXIMATION model (Eq. 16 in Astier+19). 

256 # The points at these fluxes will also be masked when calculating the other covariances, C_ij) 

257 tempDatasetPtc = copy.copy(datasetPtc) 

258 tempDatasetPtc.ptcFitType = "EXPAPPROXIMATION" 

259 tempDatasetPtc = self.fitPtc(tempDatasetPtc) 

260 for ampName in datasetPtc.ampNames: 

261 datasetPtc.expIdMask[ampName] = tempDatasetPtc.expIdMask[ampName] 

262 datasetPtc.fitType = "FULLCOVARIANCE" 

263 datasetPtc = self.fitCovariancesAstier(datasetPtc) 

264 # The other options are: self.config.ptcFitType in ("EXPAPPROXIMATION", "POLYNOMIAL") 

265 else: 

266 # Fit the PTC to a polynomial or to Astier+19 exponential approximation (Eq. 16). 

267 # Fill up PhotonTransferCurveDataset object. 

268 datasetPtc = self.fitPtc(datasetPtc) 

269 if inputExpList is not None: 

270 # It should be a list of exposures, to get the detector. 

271 detector = inputExpList[0].getDetector() 

272 else: 

273 detector = None 

274 datasetPtc.updateMetadata(setDate=True, camera=camera, detector=detector) 

275 

276 return pipeBase.Struct( 

277 outputPtcDataset=datasetPtc, 

278 ) 

279 

280 def fitCovariancesAstier(self, dataset): 

281 """Fit measured flat covariances to full model in Astier+19. 

282 

283 Parameters 

284 ---------- 

285 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset` 

286 The dataset containing information such as the means, (co)variances, 

287 and exposure times. 

288 

289 Returns 

290 ------- 

291 dataset: `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset` 

292 This is the same dataset as the input paramter, however, it has been modified 

293 to include information such as the fit vectors and the fit parameters. See 

294 the class `PhotonTransferCurveDatase`. 

295 """ 

296 

297 covFits, covFitsNoB = fitDataFullCovariance(dataset) 

298 dataset = self.getOutputPtcDataCovAstier(dataset, covFits, covFitsNoB) 

299 

300 return dataset 

301 

302 def getOutputPtcDataCovAstier(self, dataset, covFits, covFitsNoB): 

303 """Get output data for PhotonTransferCurveCovAstierDataset from CovFit objects. 

304 

305 Parameters 

306 ---------- 

307 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset` 

308 The dataset containing information such as the means, variances and exposure times. 

309 covFits: `dict` 

310 Dictionary of CovFit objects, with amp names as keys. 

311 covFitsNoB : `dict` 

312 Dictionary of CovFit objects, with amp names as keys, and 'b=0' in Eq. 20 of Astier+19. 

313 

314 Returns 

315 ------- 

316 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset` 

317 This is the same dataset as the input paramter, however, it has been modified 

318 to include extra information such as the mask 1D array, gains, reoudout noise, measured signal, 

319 measured variance, modeled variance, a, and b coefficient matrices (see Astier+19) per amplifier. 

320 See the class `PhotonTransferCurveDatase`. 

321 """ 

322 assert(len(covFits) == len(covFitsNoB)) 

323 

324 for i, amp in enumerate(dataset.ampNames): 

325 lenInputTimes = len(dataset.rawExpTimes[amp]) 

326 # Not used when ptcFitType is 'FULLCOVARIANCE' 

327 dataset.ptcFitPars[amp] = [np.nan] 

328 dataset.ptcFitParsError[amp] = [np.nan] 

329 dataset.ptcFitChiSq[amp] = np.nan 

330 if amp in covFits: 

331 fit = covFits[amp] 

332 fitNoB = covFitsNoB[amp] 

333 # Save full covariances, covariances models, and their weights 

334 # dataset.expIdMask is already full 

335 dataset.covariances[amp] = fit.cov 

336 dataset.covariancesModel[amp] = fit.evalCovModel() 

337 dataset.covariancesSqrtWeights[amp] = fit.sqrtW 

338 dataset.aMatrix[amp] = fit.getA() 

339 dataset.bMatrix[amp] = fit.getB() 

340 dataset.covariancesModelNoB[amp] = fitNoB.evalCovModel() 

341 dataset.aMatrixNoB[amp] = fitNoB.getA() 

342 

343 (meanVecFinal, varVecFinal, varVecModel, 

344 wc, varMask) = fit.getFitData(0, 0, divideByMu=False) 

345 gain = fit.getGain() 

346 

347 dataset.gain[amp] = gain 

348 dataset.gainErr[amp] = fit.getGainErr() 

349 dataset.noise[amp] = np.sqrt(fit.getRon()) 

350 dataset.noiseErr[amp] = fit.getRonErr() 

351 dataset.finalVars[amp] = varVecFinal 

352 dataset.finalModelVars[amp] = varVecModel 

353 dataset.finalMeans[amp] = meanVecFinal 

354 

355 else: 

356 # Bad amp 

357 # Entries need to have proper dimensions so read/write with astropy.Table works. 

358 matrixSide = self.config.maximumRangeCovariancesAstier 

359 nanMatrix = np.full((matrixSide, matrixSide), np.nan) 

360 listNanMatrix = np.full((lenInputTimes, matrixSide, matrixSide), np.nan) 

361 

362 dataset.covariances[amp] = listNanMatrix 

363 dataset.covariancesModel[amp] = listNanMatrix 

364 dataset.covariancesSqrtWeights[amp] = listNanMatrix 

365 dataset.aMatrix[amp] = nanMatrix 

366 dataset.bMatrix[amp] = nanMatrix 

367 dataset.covariancesModelNoB[amp] = listNanMatrix 

368 dataset.aMatrixNoB[amp] = nanMatrix 

369 

370 dataset.expIdMask[amp] = np.repeat(np.nan, lenInputTimes) 

371 dataset.gain[amp] = np.nan 

372 dataset.gainErr[amp] = np.nan 

373 dataset.noise[amp] = np.nan 

374 dataset.noiseErr[amp] = np.nan 

375 dataset.finalVars[amp] = np.repeat(np.nan, lenInputTimes) 

376 dataset.finalModelVars[amp] = np.repeat(np.nan, lenInputTimes) 

377 dataset.finalMeans[amp] = np.repeat(np.nan, lenInputTimes) 

378 

379 return dataset 

380 

381 @staticmethod 

382 def _initialParsForPolynomial(order): 

383 assert(order >= 2) 

384 pars = np.zeros(order, dtype=float) 

385 pars[0] = 10 

386 pars[1] = 1 

387 pars[2:] = 0.0001 

388 return pars 

389 

390 @staticmethod 

391 def _boundsForPolynomial(initialPars, lowers=[], uppers=[]): 

392 if not len(lowers): 

393 lowers = [np.NINF for p in initialPars] 

394 if not len(uppers): 

395 uppers = [np.inf for p in initialPars] 

396 lowers[1] = 0 # no negative gains 

397 return (lowers, uppers) 

398 

399 @staticmethod 

400 def _boundsForAstier(initialPars, lowers=[], uppers=[]): 

401 if not len(lowers): 

402 lowers = [np.NINF for p in initialPars] 

403 if not len(uppers): 

404 uppers = [np.inf for p in initialPars] 

405 return (lowers, uppers) 

406 

407 @staticmethod 

408 def _getInitialGoodPoints(means, variances, maxDeviationPositive, maxDeviationNegative, 

409 minMeanRatioTest, minVarPivotSearch): 

410 """Return a boolean array to mask bad points. 

411 

412 Parameters 

413 ---------- 

414 means : `numpy.array` 

415 Input array with mean signal values. 

416 variances : `numpy.array` 

417 Input array with variances at each mean value. 

418 maxDeviationPositive : `float` 

419 Maximum deviation from being constant for the variance/mean 

420 ratio, in the positive direction. 

421 maxDeviationNegative : `float` 

422 Maximum deviation from being constant for the variance/mean 

423 ratio, in the negative direction. 

424 minMeanRatioTest : `float` 

425 Minimum signal value (in ADU) after which to start examining 

426 the ratios var/mean. 

427 minVarPivotSearch : `float` 

428 Minimum variance point (in ADU^2) after which the pivot point 

429 wher the variance starts decreasing should be sought. 

430 

431 Returns 

432 ------ 

433 goodPoints : `numpy.array` [`bool`] 

434 Boolean array to select good (`True`) and bad (`False`) 

435 points. 

436 

437 Notes 

438 ----- 

439 A linear function has a constant ratio, so find the median 

440 value of the ratios, and exclude the points that deviate 

441 from that by more than a factor of maxDeviationPositive/negative. 

442 Asymmetric deviations are supported as we expect the PTC to turn 

443 down as the flux increases, but sometimes it anomalously turns 

444 upwards just before turning over, which ruins the fits, so it 

445 is wise to be stricter about restricting positive outliers than 

446 negative ones. 

447 Too high and points that are so bad that fit will fail will be included 

448 Too low and the non-linear points will be excluded, biasing the NL fit. 

449 This function also masks points after the variance starts decreasing. 

450 """ 

451 

452 assert(len(means) == len(variances)) 

453 ratios = [b/a for (a, b) in zip(means, variances)] 

454 medianRatio = np.nanmedian(ratios) 

455 ratioDeviations = [0.0 if a < minMeanRatioTest else (r/medianRatio)-1 

456 for (a, r) in zip(means, ratios)] 

457 

458 # so that it doesn't matter if the deviation is expressed as positive or negative 

459 maxDeviationPositive = abs(maxDeviationPositive) 

460 maxDeviationNegative = -1. * abs(maxDeviationNegative) 

461 

462 goodPoints = np.array([True if (r < maxDeviationPositive and r > maxDeviationNegative) 

463 else False for r in ratioDeviations]) 

464 

465 # Eliminate points beyond which the variance decreases 

466 pivot = np.where(np.array(np.diff(variances)) < 0)[0] 

467 if len(pivot) > 0: 

468 # For small values, sometimes the variance decreases slightly 

469 # Only look when var > self.config.minVarPivotSearch 

470 pivot = [p for p in pivot if variances[p] > minVarPivotSearch] 

471 if len(pivot) > 0: 

472 pivot = np.min(pivot) 

473 goodPoints[pivot+1:len(goodPoints)] = False 

474 

475 return goodPoints 

476 

477 def _makeZeroSafe(self, array, substituteValue=1e-9): 

478 """""" 

479 array = np.array(array) 

480 nBad = Counter(np.ravel(array))[0] 

481 if nBad == 0: 

482 return array 

483 

484 index, = np.where(array == 0) 

485 if len(index): 

486 msg = f"Found {nBad} zeros in array at elements {index}" 

487 self.log.warn(msg) 

488 

489 array[index] = substituteValue 

490 

491 return array 

492 

    def fitPtc(self, dataset):
        """Fit the photon transfer curve to a polynomial or to Astier+19 approximation.

        Fit the photon transfer curve with either a polynomial of the order
        specified in the task config, or using the exponential approximation
        in Astier+19 (Eq. 16).

        Sigma clipping is performed iteratively for the fit, as well as an
        initial clipping of data points that are more than
        config.initialNonLinearityExclusionThreshold away from lying on a
        straight line. This other step is necessary because the photon transfer
        curve turns over catastrophically at very high flux (because saturation
        drops the variance to ~0) and these far outliers cause the initial fit
        to fail, meaning the sigma cannot be calculated to perform the
        sigma-clipping.

        Parameters
        ----------
        dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
            The dataset containing the means, variances and exposure times.

        Returns
        -------
        dataset: `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
            This is the same dataset as the input parameter, however, it has been modified
            to include information such as the fit vectors and the fit parameters. See
            the class `PhotonTransferCurveDatase`.

        Raises
        ------
        RuntimeError:
            Raises if dataset.ptcFitType is None or empty.
        """
        if dataset.ptcFitType:
            ptcFitType = dataset.ptcFitType
        else:
            # NOTE(review): "None of empty" in this message is presumably a
            # typo for "None or empty".
            raise RuntimeError("ptcFitType is None of empty in PTC dataset.")
        matrixSide = self.config.maximumRangeCovariancesAstier
        nanMatrix = np.empty((matrixSide, matrixSide))
        nanMatrix[:] = np.nan

        # Pre-fill the covariance-model fields with NaNs; they are only
        # meaningfully populated by the FULLCOVARIANCE path.
        for amp in dataset.ampNames:
            lenInputTimes = len(dataset.rawExpTimes[amp])
            listNanMatrix = np.empty((lenInputTimes, matrixSide, matrixSide))
            listNanMatrix[:] = np.nan

            dataset.covariancesModel[amp] = listNanMatrix
            dataset.aMatrix[amp] = nanMatrix
            dataset.bMatrix[amp] = nanMatrix
            dataset.covariancesModelNoB[amp] = listNanMatrix
            dataset.aMatrixNoB[amp] = nanMatrix

        # Residuals for scipy.optimize.least_squares; ptcFunc is bound
        # inside the amp loop below before this is called.
        def errFunc(p, x, y):
            return ptcFunc(p, x) - y

        sigmaCutPtcOutliers = self.config.sigmaCutPtcOutliers
        maxIterationsPtcOutliers = self.config.maxIterationsPtcOutliers

        for i, ampName in enumerate(dataset.ampNames):
            timeVecOriginal = np.ravel(np.array(dataset.rawExpTimes[ampName]))
            meanVecOriginal = np.ravel(np.array(dataset.rawMeans[ampName]))
            varVecOriginal = np.ravel(np.array(dataset.rawVars[ampName]))
            # Protect the 1/sqrt(var) weights below from zero variances.
            varVecOriginal = self._makeZeroSafe(varVecOriginal)

            # Initial screen: ratio test plus high-flux turnover pivot.
            goodPoints = self._getInitialGoodPoints(meanVecOriginal, varVecOriginal,
                                                    self.config.initialNonLinearityExclusionThresholdPositive,
                                                    self.config.initialNonLinearityExclusionThresholdNegative,
                                                    self.config.minMeanRatioTest,
                                                    self.config.minVarPivotSearch)
            if not (goodPoints.any()):
                msg = (f"SERIOUS: All points in goodPoints: {goodPoints} are bad."
                       f"Setting {ampName} to BAD.")
                self.log.warn(msg)
                # Fill entries with NaNs
                self.fillBadAmp(dataset, ptcFitType, ampName)
                continue

            mask = goodPoints

            if ptcFitType == 'EXPAPPROXIMATION':
                ptcFunc = funcAstier
                parsIniPtc = [-1e-9, 1.0, 10.]  # a00, gain, noisei^2
                # lowers and uppers obtained from BOT data studies by C. Lage (UC Davis, 11/2020).
                bounds = self._boundsForAstier(parsIniPtc, lowers=[-1e-4, 0.5, -2000],
                                               uppers=[1e-4, 2.5, 2000])
            if ptcFitType == 'POLYNOMIAL':
                ptcFunc = funcPolynomial
                parsIniPtc = self._initialParsForPolynomial(self.config.polynomialFitDegree + 1)
                bounds = self._boundsForPolynomial(parsIniPtc)

            # Before bootstrap fit, do an iterative fit to get rid of outliers.
            # NOTE(review): if maxIterationsPtcOutliers < 1 this loop never
            # runs and `newMask` below is undefined (NameError) -- confirm
            # the config is always >= 1.
            count = 1
            while count <= maxIterationsPtcOutliers:
                # Note that application of the mask actually shrinks the array
                # to size rather than setting elements to zero (as we want) so
                # always update mask itself and re-apply to the original data
                meanTempVec = meanVecOriginal[mask]
                varTempVec = varVecOriginal[mask]
                res = least_squares(errFunc, parsIniPtc, bounds=bounds, args=(meanTempVec, varTempVec))
                pars = res.x

                # change this to the original from the temp because the masks are ANDed
                # meaning once a point is masked it's always masked, and the masks must
                # always be the same length for broadcasting
                sigResids = (varVecOriginal - ptcFunc(pars, meanVecOriginal))/np.sqrt(varVecOriginal)
                newMask = np.array([True if np.abs(r) < sigmaCutPtcOutliers else False for r in sigResids])
                mask = mask & newMask
                if not (mask.any() and newMask.any()):
                    msg = (f"SERIOUS: All points in either mask: {mask} or newMask: {newMask} are bad. "
                           f"Setting {ampName} to BAD.")
                    self.log.warn(msg)
                    # Fill entries with NaNs
                    self.fillBadAmp(dataset, ptcFitType, ampName)
                    break
                nDroppedTotal = Counter(mask)[False]
                self.log.debug(f"Iteration {count}: discarded {nDroppedTotal} points in total for {ampName}")
                count += 1
                # objects should never shrink
                assert (len(mask) == len(timeVecOriginal) == len(meanVecOriginal) == len(varVecOriginal))
            # Amp was declared bad inside the loop (break above): skip it.
            if not (mask.any() and newMask.any()):
                continue
            dataset.expIdMask[ampName] = np.array(dataset.expIdMask[ampName])
            # store the final mask
            if len(dataset.expIdMask[ampName]):
                dataset.expIdMask[ampName] &= mask  # bitwise_and if there is already a mask
            else:
                dataset.expIdMask[ampName] = mask
            # Seed the final fit with the last iterative-fit parameters.
            parsIniPtc = pars
            meanVecFinal = meanVecOriginal[mask]
            varVecFinal = varVecOriginal[mask]

            if Counter(mask)[False] > 0:
                self.log.info((f"Number of points discarded in PTC of amplifier {ampName}:"
                               f" {Counter(mask)[False]} out of {len(meanVecOriginal)}"))

            if (len(meanVecFinal) < len(parsIniPtc)):
                msg = (f"SERIOUS: Not enough data points ({len(meanVecFinal)}) compared to the number of "
                       f"parameters of the PTC model({len(parsIniPtc)}). Setting {ampName} to BAD.")
                self.log.warn(msg)
                # Fill entries with NaNs
                self.fillBadAmp(dataset, ptcFitType, ampName)
                continue
            # Fit the PTC
            if self.config.doFitBootstrap:
                parsFit, parsFitErr, reducedChiSqPtc = fitBootstrap(parsIniPtc, meanVecFinal,
                                                                    varVecFinal, ptcFunc,
                                                                    weightsY=1./np.sqrt(varVecFinal))
            else:
                parsFit, parsFitErr, reducedChiSqPtc = fitLeastSq(parsIniPtc, meanVecFinal,
                                                                  varVecFinal, ptcFunc,
                                                                  weightsY=1./np.sqrt(varVecFinal))
            dataset.ptcFitPars[ampName] = parsFit
            dataset.ptcFitParsError[ampName] = parsFitErr
            dataset.ptcFitChiSq[ampName] = reducedChiSqPtc
            # Masked variances (measured and modeled) and means. Need to pad the array so astropy.Table does
            # not crash (the mask may vary per amp).
            padLength = len(dataset.rawExpTimes[ampName]) - len(varVecFinal)
            dataset.finalVars[ampName] = np.pad(varVecFinal, (0, padLength), 'constant',
                                                constant_values=np.nan)
            dataset.finalModelVars[ampName] = np.pad(ptcFunc(parsFit, meanVecFinal), (0, padLength),
                                                     'constant', constant_values=np.nan)
            dataset.finalMeans[ampName] = np.pad(meanVecFinal, (0, padLength), 'constant',
                                                 constant_values=np.nan)
            # Derive gain and noise from the fitted parameters; the
            # parameterization differs between the two models.
            if ptcFitType == 'EXPAPPROXIMATION':
                ptcGain = parsFit[1]
                ptcGainErr = parsFitErr[1]
                ptcNoise = np.sqrt(np.fabs(parsFit[2]))
                ptcNoiseErr = 0.5*(parsFitErr[2]/np.fabs(parsFit[2]))*np.sqrt(np.fabs(parsFit[2]))
            if ptcFitType == 'POLYNOMIAL':
                ptcGain = 1./parsFit[1]
                ptcGainErr = np.fabs(1./parsFit[1])*(parsFitErr[1]/parsFit[1])
                ptcNoise = np.sqrt(np.fabs(parsFit[0]))*ptcGain
                ptcNoiseErr = (0.5*(parsFitErr[0]/np.fabs(parsFit[0]))*(np.sqrt(np.fabs(parsFit[0]))))*ptcGain
            dataset.gain[ampName] = ptcGain
            dataset.gainErr[ampName] = ptcGainErr
            dataset.noise[ampName] = ptcNoise
            dataset.noiseErr[ampName] = ptcNoiseErr

        if not len(dataset.ptcFitType) == 0:
            dataset.ptcFitType = ptcFitType
        if len(dataset.badAmps) == 0:
            dataset.badAmps = np.repeat(np.nan, len(list(dataset.rawExpTimes.values())[0]))

        return dataset

677 

678 def fillBadAmp(self, dataset, ptcFitType, ampName): 

679 """Fill the dataset with NaNs if there are not enough good points. 

680 

681 Parameters 

682 ---------- 

683 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset` 

684 The dataset containing the means, variances and exposure times. 

685 ptcFitType : `str` 

686 Fit a 'POLYNOMIAL' (degree: 'polynomialFitDegree') or 

687 'EXPAPPROXIMATION' (Eq. 16 of Astier+19) to the PTC. 

688 ampName : `str` 

689 Amplifier name. 

690 """ 

691 dataset.badAmps.append(ampName) 

692 dataset.expIdMask[ampName] = np.repeat(False, len(dataset.rawExpTimes[ampName])) 

693 dataset.gain[ampName] = np.nan 

694 dataset.gainErr[ampName] = np.nan 

695 dataset.noise[ampName] = np.nan 

696 dataset.noiseErr[ampName] = np.nan 

697 dataset.ptcFitPars[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1) if 

698 ptcFitType in ["POLYNOMIAL", ] else np.repeat(np.nan, 3)) 

699 dataset.ptcFitParsError[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1) if 

700 ptcFitType in ["POLYNOMIAL", ] else np.repeat(np.nan, 3)) 

701 dataset.ptcFitChiSq[ampName] = np.nan 

702 dataset.finalVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName])) 

703 dataset.finalModelVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName])) 

704 dataset.finalMeans[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName])) 

705 

706 return