Coverage for python/lsst/cp/pipe/ptc/cpSolvePtcTask.py: 13%

Shortcuts on this page

r m x p   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

299 statements  

1# This file is part of cp_pipe. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (https://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# This program is free software: you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation, either version 3 of the License, or 

12# (at your option) any later version. 

13# 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18# 

19# You should have received a copy of the GNU General Public License 

20# along with this program. If not, see <https://www.gnu.org/licenses/>. 

21# 

22import numpy as np 

23from collections import Counter 

24 

25import lsst.pex.config as pexConfig 

26import lsst.pipe.base as pipeBase 

27from lsst.cp.pipe.utils import (fitLeastSq, fitBootstrap, funcPolynomial, funcAstier) 

28 

29from scipy.optimize import least_squares 

30from itertools import groupby 

31from operator import itemgetter 

32 

33import lsst.pipe.base.connectionTypes as cT 

34 

35from .astierCovPtcUtils import fitDataFullCovariance 

36 

37from lsst.ip.isr import PhotonTransferCurveDataset 

38 

39from lsst.cp.pipe._lookupStaticCalibration import lookupStaticCalibration 

40 

41import copy 

42 

43 

44__all__ = ['PhotonTransferCurveSolveConfig', 'PhotonTransferCurveSolveTask'] 

45 

46 

class PhotonTransferCurveSolveConnections(pipeBase.PipelineTaskConnections,
                                          dimensions=("instrument", "detector")):
    """Connections for PhotonTransferCurveSolveTask: per-exposure partial
    PTC datasets in, one assembled per-detector PTC dataset out.
    """
    # Partial PTC datasets produced upstream, one per exposure (hence the
    # extra "exposure" dimension and multiple=True); gathered here into a
    # single final dataset.
    inputCovariances = cT.Input(
        name="ptcCovariances",
        doc="Tuple with measured covariances from flats.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
    )
    camera = cT.PrerequisiteInput(
        name="camera",
        doc="Camera the input data comes from.",
        storageClass="Camera",
        dimensions=("instrument",),
        isCalibration=True,
        lookupFunction=lookupStaticCalibration,
    )
    # NOTE(review): the dataset type name "ptcDatsetProposal" looks like a
    # typo ("Datset"), but it is the registered name used by existing
    # pipelines — renaming it would break them, so it is left as-is.
    outputPtcDataset = cT.Output(
        name="ptcDatsetProposal",
        doc="Output proposed ptc dataset.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "detector"),
        multiple=False,
        isCalibration=True,
    )

72 

73 

class PhotonTransferCurveSolveConfig(pipeBase.PipelineTaskConfig,
                                     pipelineConnections=PhotonTransferCurveSolveConnections):
    """Configuration for fitting measured covariances.
    """

    # Selects which PTC model fitPtc/fitCovariancesAstier will use.
    ptcFitType = pexConfig.ChoiceField(
        dtype=str,
        doc="Fit PTC to Eq. 16, Eq. 20 in Astier+19, or to a polynomial.",
        default="POLYNOMIAL",
        allowed={
            "POLYNOMIAL": "n-degree polynomial (use 'polynomialFitDegree' to set 'n').",
            "EXPAPPROXIMATION": "Approximation in Astier+19 (Eq. 16).",
            "FULLCOVARIANCE": "Full covariances model in Astier+19 (Eq. 20)"
        }
    )
    # Side length of the covariance matrices stored per flux point.
    maximumRangeCovariancesAstier = pexConfig.Field(
        dtype=int,
        doc="Maximum range of covariances as in Astier+19",
        default=8,
    )
    sigmaClipFullFitCovariancesAstier = pexConfig.Field(
        dtype=float,
        doc="sigma clip for full model fit for FULLCOVARIANCE ptcFitType ",
        default=5.0,
    )
    maxIterFullFitCovariancesAstier = pexConfig.Field(
        dtype=int,
        doc="Maximum number of iterations in full model fit for FULLCOVARIANCE ptcFitType",
        default=3,
    )
    polynomialFitDegree = pexConfig.Field(
        dtype=int,
        doc="Degree of polynomial to fit the PTC, when 'ptcFitType'=POLYNOMIAL.",
        default=3,
    )
    sigmaCutPtcOutliers = pexConfig.Field(
        dtype=float,
        doc="Sigma cut for outlier rejection in PTC.",
        default=5.0,
    )
    maxIterationsPtcOutliers = pexConfig.RangeField(
        dtype=int,
        doc="Maximum number of iterations for outlier rejection in PTC.",
        default=2,
        min=0
    )
    # Fixed doc typo: "exclude then from" -> "exclude them from".
    minVarPivotSearch = pexConfig.Field(
        dtype=float,
        doc="The code looks for a pivot signal point after which the variance starts decreasing at high-flux"
            " to exclude them from the PTC model fit. However, sometimes at low fluxes, the variance"
            " decreases slightly. Set this variable for the variance value, in ADU^2, after which the pivot "
            " should be sought.",
        default=10000,
    )
    consecutivePointsVarDecreases = pexConfig.RangeField(
        dtype=int,
        doc="Required number of consecutive points/fluxes in the PTC where the variance "
            "decreases in order to find a first estimate of the PTC turn-off. ",
        default=2,
        min=2
    )
    doFitBootstrap = pexConfig.Field(
        dtype=bool,
        doc="Use bootstrap for the PTC fit parameters and errors?.",
        default=False,
    )

140 

141 

class PhotonTransferCurveSolveTask(pipeBase.PipelineTask,
                                   pipeBase.CmdLineTask):
    """Task to fit the PTC from flat covariances.

    This task assembles the list of individual PTC datasets produced
    by the PTC extract task (``cpExtractPtcTask``) into one single
    final PTC dataset. The task fits the measured (co)variances to a
    polynomial model or to the models described in equations 16 and
    20 of Astier+19 (referred to as ``POLYNOMIAL``,
    ``EXPAPPROXIMATION``, and ``FULLCOVARIANCE`` in the configuration
    options of the task, respectively). Parameters of interest such
    as the gain and noise are derived from the fits.

    Astier+19: "The Shape of the Photon Transfer Curve
    of CCD sensors", arXiv:1905.08677
    """

    ConfigClass = PhotonTransferCurveSolveConfig
    _DefaultName = 'cpPhotonTransferCurveSolve'

161 

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        """Ensure that the input and output dimensions are passed along.

        Parameters
        ----------
        butlerQC : `~lsst.daf.butler.butlerQuantumContext.ButlerQuantumContext`
            Butler to operate on.
        inputRefs : `~lsst.pipe.base.connections.InputQuantizedConnection`
            Input data refs to load.
        outputRefs : `~lsst.pipe.base.connections.OutputQuantizedConnection`
            Output data refs to persist.
        """
        inputs = butlerQC.get(inputRefs)
        # Only the covariances and the camera are needed; the other
        # connection entries (if any) are ignored by run().
        outputs = self.run(inputCovariances=inputs['inputCovariances'], camera=inputs['camera'])
        butlerQC.put(outputs, outputRefs)

177 

    def run(self, inputCovariances, camera=None, inputExpList=None):
        """Fit measured covariances to different models.

        Parameters
        ----------
        inputCovariances : `list` [`lsst.ip.isr.PhotonTransferCurveDataset`]
            List of lsst.ip.isr.PhotonTransferCurveDataset datasets.

        camera : `lsst.afw.cameraGeom.Camera`, optional
            Input camera.

        inputExpList : `list` [`~lsst.afw.image.ExposureF`], optional
            List of exposures.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The results struct containing:

            ``outputPtcDataset``
                Final PTC dataset, containing information such as the
                means, variances, and exposure times
                (`lsst.ip.isr.PhotonTransferCurveDataset`).
        """
        # Assemble partial PTC datasets into a single dataset.
        ampNames = np.unique(inputCovariances[0].ampNames)
        datasetPtc = PhotonTransferCurveDataset(ampNames, self.config.ptcFitType,
                                                self.config.maximumRangeCovariancesAstier)
        for partialPtcDataset in inputCovariances:
            # 'DUMMY' datasets are placeholders emitted upstream for
            # exposures that could not be processed; skip them.
            if partialPtcDataset.ptcFitType == 'DUMMY':
                continue
            for ampName in ampNames:
                datasetPtc.inputExpIdPairs[ampName].append(partialPtcDataset.inputExpIdPairs[ampName])
                # Each partial dataset holds a single flux point per amp;
                # its per-amp entries may be stored either as a
                # one-element list or as a scalar, so unwrap accordingly.
                if type(partialPtcDataset.rawExpTimes[ampName]) is list:
                    datasetPtc.rawExpTimes[ampName].append(partialPtcDataset.rawExpTimes[ampName][0])
                else:
                    datasetPtc.rawExpTimes[ampName].append(partialPtcDataset.rawExpTimes[ampName])
                if type(partialPtcDataset.rawMeans[ampName]) is list:
                    datasetPtc.rawMeans[ampName].append(partialPtcDataset.rawMeans[ampName][0])
                else:
                    datasetPtc.rawMeans[ampName].append(partialPtcDataset.rawMeans[ampName])
                if type(partialPtcDataset.rawVars[ampName]) is list:
                    datasetPtc.rawVars[ampName].append(partialPtcDataset.rawVars[ampName][0])
                else:
                    datasetPtc.rawVars[ampName].append(partialPtcDataset.rawVars[ampName])
                if type(partialPtcDataset.expIdMask[ampName]) is list:
                    datasetPtc.expIdMask[ampName].append(partialPtcDataset.expIdMask[ampName][0])
                else:
                    datasetPtc.expIdMask[ampName].append(partialPtcDataset.expIdMask[ampName])
                datasetPtc.covariances[ampName].append(np.array(partialPtcDataset.covariances[ampName][0]))
                datasetPtc.covariancesSqrtWeights[ampName].append(
                    np.array(partialPtcDataset.covariancesSqrtWeights[ampName][0]))
        # Sort arrays that are filled so far in the final dataset by
        # rawMeans index
        for ampName in ampNames:
            index = np.argsort(np.ravel(np.array(datasetPtc.rawMeans[ampName])))
            datasetPtc.inputExpIdPairs[ampName] = np.array(datasetPtc.inputExpIdPairs[ampName])[index]
            datasetPtc.rawExpTimes[ampName] = np.array(datasetPtc.rawExpTimes[ampName])[index]
            datasetPtc.rawMeans[ampName] = np.array(datasetPtc.rawMeans[ampName])[index]
            datasetPtc.rawVars[ampName] = np.array(datasetPtc.rawVars[ampName])[index]
            datasetPtc.expIdMask[ampName] = np.array(datasetPtc.expIdMask[ampName])[index]
            datasetPtc.covariances[ampName] = np.array(datasetPtc.covariances[ampName])[index]
            datasetPtc.covariancesSqrtWeights[ampName] = np.array(
                datasetPtc.covariancesSqrtWeights[ampName])[index]
        if self.config.ptcFitType == "FULLCOVARIANCE":
            # Calculate covariances and fit them, including the PTC,
            # to Astier+19 full model (Eq. 20) First, fit get the flat
            # pairs that are masked, fitting C_00 vs mu to the
            # EXPAPPROXIMATION model (Eq. 16 in Astier+19). The
            # points at these fluxes will also be masked when
            # calculating the other covariances, C_ij)
            tempDatasetPtc = copy.copy(datasetPtc)
            tempDatasetPtc.ptcFitType = "EXPAPPROXIMATION"
            tempDatasetPtc = self.fitPtc(tempDatasetPtc)
            for ampName in datasetPtc.ampNames:
                datasetPtc.expIdMask[ampName] = tempDatasetPtc.expIdMask[ampName]
            # NOTE(review): this sets `fitType`, while the dataset
            # attribute read elsewhere in this file is `ptcFitType`
            # (already "FULLCOVARIANCE" here from the config). Confirm
            # this attribute name is intentional and not a typo.
            datasetPtc.fitType = "FULLCOVARIANCE"
            datasetPtc = self.fitCovariancesAstier(datasetPtc)
        # The other options are: self.config.ptcFitType in
        # ("EXPAPPROXIMATION", "POLYNOMIAL")
        else:
            # Fit the PTC to a polynomial or to Astier+19 exponential
            # approximation (Eq. 16). Fill up
            # PhotonTransferCurveDataset object.
            datasetPtc = self.fitPtc(datasetPtc)
        if inputExpList is not None:
            # It should be a list of exposures, to get the detector.
            detector = inputExpList[0].getDetector()
        else:
            detector = None
        datasetPtc.updateMetadata(setDate=True, camera=camera, detector=detector)

        return pipeBase.Struct(
            outputPtcDataset=datasetPtc,
        )

273 

274 def fitCovariancesAstier(self, dataset): 

275 """Fit measured flat covariances to full model in Astier+19. 

276 

277 Parameters 

278 ---------- 

279 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset` 

280 The dataset containing information such as the means, 

281 (co)variances, and exposure times. 

282 

283 Returns 

284 ------- 

285 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset` 

286 This is the same dataset as the input paramter, however, 

287 it has been modified to include information such as the 

288 fit vectors and the fit parameters. See the class 

289 `PhotonTransferCurveDatase`. 

290 """ 

291 covFits, covFitsNoB = fitDataFullCovariance(dataset) 

292 dataset = self.getOutputPtcDataCovAstier(dataset, covFits, covFitsNoB) 

293 

294 return dataset 

295 

    def getOutputPtcDataCovAstier(self, dataset, covFits, covFitsNoB):
        """Get output data for PhotonTransferCurveCovAstierDataset from CovFit
        objects.

        Parameters
        ----------
        dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
            The dataset containing information such as the means,
            variances and exposure times.
        covFits : `dict`
            Dictionary of CovFit objects, with amp names as keys.
        covFitsNoB : `dict`
            Dictionary of CovFit objects, with amp names as keys, and
            'b=0' in Eq. 20 of Astier+19.

        Returns
        -------
        dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
            This is the same dataset as the input parameter, however,
            it has been modified to include extra information such as
            the mask 1D array, gains, readout noise, measured signal,
            measured variance, modeled variance, a, and b coefficient
            matrices (see Astier+19) per amplifier. See the class
            `PhotonTransferCurveDatase`.
        """
        # Both dicts must describe the same set of amplifiers.
        assert(len(covFits) == len(covFitsNoB))

        for i, amp in enumerate(dataset.ampNames):
            lenInputTimes = len(dataset.rawExpTimes[amp])
            # Not used when ptcFitType is 'FULLCOVARIANCE'
            dataset.ptcFitPars[amp] = [np.nan]
            dataset.ptcFitParsError[amp] = [np.nan]
            dataset.ptcFitChiSq[amp] = np.nan
            if amp in covFits:
                fit = covFits[amp]
                fitNoB = covFitsNoB[amp]
                # Save full covariances, covariances models, and their weights
                # dataset.expIdMask is already full
                dataset.covariances[amp] = fit.cov
                dataset.covariancesModel[amp] = fit.evalCovModel()
                dataset.covariancesSqrtWeights[amp] = fit.sqrtW
                dataset.aMatrix[amp] = fit.getA()
                dataset.bMatrix[amp] = fit.getB()
                dataset.covariancesModelNoB[amp] = fitNoB.evalCovModel()
                dataset.aMatrixNoB[amp] = fitNoB.getA()

                # C_00 (i=j=0) is the variance; divideByMu=False keeps
                # raw variance rather than variance/mean.
                (meanVecFinal, varVecFinal, varVecModel,
                 wc, varMask) = fit.getFitData(0, 0, divideByMu=False)
                gain = fit.getGain()

                dataset.gain[amp] = gain
                dataset.gainErr[amp] = fit.getGainErr()
                # getRon() returns the squared read noise; store its root.
                dataset.noise[amp] = np.sqrt(fit.getRon())
                dataset.noiseErr[amp] = fit.getRonErr()
                dataset.finalVars[amp] = varVecFinal
                dataset.finalModelVars[amp] = varVecModel
                dataset.finalMeans[amp] = meanVecFinal

            else:
                # Bad amp
                # Entries need to have proper dimensions so read/write
                # with astropy.Table works.
                matrixSide = self.config.maximumRangeCovariancesAstier
                nanMatrix = np.full((matrixSide, matrixSide), np.nan)
                listNanMatrix = np.full((lenInputTimes, matrixSide, matrixSide), np.nan)

                dataset.covariances[amp] = listNanMatrix
                dataset.covariancesModel[amp] = listNanMatrix
                dataset.covariancesSqrtWeights[amp] = listNanMatrix
                dataset.aMatrix[amp] = nanMatrix
                dataset.bMatrix[amp] = nanMatrix
                dataset.covariancesModelNoB[amp] = listNanMatrix
                dataset.aMatrixNoB[amp] = nanMatrix

                dataset.expIdMask[amp] = np.repeat(np.nan, lenInputTimes)
                dataset.gain[amp] = np.nan
                dataset.gainErr[amp] = np.nan
                dataset.noise[amp] = np.nan
                dataset.noiseErr[amp] = np.nan
                dataset.finalVars[amp] = np.repeat(np.nan, lenInputTimes)
                dataset.finalModelVars[amp] = np.repeat(np.nan, lenInputTimes)
                dataset.finalMeans[amp] = np.repeat(np.nan, lenInputTimes)

        return dataset

380 

381 @staticmethod 

382 def _initialParsForPolynomial(order): 

383 assert(order >= 2) 

384 pars = np.zeros(order, dtype=float) 

385 pars[0] = 10 

386 pars[1] = 1 

387 pars[2:] = 0.0001 

388 return pars 

389 

390 @staticmethod 

391 def _boundsForPolynomial(initialPars, lowers=[], uppers=[]): 

392 if not len(lowers): 

393 lowers = [np.NINF for p in initialPars] 

394 if not len(uppers): 

395 uppers = [np.inf for p in initialPars] 

396 lowers[1] = 0 # no negative gains 

397 return (lowers, uppers) 

398 

399 @staticmethod 

400 def _boundsForAstier(initialPars, lowers=[], uppers=[]): 

401 if not len(lowers): 

402 lowers = [np.NINF for p in initialPars] 

403 if not len(uppers): 

404 uppers = [np.inf for p in initialPars] 

405 return (lowers, uppers) 

406 

407 @staticmethod 

408 def _getInitialGoodPoints(means, variances, minVarPivotSearch, consecutivePointsVarDecreases): 

409 """Return a boolean array to mask bad points. 

410 

411 Parameters 

412 ---------- 

413 means : `numpy.array` 

414 Input array with mean signal values. 

415 variances : `numpy.array` 

416 Input array with variances at each mean value. 

417 minVarPivotSearch : `float` 

418 The variance (in ADU^2), above which, the point 

419 of decreasing variance should be sought. 

420 consecutivePointsVarDecreases : `int` 

421 Required number of consecutive points/fluxes 

422 in the PTC where the variance 

423 decreases in order to find a first 

424 estimate of the PTC turn-off. 

425 

426 Returns 

427 ------ 

428 goodPoints : `numpy.array` [`bool`] 

429 Boolean array to select good (`True`) and bad (`False`) 

430 points. 

431 

432 Notes 

433 ----- 

434 Eliminate points beyond which the variance decreases. 

435 """ 

436 goodPoints = np.ones_like(means, dtype=bool) 

437 # Variances are sorted and should monotonically increase 

438 pivotList = np.where(np.array(np.diff(variances)) < 0)[0] 

439 if len(pivotList) > 0: 

440 # For small values, sometimes the variance decreases slightly 

441 # Only look when var > self.config.minVarPivotSearch 

442 pivotList = [p for p in pivotList if variances[p] > minVarPivotSearch] 

443 # Require that the varince decreases during 

444 # consecutivePointsVarDecreases 

445 # consecutive points. This will give a first 

446 # estimate of the PTC turn-off, which 

447 # may be updated (reduced) further in the code. 

448 if len(pivotList) > 1: 

449 # enumerate(pivotList) creates tuples (index, value), for 

450 # each value in pivotList. The lambda function subtracts 

451 # each value from the index. 

452 # groupby groups elements by equal key value. 

453 for k, g in groupby(enumerate(pivotList), lambda x: x[0]-x[1]): 

454 group = (map(itemgetter(1), g)) 

455 # Form groups of consecute values from pivotList 

456 group = list(map(int, group)) 

457 # values in pivotList are indices where np.diff(variances) 

458 # is negative, i.e., where the variance starts decreasing. 

459 # Find the first group of consecutive numbers when 

460 # variance decreases. 

461 if len(group) >= consecutivePointsVarDecreases: 

462 pivotIndex = np.min(group) 

463 goodPoints[pivotIndex+1:] = False 

464 break 

465 

466 return goodPoints 

467 

468 def _makeZeroSafe(self, array, substituteValue=1e-9): 

469 """""" 

470 array = np.array(array) 

471 nBad = Counter(np.ravel(array))[0] 

472 if nBad == 0: 

473 return array 

474 

475 index, = np.where(array == 0) 

476 if len(index): 

477 msg = f"Found {nBad} zeros in array at elements {index}" 

478 self.log.warning(msg) 

479 

480 array[index] = substituteValue 

481 

482 return array 

483 

484 def fitPtc(self, dataset): 

485 """Fit the photon transfer curve to a polynomial or to Astier+19 

486 approximation. 

487 

488 Fit the photon transfer curve with either a polynomial of the order 

489 specified in the task config, or using the exponential approximation 

490 in Astier+19 (Eq. 16). 

491 

492 Sigma clipping is performed iteratively for the fit, as well as an 

493 initial clipping of data points that are more than 

494 config.initialNonLinearityExclusionThreshold away from lying on a 

495 straight line. This other step is necessary because the photon transfer 

496 curve turns over catastrophically at very high flux (because saturation 

497 drops the variance to ~0) and these far outliers cause the initial fit 

498 to fail, meaning the sigma cannot be calculated to perform the 

499 sigma-clipping. 

500 

501 Parameters 

502 ---------- 

503 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset` 

504 The dataset containing the means, variances and exposure times. 

505 

506 Returns 

507 ------- 

508 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset` 

509 This is the same dataset as the input parameter, however, 

510 it has been modified to include information such as the 

511 fit vectors and the fit parameters. See the class 

512 `PhotonTransferCurveDatase`. 

513 

514 Raises 

515 ------ 

516 RuntimeError: 

517 Raises if dataset.ptcFitType is None or empty. 

518 """ 

519 if dataset.ptcFitType: 

520 ptcFitType = dataset.ptcFitType 

521 else: 

522 raise RuntimeError("ptcFitType is None of empty in PTC dataset.") 

523 matrixSide = self.config.maximumRangeCovariancesAstier 

524 nanMatrix = np.empty((matrixSide, matrixSide)) 

525 nanMatrix[:] = np.nan 

526 

527 for amp in dataset.ampNames: 

528 lenInputTimes = len(dataset.rawExpTimes[amp]) 

529 listNanMatrix = np.empty((lenInputTimes, matrixSide, matrixSide)) 

530 listNanMatrix[:] = np.nan 

531 

532 dataset.covariancesModel[amp] = listNanMatrix 

533 dataset.aMatrix[amp] = nanMatrix 

534 dataset.bMatrix[amp] = nanMatrix 

535 dataset.covariancesModelNoB[amp] = listNanMatrix 

536 dataset.aMatrixNoB[amp] = nanMatrix 

537 

538 def errFunc(p, x, y): 

539 return ptcFunc(p, x) - y 

540 

541 sigmaCutPtcOutliers = self.config.sigmaCutPtcOutliers 

542 maxIterationsPtcOutliers = self.config.maxIterationsPtcOutliers 

543 

544 for i, ampName in enumerate(dataset.ampNames): 

545 timeVecOriginal = np.ravel(np.array(dataset.rawExpTimes[ampName])) 

546 meanVecOriginal = np.ravel(np.array(dataset.rawMeans[ampName])) 

547 varVecOriginal = np.ravel(np.array(dataset.rawVars[ampName])) 

548 varVecOriginal = self._makeZeroSafe(varVecOriginal) 

549 

550 # Discard points when the variance starts to decrease after two 

551 # consecutive signal levels 

552 goodPoints = self._getInitialGoodPoints(meanVecOriginal, varVecOriginal, 

553 self.config.minVarPivotSearch, 

554 self.config.consecutivePointsVarDecreases) 

555 # Check if all points are bad from the 'cpExtractPtcTask' 

556 initialExpIdMask = np.ravel(np.array(dataset.expIdMask[ampName])) 

557 

558 if not (goodPoints.any() and initialExpIdMask.any()): 

559 msg = (f"SERIOUS: All points in goodPoints: {goodPoints} or " 

560 f"in initialExpIdMask: {initialExpIdMask} are bad." 

561 f"Setting {ampName} to BAD.") 

562 self.log.warning(msg) 

563 # Fill entries with NaNs 

564 self.fillBadAmp(dataset, ptcFitType, ampName) 

565 continue 

566 

567 mask = goodPoints 

568 

569 if ptcFitType == 'EXPAPPROXIMATION': 

570 ptcFunc = funcAstier 

571 parsIniPtc = [-1e-9, 1.0, 10.] # a00, gain, noisei^2 

572 # lowers and uppers obtained from BOT data studies by 

573 # C. Lage (UC Davis, 11/2020). 

574 bounds = self._boundsForAstier(parsIniPtc, lowers=[-1e-4, 0.5, -2000], 

575 uppers=[1e-4, 2.5, 2000]) 

576 if ptcFitType == 'POLYNOMIAL': 

577 ptcFunc = funcPolynomial 

578 parsIniPtc = self._initialParsForPolynomial(self.config.polynomialFitDegree + 1) 

579 bounds = self._boundsForPolynomial(parsIniPtc) 

580 

581 # Before bootstrap fit, do an iterative fit to get rid of outliers. 

582 # This further process of outlier rejection be skipped 

583 # if self.config.maxIterationsPtcOutliers = 0. 

584 # We already did some initial outlier rejection about in 

585 # self._getInitialGoodPoints. 

586 count = 1 

587 newMask = np.ones_like(meanVecOriginal, dtype=bool) 

588 pars = parsIniPtc 

589 while count <= maxIterationsPtcOutliers: 

590 # Note that application of the mask actually shrinks the array 

591 # to size rather than setting elements to zero (as we want) so 

592 # always update mask itself and re-apply to the original data 

593 meanTempVec = meanVecOriginal[mask] 

594 varTempVec = varVecOriginal[mask] 

595 res = least_squares(errFunc, parsIniPtc, bounds=bounds, args=(meanTempVec, varTempVec)) 

596 pars = res.x 

597 

598 # change this to the original from the temp because 

599 # the masks are ANDed meaning once a point is masked 

600 # it's always masked, and the masks must always be the 

601 # same length for broadcasting 

602 sigResids = (varVecOriginal - ptcFunc(pars, meanVecOriginal))/np.sqrt(varVecOriginal) 

603 newMask = np.array([True if np.abs(r) < sigmaCutPtcOutliers else False for r in sigResids]) 

604 mask = mask & newMask 

605 if not (mask.any() and newMask.any()): 

606 msg = (f"SERIOUS: All points in either mask: {mask} or newMask: {newMask} are bad. " 

607 f"Setting {ampName} to BAD.") 

608 self.log.warning(msg) 

609 # Fill entries with NaNs 

610 self.fillBadAmp(dataset, ptcFitType, ampName) 

611 break 

612 nDroppedTotal = Counter(mask)[False] 

613 self.log.debug("Iteration %d: discarded %d points in total for %s", 

614 count, nDroppedTotal, ampName) 

615 count += 1 

616 # objects should never shrink 

617 assert (len(mask) == len(timeVecOriginal) == len(meanVecOriginal) == len(varVecOriginal)) 

618 if not (mask.any() and newMask.any()): 

619 continue 

620 dataset.expIdMask[ampName] = np.array(dataset.expIdMask[ampName]) 

621 # store the final mask 

622 if len(dataset.expIdMask[ampName]): 

623 dataset.expIdMask[ampName] &= mask # bitwise_and if there is already a mask 

624 else: 

625 dataset.expIdMask[ampName] = mask 

626 parsIniPtc = pars 

627 meanVecFinal = meanVecOriginal[mask] 

628 varVecFinal = varVecOriginal[mask] 

629 

630 if Counter(mask)[False] > 0: 

631 self.log.info("Number of points discarded in PTC of amplifier %s:" 

632 " %d out of %d", ampName, Counter(mask)[False], len(meanVecOriginal)) 

633 

634 if (len(meanVecFinal) < len(parsIniPtc)): 

635 msg = (f"SERIOUS: Not enough data points ({len(meanVecFinal)}) compared to the number of " 

636 f"parameters of the PTC model({len(parsIniPtc)}). Setting {ampName} to BAD.") 

637 self.log.warning(msg) 

638 # Fill entries with NaNs 

639 self.fillBadAmp(dataset, ptcFitType, ampName) 

640 continue 

641 # Fit the PTC 

642 if self.config.doFitBootstrap: 

643 parsFit, parsFitErr, reducedChiSqPtc = fitBootstrap(parsIniPtc, meanVecFinal, 

644 varVecFinal, ptcFunc, 

645 weightsY=1./np.sqrt(varVecFinal)) 

646 else: 

647 parsFit, parsFitErr, reducedChiSqPtc = fitLeastSq(parsIniPtc, meanVecFinal, 

648 varVecFinal, ptcFunc, 

649 weightsY=1./np.sqrt(varVecFinal)) 

650 dataset.ptcFitPars[ampName] = parsFit 

651 dataset.ptcFitParsError[ampName] = parsFitErr 

652 dataset.ptcFitChiSq[ampName] = reducedChiSqPtc 

653 # Masked variances (measured and modeled) and means. Need 

654 # to pad the array so astropy.Table does not crash (the 

655 # mask may vary per amp). 

656 padLength = len(dataset.rawExpTimes[ampName]) - len(varVecFinal) 

657 dataset.finalVars[ampName] = np.pad(varVecFinal, (0, padLength), 'constant', 

658 constant_values=np.nan) 

659 dataset.finalModelVars[ampName] = np.pad(ptcFunc(parsFit, meanVecFinal), (0, padLength), 

660 'constant', constant_values=np.nan) 

661 dataset.finalMeans[ampName] = np.pad(meanVecFinal, (0, padLength), 'constant', 

662 constant_values=np.nan) 

663 if ptcFitType == 'EXPAPPROXIMATION': 

664 ptcGain = parsFit[1] 

665 ptcGainErr = parsFitErr[1] 

666 ptcNoise = np.sqrt(np.fabs(parsFit[2])) 

667 ptcNoiseErr = 0.5*(parsFitErr[2]/np.fabs(parsFit[2]))*np.sqrt(np.fabs(parsFit[2])) 

668 if ptcFitType == 'POLYNOMIAL': 

669 ptcGain = 1./parsFit[1] 

670 ptcGainErr = np.fabs(1./parsFit[1])*(parsFitErr[1]/parsFit[1]) 

671 ptcNoise = np.sqrt(np.fabs(parsFit[0]))*ptcGain 

672 ptcNoiseErr = (0.5*(parsFitErr[0]/np.fabs(parsFit[0]))*(np.sqrt(np.fabs(parsFit[0]))))*ptcGain 

673 dataset.gain[ampName] = ptcGain 

674 dataset.gainErr[ampName] = ptcGainErr 

675 dataset.noise[ampName] = ptcNoise 

676 dataset.noiseErr[ampName] = ptcNoiseErr 

677 

678 if not len(dataset.ptcFitType) == 0: 

679 dataset.ptcFitType = ptcFitType 

680 if len(dataset.badAmps) == 0: 

681 dataset.badAmps = np.repeat(np.nan, len(list(dataset.rawExpTimes.values())[0])) 

682 

683 return dataset 

684 

685 def fillBadAmp(self, dataset, ptcFitType, ampName): 

686 """Fill the dataset with NaNs if there are not enough good points. 

687 

688 Parameters 

689 ---------- 

690 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset` 

691 The dataset containing the means, variances and exposure times. 

692 ptcFitType : {'POLYNOMIAL', 'EXPAPPROXIMATION'} 

693 Fit a 'POLYNOMIAL' (degree: 'polynomialFitDegree') or 

694 'EXPAPPROXIMATION' (Eq. 16 of Astier+19) to the PTC. 

695 ampName : `str` 

696 Amplifier name. 

697 """ 

698 dataset.badAmps.append(ampName) 

699 dataset.expIdMask[ampName] = np.repeat(False, len(dataset.rawExpTimes[ampName])) 

700 dataset.gain[ampName] = np.nan 

701 dataset.gainErr[ampName] = np.nan 

702 dataset.noise[ampName] = np.nan 

703 dataset.noiseErr[ampName] = np.nan 

704 dataset.ptcFitPars[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1) if 

705 ptcFitType in ["POLYNOMIAL", ] else np.repeat(np.nan, 3)) 

706 dataset.ptcFitParsError[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1) if 

707 ptcFitType in ["POLYNOMIAL", ] else np.repeat(np.nan, 3)) 

708 dataset.ptcFitChiSq[ampName] = np.nan 

709 dataset.finalVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName])) 

710 dataset.finalModelVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName])) 

711 dataset.finalMeans[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName])) 

712 

713 return