Coverage for python/lsst/cp/pipe/ptc/cpSolvePtcTask.py: 10%

398 statements  

« prev     ^ index     » next       coverage.py v6.5.0, created at 2023-04-05 09:03 +0000

1# This file is part of cp_pipe. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (https://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# This program is free software: you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation, either version 3 of the License, or 

12# (at your option) any later version. 

13# 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18# 

19# You should have received a copy of the GNU General Public License 

20# along with this program. If not, see <https://www.gnu.org/licenses/>. 

21# 

22import numpy as np 

23from collections import Counter 

24 

25import lsst.pex.config as pexConfig 

26import lsst.pipe.base as pipeBase 

27from lsst.cp.pipe.utils import (fitLeastSq, fitBootstrap, funcPolynomial, funcAstier, symmetrize) 

28 

29from scipy.signal import fftconvolve 

30from scipy.optimize import least_squares 

31from itertools import groupby 

32from operator import itemgetter 

33 

34import lsst.pipe.base.connectionTypes as cT 

35 

36from lsst.ip.isr import PhotonTransferCurveDataset 

37 

38from lsst.cp.pipe._lookupStaticCalibration import lookupStaticCalibration 

39 

40import copy 

41 

42 

43__all__ = ['PhotonTransferCurveSolveConfig', 'PhotonTransferCurveSolveTask'] 

44 

45 

class PhotonTransferCurveSolveConnections(pipeBase.PipelineTaskConnections,
                                          dimensions=("instrument", "detector")):
    """Connections for PhotonTransferCurveSolveTask.

    One input per exposure (the partial PTC datasets measured by the
    upstream measure task), one camera prerequisite, and a single
    per-detector output PTC dataset.
    """
    # Partial PTC datasets, one per input exposure pair; includes dummy
    # entries (ptcFitType == 'DUMMY') so input/output dimensions match.
    inputCovariances = cT.Input(
        name="ptcCovariances",
        doc="Tuple with measured covariances from flats.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "exposure", "detector"),
        isCalibration=True,
        multiple=True,
    )
    # Camera geometry, looked up from the static calibration collection.
    camera = cT.PrerequisiteInput(
        name="camera",
        doc="Camera the input data comes from.",
        storageClass="Camera",
        dimensions=("instrument",),
        isCalibration=True,
        lookupFunction=lookupStaticCalibration,
    )
    # NOTE(review): "ptcDatsetProposal" looks like a typo of
    # "ptcDatasetProposal", but it is a persisted dataset type name —
    # renaming it would break existing registries/pipelines. Confirm
    # before changing.
    outputPtcDataset = cT.Output(
        name="ptcDatsetProposal",
        doc="Output proposed ptc dataset.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "detector"),
        multiple=False,
        isCalibration=True,
    )

72 

73 

class PhotonTransferCurveSolveConfig(pipeBase.PipelineTaskConfig,
                                     pipelineConnections=PhotonTransferCurveSolveConnections):
    """Configuration for fitting measured covariances.
    """

    # Which model to fit; see Astier+19 (arXiv:1905.08677) Eqs. 16 and 20.
    ptcFitType = pexConfig.ChoiceField(
        dtype=str,
        doc="Fit PTC to Eq. 16, Eq. 20 in Astier+19, or to a polynomial.",
        default="POLYNOMIAL",
        allowed={
            "POLYNOMIAL": "n-degree polynomial (use 'polynomialFitDegree' to set 'n').",
            "EXPAPPROXIMATION": "Approximation in Astier+19 (Eq. 16).",
            "FULLCOVARIANCE": "Full covariances model in Astier+19 (Eq. 20)"
        }
    )
    # Maximum lag "r" of the covariance matrices (matrices are r x r).
    maximumRangeCovariancesAstier = pexConfig.Field(
        dtype=int,
        doc="Maximum range of covariances as in Astier+19",
        default=8,
    )
    sigmaClipFullFitCovariancesAstier = pexConfig.Field(
        dtype=float,
        doc="sigma clip for full model fit for FULLCOVARIANCE ptcFitType ",
        default=5.0,
    )
    maxIterFullFitCovariancesAstier = pexConfig.Field(
        dtype=int,
        doc="Maximum number of iterations in full model fit for FULLCOVARIANCE ptcFitType",
        default=3,
    )
    polynomialFitDegree = pexConfig.Field(
        dtype=int,
        doc="Degree of polynomial to fit the PTC, when 'ptcFitType'=POLYNOMIAL.",
        default=3,
    )
    sigmaCutPtcOutliers = pexConfig.Field(
        dtype=float,
        doc="Sigma cut for outlier rejection in PTC.",
        default=5.0,
    )
    maxIterationsPtcOutliers = pexConfig.RangeField(
        dtype=int,
        doc="Maximum number of iterations for outlier rejection in PTC.",
        default=2,
        min=0
    )
    # NOTE(review): "exclude then" in the doc string below looks like a
    # typo for "exclude them"; doc text is runtime data, so it is left
    # unchanged here.
    minVarPivotSearch = pexConfig.Field(
        dtype=float,
        doc="The code looks for a pivot signal point after which the variance starts decreasing at high-flux"
            " to exclude then from the PTC model fit. However, sometimes at low fluxes, the variance"
            " decreases slightly. Set this variable for the variance value, in ADU^2, after which the pivot "
            " should be sought.",
        default=10000,
    )
    # min=2: a single decreasing point is never enough to declare turn-off.
    consecutivePointsVarDecreases = pexConfig.RangeField(
        dtype=int,
        doc="Required number of consecutive points/fluxes in the PTC where the variance "
            "decreases in order to find a first estimate of the PTC turn-off. ",
        default=2,
        min=2
    )
    doFitBootstrap = pexConfig.Field(
        dtype=bool,
        doc="Use bootstrap for the PTC fit parameters and errors?.",
        default=False,
    )
    binSize = pexConfig.Field(
        dtype=int,
        doc="Bin the image by this factor in both dimensions.",
        default=1,
    )

145 

146 

class PhotonTransferCurveSolveTask(pipeBase.PipelineTask):
    """Task to fit the PTC from flat covariances.

    The first task of the PTC measurement pipeline,
    ``PhotonTransferCurveMeasureTask`` (and assumed to have been run
    before this task), produced a list of
    `~lsst.ip.isr.PhotonTransferCurveDataset` objects. Each dataset
    contains the mean signal and covariances of the
    difference image of the flat-field images taken at
    the same exposure time. The list also contains dummy
    datasets (with no measurements), whose purpose is to have
    the input and output dimensions of ``PhotonTransferCurveMeasureTask``
    match.

    This task, ``PhotonTransferCurveSolveTask``, assembles the list
    of individual PTC datasets produced
    by ``PhotonTransferCurveMeasureTask`` into one single final PTC
    dataset, discarding the dummy dataset as appropriate.
    The task fits the measured (co)variances to one of three models:
    a polynomial model of a given order, or the models described
    in equations 16 and 20 of Astier+19. These options are referred
    to as ``POLYNOMIAL``, ``EXPAPPROXIMATION``, and ``FULLCOVARIANCE``
    in the configuration options of the task, respectively).
    Parameters of interest such as the gain and noise are derived
    from the fits. The ``FULLCOVARIANCE`` model is fitted to the
    full covariance data (as opposed to the other two models, which
    are fit to the variance vs mean measurements only).

    Astier+19: "The Shape of the Photon Transfer Curve
    of CCD sensors", arXiv:1905.08677
    """

    ConfigClass = PhotonTransferCurveSolveConfig
    _DefaultName = 'cpPhotonTransferCurveSolve'

182 def runQuantum(self, butlerQC, inputRefs, outputRefs): 

183 """Ensure that the input and output dimensions are passed along. 

184 

185 Parameters 

186 ---------- 

187 butlerQC : `~lsst.daf.butler.butlerQuantumContext.ButlerQuantumContext` 

188 Butler to operate on. 

189 inputRefs : `~lsst.pipe.base.connections.InputQuantizedConnection` 

190 Input data refs to load. 

191 ouptutRefs : `~lsst.pipe.base.connections.OutputQuantizedConnection` 

192 Output data refs to persist. 

193 """ 

194 inputs = butlerQC.get(inputRefs) 

195 detId = inputRefs.inputCovariances[0].dataId['detector'] 

196 outputs = self.run(inputCovariances=inputs['inputCovariances'], camera=inputs['camera'], detId=detId) 

197 butlerQC.put(outputs, outputRefs) 

198 

199 def run(self, inputCovariances, camera=None, detId=0): 

200 """Fit measured covariances to different models. 

201 

202 Parameters 

203 ---------- 

204 inputCovariances : `list` [`lsst.ip.isr.PhotonTransferCurveDataset`] 

205 List of lsst.ip.isr.PhotonTransferCurveDataset datasets. 

206 camera : `lsst.afw.cameraGeom.Camera`, optional 

207 Input camera. 

208 detId : `int` 

209 Detector ID to locate the detector in the camera and 

210 populate the `lsst.ip.isr.PhotonTransferCurveDataset` 

211 metadata. 

212 Returns 

213 ------- 

214 results : `lsst.pipe.base.Struct` 

215 The resultins structure contains: 

216 

217 ``outputPtcDatset`` 

218 Final PTC dataset, containing information such as the 

219 means, variances, and exposure times 

220 (`lsst.ip.isr.PhotonTransferCurveDataset`). 

221 """ 

222 # Find the ampNames from a non-dummy ptc. 

223 ampNames = [] 

224 for partialPtcDataset in inputCovariances: 

225 if partialPtcDataset.ptcFitType != 'DUMMY': 

226 ampNames = partialPtcDataset.ampNames 

227 break 

228 

229 # Assemble individual PTC datasets into a single PTC dataset. 

230 datasetPtc = PhotonTransferCurveDataset(ampNames=ampNames, 

231 ptcFitType=self.config.ptcFitType, 

232 covMatrixSide=self.config.maximumRangeCovariancesAstier) 

233 for partialPtcDataset in inputCovariances: 

234 # Ignore dummy datasets 

235 if partialPtcDataset.ptcFitType == 'DUMMY': 

236 continue 

237 for ampName in ampNames: 

238 # The partial dataset consists of lists of values for each 

239 # quantity. In the case of the input exposure pairs, this is a 

240 # list of tuples. In all cases we only want the first 

241 # (and only) element of the list. 

242 datasetPtc.inputExpIdPairs[ampName].append(partialPtcDataset.inputExpIdPairs[ampName][0]) 

243 datasetPtc.rawExpTimes[ampName] = np.append(datasetPtc.rawExpTimes[ampName], 

244 partialPtcDataset.rawExpTimes[ampName][0]) 

245 datasetPtc.rawMeans[ampName] = np.append(datasetPtc.rawMeans[ampName], 

246 partialPtcDataset.rawMeans[ampName][0]) 

247 datasetPtc.rawVars[ampName] = np.append(datasetPtc.rawVars[ampName], 

248 partialPtcDataset.rawVars[ampName][0]) 

249 datasetPtc.expIdMask[ampName] = np.append(datasetPtc.expIdMask[ampName], 

250 partialPtcDataset.expIdMask[ampName][0]) 

251 datasetPtc.covariances[ampName] = np.append( 

252 datasetPtc.covariances[ampName].ravel(), 

253 partialPtcDataset.covariances[ampName].ravel() 

254 ).reshape( 

255 ( 

256 len(datasetPtc.rawExpTimes[ampName]), 

257 datasetPtc.covMatrixSide, 

258 datasetPtc.covMatrixSide, 

259 ) 

260 ) 

261 datasetPtc.covariancesSqrtWeights[ampName] = np.append( 

262 datasetPtc.covariancesSqrtWeights[ampName].ravel(), 

263 partialPtcDataset.covariancesSqrtWeights[ampName].ravel() 

264 ).reshape( 

265 ( 

266 len(datasetPtc.rawExpTimes[ampName]), 

267 datasetPtc.covMatrixSide, 

268 datasetPtc.covMatrixSide, 

269 ) 

270 ) 

271 

272 # Sort arrays that are filled so far in the final dataset by 

273 # rawMeans index 

274 for ampName in ampNames: 

275 index = np.argsort(datasetPtc.rawMeans[ampName]) 

276 datasetPtc.inputExpIdPairs[ampName] = np.array( 

277 datasetPtc.inputExpIdPairs[ampName] 

278 )[index].tolist() 

279 datasetPtc.rawExpTimes[ampName] = datasetPtc.rawExpTimes[ampName][index] 

280 datasetPtc.rawMeans[ampName] = datasetPtc.rawMeans[ampName][index] 

281 datasetPtc.rawVars[ampName] = datasetPtc.rawVars[ampName][index] 

282 datasetPtc.expIdMask[ampName] = datasetPtc.expIdMask[ampName][index] 

283 datasetPtc.covariances[ampName] = datasetPtc.covariances[ampName][index] 

284 datasetPtc.covariancesSqrtWeights[ampName] = datasetPtc.covariances[ampName][index] 

285 

286 if self.config.ptcFitType == "FULLCOVARIANCE": 

287 # Fit the measured covariances vs mean signal to 

288 # the Astier+19 full model (Eq. 20). Before that 

289 # do a preliminary fit to the variance (C_00) vs mean 

290 # signal (mu) curve using the EXPAPPROXIMATION model 

291 # (Eq. 16 in Astier+19) in order to 

292 # get the flat pairs that are masked. The 

293 # points at these fluxes will also be masked when 

294 # calculating the other elements of the covariance 

295 # matrix, C_ij, i!=j). 

296 

297 # Preliminary fit, usign a temp dataset to get the mask 

298 tempDatasetPtc = copy.copy(datasetPtc) 

299 tempDatasetPtc.ptcFitType = "EXPAPPROXIMATION" 

300 tempDatasetPtc = self.fitMeasurementsToModel(tempDatasetPtc) 

301 

302 # "FULLCOVARIANCE", using the mask obtained from the 

303 # previous fit. 

304 for ampName in datasetPtc.ampNames: 

305 datasetPtc.expIdMask[ampName] = tempDatasetPtc.expIdMask[ampName] 

306 datasetPtc.fitType = "FULLCOVARIANCE" 

307 datasetPtc = self.fitMeasurementsToModel(datasetPtc) 

308 # The other options are: self.config.ptcFitType in 

309 # ("EXPAPPROXIMATION", "POLYNOMIAL") 

310 else: 

311 # Fit the PTC to a polynomial or to Astier+19 exponential 

312 # approximation (Eq. 16). Fill up 

313 # PhotonTransferCurveDataset object. 

314 datasetPtc = self.fitMeasurementsToModel(datasetPtc) 

315 

316 if camera: 

317 detector = camera[detId] 

318 else: 

319 detector = None 

320 datasetPtc.updateMetadataFromExposures(inputCovariances) 

321 datasetPtc.updateMetadata(setDate=True, camera=camera, detector=detector) 

322 

323 return pipeBase.Struct( 

324 outputPtcDataset=datasetPtc, 

325 ) 

326 

327 def fitMeasurementsToModel(self, dataset): 

328 """Fit the measured covariances vs mean signal to a 

329 polynomial or one of the models in Astier+19 

330 (Eq. 16 or Eq.20). 

331 

332 Parameters 

333 ---------- 

334 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset` 

335 The dataset containing information such as the means, 

336 (co)variances, and exposure times. 

337 

338 Returns 

339 ------- 

340 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset` 

341 This is the same dataset as the input parameter, however, 

342 it has been modified to include information such as the 

343 fit vectors and the fit parameters. See the class 

344 `PhotonTransferCurveDatase`. 

345 """ 

346 fitType = dataset.ptcFitType 

347 if fitType in ["FULLCOVARIANCE", ]: 

348 # This model uses the full covariance matrix in the fit. 

349 # The PTC is technically defined as variance vs signal, 

350 # with variance = Cov_00 

351 dataset = self.fitDataFullCovariance(dataset) 

352 elif fitType in ["POLYNOMIAL", "EXPAPPROXIMATION"]: 

353 # The PTC is technically defined as variance vs signal 

354 dataset = self.fitPtc(dataset) 

355 else: 

356 raise RuntimeError( 

357 f"Fitting option {fitType} not one of " 

358 "'POLYNOMIAL', 'EXPAPPROXIMATION', or 'FULLCOVARIANCE'" 

359 ) 

360 

361 return dataset 

362 

    def fitDataFullCovariance(self, dataset):
        """Fit measured flat covariances to the full model in
        Astier+19 (Eq. 20).

        Parameters
        ----------
        dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
            The dataset containing information such as the means,
            (co)variances, and exposure times.

        Returns
        -------
        dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
            This is the same dataset as the input parameter, however,
            it has been modified to include information such as the
            fit vectors and the fit parameters. See the class
            `PhotonTransferCurveDataset`.

        Notes
        -----
        The parameters of the full model for C_ij(mu) ("C_ij" and "mu"
        in ADU^2 and ADU, respectively) in Astier+19 (Eq. 20) are:

        - "a" coefficients (r by r matrix), units: 1/e
        - "b" coefficients (r by r matrix), units: 1/e
        - noise matrix (r by r matrix), units: e^2
        - gain, units: e/ADU

        "b" appears in Eq. 20 only through the "ab" combination, which
        is defined in this code as "c=ab".

        Total number of parameters: #entries(a) + #entries(c) + #entries(noise)
        + 1. This is equivalent to r^2 + r^2 + r^2 + 1, where "r" is the
        maximum lag considered for the covariances calculation, and the
        extra "1" is the gain. If "b" is 0, then "c" is 0, and len(pInit) will
        have r^2 fewer entries.
        """
        matrixSide = self.config.maximumRangeCovariancesAstier
        # Number of entries in each (matrixSide x matrixSide) parameter block
        # of the flattened parameter vector.
        lenParams = matrixSide*matrixSide

        for ampName in dataset.ampNames:
            lenInputTimes = len(dataset.rawExpTimes[ampName])
            # Not used when ptcFitType is 'FULLCOVARIANCE'
            dataset.ptcFitPars[ampName] = [np.nan]
            dataset.ptcFitParsError[ampName] = [np.nan]
            dataset.ptcFitChiSq[ampName] = np.nan

            if ampName in dataset.badAmps:
                # Bad amp
                # Entries need to have proper dimensions so read/write
                # with astropy.Table works.
                nanMatrix = np.full((matrixSide, matrixSide), np.nan)
                listNanMatrix = np.full((lenInputTimes, matrixSide, matrixSide), np.nan)
                dataset.covariancesModel[ampName] = listNanMatrix
                dataset.covariancesSqrtWeights[ampName] = listNanMatrix
                dataset.aMatrix[ampName] = nanMatrix
                dataset.bMatrix[ampName] = nanMatrix
                dataset.covariancesModelNoB[ampName] = listNanMatrix
                dataset.aMatrixNoB[ampName] = nanMatrix

                dataset.expIdMask[ampName] = np.repeat(False, lenInputTimes)
                dataset.gain[ampName] = np.nan
                dataset.gainErr[ampName] = np.nan
                dataset.noise[ampName] = np.nan
                dataset.noiseErr[ampName] = np.nan
                dataset.finalVars[ampName] = np.repeat(np.nan, lenInputTimes)
                dataset.finalModelVars[ampName] = np.repeat(np.nan, lenInputTimes)
                dataset.finalMeans[ampName] = np.repeat(np.nan, lenInputTimes)
                continue

            muAtAmp = dataset.rawMeans[ampName]
            maskAtAmp = dataset.expIdMask[ampName]
            # An empty mask means no preliminary masking was done; keep all
            # points.
            if len(maskAtAmp) == 0:
                maskAtAmp = np.repeat(True, len(muAtAmp))

            # From here on, only the masked (good) flux points are used.
            muAtAmp = muAtAmp[maskAtAmp]
            covAtAmp = np.nan_to_num(dataset.covariances[ampName])[maskAtAmp]
            covSqrtWeightsAtAmp = np.nan_to_num(dataset.covariancesSqrtWeights[ampName])[maskAtAmp]

            # Initial fit, to approximate parameters, with c=0
            a0, c0, noise0, gain0 = self.initialFitFullCovariance(muAtAmp, covAtAmp, covSqrtWeightsAtAmp)

            # Fit full model (Eq. 20 of Astier+19) and same model with
            # b=0 (c=0 in this code)
            pInit = np.concatenate((a0.flatten(), c0.flatten(), noise0.flatten(), np.array(gain0)), axis=None)
            functionsDict = {'fullModel': self.funcFullCovarianceModel,
                             'fullModelNoB': self.funcFullCovarianceModelNoB}
            fitResults = {'fullModel': {'a': [], 'c': [], 'noise': [], 'gain': [], 'paramsErr': []},
                          'fullModelNoB': {'a': [], 'c': [], 'noise': [], 'gain': [], 'paramsErr': []}}
            for key in functionsDict:
                params, paramsErr, _ = fitLeastSq(pInit, muAtAmp,
                                                  covAtAmp.flatten(), functionsDict[key],
                                                  weightsY=covSqrtWeightsAtAmp.flatten())
                # Unpack the flat parameter vector: [a | c | noise | gain].
                a = params[:lenParams].reshape((matrixSide, matrixSide))
                c = params[lenParams:2*lenParams].reshape((matrixSide, matrixSide))
                noise = params[2*lenParams:3*lenParams].reshape((matrixSide, matrixSide))
                gain = params[-1]

                fitResults[key]['a'] = a
                fitResults[key]['c'] = c
                fitResults[key]['noise'] = noise
                fitResults[key]['gain'] = gain
                fitResults[key]['paramsErr'] = paramsErr

            # Put the information in the PTC dataset

            # Not used when ptcFitType is 'FULLCOVARIANCE'
            dataset.ptcFitPars[ampName] = [np.nan]
            dataset.ptcFitParsError[ampName] = [np.nan]
            dataset.ptcFitChiSq[ampName] = np.nan

            # Save full covariances, covariances models, and their weights.
            # dataset.expIdMask is already full, but needs to be
            # converted to bool.
            dataset.expIdMask[ampName] = np.array(dataset.expIdMask[ampName], dtype=bool)
            # NOTE(review): covariances/finalMeans are overwritten with the
            # *masked* subset while expIdMask keeps its full length —
            # presumably downstream readers expect this; confirm.
            dataset.covariances[ampName] = covAtAmp
            dataset.covariancesModel[ampName] = self.evalCovModel(muAtAmp,
                                                                  fitResults['fullModel']['a'],
                                                                  fitResults['fullModel']['c'],
                                                                  fitResults['fullModel']['noise'],
                                                                  fitResults['fullModel']['gain'])
            dataset.covariancesSqrtWeights[ampName] = covSqrtWeightsAtAmp
            dataset.aMatrix[ampName] = fitResults['fullModel']['a']
            # b = c/a, since c = a*b by definition.
            dataset.bMatrix[ampName] = fitResults['fullModel']['c']/fitResults['fullModel']['a']
            dataset.covariancesModelNoB[ampName] = self.evalCovModel(muAtAmp,
                                                                     fitResults['fullModelNoB']['a'],
                                                                     fitResults['fullModelNoB']['c'],
                                                                     fitResults['fullModelNoB']['noise'],
                                                                     fitResults['fullModelNoB']['gain'],
                                                                     setBtoZero=True)
            dataset.aMatrixNoB[ampName] = fitResults['fullModelNoB']['a']
            dataset.gain[ampName] = fitResults['fullModel']['gain']
            dataset.gainErr[ampName] = fitResults['fullModel']['paramsErr'][-1]
            # Readout noise is the zero-lag entry of the noise matrix (e^2);
            # its error is propagated through the square root below.
            readoutNoise = fitResults['fullModel']['noise'][0][0]
            readoutNoiseSqrt = np.sqrt(np.fabs(readoutNoise))
            dataset.noise[ampName] = readoutNoise
            readoutNoiseSigma = fitResults['fullModel']['paramsErr'][2*lenParams]
            dataset.noiseErr[ampName] = 0.5*(readoutNoiseSigma/np.fabs(readoutNoise))*readoutNoiseSqrt
            dataset.finalVars[ampName] = covAtAmp[:, 0, 0]
            dataset.finalModelVars[ampName] = dataset.covariancesModel[ampName][:, 0, 0]
            dataset.finalMeans[ampName] = muAtAmp

        return dataset

506 

507 def initialFitFullCovariance(self, mu, cov, sqrtW): 

508 """ Performs a crude parabolic fit of the data in order to start 

509 the full fit close to the solution, setting b=0 (c=0) in Eq. 20 

510 of Astier+19. 

511 

512 Parameters 

513 ---------- 

514 mu : `numpy.array`, (N,) 

515 Signal `mu` (ADU) 

516 cov : `numpy.array`, (N, M, M) 

517 Covariance arrays of size `(M, M)` (with 

518 `M = config.maximumRangeCovariancesAstier`), 

519 indexed by mean signal `mu`. 

520 sqrtW : `numpy.array`, (N,) 

521 Covariance weights, defined as 1./sqrt(Variances) 

522 

523 Returns 

524 ------- 

525 a : `numpy.array`, (M, M) 

526 "a" parameter per flux in Eq. 20 of Astier+19. 

527 c : `numpy.array`, (M, M) 

528 "c"="ab" parameter per flux in Eq. 20 of Astier+19. 

529 noise : `numpy.array`, (M, M) 

530 "noise" parameter per flux in Eq. 20 of Astier+19. 

531 gain : `float` 

532 Amplifier gain (e/ADU) 

533 """ 

534 matrixSide = self.config.maximumRangeCovariancesAstier 

535 

536 # Initialize fit parameters 

537 a = np.zeros((matrixSide, matrixSide)) 

538 c = np.zeros((matrixSide, matrixSide)) 

539 noise = np.zeros((matrixSide, matrixSide)) 

540 gain = 1. 

541 

542 # iterate the fit to account for higher orders 

543 # the chi2 does not necessarily go down, so one could 

544 # stop when it increases 

545 oldChi2 = 1e30 

546 for _ in range(5): 

547 model = np.nan_to_num(self.evalCovModel(mu, a, c, noise, gain, setBtoZero=True)) 

548 # loop on lags 

549 for i in range(matrixSide): 

550 for j in range(matrixSide): 

551 # fit a parabola for a given lag 

552 parsFit = np.polyfit(mu, cov[:, i, j] - model[:, i, j], 

553 2, w=sqrtW[:, i, j]) 

554 # model equation (Eq. 20) in Astier+19, with c=a*b=0: 

555 a[i, j] += parsFit[0] 

556 noise[i, j] += parsFit[2] 

557 if(i + j == 0): 

558 gain = 1./(1/gain+parsFit[1]) 

559 weightedRes = (model - cov)*sqrtW 

560 chi2 = (weightedRes.flatten()**2).sum() 

561 if chi2 > oldChi2: 

562 break 

563 oldChi2 = chi2 

564 

565 return a, c, noise, gain 

566 

567 def funcFullCovarianceModel(self, params, x): 

568 """Model to fit covariances from flat fields; Equation 20 of 

569 Astier+19. 

570 

571 Parameters 

572 ---------- 

573 params : `list` 

574 Parameters of the model: aMatrix, CMatrix, noiseMatrix, 

575 gain (e/ADU). 

576 x : `numpy.array`, (N,) 

577 Signal `mu` (ADU) 

578 

579 Returns 

580 ------- 

581 y : `numpy.array`, (N,) 

582 Covariance matrix. 

583 """ 

584 matrixSide = self.config.maximumRangeCovariancesAstier 

585 lenParams = matrixSide*matrixSide 

586 aMatrix = params[:lenParams].reshape((matrixSide, matrixSide)) 

587 cMatrix = params[lenParams:2*lenParams].reshape((matrixSide, matrixSide)) 

588 noiseMatrix = params[2*lenParams:3*lenParams].reshape((matrixSide, matrixSide)) 

589 gain = params[-1] 

590 

591 return self.evalCovModel(x, aMatrix, cMatrix, noiseMatrix, gain).flatten() 

592 

593 def funcFullCovarianceModelNoB(self, params, x): 

594 """Model to fit covariances from flat fields; Equation 20 of 

595 Astier+19, with b=0 (equivalent to c=a*b=0 in this code). 

596 

597 Parameters 

598 ---------- 

599 params : `list` 

600 Parameters of the model: aMatrix, CMatrix, noiseMatrix, 

601 gain (e/ADU). 

602 x : `numpy.array`, (N,) 

603 Signal mu (ADU) 

604 

605 Returns 

606 ------- 

607 y : `numpy.array`, (N,) 

608 Covariance matrix. 

609 """ 

610 matrixSide = self.config.maximumRangeCovariancesAstier 

611 lenParams = matrixSide*matrixSide 

612 aMatrix = params[:lenParams].reshape((matrixSide, matrixSide)) 

613 cMatrix = params[lenParams:2*lenParams].reshape((matrixSide, matrixSide)) 

614 noiseMatrix = params[2*lenParams:3*lenParams].reshape((matrixSide, matrixSide)) 

615 gain = params[-1] 

616 

617 return self.evalCovModel(x, aMatrix, cMatrix, noiseMatrix, gain, setBtoZero=True).flatten() 

618 

    def evalCovModel(self, mu, aMatrix, cMatrix, noiseMatrix, gain, setBtoZero=False):
        """Computes full covariances model (Eq. 20 of Astier+19).

        Parameters
        ----------
        mu : `numpy.array`, (N,)
            List of mean signals.
        aMatrix : `numpy.array`, (M, M)
            "a" parameter per flux in Eq. 20 of Astier+19.
        cMatrix : `numpy.array`, (M, M)
            "c"="ab" parameter per flux in Eq. 20 of Astier+19.
        noiseMatrix : `numpy.array`, (M, M)
            "noise" parameter per flux in Eq. 20 of Astier+19.
        gain : `float`
            Amplifier gain (e/ADU)
        setBtoZero : `bool`, optional
            Set "b" parameter in full model (see Astier+19) to zero.

        Returns
        -------
        covModel : `numpy.array`, (N, M, M)
            Covariances model.

        Notes
        -----
        By default, computes the covModel for the mu's stored(self.mu).
        Returns cov[Nmu, M, M]. The variance for the PTC is
        cov[:, 0, 0]. mu and cov are in ADUs and ADUs squared. To use
        electrons for both, the gain should be set to 1. This routine
        implements the model in Astier+19 (1905.08677).
        The parameters of the full model for C_ij(mu) ("C_ij" and "mu"
        in ADU^2 and ADU, respectively) in Astier+19 (Eq. 20) are:

        - "a" coefficients (M by M matrix), units: 1/e
        - "b" coefficients (M by M matrix), units: 1/e
        - noise matrix (M by M matrix), units: e^2
        - gain, units: e/ADU

        "b" appears in Eq. 20 only through the "ab" combination, which
        is defined in this code as "c=ab".
        """
        matrixSide = self.config.maximumRangeCovariancesAstier
        sa = (matrixSide, matrixSide)
        # pad a with zeros and symmetrize; the enlarged (1.5x + 1) grid
        # leaves room for the convolution products below.
        aEnlarged = np.zeros((int(sa[0]*1.5)+1, int(sa[1]*1.5)+1))
        aEnlarged[0:sa[0], 0:sa[1]] = aMatrix
        aSym = symmetrize(aEnlarged)
        # pad c with zeros and symmetrize
        cEnlarged = np.zeros((int(sa[0]*1.5)+1, int(sa[1]*1.5)+1))
        cEnlarged[0:sa[0], 0:sa[1]] = cMatrix
        cSym = symmetrize(cEnlarged)
        # Discrete convolutions give the a*a, a*a*a, and a*c terms of the
        # series expansion in Eq. 20.
        a2 = fftconvolve(aSym, aSym, mode='same')
        a3 = fftconvolve(a2, aSym, mode='same')
        ac = fftconvolve(aSym, cSym, mode='same')
        # (xc, yc) is the center of the symmetrized grid (peak of |aSym|),
        # used to crop the convolution products back to (M, M).
        (xc, yc) = np.unravel_index(np.abs(aSym).argmax(), a2.shape)

        a1 = aMatrix[np.newaxis, :, :]
        a2 = a2[np.newaxis, xc:xc + matrixSide, yc:yc + matrixSide]
        a3 = a3[np.newaxis, xc:xc + matrixSide, yc:yc + matrixSide]
        ac = ac[np.newaxis, xc:xc + matrixSide, yc:yc + matrixSide]
        c1 = cMatrix[np.newaxis, ::]

        # assumes that mu is 1d
        bigMu = mu[:, np.newaxis, np.newaxis]*gain
        # c(=a*b in Astier+19) also has a contribution to the last
        # term, that is absent for now.
        if setBtoZero:
            c1 = np.zeros_like(c1)
            ac = np.zeros_like(ac)
        # Eq. 20 polynomial in bigMu (electrons), converted back to ADU^2
        # via the 1/gain^2 factors.
        covModel = (bigMu/(gain*gain)*(a1*bigMu+2./3.*(bigMu*bigMu)*(a2 + c1)
                    + (1./3.*a3 + 5./6.*ac)*(bigMu*bigMu*bigMu)) + noiseMatrix[np.newaxis, :, :]/gain**2)
        # add the Poisson term, and the read out noise (variance)
        covModel[:, 0, 0] += mu/gain

        return covModel

694 

695 # EXPAPPROXIMATION and POLYNOMIAL fit methods 

696 @staticmethod 

697 def _initialParsForPolynomial(order): 

698 assert(order >= 2) 

699 pars = np.zeros(order, dtype=float) 

700 pars[0] = 10 

701 pars[1] = 1 

702 pars[2:] = 0.0001 

703 return pars 

704 

705 @staticmethod 

706 def _boundsForPolynomial(initialPars, lowers=[], uppers=[]): 

707 if not len(lowers): 

708 lowers = [np.NINF for p in initialPars] 

709 if not len(uppers): 

710 uppers = [np.inf for p in initialPars] 

711 lowers[1] = 0 # no negative gains 

712 return (lowers, uppers) 

713 

714 @staticmethod 

715 def _boundsForAstier(initialPars, lowers=[], uppers=[]): 

716 if not len(lowers): 

717 lowers = [np.NINF for p in initialPars] 

718 if not len(uppers): 

719 uppers = [np.inf for p in initialPars] 

720 return (lowers, uppers) 

721 

722 @staticmethod 

723 def _getInitialGoodPoints(means, variances, minVarPivotSearch, consecutivePointsVarDecreases): 

724 """Return a boolean array to mask bad points. 

725 

726 Parameters 

727 ---------- 

728 means : `numpy.array` 

729 Input array with mean signal values. 

730 variances : `numpy.array` 

731 Input array with variances at each mean value. 

732 minVarPivotSearch : `float` 

733 The variance (in ADU^2), above which, the point 

734 of decreasing variance should be sought. 

735 consecutivePointsVarDecreases : `int` 

736 Required number of consecutive points/fluxes 

737 in the PTC where the variance 

738 decreases in order to find a first 

739 estimate of the PTC turn-off. 

740 

741 Returns 

742 ------ 

743 goodPoints : `numpy.array` [`bool`] 

744 Boolean array to select good (`True`) and bad (`False`) 

745 points. 

746 

747 Notes 

748 ----- 

749 Eliminate points beyond which the variance decreases. 

750 """ 

751 goodPoints = np.ones_like(means, dtype=bool) 

752 # Variances are sorted and should monotonically increase 

753 pivotList = np.where(np.array(np.diff(variances)) < 0)[0] 

754 if len(pivotList) > 0: 

755 # For small values, sometimes the variance decreases slightly 

756 # Only look when var > self.config.minVarPivotSearch 

757 pivotList = [p for p in pivotList if variances[p] > minVarPivotSearch] 

758 # Require that the varince decreases during 

759 # consecutivePointsVarDecreases 

760 # consecutive points. This will give a first 

761 # estimate of the PTC turn-off, which 

762 # may be updated (reduced) further in the code. 

763 if len(pivotList) > 1: 

764 # enumerate(pivotList) creates tuples (index, value), for 

765 # each value in pivotList. The lambda function subtracts 

766 # each value from the index. 

767 # groupby groups elements by equal key value. 

768 for k, g in groupby(enumerate(pivotList), lambda x: x[0]-x[1]): 

769 group = (map(itemgetter(1), g)) 

770 # Form groups of consecute values from pivotList 

771 group = list(map(int, group)) 

772 # values in pivotList are indices where np.diff(variances) 

773 # is negative, i.e., where the variance starts decreasing. 

774 # Find the first group of consecutive numbers when 

775 # variance decreases. 

776 if len(group) >= consecutivePointsVarDecreases: 

777 pivotIndex = np.min(group) 

778 goodPoints[pivotIndex+1:] = False 

779 break 

780 

781 # Finally, we filter out any infinities or NaNs. 

782 goodPoints[(~np.isfinite(means)) | (~np.isfinite(variances))] = False 

783 

784 return goodPoints 

785 

786 def _makeZeroSafe(self, array, substituteValue=1e-9): 

787 """""" 

788 array = np.array(array) 

789 nBad = Counter(np.ravel(array))[0] 

790 if nBad == 0: 

791 return array 

792 

793 index, = np.where(array == 0) 

794 if len(index): 

795 msg = f"Found {nBad} zeros in array at elements {index}" 

796 self.log.warning(msg) 

797 

798 array[index] = substituteValue 

799 

800 return array 

801 

def fitPtc(self, dataset):
    """Fit the photon transfer curve to a polynomial or to the
    Astier+19 approximation (Eq. 16).

    Fit the photon transfer curve with either a polynomial of
    the order specified in the task config, or using the
    exponential approximation in Astier+19 (Eq. 16).

    Sigma clipping is performed iteratively for the fit, as
    well as an initial clipping of data points that are more
    than `config.initialNonLinearityExclusionThreshold` away
    from lying on a straight line. This other step is necessary
    because the photon transfer curve turns over catastrophically
    at very high flux (because saturation
    drops the variance to ~0) and these far outliers cause the
    initial fit to fail, meaning the sigma cannot be calculated
    to perform the sigma-clipping.

    Parameters
    ----------
    dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
        The dataset containing the means, variances and
        exposure times.

    Returns
    -------
    dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
        This is the same dataset as the input parameter, however,
        it has been modified to include information such as the
        fit vectors and the fit parameters. See the class
        `PhotonTransferCurveDataset`.

    Raises
    ------
    RuntimeError
        Raised if dataset.ptcFitType is None or empty.
    """
    if dataset.ptcFitType:
        ptcFitType = dataset.ptcFitType
    else:
        raise RuntimeError("ptcFitType is None or empty in PTC dataset.")
    # Pre-fill the full-covariance outputs with NaNs: the fit types
    # handled here do not compute them, but the dataset schema
    # requires the entries to exist.
    matrixSide = self.config.maximumRangeCovariancesAstier
    nanMatrix = np.empty((matrixSide, matrixSide))
    nanMatrix[:] = np.nan

    for amp in dataset.ampNames:
        lenInputTimes = len(dataset.rawExpTimes[amp])
        listNanMatrix = np.empty((lenInputTimes, matrixSide, matrixSide))
        listNanMatrix[:] = np.nan

        dataset.covariancesModel[amp] = listNanMatrix
        dataset.aMatrix[amp] = nanMatrix
        dataset.bMatrix[amp] = nanMatrix
        dataset.covariancesModelNoB[amp] = listNanMatrix
        dataset.aMatrixNoB[amp] = nanMatrix

    def errFunc(p, x, y):
        # Residuals for least_squares; ptcFunc is bound per-amp below.
        return ptcFunc(p, x) - y

    sigmaCutPtcOutliers = self.config.sigmaCutPtcOutliers
    maxIterationsPtcOutliers = self.config.maxIterationsPtcOutliers

    for i, ampName in enumerate(dataset.ampNames):
        timeVecOriginal = np.ravel(np.array(dataset.rawExpTimes[ampName]))
        meanVecOriginal = np.ravel(np.array(dataset.rawMeans[ampName]))
        varVecOriginal = np.ravel(np.array(dataset.rawVars[ampName]))
        varVecOriginal = self._makeZeroSafe(varVecOriginal)

        # Discard points when the variance starts to decrease after two
        # consecutive signal levels
        goodPoints = self._getInitialGoodPoints(meanVecOriginal, varVecOriginal,
                                                self.config.minVarPivotSearch,
                                                self.config.consecutivePointsVarDecreases)

        # Check if all points are bad from the 'cpExtractPtcTask'
        initialExpIdMask = np.ravel(np.array(dataset.expIdMask[ampName]))

        if not (goodPoints.any() and initialExpIdMask.any()):
            msg = (f"SERIOUS: All points in goodPoints: {goodPoints} or "
                   f"in initialExpIdMask: {initialExpIdMask} are bad. "
                   f"Setting {ampName} to BAD.")
            self.log.warning(msg)
            # Fill entries with NaNs
            self.fillBadAmp(dataset, ptcFitType, ampName)
            continue

        # Save the point where the variance starts decreasing as the
        # PTC turnoff point
        ptcTurnoff = meanVecOriginal[goodPoints][-1]
        dataset.ptcTurnoff[ampName] = ptcTurnoff

        mask = goodPoints

        if ptcFitType == 'EXPAPPROXIMATION':
            ptcFunc = funcAstier
            parsIniPtc = [-1e-9, 1.0, 10.]  # a00, gain, noise^2
            # lowers and uppers obtained from BOT data studies by
            # C. Lage (UC Davis, 11/2020).
            if self.config.binSize > 1:
                bounds = self._boundsForAstier(parsIniPtc)
            else:
                bounds = self._boundsForAstier(parsIniPtc, lowers=[-1e-4, 0.5, -2000],
                                               uppers=[1e-4, 2.5, 2000])
        if ptcFitType == 'POLYNOMIAL':
            ptcFunc = funcPolynomial
            parsIniPtc = self._initialParsForPolynomial(self.config.polynomialFitDegree + 1)
            bounds = self._boundsForPolynomial(parsIniPtc)

        # Before bootstrap fit, do an iterative fit to get rid of outliers.
        # This further process of outlier rejection can be skipped
        # if self.config.maxIterationsPtcOutliers = 0.
        # We already did some initial outlier rejection above in
        # self._getInitialGoodPoints.
        count = 1
        newMask = np.ones_like(meanVecOriginal, dtype=bool)
        pars = parsIniPtc
        while count <= maxIterationsPtcOutliers:
            # Note that application of the mask actually shrinks the array
            # to size rather than setting elements to zero (as we want) so
            # always update mask itself and re-apply to the original data
            meanTempVec = meanVecOriginal[mask]
            varTempVec = varVecOriginal[mask]
            res = least_squares(errFunc, parsIniPtc, bounds=bounds, args=(meanTempVec, varTempVec))
            pars = res.x

            # change this to the original from the temp because
            # the masks are ANDed meaning once a point is masked
            # it's always masked, and the masks must always be the
            # same length for broadcasting
            sigResids = (varVecOriginal - ptcFunc(pars, meanVecOriginal))/np.sqrt(varVecOriginal)
            # Vectorized sigma clip; NaN residuals compare False and
            # are rejected, matching the previous per-element test.
            newMask = np.abs(sigResids) < sigmaCutPtcOutliers
            mask = mask & newMask
            if not (mask.any() and newMask.any()):
                msg = (f"SERIOUS: All points in either mask: {mask} or newMask: {newMask} are bad. "
                       f"Setting {ampName} to BAD.")
                self.log.warning(msg)
                # Fill entries with NaNs
                self.fillBadAmp(dataset, ptcFitType, ampName)
                break
            nDroppedTotal = Counter(mask)[False]
            self.log.debug("Iteration %d: discarded %d points in total for %s",
                           count, nDroppedTotal, ampName)
            count += 1
            # objects should never shrink
            assert (len(mask) == len(timeVecOriginal) == len(meanVecOriginal) == len(varVecOriginal))
        if not (mask.any() and newMask.any()):
            continue
        dataset.expIdMask[ampName] = np.array(dataset.expIdMask[ampName])
        # store the final mask
        if len(dataset.expIdMask[ampName]):
            dataset.expIdMask[ampName] &= mask  # bitwise_and if there is already a mask
        else:
            dataset.expIdMask[ampName] = mask
        # In case there was a previous mask stored
        mask = dataset.expIdMask[ampName]
        parsIniPtc = pars
        meanVecFinal = meanVecOriginal[mask]
        varVecFinal = varVecOriginal[mask]

        if Counter(mask)[False] > 0:
            self.log.info("Number of points discarded in PTC of amplifier %s:"
                          " %d out of %d", ampName, Counter(mask)[False], len(meanVecOriginal))

        if (len(meanVecFinal) < len(parsIniPtc)):
            msg = (f"SERIOUS: Not enough data points ({len(meanVecFinal)}) compared to the number of "
                   f"parameters of the PTC model({len(parsIniPtc)}). Setting {ampName} to BAD.")
            self.log.warning(msg)
            # Fill entries with NaNs
            self.fillBadAmp(dataset, ptcFitType, ampName)
            continue
        # Fit the PTC.
        # The variance of the variance is Var(v)=2*v^2/Npix. This is
        # already calculated in `makeCovArray` of CpPtcExtract.
        # dataset.covariancesSqrtWeights[ampName][:,0,0]
        # has 1/sqrt(Var(v)).
        weightsY = dataset.covariancesSqrtWeights[ampName][:, 0, 0][mask]
        if self.config.doFitBootstrap:
            parsFit, parsFitErr, reducedChiSqPtc = fitBootstrap(parsIniPtc, meanVecFinal,
                                                                varVecFinal, ptcFunc,
                                                                weightsY=weightsY)
        else:
            parsFit, parsFitErr, reducedChiSqPtc = fitLeastSq(parsIniPtc, meanVecFinal,
                                                              varVecFinal, ptcFunc,
                                                              weightsY=weightsY)
        dataset.ptcFitPars[ampName] = parsFit
        dataset.ptcFitParsError[ampName] = parsFitErr
        dataset.ptcFitChiSq[ampName] = reducedChiSqPtc
        # Masked variances (measured and modeled) and means. Need
        # to pad the array so astropy.Table does not crash (the
        # mask may vary per amp).
        padLength = len(dataset.rawExpTimes[ampName]) - len(varVecFinal)
        dataset.finalVars[ampName] = np.pad(varVecFinal, (0, padLength), 'constant',
                                            constant_values=np.nan)
        dataset.finalModelVars[ampName] = np.pad(ptcFunc(parsFit, meanVecFinal), (0, padLength),
                                                 'constant', constant_values=np.nan)
        dataset.finalMeans[ampName] = np.pad(meanVecFinal, (0, padLength), 'constant',
                                             constant_values=np.nan)
        if ptcFitType == 'EXPAPPROXIMATION':
            ptcGain = parsFit[1]
            ptcGainErr = parsFitErr[1]
            ptcNoise = np.sqrt(np.fabs(parsFit[2]))
            # Error propagated from noise^2 to noise.
            ptcNoiseErr = 0.5*(parsFitErr[2]/np.fabs(parsFit[2]))*np.sqrt(np.fabs(parsFit[2]))
        if ptcFitType == 'POLYNOMIAL':
            ptcGain = 1./parsFit[1]
            ptcGainErr = np.fabs(1./parsFit[1])*(parsFitErr[1]/parsFit[1])
            ptcNoise = np.sqrt(np.fabs(parsFit[0]))*ptcGain
            ptcNoiseErr = (0.5*(parsFitErr[0]/np.fabs(parsFit[0]))*(np.sqrt(np.fabs(parsFit[0]))))*ptcGain
        dataset.gain[ampName] = ptcGain
        dataset.gainErr[ampName] = ptcGainErr
        dataset.noise[ampName] = ptcNoise
        dataset.noiseErr[ampName] = ptcNoiseErr

    if not len(dataset.ptcFitType) == 0:
        dataset.ptcFitType = ptcFitType
    if len(dataset.badAmps) == 0:
        # NOTE(review): filling badAmps with NaNs sized by the number
        # of exposures looks suspect (badAmps otherwise holds amp
        # names) — kept as-is for compatibility; confirm with callers.
        dataset.badAmps = np.repeat(np.nan, len(list(dataset.rawExpTimes.values())[0]))

    return dataset

1020 

def fillBadAmp(self, dataset, ptcFitType, ampName):
    """Mark an amplifier as bad, padding its PTC entries with NaNs.

    Parameters
    ----------
    dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
        The dataset containing the means, variances and
        exposure times.
    ptcFitType : {'POLYNOMIAL', 'EXPAPPROXIMATION'}
        Fit a 'POLYNOMIAL' (degree: 'polynomialFitDegree') or
        'EXPAPPROXIMATION' (Eq. 16 of Astier+19) to the PTC.
    ampName : `str`
        Amplifier name.
    """
    nPoints = len(dataset.rawExpTimes[ampName])
    # The number of (NaN) fit parameters matches the chosen model.
    if ptcFitType == "POLYNOMIAL":
        nPars = self.config.polynomialFitDegree + 1
    else:
        nPars = 3

    dataset.badAmps.append(ampName)
    dataset.expIdMask[ampName] = np.full(nPoints, False)

    # Scalar per-amp results are simply NaN.
    for perAmpScalars in (dataset.gain, dataset.gainErr, dataset.noise,
                          dataset.noiseErr, dataset.ptcFitChiSq, dataset.ptcTurnoff):
        perAmpScalars[ampName] = np.nan

    dataset.ptcFitPars[ampName] = np.full(nPars, np.nan)
    dataset.ptcFitParsError[ampName] = np.full(nPars, np.nan)

    # Per-exposure vectors get a fresh NaN array each (no aliasing).
    for perAmpVectors in (dataset.finalVars, dataset.finalModelVars, dataset.finalMeans):
        perAmpVectors[ampName] = np.full(nPoints, np.nan)

    return