Coverage for python / lsst / cp / pipe / ptc / cpPtcSolve.py: 10%

585 statements  

« prev     ^ index     » next       coverage.py v7.13.5, created at 2026-05-05 18:52 +0000

1# This file is part of cp_pipe. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (https://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# This program is free software: you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation, either version 3 of the License, or 

12# (at your option) any later version. 

13# 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18# 

19# You should have received a copy of the GNU General Public License 

20# along with this program. If not, see <https://www.gnu.org/licenses/>. 

21# 

22import copy 

23import warnings 

24import numpy as np 

25from collections import Counter 

26from itertools import groupby 

27from operator import itemgetter 

28from scipy.signal import fftconvolve 

29from scipy.optimize import least_squares 

30from scipy.stats import median_abs_deviation 

31 

32import lsst.pex.config as pexConfig 

33import lsst.pipe.base as pipeBase 

34import lsst.pipe.base.connectionTypes as cT 

35from lsst.ip.isr import PhotonTransferCurveDataset 

36from lsst.cp.pipe.utils import (fitLeastSq, fitBootstrap, 

37 funcAstier, funcAstierWithRolloff, 

38 symmetrize, Pol2D, ampOffsetGainRatioFixup) 

39 

40from deprecated.sphinx import deprecated 

41 

42__all__ = ['PhotonTransferCurveSolveConfig', 'PhotonTransferCurveSolveTask'] 

43 

44 

class PhotonTransferCurveSolveConnections(pipeBase.PipelineTaskConnections,
                                          dimensions=("instrument", "detector")):
    """Connections for `PhotonTransferCurveSolveTask`.

    Gathers the per-exposure covariance datasets measured for a single
    detector and declares the single solved PTC dataset produced for
    that detector.
    """
    # One partial PTC dataset per input exposure (hence multiple=True
    # and the per-exposure dimensions).
    inputCovariances = cT.Input(
        name="ptcCovariances",
        doc="Tuple with measured covariances from flats.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "exposure", "detector"),
        isCalibration=True,
        multiple=True,
    )
    camera = cT.PrerequisiteInput(
        name="camera",
        doc="Camera the input data comes from.",
        storageClass="Camera",
        dimensions=("instrument",),
        isCalibration=True,
    )
    # NOTE(review): the dataset type name "ptcDatsetProposal" contains a
    # historical typo; presumably it is part of the butler dataset-type
    # contract, so do not rename it without a data migration — confirm.
    outputPtcDataset = cT.Output(
        name="ptcDatsetProposal",
        doc="Output proposed ptc dataset.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "detector"),
        multiple=False,
        isCalibration=True,
    )

70 

71 

class PhotonTransferCurveSolveConfig(pipeBase.PipelineTaskConfig,
                                     pipelineConnections=PhotonTransferCurveSolveConnections):
    """Configuration for fitting measured covariances.
    """

    ptcFitType = pexConfig.ChoiceField(
        dtype=str,
        doc="Fit PTC to Eq. 16 or Eq. 20 in Astier+19.",
        default="EXPAPPROXIMATION",
        allowed={
            "EXPAPPROXIMATION": "Approximation in Astier+19 (Eq. 16).",
            "FULLCOVARIANCE_NO_B": "Full covariances model in Astier+19 (Eq. 15)",
            "FULLCOVARIANCE": "Full covariances model in Astier+19 (Eq. 20)",
        }
    )
    minMeanSignal = pexConfig.DictField(
        keytype=str,
        itemtype=float,
        doc="Minimum values (inclusive) of mean signal (in adu) per amp to use."
            " The same cut is applied to all amps if this parameter [`dict`] is passed as "
            " {'ALL_AMPS': value}",
        default={'ALL_AMPS': 0.0},
    )
    maxMeanSignal = pexConfig.DictField(
        keytype=str,
        itemtype=float,
        doc="Maximum values (inclusive) of mean signal (in adu) below which to consider, per amp."
            " The same cut is applied to all amps if this dictionary is of the form"
            " {'ALL_AMPS': value}",
        default={'ALL_AMPS': 1e6},
    )
    maximumRangeCovariancesAstier = pexConfig.Field(
        dtype=int,
        doc="Maximum range of measured covariances as in Astier+19",
        default=10,
    )
    maximumRangeCovariancesAstierFullCovFit = pexConfig.Field(
        dtype=int,
        doc="Maximum range up to where to fit covariances as in Astier+19, "
            "for the FULLCOVARIANCE model."
            "This is different from maximumRangeCovariancesAstier."
            "It should be less or equal than maximumRangeCovariancesAstier."
            "The number of parameters for this model is "
            "3*maximumRangeCovariancesAstierFullCovFit^2 + 1, so increase with care "
            "so that the fit is not too slow.",
        default=10,
    )
    doSubtractLongRangeCovariances = pexConfig.Field(
        dtype=bool,
        doc="Subtract long-range covariances before FULLCOVARIANCE fit, "
            "beyond startLongRangeCovariances?",
        default=False,
    )
    startLongRangeCovariances = pexConfig.Field(
        dtype=int,
        doc="If doSubtractLongRangeCovariances is True, subtract covariances "
            "beyond this range. It should be less than maximumRangeCovariancesAstier. ",
        default=4,
    )
    polyDegLongRangeCovariances = pexConfig.Field(
        dtype=int,
        doc="If doSubtractLongRangeCovariances is True, polynomial "
            "degree to fit data beyond startLongRangeCovariances.",
        default=1,
    )
    sigmaClipFullFitCovariancesAstier = pexConfig.Field(
        dtype=float,
        doc="sigma clip for full model fit for FULLCOVARIANCE ptcFitType ",
        default=5.0,
    )
    maxIterFullFitCovariancesAstier = pexConfig.Field(
        dtype=int,
        doc="Maximum number of iterations in full model fit for FULLCOVARIANCE ptcFitType",
        default=3,
    )
    # TODO: DM-52720 - remove deprecated POLYNOMIAL fit
    # and legacy turnoff
    polynomialFitDegree = pexConfig.Field(
        dtype=int,
        doc="Degree of polynomial to fit the PTC, when 'ptcFitType'=POLYNOMIAL.",
        default=3,
        deprecated="This field is no longer used. Will be removed after v30."
    )
    doModelPtcRolloff = pexConfig.Field(
        dtype=bool,
        doc="Model the roll-off in the PTC turnoff as a exponential decay.",
        default=False,
    )
    maxPtcRolloffDeviation = pexConfig.Field(
        dtype=float,
        doc="Maximum percent difference between the model with saturation rolloff and the "
            "model without to set the PTC turnoff. Only used if doModelPtcRolloff=True.",
        default=0.01,
    )
    # TODO: DM-52720 - remove deprecated POLYNOMIAL fit
    # and legacy turnoff
    doLegacyTurnoffSelection = pexConfig.Field(
        dtype=bool,
        doc="Use 'legacy' computation for PTC turnoff selection. If set "
            "to False, then the KS test p-value selection will be used instead.",
        default=False,
        deprecated="This option has been deprecated and will be removed after v30.",
    )
    sigmaCutPtcOutliers = pexConfig.Field(
        dtype=float,
        doc="Sigma cut for outlier rejection in PTC.",
        default=5.0,
    )
    maxIterationsPtcOutliers = pexConfig.RangeField(
        dtype=int,
        doc="Maximum number of iterations for outlier rejection in PTC.",
        default=2,
        min=0
    )
    maxSignalInitialPtcOutlierFit = pexConfig.Field(
        dtype=float,
        doc="Maximum signal considered for intial outlier fit. This should be below "
            "the PTC turnoff to ensure accurate outlier rejection. If "
            "scaleMaxSignalInitialPtcOutlierFit=True then the units are electrons; "
            "otherwise adu.",
        default=50_000.,
    )
    maxDeltaInitialPtcOutlierFit = pexConfig.Field(
        dtype=float,
        doc="If there are any outliers in the initial fit that have mean greater than "
            "maxSignalInitialPtcOutlierFit, then no points that have this delta "
            "mean from the previous ``good`` point are allowed. If "
            "scaleMaxSignalInitialPtcOutlierFit=True then the units are electrons; "
            "otherwise adu.",
        default=9_000.,
    )
    extendRollofSearchMaskSizeAdu = pexConfig.Field(
        dtype=float,
        doc="The amount to extend the mask beyond the initial PTC turnoff to fit "
            "the rolloff. Only used if doModelPtcRolloff=True (units: adu). ",
        default=5_000.,
    )
    scaleMaxSignalInitialPtcOutlierFit = pexConfig.Field(
        dtype=bool,
        doc="Scale maxSignalInitialPtcOutlierFit and maxDeltaInitialPtcOutlierFit "
            "by approximate gain? If yes then "
            "maxSignalInitialPtcOutlierFit and maxDeltaInitialPtcOutlierFit "
            "are assumed to have units of electrons, otherwise adu.",
        default=True,
    )
    # TODO: DM-52720 - remove deprecated POLYNOMIAL fit
    # and legacy turnoff
    minVarPivotSearch = pexConfig.Field(
        dtype=float,
        doc="The code looks for a pivot signal point after which the variance starts decreasing at high-flux"
            " to exclude then from the PTC model fit. However, sometimes at low fluxes, the variance"
            " decreases slightly. Set this variable for the variance value, in adu^2, after which the pivot "
            " should be sought. Only used if doLegacyTurnoffSelection is True.",
        default=10000,
        deprecated="This option has been deprecated and will be removed after v30.",
    )
    # TODO: DM-52720 - remove deprecated POLYNOMIAL fit
    # and legacy turnoff
    consecutivePointsVarDecreases = pexConfig.RangeField(
        dtype=int,
        doc="Required number of consecutive points/fluxes in the PTC where the variance "
            "decreases in order to find a first estimate of the PTC turn-off. "
            "Only used if doLegacyTurnoffSelection is True.",
        default=2,
        min=2,
        deprecated="This option has been deprecated and will be removed after v30.",
    )
    ksTestMinPvalue = pexConfig.Field(
        dtype=float,
        doc="Minimum value of the Gaussian histogram KS test p-value to be used in PTC fit. ",
        default=0.01,
    )
    doFitBootstrap = pexConfig.Field(
        dtype=bool,
        doc="Use bootstrap for the PTC fit parameters and errors?.",
        default=False,
    )
    binSize = pexConfig.Field(
        dtype=int,
        doc="Bin the image by this factor in both dimensions.",
        default=1,
    )
    # TODO: DM-52720 - remove deprecated POLYNOMIAL fit
    # and legacy turnoff
    doAmpOffsetGainRatioFixup = pexConfig.Field(
        dtype=bool,
        doc="Do gain ratio fixup based on amp offsets?",
        default=False,
        deprecated="This option has been deprecated and will be removed after v30.",
    )
    # TODO: DM-52720 - remove deprecated POLYNOMIAL fit
    # and legacy turnoff
    ampOffsetGainRatioMinAdu = pexConfig.Field(
        dtype=float,
        doc="Minimum number of adu to use for amp offset gain ratio fixup.",
        default=1000.0,
        deprecated="This option has been deprecated and will be removed after v30.",
    )
    # TODO: DM-52720 - remove deprecated POLYNOMIAL fit
    # and legacy turnoff
    ampOffsetGainRatioMaxAdu = pexConfig.Field(
        dtype=float,
        doc="Maximum number of adu to use for amp offset gain ratio fixup.",
        default=20000.0,
        deprecated="This option has been deprecated and will be removed after v30.",
    )

    def validate(self):
        """Check consistency of the covariance-range configuration.

        Raises
        ------
        RuntimeError
            Raised if the FULLCOVARIANCE fit range exceeds the measured
            covariance range, or if the long-range covariance starting
            lag exceeds the measured covariance range.
        """
        super().validate()
        fitMatrixSide = self.maximumRangeCovariancesAstierFullCovFit
        measureMatrixSide = self.maximumRangeCovariancesAstier
        if self.ptcFitType == "FULLCOVARIANCE":
            if fitMatrixSide > measureMatrixSide:
                # Bug fix: these messages were previously passed
                # logging-style ("... %s", arg1, arg2) to RuntimeError,
                # so the placeholders were never interpolated and the
                # fragments lacked a joining space.
                raise RuntimeError(
                    f"Covariance fit size {fitMatrixSide} is larger than "
                    f"measurement size {measureMatrixSide}."
                )
            if self.doSubtractLongRangeCovariances:
                startLag = self.startLongRangeCovariances
                if measureMatrixSide < startLag:
                    raise RuntimeError(
                        f"Covariance measure size {measureMatrixSide} is smaller than "
                        f"long-range covariance starting point {startLag}."
                    )

294 

295 

296class PhotonTransferCurveSolveTask(pipeBase.PipelineTask): 

297 """Task to fit the PTC from flat covariances. 

298 

299 The first task of the PTC measurement pipeline, 

300 ``PhotonTransferCurveMeasureTask`` (and assumed to have been run 

301 before this task), produced a list of 

302 `~lsst.ip.isr.PhotonTransferCurveDataset` objects. Each dataset 

303 contains the mean signal and covariances of the 

304 difference image of the flat-field images taken at 

305 the same exposure time. The list also contains dummy 

306 datasets (with no measurements), whose purpose is to have 

307 the input and output dimensions of ``PhotonTransferCurveMeasureTask`` 

308 match. 

309 

310 This task, ``PhotonTransferCurveSolveTask``, assembles the list 

311 of individual PTC datasets produced 

312 by ``PhotonTransferCurveMeasureTask`` into one single final PTC 

    dataset, discarding the dummy datasets as appropriate.

314 The task fits the measured (co)variances to one of three models: 

315 any of the models described in equations 16 and 20 of Astier+19 and 

    equation 20 with "b" specifically fixed to 0. These options are referred

317 to as ``EXPAPPROXIMATION``, ``FULLCOVARIANCE``, and ``FULLCOVARIANCE_NO_B`` 

318 in the configuration options of the task, respectively). 

319 Parameters of interest such as the gain and noise are derived 

320 from the fits. The ``FULLCOVARIANCE`` model is fitted to the 

    full covariance data (as opposed to the other two models, which

322 are fit to the variance vs mean measurements only). 

323 

324 Astier+19: "The Shape of the Photon Transfer Curve 

325 of CCD sensors", arXiv:1905.08677 

326 """ 

327 

328 ConfigClass = PhotonTransferCurveSolveConfig 

329 _DefaultName = 'cpPhotonTransferCurveSolve' 

330 

331 def runQuantum(self, butlerQC, inputRefs, outputRefs): 

332 """Ensure that the input and output dimensions are passed along. 

333 

334 Parameters 

335 ---------- 

336 butlerQC : `~lsst.daf.butler.QuantumContext` 

337 Butler to operate on. 

338 inputRefs : `~lsst.pipe.base.InputQuantizedConnection` 

339 Input data refs to load. 

340 ouptutRefs : `~lsst.pipe.base.OutputQuantizedConnection` 

341 Output data refs to persist. 

342 """ 

343 inputs = butlerQC.get(inputRefs) 

344 detId = inputRefs.inputCovariances[0].dataId['detector'] 

345 outputs = self.run(inputCovariances=inputs['inputCovariances'], camera=inputs['camera'], detId=detId) 

346 butlerQC.put(outputs, outputRefs) 

347 

    def run(self, inputCovariances, camera=None, detId=0):
        """Fit measured covariances to different models.

        Parameters
        ----------
        inputCovariances : `list` [`lsst.ip.isr.PhotonTransferCurveDataset`]
            List of lsst.ip.isr.PhotonTransferCurveDataset datasets.
        camera : `lsst.afw.cameraGeom.Camera`, optional
            Input camera.
        detId : `int`
            Detector ID to locate the detector in the camera and
            populate the `lsst.ip.isr.PhotonTransferCurveDataset`
            metadata.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The resulting structure contains:

            ``outputPtcDataset``
                Final PTC dataset, containing information such as the
                means, variances, and exposure times
                (`lsst.ip.isr.PhotonTransferCurveDataset`).
        """
        # Find the ampNames from a non-dummy ptc.
        ampNames = []
        for partialPtcDataset in inputCovariances:
            if partialPtcDataset.ptcFitType != 'DUMMY':
                ampNames = partialPtcDataset.ampNames
                break

        # Each amp may have a different min and max adu signal
        # specified in the config.  'ALL_AMPS' takes precedence over a
        # per-amp entry.
        maxMeanSignalDict = {ampName: 1e6 for ampName in ampNames}
        minMeanSignalDict = {ampName: 0.0 for ampName in ampNames}
        for ampName in ampNames:
            if 'ALL_AMPS' in self.config.maxMeanSignal:
                maxMeanSignalDict[ampName] = self.config.maxMeanSignal['ALL_AMPS']
            elif ampName in self.config.maxMeanSignal:
                maxMeanSignalDict[ampName] = self.config.maxMeanSignal[ampName]

            if 'ALL_AMPS' in self.config.minMeanSignal:
                minMeanSignalDict[ampName] = self.config.minMeanSignal['ALL_AMPS']
            elif ampName in self.config.minMeanSignal:
                minMeanSignalDict[ampName] = self.config.minMeanSignal[ampName]

        # Assemble individual PTC datasets into a single PTC dataset.
        datasetPtc = PhotonTransferCurveDataset(
            ampNames=ampNames,
            ptcFitType=self.config.ptcFitType,
            covMatrixSide=self.config.maximumRangeCovariancesAstier,
            covMatrixSideFullCovFit=self.config.maximumRangeCovariancesAstierFullCovFit)

        for partialPtcDataset in inputCovariances:
            # Ignore dummy datasets
            if partialPtcDataset.ptcFitType == 'DUMMY':
                continue

            # Apply min/max masking to the partial PTC.  Each partial
            # dataset holds a single flat pair, hence index [0].
            for ampName in ampNames:
                rawMean = partialPtcDataset.rawMeans[ampName][0]
                rawVar = partialPtcDataset.rawVars[ampName][0]
                if (rawMean <= minMeanSignalDict[ampName]) or (rawMean >= maxMeanSignalDict[ampName]) \
                        or not np.isfinite(rawMean) or not np.isfinite(rawVar):
                    partialPtcDataset.expIdMask[ampName][0] = False

                # Reject pairs whose pixel histogram failed the
                # Gaussian KS test.
                kspValue = partialPtcDataset.kspValues[ampName][0]
                if kspValue < self.config.ksTestMinPvalue:
                    partialPtcDataset.expIdMask[ampName][0] = False

            # Append to the full PTC.
            datasetPtc.appendPartialPtc(partialPtcDataset)

        # Sort arrays that are filled so far in the final dataset by
        # rawMeans index.
        # First compute the mean across all the amps to make sure that they are
        # all sorted the same way.
        detectorMeans = np.zeros(len(datasetPtc.inputExpIdPairs[ampNames[0]]))

        for i in range(len(detectorMeans)):
            arr = np.array([datasetPtc.rawMeans[ampName][i] for ampName in ampNames])
            good, = (np.isfinite(arr)).nonzero()
            if good.size == 0:
                detectorMeans[i] = np.nan
            else:
                detectorMeans[i] = np.mean(arr[good])

        # Make sure this is sorted
        index = np.argsort(detectorMeans)
        datasetPtc.sort(index)

        # Always start with an initial EXPAPPROXIMATION fit
        # Fit the measured covariances vs mean signal to
        # the Astier+19 full model (Eq. 20). Before that
        # do a preliminary fit to the variance (C_00) vs mean
        # signal (mu) curve using the EXPAPPROXIMATION model
        # (Eq. 16 in Astier+19) in order to
        # get the flat pairs that are masked. The
        # points at these fluxes will also be masked when
        # calculating the other elements of the covariance
        # matrix, C_ij, i!=j).
        tempDatasetPtc = copy.copy(datasetPtc)
        tempDatasetPtc.ptcFitType = "EXPAPPROXIMATION"
        tempDatasetPtc = self.fitPtc(tempDatasetPtc, computePtcTurnoff=True)

        # Model the PTC rolloff
        if self.config.doModelPtcRolloff:
            tempDatasetPtc = self.fitPtcRolloff(tempDatasetPtc)

        # Do the final fit
        # The turnoff was already computed during
        # the initial fit.
        # NOTE(review): this assigns ``fitType``, not ``ptcFitType`` —
        # elsewhere in this module the attribute is ``ptcFitType`` (and
        # the FULLCOVARIANCE branch below sets it explicitly), so this
        # line looks like a dead or misspelled assignment. Confirm
        # against `PhotonTransferCurveDataset` before changing.
        tempDatasetPtc.fitType = self.config.ptcFitType
        if self.config.ptcFitType == "EXPAPPROXIMATION":
            # Fit with the exponential approximation
            datasetPtc = self.fitPtc(tempDatasetPtc, computePtcTurnoff=False)
        elif self.config.ptcFitType in ["FULLCOVARIANCE", "FULLCOVARIANCE_NO_B"]:
            # Fit with the full covariance model (with/without b)
            # This does not change the PTC turnoff.
            tempDatasetPtc.ptcFitType = self.config.ptcFitType
            datasetPtc = self.fitDataFullCovariance(tempDatasetPtc)
        else:
            # This fit type is not recognized
            raise RuntimeError(
                f"Fitting option {self.config.ptcFitType} not one of "
                "'EXPAPPROXIMATION', 'FULLCOVARIANCE', or 'FULLCOVARIANCE_NO_B'"
            )

        # Initial validation of PTC fit.
        for ampName in ampNames:
            # These may be all nan (esp. in tests) and will be filtered
            # as appropriate later.
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                overscanNoise = np.nanmedian(datasetPtc.noiseList[ampName])  # adu
                overscanNoise *= datasetPtc.gain[ampName]  # now in electron
                noiseFitted = datasetPtc.noise[ampName]  # electron

            # Check if noise is close to noiseFitted
            if not np.isclose(noiseFitted, overscanNoise, rtol=0.05, atol=0.0, equal_nan=True):
                self.log.warning(f"Read noise from PTC fit ({noiseFitted}) is not consistent "
                                 f"with read noise measured from overscan ({overscanNoise}) for "
                                 f"amplifier {ampName}. Try adjusting the fit range.")

        # Do amp-offset gain ratio fixup if configured.
        if self.config.doAmpOffsetGainRatioFixup:
            ampOffsetGainRatioFixup(
                datasetPtc,
                self.config.ampOffsetGainRatioMinAdu,
                self.config.ampOffsetGainRatioMaxAdu,
                log=self.log,
            )

        if camera:
            detector = camera[detId]
        else:
            detector = None
        datasetPtc.updateMetadataFromExposures(inputCovariances)
        datasetPtc.updateMetadata(setDate=True, camera=camera, detector=detector)

        # Summarize the overscan level (median and robust sigma) over
        # the exposures that survived masking.
        for ampName in ampNames:
            datasetPtc.overscanMedian[ampName] = np.nanmedian(
                datasetPtc.overscanMedianLevelList[ampName][datasetPtc.expIdMask[ampName]]
            )
            datasetPtc.overscanMedianSigma[ampName] = median_abs_deviation(
                datasetPtc.overscanMedianLevelList[ampName][datasetPtc.expIdMask[ampName]],
                scale="normal",
            )

        return pipeBase.Struct(
            outputPtcDataset=datasetPtc,
        )

519 

520 def fitDataFullCovariance(self, dataset): 

521 """Fit measured flat covariances to the full model in 

522 Astier+19 (Eq. 20). 

523 

524 Parameters 

525 ---------- 

526 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset` 

527 The dataset containing information such as the means, 

528 (co)variances, and exposure times. 

529 

530 Returns 

531 ------- 

532 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset` 

533 This is the same dataset as the input parameter, however, 

534 it has been modified to include information such as the 

535 fit vectors and the fit parameters. See the class 

536 `PhotonTransferCurveDatase`. 

537 

538 Notes 

539 ----- 

540 The parameters of the full model for C_ij(mu) ("C_ij" and "mu" 

541 in adu^2 and adu, respectively) in Astier+19 (Eq. 20) are: 

542 

543 - "a" coefficients (r by r matrix), units: 1/electron 

544 - "b" coefficients (r by r matrix), units: 1/electron 

545 - noise matrix (r by r matrix), units: electron^2 

546 - gain, units: electron/adu 

547 

548 "b" appears in Eq. 20 only through the "ab" combination, which 

549 is defined in this code as "c=ab". 

550 

551 Total number of parameters: #entries(a) + #entries(c) + #entries(noise) 

552 + 1. This is equivalent to r^2 + r^2 + r^2 + 1, where "r" is the 

553 maximum lag considered for the covariances calculation, and the 

554 extra "1" is the gain. If "b" is 0, then "c" is 0, and len(pInit) will 

555 have r^2 fewer entries. 

556 """ 

557 matrixSide = dataset.covMatrixSide 

558 matrixSideFit = dataset.covMatrixSideFullCovFit 

559 lenParams = matrixSideFit*matrixSideFit 

560 

561 for ampName in dataset.ampNames: 

562 lenInputTimes = len(dataset.rawExpTimes[ampName]) 

563 # Not used when ptcFitType is 'FULLCOVARIANCE' 

564 dataset.ptcFitPars[ampName] = np.array([np.nan]) 

565 dataset.ptcFitParsError[ampName] = np.array([np.nan]) 

566 dataset.ptcFitChiSq[ampName] = np.nan 

567 

568 if ampName in dataset.badAmps: 

569 # Bad amp 

570 # Entries need to have proper dimensions so read/write 

571 # with astropy.Table works. 

572 nanMatrixFit = np.full((matrixSideFit, matrixSideFit), np.nan) 

573 listNanMatrix = np.full((lenInputTimes, matrixSide, matrixSide), np.nan) 

574 listNanMatrixFit = np.full((lenInputTimes, matrixSideFit, matrixSideFit), np.nan) 

575 dataset.covariancesModel[ampName] = listNanMatrixFit 

576 dataset.covariancesSqrtWeights[ampName] = listNanMatrix 

577 dataset.aMatrix[ampName] = nanMatrixFit 

578 dataset.bMatrix[ampName] = nanMatrixFit 

579 dataset.noiseMatrix[ampName] = nanMatrixFit 

580 

581 dataset.expIdMask[ampName] = np.repeat(False, lenInputTimes) 

582 dataset.gain[ampName] = np.nan 

583 dataset.gainUnadjusted[ampName] = np.nan 

584 dataset.gainErr[ampName] = np.nan 

585 dataset.noise[ampName] = np.nan 

586 dataset.noiseErr[ampName] = np.nan 

587 dataset.finalVars[ampName] = np.repeat(np.nan, lenInputTimes) 

588 dataset.finalModelVars[ampName] = np.repeat(np.nan, lenInputTimes) 

589 dataset.finalMeans[ampName] = np.repeat(np.nan, lenInputTimes) 

590 continue 

591 

592 muAtAmp = dataset.rawMeans[ampName].copy() 

593 maskAtAmp = dataset.expIdMask[ampName] 

594 if len(maskAtAmp) == 0: 

595 maskAtAmp = np.repeat(True, len(muAtAmp)) 

596 

597 if np.sum(maskAtAmp) < matrixSide: 

598 self.log.warning("Not enough good points to fit PTC for amp %s.", ampName) 

599 

600 dataset.badAmps.append(ampName) 

601 nanMatrixFit = np.full((matrixSideFit, matrixSideFit), np.nan) 

602 listNanMatrix = np.full((lenInputTimes, matrixSide, matrixSide), np.nan) 

603 listNanMatrixFit = np.full((lenInputTimes, matrixSideFit, matrixSideFit), np.nan) 

604 dataset.covariancesModel[ampName] = listNanMatrixFit 

605 dataset.covariancesSqrtWeights[ampName] = listNanMatrix 

606 dataset.aMatrix[ampName] = nanMatrixFit 

607 dataset.bMatrix[ampName] = nanMatrixFit 

608 dataset.noiseMatrix[ampName] = nanMatrixFit 

609 

610 dataset.expIdMask[ampName] = np.repeat(False, lenInputTimes) 

611 dataset.gain[ampName] = np.nan 

612 dataset.gainUnadjusted[ampName] = np.nan 

613 dataset.gainErr[ampName] = np.nan 

614 dataset.noise[ampName] = np.nan 

615 dataset.noiseErr[ampName] = np.nan 

616 dataset.finalVars[ampName] = np.repeat(np.nan, lenInputTimes) 

617 dataset.finalModelVars[ampName] = np.repeat(np.nan, lenInputTimes) 

618 dataset.finalMeans[ampName] = np.repeat(np.nan, lenInputTimes) 

619 continue 

620 

621 muAtAmpMasked = muAtAmp[maskAtAmp] 

622 covAtAmp = dataset.covariances[ampName] 

623 covAtAmpMasked = np.nan_to_num(covAtAmp)[maskAtAmp] 

624 covSqrtWeightsAtAmp = dataset.covariancesSqrtWeights[ampName] 

625 covSqrtWeightsAtAmpMasked = np.nan_to_num(covSqrtWeightsAtAmp)[maskAtAmp] 

626 

627 # Subtract long-range covariances 

628 if self.config.doSubtractLongRangeCovariances: 

629 startLag = self.config.startLongRangeCovariances 

630 covAtAmpMasked, covSqrtWeightsAtAmpMasked = self.subtractDistantOffset( 

631 muAtAmpMasked, covAtAmpMasked, 

632 covSqrtWeightsAtAmpMasked, 

633 start=startLag, 

634 degree=self.config.polyDegLongRangeCovariances) 

635 

636 # In principle, we could fit to a lag smaller than the measured 

637 # covariances. 

638 r = self.config.maximumRangeCovariancesAstierFullCovFit 

639 covAtAmpForFitMasked = covAtAmpMasked[:, :r, :r] 

640 covSqrtWeightsAtAmpForFitMasked = covSqrtWeightsAtAmpMasked[:, :r, :r] 

641 

642 # Initial fit, to approximate parameters, with c=0 

643 a0, c0, noiseMatrix0, gain0 = self.initialFitFullCovariance( 

644 muAtAmpMasked, 

645 covAtAmpForFitMasked, 

646 covSqrtWeightsAtAmpForFitMasked 

647 ) 

648 

649 # Fit full model (Eq. 20 of Astier+19) 

650 pInit = np.concatenate((a0.ravel(), c0.ravel(), noiseMatrix0.ravel(), np.array(gain0)), axis=None) 

651 

652 # Initialize empty results dictionary 

653 fitResults = {'a': [], 'c': [], 'noiseMatrix': [], 'gain': [], 'paramsErr': []} 

654 

655 # Pick the correct full covariance model function 

656 ptcModel = self.funcFullCovarianceModel 

657 if dataset.ptcFitType == "FULLCOVARIANCE_NO_B": 

658 ptcModel = self.funcFullCovarianceModelNoB 

659 pInit = np.concatenate((a0.ravel(), noiseMatrix0.ravel(), np.array(gain0)), axis=None) 

660 

661 params, paramsErr, _ = fitLeastSq( 

662 pInit, 

663 muAtAmpMasked, 

664 covAtAmpForFitMasked.ravel(), 

665 ptcModel, 

666 weightsY=covSqrtWeightsAtAmpForFitMasked.ravel(), 

667 ) 

668 

669 if dataset.ptcFitType == "FULLCOVARIANCE_NO_B": 

670 zeros = np.zeros_like(params[:lenParams]) 

671 params = np.insert(params, lenParams, zeros) 

672 paramsErr = np.insert(paramsErr, lenParams, zeros) 

673 

674 a = params[:lenParams].reshape((matrixSideFit, matrixSideFit)) 

675 c = params[lenParams:2*lenParams].reshape((matrixSideFit, matrixSideFit)) 

676 noiseMatrix = params[2*lenParams:3*lenParams].reshape((matrixSideFit, matrixSideFit)) 

677 gain = params[-1] 

678 

679 fitResults['a'] = a 

680 fitResults['c'] = c 

681 fitResults['noiseMatrix'] = noiseMatrix 

682 fitResults['gain'] = gain 

683 fitResults['paramsErr'] = paramsErr 

684 

685 # Put the information in the PTC dataset 

686 

687 # Not used when ptcFitType is 'FULLCOVARIANCE*' 

688 dataset.ptcFitPars[ampName] = np.array([np.nan]) 

689 dataset.ptcFitParsError[ampName] = np.array([np.nan]) 

690 dataset.ptcFitChiSq[ampName] = np.nan 

691 

692 # Save full covariances, covariances models, and their weights. 

693 # dataset.expIdMask is already full, but needs to be 

694 # converted to bool. 

695 dataset.expIdMask[ampName] = np.array(dataset.expIdMask[ampName], dtype=bool) 

696 dataset.covariances[ampName] = covAtAmp 

697 # We evaluate the covariance model everywhere, even the 

698 # masked amps. 

699 dataset.covariancesModel[ampName] = self.evalCovModel( 

700 muAtAmp, 

701 fitResults['a'], 

702 fitResults['c'], 

703 fitResults['noiseMatrix'], 

704 fitResults['gain'], 

705 setBtoZero=(dataset.ptcFitType == "FULLCOVARIANCE_NO_B"), 

706 ) 

707 dataset.covariancesSqrtWeights[ampName] = covSqrtWeightsAtAmp 

708 dataset.aMatrix[ampName] = fitResults['a'] 

709 dataset.bMatrix[ampName] = fitResults['c']/fitResults['a'] 

710 dataset.gain[ampName] = fitResults['gain'] 

711 dataset.gainUnadjusted[ampName] = fitResults['gain'] 

712 dataset.gainErr[ampName] = fitResults['paramsErr'][-1] 

713 readoutNoiseSquared = fitResults['noiseMatrix'][0][0] 

714 readoutNoise = np.sqrt(np.fabs(readoutNoiseSquared)) 

715 dataset.noise[ampName] = readoutNoise 

716 readoutNoiseSquaredSigma = fitResults['paramsErr'][2*lenParams] 

717 noiseErr = 0.5*(readoutNoiseSquaredSigma/np.fabs(readoutNoiseSquared))*readoutNoise 

718 dataset.noiseErr[ampName] = noiseErr 

719 dataset.noiseMatrix[ampName] = fitResults['noiseMatrix'] 

720 

721 dataset.finalVars[ampName] = covAtAmp[:, 0, 0].copy() 

722 dataset.finalVars[ampName][~maskAtAmp] = np.nan 

723 dataset.finalModelVars[ampName] = dataset.covariancesModel[ampName][:, 0, 0].copy() 

724 dataset.finalModelVars[ampName][~maskAtAmp] = np.nan 

725 dataset.finalMeans[ampName] = muAtAmp.copy() 

726 dataset.finalMeans[ampName][~maskAtAmp] = np.nan 

727 

728 return dataset 

729 

    def initialFitFullCovariance(self, mu, cov, sqrtW):
        """ Performs a crude parabolic fit of the data in order to start
        the full fit close to the solution, setting b=0 (c=0) in Eq. 20
        of Astier+19.

        Parameters
        ----------
        mu : `numpy.array`, (N,)
            Signal `mu` (adu)
        cov : `numpy.array`, (N, M, M)
            Covariance arrays of size `(M, M)` (with
            `M = config.maximumRangeCovariancesAstier`),
            indexed by mean signal `mu`.
        sqrtW : `numpy.array`, (N,)
            Covariance weights, defined as 1./sqrt(Variances)

        Returns
        -------
        a : `numpy.array`, (M, M)
            "a" parameter per flux in Eq. 20 of Astier+19
            (units: 1/electron).
        c : `numpy.array`, (M, M)
            "c"="ab" parameter per flux in Eq. 20 of Astier+19.
            (units: 1/electron^2).
        noiseMatrix : `numpy.array`, (M, M)
            "noise" parameter per flux in Eq. 20 of Astier+19.
            (units: electron^2)
        gain : `float`
            Amplifier gain (electron/adu)

        Notes
        -----
        Since the fit is performed with ``setBtoZero=True``, the
        returned ``c`` matrix is never updated and stays at its
        all-zeros initialization; only ``a``, ``noiseMatrix`` and
        ``gain`` are actually estimated here.
        """
        matrixSideFit = self.config.maximumRangeCovariancesAstierFullCovFit

        # Initialize fit parameters
        a = np.zeros((matrixSideFit, matrixSideFit))
        c = np.zeros((matrixSideFit, matrixSideFit))
        noiseMatrix = np.zeros((matrixSideFit, matrixSideFit))
        gain = 1.

        # iterate the fit to account for higher orders
        # the chi2 does not necessarily go down, so one could
        # stop when it increases
        oldChi2 = 1e30
        for _ in range(5):
            # Residuals are fit against the model evaluated with the
            # parameters from the *previous* iteration (b forced to 0).
            model = np.nan_to_num(self.evalCovModel(mu, a, c, noiseMatrix, gain, setBtoZero=True))
            # loop on lags
            for i in range(matrixSideFit):
                for j in range(matrixSideFit):
                    # fit a parabola for a given lag
                    parsFit = np.polyfit(mu, cov[:, i, j] - model[:, i, j],
                                         2, w=sqrtW[:, i, j])
                    # model equation (Eq. 20) in Astier+19, with c=a*b=0:
                    # the quadratic coefficient updates "a", the constant
                    # term updates the noise.
                    a[i, j] += parsFit[0]
                    noiseMatrix[i, j] += parsFit[2]
                    if i + j == 0:
                        # The linear term of the (0, 0) lag corrects the
                        # inverse gain: 1/g_new = 1/g_old + slope.
                        gain = 1./(1/gain+parsFit[1])
            # Chi2 uses the model from the start of this iteration, so
            # the stopping criterion lags the parameter update by one.
            weightedRes = (model - cov)*sqrtW
            chi2 = (weightedRes.flatten()**2).sum()
            if chi2 > oldChi2:
                break
            oldChi2 = chi2

        return a, c, noiseMatrix, gain

792 

793 def funcFullCovarianceModel(self, params, x): 

794 """Model to fit covariances from flat fields; Equation 20 of 

795 Astier+19. 

796 

797 Parameters 

798 ---------- 

799 params : `list` 

800 Parameters of the model: aMatrix, CMatrix, noiseMatrix, 

801 gain (e/adu). 

802 x : `numpy.array`, (N,) 

803 Signal `mu` (adu) 

804 

805 Returns 

806 ------- 

807 y : `numpy.array`, (N,) 

808 Covariance matrix. 

809 """ 

810 matrixSideFit = self.config.maximumRangeCovariancesAstierFullCovFit 

811 lenParams = matrixSideFit*matrixSideFit 

812 aMatrix = params[:lenParams].reshape((matrixSideFit, matrixSideFit)) 

813 cMatrix = params[lenParams:2*lenParams].reshape((matrixSideFit, matrixSideFit)) 

814 noiseMatrix = params[2*lenParams:3*lenParams].reshape((matrixSideFit, matrixSideFit)) 

815 gain = params[-1] 

816 

817 return self.evalCovModel(x, aMatrix, cMatrix, noiseMatrix, gain).flatten() 

818 

819 def funcFullCovarianceModelNoB(self, params, x): 

820 """Model to fit covariances from flat fields; Equation 20 of 

821 Astier+19, with b=0 (equivalent to c=a*b=0 in this code). 

822 

823 Parameters 

824 ---------- 

825 params : `list` 

826 Parameters of the model: aMatrix, noiseMatrix, 

827 gain (e/adu). 

828 x : `numpy.array`, (N,) 

829 Signal mu (adu) 

830 

831 Returns 

832 ------- 

833 y : `numpy.array`, (N,) 

834 Covariance matrix. 

835 """ 

836 matrixSideFit = self.config.maximumRangeCovariancesAstierFullCovFit 

837 lenParams = matrixSideFit*matrixSideFit 

838 aMatrix = params[:lenParams].reshape((matrixSideFit, matrixSideFit)) 

839 cMatrix = np.zeros_like(aMatrix) 

840 noiseMatrix = params[lenParams:2*lenParams].reshape((matrixSideFit, matrixSideFit)) 

841 gain = params[-1] 

842 

843 return self.evalCovModel(x, aMatrix, cMatrix, noiseMatrix, gain, setBtoZero=True).flatten() 

844 

    def evalCovModel(self, mu, aMatrix, cMatrix, noiseMatrix, gain, setBtoZero=False):
        """Computes full covariances model (Eq. 20 of Astier+19).

        Parameters
        ----------
        mu : `numpy.array`, (N,)
            List of mean signals (units: adu)
        aMatrix : `numpy.array`, (M, M)
            "a" parameter per flux in Eq. 20 of Astier+19
            (units: 1/electron)
        cMatrix : `numpy.array`, (M, M)
            "c"="ab" parameter per flux in Eq. 20 of Astier+19
            (units: 1/electron^2)
        noiseMatrix : `numpy.array`, (M, M)
            "noise" parameter per flux in Eq. 20 of Astier+19
            (units: electron^2)
        gain : `float`
            Amplifier gain (e/adu)
        setBtoZero=False : `bool`, optional
            Set "b" parameter in full model (see Astier+19) to zero.

        Returns
        -------
        covModel : `numpy.array`, (N, M, M)
            Covariances model.

        Notes
        -----
        By default, computes the covModel for the mu's stored(self.mu).
        Returns cov[Nmu, M, M]. The variance for the PTC is
        cov[:, 0, 0]. mu and cov are in adus and adus squared. To use
        electrons for both, the gain should be set to 1. This routine
        implements the model in Astier+19 (1905.08677).
        The parameters of the full model for C_ij(mu) ("C_ij" and "mu"
        in adu^2 and adu, respectively) in Astier+19 (Eq. 20) are:

        - "a" coefficients (M by M matrix), units: 1/electron
        - "b" coefficients (M by M matrix), units: 1/electron
        - noise matrix (M by M matrix), units: electron^2
        - gain, units: electron/adu

        "b" appears in Eq. 20 only through the "ab" combination, which
        is defined in this code as "c=ab".
        """
        matrixSideFit = self.config.maximumRangeCovariancesAstierFullCovFit
        sa = (matrixSideFit, matrixSideFit)
        # pad a with zeros and symmetrize
        aEnlarged = np.zeros((int(sa[0]*1.5)+1, int(sa[1]*1.5)+1))
        aEnlarged[0:sa[0], 0:sa[1]] = aMatrix
        aSym = symmetrize(aEnlarged)
        # pad c with zeros and symmetrize
        cEnlarged = np.zeros((int(sa[0]*1.5)+1, int(sa[1]*1.5)+1))
        cEnlarged[0:sa[0], 0:sa[1]] = cMatrix
        cSym = symmetrize(cEnlarged)
        # Discrete convolutions of the symmetrized kernels supply the
        # higher-order (a*a), (a*a*a) and (a*c) terms of Eq. 20.
        a2 = fftconvolve(aSym, aSym, mode='same')
        a3 = fftconvolve(a2, aSym, mode='same')
        ac = fftconvolve(aSym, cSym, mode='same')
        # (xc, yc): location of the peak of the symmetrized kernel,
        # used below to crop the convolutions back to (M, M).
        (xc, yc) = np.unravel_index(np.abs(aSym).argmax(), a2.shape)

        # Add a leading flux axis so all terms broadcast against bigMu.
        a1 = aMatrix[np.newaxis, :, :]
        a2 = a2[np.newaxis, xc:xc + matrixSideFit, yc:yc + matrixSideFit]
        a3 = a3[np.newaxis, xc:xc + matrixSideFit, yc:yc + matrixSideFit]
        ac = ac[np.newaxis, xc:xc + matrixSideFit, yc:yc + matrixSideFit]
        c1 = cMatrix[np.newaxis, ::]

        # assumes that mu is 1d
        bigMu = mu[:, np.newaxis, np.newaxis]*gain
        # c(=a*b in Astier+19) also has a contribution to the last
        # term, that is absent for now.
        if setBtoZero:
            c1 = np.zeros_like(c1)
            ac = np.zeros_like(ac)
        covModel = (bigMu/(gain*gain)*(a1*bigMu+2./3.*(bigMu*bigMu)*(a2 + c1)
                    + (1./3.*a3 + 5./6.*ac)*(bigMu*bigMu*bigMu)) + noiseMatrix[np.newaxis, :, :]/gain**2)
        # add the Poisson term, and the read out noise (variance)
        covModel[:, 0, 0] += mu/gain

        return covModel

923 

    def subtractDistantOffset(self, muAtAmpMasked, covAtAmpMasked, covSqrtWeightsAtAmpMasked,
                              start, degree=1):
        """Subtract distant offset from the covariance matrices.

        Parameters
        ----------
        muAtAmpMasked : `numpy.array`
            Masked mean flux array for a particular amplifier.
        covAtAmpMasked : `numpy.array`
            Masked measured covariances for a particular amplifier.
            Modified in place.
        covSqrtWeightsAtAmpMasked : `numpy.array`
            Masked inverse covariance weights for a particular amplifier.
        start : `int`
            The starting index to eliminate the core for the fit.
        degree : `int`, optional
            Degree of the polynomial fit.

        Returns
        -------
        covAtAmpMasked : `numpy.array`
            Subtracted measured covariances for a particular amplifier.
        covSqrtWeightsAtAmpMasked : `numpy.array`
            Masked inverse covariance weights for a particular amplifier,
            returned unchanged.

        Notes
        -----
        Ported from https://gitlab.in2p3.fr/astier/bfptc by P. Astier.

        This function subtracts a distant offset from the
        covariance matrices using polynomial fitting. The core
        of the matrices is eliminated for the fit.

        The covariance array passed in is modified in place (the
        fitted background is subtracted from each flux slice); the
        weights array is not altered.
        """
        for k in range(len(muAtAmpMasked)):
            # Make a copy because it will be altered
            w = np.copy(covSqrtWeightsAtAmpMasked[k, ...])
            wShape = w.shape
            i, j = np.meshgrid(range(wShape[0]), range(wShape[1]), indexing='ij')

            # Eliminate the core for the fit
            w[:start, :start] = 0

            # Fit a 2D polynomial to the distant (non-core) lags and
            # subtract the evaluated background everywhere.
            poly = Pol2D(i, j, covAtAmpMasked[k, ...], degree, w=w)
            back = poly.eval(i, j)

            covAtAmpMasked[k, ...] -= back

        return covAtAmpMasked, covSqrtWeightsAtAmpMasked

974 

975 # EXPAPPROXIMATION fit method 

976 # TODO: DM-52720 - remove deprecated POLYNOMIAL fit 

977 # and legacy turnoff 

    @staticmethod
    @deprecated(reason="POLYNOMIAL PTC fit is no longer supported. Will be removed after v30.",
                version="v30.0", category=FutureWarning)
    def _initialParsForPolynomial(order):
        """Return initial parameter guesses for a polynomial PTC fit.

        Parameters
        ----------
        order : `int`
            Number of polynomial coefficients; must be at least 2.

        Returns
        -------
        pars : `numpy.array`, (order,)
            Initial parameters: 10 for the intercept, 1 for the linear
            term, and 0.0001 for every higher-order term.
        """
        assert order >= 2
        pars = np.zeros(order, dtype=float)
        pars[0] = 10
        pars[1] = 1
        pars[2:] = 0.0001
        return pars

988 

989 @staticmethod 

990 # TODO: DM-52720 - remove deprecated POLYNOMIAL fit 

991 # and legacy turnoff 

992 @deprecated(reason="POLYNOMIAL PTC fit is no longer supported. Will be removed after v30.", 

993 version="v30.0", category=FutureWarning) 

994 def _boundsForPolynomial(initialPars, lowers=[], uppers=[]): 

995 if not len(lowers): 

996 lowers = [-np.inf for p in initialPars] 

997 if not len(uppers): 

998 uppers = [np.inf for p in initialPars] 

999 lowers[1] = 0 # no negative gains 

1000 return (lowers, uppers) 

1001 

1002 @staticmethod 

1003 def _boundsForAstier(initialPars, lowers=[], uppers=[]): 

1004 if not len(lowers): 

1005 lowers = [-np.inf for p in initialPars] 

1006 if not len(uppers): 

1007 uppers = [np.inf for p in initialPars] 

1008 return (lowers, uppers) 

1009 

1010 # TODO: DM-52720 - remove deprecated POLYNOMIAL fit 

1011 # and legacy turnoff 

    @staticmethod
    @deprecated(reason="This is only used by doLegacyTurnoffSelection, which is deprecated, "
                "so this deprecated too. Will be removed after v30.",
                version="v30.0", category=FutureWarning)
    def _getInitialGoodPoints(means, variances, minVarPivotSearch, consecutivePointsVarDecreases):
        """Return a boolean array to mask bad points.

        Parameters
        ----------
        means : `numpy.array`
            Input array with mean signal values.
        variances : `numpy.array`
            Input array with variances at each mean value.
        minVarPivotSearch : `float`
            The variance (in adu^2), above which, the point
            of decreasing variance should be sought.
        consecutivePointsVarDecreases : `int`
            Required number of consecutive points/fluxes
            in the PTC where the variance
            decreases in order to find a first
            estimate of the PTC turn-off.

        Returns
        -------
        goodPoints : `numpy.array` [`bool`]
            Boolean array to select good (`True`) and bad (`False`)
            points.

        Notes
        -----
        Eliminate points beyond which the variance decreases.
        """
        goodPoints = np.ones_like(means, dtype=bool)
        # Variances are sorted and should monotonically increase
        pivotList = np.where(np.array(np.diff(variances)) < 0)[0]
        if len(pivotList) > 0:
            # For small values, sometimes the variance decreases slightly
            # Only look when var > self.config.minVarPivotSearch
            pivotList = [p for p in pivotList if variances[p] > minVarPivotSearch]
            # Require that the variance decreases during
            # consecutivePointsVarDecreases
            # consecutive points. This will give a first
            # estimate of the PTC turn-off, which
            # may be updated (reduced) further in the code.
            if len(pivotList) > 1:
                # enumerate(pivotList) creates tuples (index, value), for
                # each value in pivotList. The lambda function subtracts
                # each value from the index.
                # groupby groups elements by equal key value.
                for k, g in groupby(enumerate(pivotList), lambda x: x[0]-x[1]):
                    group = (map(itemgetter(1), g))
                    # Form groups of consecutive values from pivotList
                    group = list(map(int, group))
                    # values in pivotList are indices where np.diff(variances)
                    # is negative, i.e., where the variance starts decreasing.
                    # Find the first group of consecutive numbers when
                    # variance decreases.
                    if len(group) >= consecutivePointsVarDecreases:
                        pivotIndex = np.min(group)
                        goodPoints[pivotIndex+1:] = False
                        break

        # Finally, we filter out any infinities or NaNs.
        goodPoints[(~np.isfinite(means)) | (~np.isfinite(variances))] = False

        return goodPoints

1078 

1079 def _makeZeroSafe(self, array, substituteValue=1e-9): 

1080 """""" 

1081 array = np.array(array) 

1082 nBad = Counter(np.ravel(array))[0] 

1083 if nBad == 0: 

1084 return array 

1085 

1086 index, = np.where(array == 0) 

1087 if len(index): 

1088 msg = f"Found {nBad} zeros in array at elements {index}" 

1089 self.log.warning(msg) 

1090 

1091 array[index] = substituteValue 

1092 

1093 return array 

1094 

1095 def fitPtcRolloff(self, dataset): 

1096 """Fit the photon transfer curve to the 

1097 Astier+19 approximation (Eq. 16) with a roll-off model to try 

1098 and capture saturation effects. 

1099 

1100 Parameters 

1101 ---------- 

1102 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset` 

1103 The dataset containing the means, variances and 

1104 exposure times. 

1105 

1106 Returns 

1107 ------- 

1108 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset` 

1109 This is the same dataset as the input parameter, however, 

1110 it has been modified to include information such as the 

1111 fit vectors and the fit parameters. See the class 

1112 `PhotonTransferCurveDataset`. 

1113 """ 

1114 # For FULLCOVARIANCE model fit 

1115 matrixSideFit = dataset.covMatrixSideFullCovFit 

1116 nanMatrixFit = np.empty((matrixSideFit, matrixSideFit)) 

1117 nanMatrixFit[:] = np.nan 

1118 

1119 def errFunc(p, x, y): 

1120 return ptcFunc(p, x) - y 

1121 

1122 for i, ampName in enumerate(dataset.ampNames): 

1123 meanVec = dataset.rawMeans[ampName].copy() 

1124 varVec = dataset.rawVars[ampName].copy() 

1125 varVec = self._makeZeroSafe(varVec) 

1126 

1127 # Make sure we have this properly sorted. 

1128 goodPoints = dataset.expIdMask[ampName].copy() 

1129 

1130 if not np.isfinite(dataset.ptcTurnoff[ampName]): 

1131 msg = (f"SERIOUS: Initial PTC turnoff is not finite. " 

1132 f"Setting {ampName} to BAD.") 

1133 self.log.warning(msg) 

1134 # Fill entries with NaNs 

1135 self.fillBadAmp(dataset, ampName) 

1136 continue 

1137 

1138 mask = goodPoints.copy() 

1139 

1140 # Compute the extended mask 

1141 preTurnoff = dataset.ptcTurnoff[ampName] 

1142 turnoffIdx = np.argwhere(meanVec == preTurnoff)[0][0] 

1143 varianceAtTurnoff = varVec[turnoffIdx] 

1144 

1145 # Add points up to some threshold below the variance of the turnoff 

1146 pointsToFit = ( 

1147 (meanVec >= preTurnoff) 

1148 & (meanVec < preTurnoff+self.config.extendRollofSearchMaskSizeAdu) 

1149 & (varVec <= varianceAtTurnoff) 

1150 ) 

1151 

1152 # Retain the original mask below the turnoff 

1153 pointsToFit = np.logical_or(mask, pointsToFit) 

1154 if np.count_nonzero(pointsToFit) == np.count_nonzero(mask): 

1155 self.log.warning("Expanding fit to include saturation, but no points detected above " 

1156 f"initial computed PTC turnoff for amp {ampName}.") 

1157 dataset.expIdRolloffMask[ampName] = pointsToFit 

1158 dataset.ptcRolloff[ampName] = dataset.ptcTurnoff[ampName] 

1159 dataset.ptcRolloffError[ampName] = np.nan 

1160 dataset.ptcRolloffTau[ampName] = np.nan 

1161 dataset.ptcRolloffTauError[ampName] = np.nan 

1162 continue 

1163 

1164 # Fit initialization 

1165 ptcFunc = funcAstierWithRolloff 

1166 parsIniPtc = dataset.ptcFitPars[ampName] # a00, gain, noise^2 

1167 

1168 # Estimate initial parameters 

1169 estimateTurnoff = preTurnoff 

1170 estimateTau = -1200 # This turns out to be a generally good guess! 

1171 parsIniPtc = np.append( 

1172 parsIniPtc, 

1173 [estimateTurnoff, estimateTau], 

1174 ) 

1175 

1176 # Set initial bounds 

1177 bounds = self._boundsForAstier( 

1178 parsIniPtc, 

1179 lowers=[-1e-4, 0.1, 0, min(preTurnoff-20000, 0), -10000], 

1180 uppers=[0, 10.0, 2000, preTurnoff, -100], 

1181 ) 

1182 

1183 # Perform the fit 

1184 res = least_squares( 

1185 errFunc, 

1186 parsIniPtc, 

1187 bounds=bounds, 

1188 args=(meanVec[pointsToFit], varVec[pointsToFit]), 

1189 ) 

1190 pars = res.x 

1191 originalModelPars = pars[:-2] 

1192 J = res.jac 

1193 cov = np.linalg.inv(J.T @ J) 

1194 parErrors = np.sqrt(np.diag(cov)) 

1195 a00, gain, noiseSquared, rolloff, tau = pars 

1196 a00Err, gainErr, noiseSquaredError, rolloffError, tauError = parErrors 

1197 

1198 if not res.success: 

1199 self.log.warning( 

1200 "Fit with saturation roll off model did not succeed for amp %s.", 

1201 ampName, 

1202 ) 

1203 continue 

1204 else: 

1205 self.log.info("Fit with saturation roll off model returned estimates: " 

1206 "rolloff=%f adu (%f el) and tau=%f", 

1207 rolloff, rolloff * gain, tau) 

1208 

1209 # The PTC turnoff is not immediate, and we can tolerate some of 

1210 # the physics in the rolloff, so we define the turnoff as a 

1211 # threshold of deviation between the model without the rolloff 

1212 # and the model with the rolloff. The rolloff estimate is the 

1213 # last acceptable point. 

1214 modelWithoutRolloff = funcAstier(originalModelPars, meanVec[pointsToFit]) 

1215 modelWithRolloff = funcAstierWithRolloff(pars, meanVec[pointsToFit]) 

1216 residual = np.fabs(modelWithRolloff / modelWithoutRolloff - 1) 

1217 acceptablePoints = np.argwhere(residual <= self.config.maxPtcRolloffDeviation) 

1218 lastGoodIndex = acceptablePoints[-1] 

1219 rolloff = meanVec[pointsToFit][lastGoodIndex][0] 

1220 rolloff = min(rolloff, preTurnoff) 

1221 

1222 # Set the mask to the new mask 

1223 newMask = pointsToFit * (meanVec < rolloff) 

1224 dataset.expIdMask[ampName] = newMask 

1225 

1226 # Save the rolloff point 

1227 dataset.expIdRolloffMask[ampName] = pointsToFit 

1228 dataset.ptcRolloff[ampName] = rolloff 

1229 dataset.ptcRolloffError[ampName] = rolloffError 

1230 dataset.ptcRolloffTau[ampName] = tau 

1231 dataset.ptcRolloffTauError[ampName] = tauError 

1232 

1233 return dataset 

1234 

1235 def fitPtc(self, dataset, computePtcTurnoff=True): 

1236 """Fit the photon transfer curve to the 

1237 Astier+19 approximation (Eq. 16). 

1238 

1239 Fit the photon transfer curve using the exponential 

1240 approximation in Astier+19. 

1241 

1242 Sigma clipping is performed iteratively for the fit, as 

1243 well as an initial clipping of data points that are more 

1244 than `config.initialNonLinearityExclusionThreshold` away 

1245 from lying on a straight line. This other step is necessary 

1246 because the photon transfer curve turns over catastrophically 

1247 at very high flux (because saturation 

1248 drops the variance to ~0) and these far outliers cause the 

1249 initial fit to fail, meaning the sigma cannot be calculated 

1250 to perform the sigma-clipping. 

1251 

1252 If `doModelPtcRolloff` is True, a roll-off model will be added 

1253 to the initial fit of the PTC to try and capture saturation 

1254 effects. This will only be applied if 

1255 `ptcFitType=EXPAPPROXIMATION`. 

1256 

1257 Parameters 

1258 ---------- 

1259 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset` 

1260 The dataset containing the means, variances and 

1261 exposure times. 

1262 computePtcTurnoff : `bool` 

1263 Compute and save the PTC turnoff and PTC turnoff sampling 

1264 error. If False, this will preserve the attributes in the 

1265 input `dataset`. 

1266 

1267 Returns 

1268 ------- 

1269 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset` 

1270 This is the same dataset as the input parameter, however, 

1271 it has been modified to include information such as the 

1272 fit vectors and the fit parameters. See the class 

1273 `PhotonTransferCurveDatase`. 

1274 

1275 Raises 

1276 ------ 

1277 RuntimeError 

1278 Raised if dataset.ptcFitType is None or empty. 

1279 """ 

1280 # For FULLCOVARIANCE model fit 

1281 matrixSideFit = dataset.covMatrixSideFullCovFit 

1282 nanMatrixFit = np.empty((matrixSideFit, matrixSideFit)) 

1283 nanMatrixFit[:] = np.nan 

1284 

1285 for amp in dataset.ampNames: 

1286 lenInputTimes = len(dataset.rawExpTimes[amp]) 

1287 listNanMatrixFit = np.empty((lenInputTimes, matrixSideFit, matrixSideFit)) 

1288 listNanMatrixFit[:] = np.nan 

1289 

1290 dataset.covariancesModel[amp] = listNanMatrixFit 

1291 dataset.aMatrix[amp] = nanMatrixFit 

1292 dataset.bMatrix[amp] = nanMatrixFit 

1293 dataset.noiseMatrix[amp] = nanMatrixFit 

1294 

1295 def errFunc(p, x, y): 

1296 return ptcFunc(p, x) - y 

1297 

1298 sigmaCutPtcOutliers = self.config.sigmaCutPtcOutliers 

1299 maxIterationsPtcOutliers = self.config.maxIterationsPtcOutliers 

1300 

1301 for i, ampName in enumerate(dataset.ampNames): 

1302 # The PTC is sorted by detector means, 

1303 # however this might not be the true sorted 

1304 # order for any specific amplifier. We must 

1305 # therefore make sure that the amplifier 

1306 # itself is sorted by the amplifier means. 

1307 # At the end we will re-order the mask to 

1308 # match the sort by detector means. 

1309 

1310 # These are sorted by detector means 

1311 meanVecRaw = dataset.rawMeans[ampName].copy() 

1312 varVecRaw = dataset.rawVars[ampName].copy() 

1313 varVecRaw = self._makeZeroSafe(varVecRaw) 

1314 

1315 # These are sorted by the amplifier means 

1316 meanVecSort = np.argsort(meanVecRaw) 

1317 meanVecSorted = meanVecRaw[meanVecSort] 

1318 varVecSorted = varVecRaw[meanVecSort] 

1319 

1320 # Check if all points are bad from the 'cpExtractPtcTask' 

1321 goodPoints = dataset.expIdMask[ampName].copy() 

1322 goodPoints = goodPoints[meanVecSort] 

1323 

1324 if not goodPoints.any(): 

1325 msg = ("SERIOUS: All points in goodPoints are bad. " 

1326 f"Setting {ampName} to BAD.") 

1327 self.log.warning(msg) 

1328 # Fill entries with NaNs 

1329 self.fillBadAmp(dataset, ampName) 

1330 continue 

1331 

1332 mask = goodPoints.copy() 

1333 

1334 # Set the fitting function and search region 

1335 ptcFunc = funcAstier 

1336 parsIniPtc = [-2e-6, 1.5, 25.] # a00, gain, noise^2 

1337 if self.config.binSize > 1: 

1338 bounds = self._boundsForAstier(parsIniPtc) 

1339 else: 

1340 bounds = self._boundsForAstier( 

1341 parsIniPtc, 

1342 lowers=[-1e-4, 0.1, 0], 

1343 uppers=[0, 10.0, 2000], 

1344 ) 

1345 

1346 # We perform an initial (unweighted) fit of variance vs signal 

1347 # (after initial KS test or post-drop selection) to look for 

1348 # outliers, particularly at the high-flux end. The initial fit 

1349 # is performed only for points that are guaranteed to be below 

1350 # the PTC turnoff and then extrapolated to ensure that high 

1351 # flux points that have abnormal variance values can be properly 

1352 # rejected in this phase without biasing the initial fit. 

1353 # This algorithm was initially developed by Seth Digel for 

1354 # the EO Testing pipeline. 

1355 if self.config.scaleMaxSignalInitialPtcOutlierFit: 

1356 approxGain = np.nanmedian(meanVecRaw/varVecRaw) 

1357 maxADUInitialPtcOutlierFit = self.config.maxSignalInitialPtcOutlierFit/approxGain 

1358 maxDeltaADUInitialPtcOutlierFit = self.config.maxDeltaInitialPtcOutlierFit/approxGain 

1359 self.log.info( 

1360 "Using approximate gain %.3f and ADU signal cutoff of %.1f " 

1361 "and max delta %.1f for amplifier %s", 

1362 approxGain, 

1363 maxADUInitialPtcOutlierFit, 

1364 maxDeltaADUInitialPtcOutlierFit, 

1365 ampName, 

1366 ) 

1367 else: 

1368 maxADUInitialPtcOutlierFit = self.config.maxSignalInitialPtcOutlierFit 

1369 maxDeltaADUInitialPtcOutlierFit = self.config.maxDeltaInitialPtcOutlierFit 

1370 

1371 if maxIterationsPtcOutliers == 0: 

1372 # We are not doing any outlier rejection here, but we do want 

1373 # an initial fit. 

1374 res = least_squares( 

1375 errFunc, 

1376 parsIniPtc, 

1377 bounds=bounds, 

1378 args=(meanVecSorted[mask], varVecSorted[mask]), 

1379 ) 

1380 pars = res.x 

1381 newMask = mask.copy() 

1382 else: 

1383 newMask = ( 

1384 mask 

1385 & (meanVecSorted <= maxADUInitialPtcOutlierFit) 

1386 & (varVecSorted <= maxADUInitialPtcOutlierFit) 

1387 ) 

1388 

1389 converged = False 

1390 count = 0 

1391 lastMask = mask.copy() 

1392 while count < maxIterationsPtcOutliers: 

1393 res = least_squares( 

1394 errFunc, 

1395 parsIniPtc, 

1396 bounds=bounds, 

1397 args=(meanVecSorted[newMask], varVecSorted[newMask]), 

1398 ) 

1399 pars = res.x 

1400 

1401 sigResids = (varVecSorted - ptcFunc(pars, meanVecSorted))/np.sqrt(varVecSorted) 

1402 # The new mask includes points where the residuals are 

1403 # finite, are less than the cut, and include the original 

1404 # mask of known points that should not be used. 

1405 newMask = ( 

1406 np.isfinite(sigResids) 

1407 & (np.abs(np.nan_to_num(sigResids)) < sigmaCutPtcOutliers) 

1408 & mask 

1409 ) 

1410 # Demand at least 2 points to continue. 

1411 if np.count_nonzero(newMask) < 2: 

1412 msg = (f"SERIOUS: All points after outlier rejection are bad. " 

1413 f"Setting {ampName} to BAD.") 

1414 self.log.warning(msg) 

1415 # Fill entries with NaNs 

1416 self.fillBadAmp(dataset, ampName) 

1417 break 

1418 

1419 self.log.debug( 

1420 "Iteration %d: Removed %d points in total for %s.", 

1421 count, 

1422 np.count_nonzero(mask) - np.count_nonzero(newMask), 

1423 ampName, 

1424 ) 

1425 

1426 # Loop over all used (True) points. If one of them follows 

1427 # a False point, then it must be within 

1428 # maxDeltaADUInitialPtcOutlierFit of a True point. If it 

1429 # is a large gap, everything above is marked False. 

1430 useMask, = np.where(newMask) 

1431 for useIndex, usePoint in enumerate(useMask): 

1432 if useIndex == 0 or newMask[usePoint - 1]: 

1433 # The previous point was good; continue. 

1434 continue 

1435 deltaADU = meanVecSorted[usePoint] - meanVecSorted[useMask[useIndex - 1]] 

1436 if deltaADU < maxDeltaADUInitialPtcOutlierFit: 

1437 # This jump is fine; continue. 

1438 continue 

1439 

1440 self.log.info( 

1441 "Found a jump of %.2f for amp %s, greater than %.2f. Masking points higher " 

1442 "than %.2f ADU.", 

1443 deltaADU, 

1444 ampName, 

1445 maxDeltaADUInitialPtcOutlierFit, 

1446 meanVecSorted[useMask[useIndex - 1]], 

1447 ) 

1448 

1449 # Mark all further points bad. 

1450 newMask[usePoint:] = False 

1451 break 

1452 

1453 # If the mask hasn't changed then break out. 

1454 if np.all(newMask == lastMask): 

1455 self.log.debug("Convergence at iteration %d; breaking loop for %s.", count, ampName) 

1456 converged = True 

1457 break 

1458 

1459 lastMask = newMask.copy() 

1460 

1461 count += 1 

1462 

1463 if not converged and maxIterationsPtcOutliers > 0: 

1464 self.log.warning( 

1465 "Outlier detection was not converged prior to %d iteration for %s", 

1466 count, 

1467 ampName 

1468 ) 

1469 

1470 # Set the mask to the new mask, and reset the sorting. 

1471 mask = np.zeros(len(meanVecSort), dtype=np.bool_) 

1472 mask[meanVecSort[newMask]] = True 

1473 maskSorted = newMask.copy() 

1474 

1475 if not mask.any(): 

1476 # We have already filled the bad amp above, so continue. 

1477 continue 

1478 

1479 dataset.expIdMask[ampName] = mask 

1480 

1481 parsIniPtc = pars 

1482 meanVecFinal = meanVecRaw[mask] 

1483 varVecFinal = varVecRaw[mask] 

1484 

1485 if Counter(mask)[False] > 0: 

1486 self.log.info("Number of points discarded in PTC of amplifier %s:" 

1487 " %d out of %d", ampName, Counter(mask)[False], len(meanVecSorted)) 

1488 

1489 if (len(meanVecFinal) < len(parsIniPtc)): 

1490 msg = (f"SERIOUS: Not enough data points ({len(meanVecFinal)}) compared to the number of " 

1491 f"parameters of the PTC model({len(parsIniPtc)}). Setting {ampName} to BAD.") 

1492 self.log.warning(msg) 

1493 # Fill entries with NaNs 

1494 self.fillBadAmp(dataset, ampName) 

1495 continue 

1496 

1497 # Fit the PTC. 

1498 # The variance of the variance is Var(v)=2*v^2/Npix. This is 

1499 # already calculated in `makeCovArray` of CpPtcExtract. 

1500 # dataset.covariancesSqrtWeights[ampName][:,0,0] 

1501 # has 1/sqrt(Var(v)). 

1502 weightsY = dataset.covariancesSqrtWeights[ampName][:, 0, 0][mask] 

1503 if self.config.doFitBootstrap: 

1504 parsFit, parsFitErr, reducedChiSqPtc = fitBootstrap(parsIniPtc, meanVecFinal, 

1505 varVecFinal, ptcFunc, 

1506 weightsY=weightsY) 

1507 else: 

1508 parsFit, parsFitErr, reducedChiSqPtc = fitLeastSq(parsIniPtc, meanVecFinal, 

1509 varVecFinal, ptcFunc, 

1510 weightsY=weightsY) 

1511 

1512 # Determine PTC turnoff 

1513 if computePtcTurnoff: 

1514 dataset.ptcTurnoff[ampName] = np.max(meanVecFinal) 

1515 # And compute the ptcTurnoffSamplingError as one half the 

1516 # difference between the previous and next point. 

1517 lastGoodIndex = np.where(maskSorted)[0][-1] 

1518 ptcTurnoffLow = meanVecSorted[lastGoodIndex - 1] 

1519 if lastGoodIndex == (len(meanVecSorted) - 1): 

1520 # If it's the last index, just use the interval. 

1521 ptcTurnoffSamplingError = dataset.ptcTurnoff[ampName] - ptcTurnoffLow 

1522 elif not np.isfinite(meanVecSorted[lastGoodIndex + 1]): 

1523 # If the next index is not finite, just use the interval. 

1524 ptcTurnoffSamplingError = dataset.ptcTurnoff[ampName] - ptcTurnoffLow 

1525 else: 

1526 ptcTurnoffSamplingError = (meanVecSorted[lastGoodIndex + 1] - ptcTurnoffLow)/2. 

1527 dataset.ptcTurnoffSamplingError[ampName] = ptcTurnoffSamplingError 

1528 

1529 dataset.ptcFitPars[ampName] = parsFit 

1530 dataset.ptcFitParsError[ampName] = parsFitErr 

1531 dataset.ptcFitChiSq[ampName] = reducedChiSqPtc 

1532 

1533 dataset.finalVars[ampName] = varVecRaw 

1534 dataset.finalVars[ampName][~mask] = np.nan 

1535 dataset.finalModelVars[ampName] = ptcFunc(parsFit, meanVecRaw) 

1536 dataset.finalModelVars[ampName][~mask] = np.nan 

1537 dataset.finalMeans[ampName] = meanVecRaw 

1538 dataset.finalMeans[ampName][~mask] = np.nan 

1539 

1540 ptcGain = parsFit[1] 

1541 ptcGainErr = parsFitErr[1] 

1542 ptcNoise = np.sqrt(np.fabs(parsFit[2])) 

1543 ptcNoiseErr = 0.5*(parsFitErr[2]/np.fabs(parsFit[2]))*np.sqrt(np.fabs(parsFit[2])) 

1544 

1545 # Save results 

1546 dataset.gain[ampName] = ptcGain 

1547 dataset.gainUnadjusted[ampName] = ptcGain 

1548 dataset.gainErr[ampName] = ptcGainErr 

1549 dataset.noise[ampName] = ptcNoise 

1550 dataset.noiseErr[ampName] = ptcNoiseErr 

1551 

1552 if len(dataset.badAmps) == 0: 

1553 dataset.badAmps = [] 

1554 

1555 return dataset 

1556 

1557 def fillBadAmp(self, dataset, ampName): 

1558 """Fill the dataset with NaNs if there are not enough 

1559 good points. 

1560 

1561 Parameters 

1562 ---------- 

1563 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset` 

1564 The dataset containing the means, variances and 

1565 exposure times. 

1566 ampName : `str` 

1567 Amplifier name. 

1568 """ 

1569 dataset.badAmps.append(ampName) 

1570 dataset.expIdMask[ampName] = np.repeat(False, len(dataset.rawExpTimes[ampName])) 

1571 dataset.gain[ampName] = np.nan 

1572 dataset.gainUnadjusted[ampName] = np.nan 

1573 dataset.gainErr[ampName] = np.nan 

1574 dataset.noise[ampName] = np.nan 

1575 dataset.noiseErr[ampName] = np.nan 

1576 dataset.ptcFitPars[ampName] = (np.repeat(np.nan, 3)) 

1577 dataset.ptcFitParsError[ampName] = (np.repeat(np.nan, 3)) 

1578 dataset.ptcFitChiSq[ampName] = np.nan 

1579 dataset.ptcTurnoff[ampName] = np.nan 

1580 dataset.finalVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName])) 

1581 dataset.finalModelVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName])) 

1582 dataset.finalMeans[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName])) 

1583 

1584 return