Coverage for python/lsst/cp/pipe/ptc/cpSolvePtcTask.py: 10%

444 statements  

« prev     ^ index     » next       coverage.py v7.2.5, created at 2023-05-13 03:01 -0700

1# This file is part of cp_pipe. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (https://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# This program is free software: you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation, either version 3 of the License, or 

12# (at your option) any later version. 

13# 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18# 

19# You should have received a copy of the GNU General Public License 

20# along with this program. If not, see <https://www.gnu.org/licenses/>. 

21# 

22import numpy as np 

23from collections import Counter 

24 

25import lsst.pex.config as pexConfig 

26import lsst.pipe.base as pipeBase 

27from lsst.cp.pipe.utils import (fitLeastSq, fitBootstrap, funcPolynomial, funcAstier, symmetrize) 

28 

29from scipy.signal import fftconvolve 

30from scipy.optimize import least_squares 

31from itertools import groupby 

32from operator import itemgetter 

33 

34import lsst.pipe.base.connectionTypes as cT 

35 

36from lsst.ip.isr import PhotonTransferCurveDataset 

37 

38from lsst.cp.pipe._lookupStaticCalibration import lookupStaticCalibration 

39 

40import copy 

41 

42 

43__all__ = ['PhotonTransferCurveSolveConfig', 'PhotonTransferCurveSolveTask'] 

44 

45 

class PhotonTransferCurveSolveConnections(pipeBase.PipelineTaskConnections,
                                          dimensions=("instrument", "detector")):
    """Connections for PhotonTransferCurveSolveTask.

    Takes the per-exposure partial PTC datasets produced by the measure
    task and emits a single per-detector PTC dataset proposal.
    """

    inputCovariances = cT.Input(
        name="ptcCovariances",
        doc="Tuple with measured covariances from flats.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "exposure", "detector"),
        isCalibration=True,
        multiple=True,
    )
    camera = cT.PrerequisiteInput(
        name="camera",
        doc="Camera the input data comes from.",
        storageClass="Camera",
        dimensions=("instrument",),
        isCalibration=True,
        lookupFunction=lookupStaticCalibration,
    )
    # NOTE(review): "ptcDatsetProposal" looks like a typo for
    # "ptcDatasetProposal", but it is a registered dataset type name;
    # do not rename it here without a repository migration.
    outputPtcDataset = cT.Output(
        name="ptcDatsetProposal",
        doc="Output proposed ptc dataset.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "detector"),
        multiple=False,
        isCalibration=True,
    )

72 

73 

class PhotonTransferCurveSolveConfig(pipeBase.PipelineTaskConfig,
                                     pipelineConnections=PhotonTransferCurveSolveConnections):
    """Configuration for fitting measured covariances.

    Selects the PTC model (polynomial, Astier+19 Eq. 16, or Astier+19
    Eq. 20 full-covariance) and the signal cuts, outlier rejection, and
    fitting options applied by ``PhotonTransferCurveSolveTask``.
    """

    # Model selection.
    ptcFitType = pexConfig.ChoiceField(
        dtype=str,
        doc="Fit PTC to Eq. 16, Eq. 20 in Astier+19, or to a polynomial.",
        default="POLYNOMIAL",
        allowed={
            "POLYNOMIAL": "n-degree polynomial (use 'polynomialFitDegree' to set 'n').",
            "EXPAPPROXIMATION": "Approximation in Astier+19 (Eq. 16).",
            "FULLCOVARIANCE": "Full covariances model in Astier+19 (Eq. 20)"
        }
    )
    # Per-amp signal-range cuts; the special key 'ALL_AMPS' applies the
    # same cut to every amplifier.
    minMeanSignal = pexConfig.DictField(
        keytype=str,
        itemtype=float,
        doc="Minimum values (inclusive) of mean signal (in ADU) per amp to use."
            " The same cut is applied to all amps if this parameter [`dict`] is passed as "
            " {'ALL_AMPS': value}",
        default={'ALL_AMPS': 0.0},
    )
    maxMeanSignal = pexConfig.DictField(
        keytype=str,
        itemtype=float,
        doc="Maximum values (inclusive) of mean signal (in ADU) below which to consider, per amp."
            " The same cut is applied to all amps if this dictionary is of the form"
            " {'ALL_AMPS': value}",
        default={'ALL_AMPS': 1e6},
    )
    # FULLCOVARIANCE fit options.
    maximumRangeCovariancesAstier = pexConfig.Field(
        dtype=int,
        doc="Maximum range of covariances as in Astier+19",
        default=8,
    )
    sigmaClipFullFitCovariancesAstier = pexConfig.Field(
        dtype=float,
        doc="sigma clip for full model fit for FULLCOVARIANCE ptcFitType ",
        default=5.0,
    )
    maxIterFullFitCovariancesAstier = pexConfig.Field(
        dtype=int,
        doc="Maximum number of iterations in full model fit for FULLCOVARIANCE ptcFitType",
        default=3,
    )
    # POLYNOMIAL fit options.
    polynomialFitDegree = pexConfig.Field(
        dtype=int,
        doc="Degree of polynomial to fit the PTC, when 'ptcFitType'=POLYNOMIAL.",
        default=3,
    )
    # Turnoff / outlier selection options; the "legacy" path uses the
    # variance-pivot search, otherwise a KS-test p-value cut is used.
    doLegacyTurnoffAndOutlierSelection = pexConfig.Field(
        dtype=bool,
        doc="Use 'legacy' computation for PTC turnoff and outlier selection. If set "
            "to False, then the KS test p-value selection will be used instead.",
        default=False,
    )
    sigmaCutPtcOutliers = pexConfig.Field(
        dtype=float,
        doc="Sigma cut for outlier rejection in PTC. Only used if "
            "doLegacyTurnoffAndOutlierSelection is True.",
        default=5.0,
    )
    maxIterationsPtcOutliers = pexConfig.RangeField(
        dtype=int,
        doc="Maximum number of iterations for outlier rejection in PTC. Only used if "
            "doLegacyTurnoffAndOutlierSelection is True.",
        default=2,
        min=0
    )
    minVarPivotSearch = pexConfig.Field(
        dtype=float,
        doc="The code looks for a pivot signal point after which the variance starts decreasing at high-flux"
            " to exclude then from the PTC model fit. However, sometimes at low fluxes, the variance"
            " decreases slightly. Set this variable for the variance value, in ADU^2, after which the pivot "
            " should be sought. Only used if doLegacyTurnoffAndOutlierSelection is True.",
        default=10000,
    )
    consecutivePointsVarDecreases = pexConfig.RangeField(
        dtype=int,
        doc="Required number of consecutive points/fluxes in the PTC where the variance "
            "decreases in order to find a first estimate of the PTC turn-off. "
            "Only used if doLegacyTurnoffAndOutlierSelection is True.",
        default=2,
        min=2
    )
    ksTestMinPvalue = pexConfig.Field(
        dtype=float,
        doc="Minimum value of the Gaussian histogram KS test p-value to be used in PTC fit. "
            "Only used if doLegacyTurnoffAndOutlierSelection is False.",
        default=0.01,
    )
    # General fitting options.
    doFitBootstrap = pexConfig.Field(
        dtype=bool,
        doc="Use bootstrap for the PTC fit parameters and errors?.",
        default=False,
    )
    binSize = pexConfig.Field(
        dtype=int,
        doc="Bin the image by this factor in both dimensions.",
        default=1,
    )

176 

177 

178class PhotonTransferCurveSolveTask(pipeBase.PipelineTask): 

179 """Task to fit the PTC from flat covariances. 

180 

181 The first task of the PTC measurement pipeline, 

182 ``PhotonTransferCurveMeasureTask`` (and assumed to have been run 

183 before this task), produced a list of 

184 `~lsst.ip.isr.PhotonTransferCurveDataset` objects. Each dataset 

185 contains the mean signal and covariances of the 

186 difference image of the flat-field images taken at 

187 the same exposure time. The list also contains dummy 

188 datasets (with no measurements), whose purpose is to have 

189 the input and output dimensions of ``PhotonTransferCurveMeasureTask`` 

190 match. 

191 

192 This task, ``PhotonTransferCurveSolveTask``, assembles the list 

193 of individual PTC datasets produced 

194 by ``PhotonTransferCurveMeasureTask`` into one single final PTC 

195 dataset, discarding the dummy datset as appropiate. 

196 The task fits the measured (co)variances to one of three models: 

197 a polynomial model of a given order, or the models described 

198 in equations 16 and 20 of Astier+19. These options are referred 

199 to as ``POLYNOMIAL``, ``EXPAPPROXIMATION``, and ``FULLCOVARIANCE`` 

200 in the configuration options of the task, respectively). 

201 Parameters of interest such as the gain and noise are derived 

202 from the fits. The ``FULLCOVARIANCE`` model is fitted to the 

203 full covariance data (as oppossed to the other two models, which 

204 are fit to the variance vs mean measurements only). 

205 

206 Astier+19: "The Shape of the Photon Transfer Curve 

207 of CCD sensors", arXiv:1905.08677 

208 """ 

209 

210 ConfigClass = PhotonTransferCurveSolveConfig 

211 _DefaultName = 'cpPhotonTransferCurveSolve' 

212 

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        """Ensure that the input and output dimensions are passed along.

        Parameters
        ----------
        butlerQC : `~lsst.daf.butler.butlerQuantumContext.ButlerQuantumContext`
            Butler to operate on.
        inputRefs : `~lsst.pipe.base.connections.InputQuantizedConnection`
            Input data refs to load.
        outputRefs : `~lsst.pipe.base.connections.OutputQuantizedConnection`
            Output data refs to persist.
        """
        inputs = butlerQC.get(inputRefs)
        # Take the detector ID from the first input covariance ref; all
        # inputs for this quantum share the same detector dimension.
        detId = inputRefs.inputCovariances[0].dataId['detector']
        outputs = self.run(inputCovariances=inputs['inputCovariances'], camera=inputs['camera'], detId=detId)
        butlerQC.put(outputs, outputRefs)

229 

230 def run(self, inputCovariances, camera=None, detId=0): 

231 """Fit measured covariances to different models. 

232 

233 Parameters 

234 ---------- 

235 inputCovariances : `list` [`lsst.ip.isr.PhotonTransferCurveDataset`] 

236 List of lsst.ip.isr.PhotonTransferCurveDataset datasets. 

237 camera : `lsst.afw.cameraGeom.Camera`, optional 

238 Input camera. 

239 detId : `int` 

240 Detector ID to locate the detector in the camera and 

241 populate the `lsst.ip.isr.PhotonTransferCurveDataset` 

242 metadata. 

243 Returns 

244 ------- 

245 results : `lsst.pipe.base.Struct` 

246 The resultins structure contains: 

247 

248 ``outputPtcDatset`` 

249 Final PTC dataset, containing information such as the 

250 means, variances, and exposure times 

251 (`lsst.ip.isr.PhotonTransferCurveDataset`). 

252 """ 

253 # Find the ampNames from a non-dummy ptc. 

254 ampNames = [] 

255 for partialPtcDataset in inputCovariances: 

256 if partialPtcDataset.ptcFitType != 'DUMMY': 

257 ampNames = partialPtcDataset.ampNames 

258 break 

259 

260 # Each amp may have a different min and max ADU signal 

261 # specified in the config. 

262 maxMeanSignalDict = {ampName: 1e6 for ampName in ampNames} 

263 minMeanSignalDict = {ampName: 0.0 for ampName in ampNames} 

264 for ampName in ampNames: 

265 if 'ALL_AMPS' in self.config.maxMeanSignal: 

266 maxMeanSignalDict[ampName] = self.config.maxMeanSignal['ALL_AMPS'] 

267 elif ampName in self.config.maxMeanSignal: 

268 maxMeanSignalDict[ampName] = self.config.maxMeanSignal[ampName] 

269 

270 if 'ALL_AMPS' in self.config.minMeanSignal: 

271 minMeanSignalDict[ampName] = self.config.minMeanSignal['ALL_AMPS'] 

272 elif ampName in self.config.minMeanSignal: 

273 minMeanSignalDict[ampName] = self.config.minMeanSignal[ampName] 

274 

275 # Assemble individual PTC datasets into a single PTC dataset. 

276 datasetPtc = PhotonTransferCurveDataset(ampNames=ampNames, 

277 ptcFitType=self.config.ptcFitType, 

278 covMatrixSide=self.config.maximumRangeCovariancesAstier) 

279 for partialPtcDataset in inputCovariances: 

280 # Ignore dummy datasets 

281 if partialPtcDataset.ptcFitType == 'DUMMY': 

282 continue 

283 for ampName in ampNames: 

284 # The partial dataset consists of lists of values for each 

285 # quantity. In the case of the input exposure pairs, this is a 

286 # list of tuples. In all cases we only want the first 

287 # (and only) element of the list. 

288 datasetPtc.inputExpIdPairs[ampName].append(partialPtcDataset.inputExpIdPairs[ampName][0]) 

289 datasetPtc.rawExpTimes[ampName] = np.append(datasetPtc.rawExpTimes[ampName], 

290 partialPtcDataset.rawExpTimes[ampName][0]) 

291 datasetPtc.rawMeans[ampName] = np.append(datasetPtc.rawMeans[ampName], 

292 partialPtcDataset.rawMeans[ampName][0]) 

293 datasetPtc.rawVars[ampName] = np.append(datasetPtc.rawVars[ampName], 

294 partialPtcDataset.rawVars[ampName][0]) 

295 datasetPtc.histVars[ampName] = np.append(datasetPtc.histVars[ampName], 

296 partialPtcDataset.histVars[ampName][0]) 

297 datasetPtc.histChi2Dofs[ampName] = np.append(datasetPtc.histChi2Dofs[ampName], 

298 partialPtcDataset.histChi2Dofs[ampName][0]) 

299 datasetPtc.kspValues[ampName] = np.append(datasetPtc.kspValues[ampName], 

300 partialPtcDataset.kspValues[ampName][0]) 

301 datasetPtc.covariances[ampName] = np.append( 

302 datasetPtc.covariances[ampName].ravel(), 

303 partialPtcDataset.covariances[ampName].ravel() 

304 ).reshape( 

305 ( 

306 len(datasetPtc.rawExpTimes[ampName]), 

307 datasetPtc.covMatrixSide, 

308 datasetPtc.covMatrixSide, 

309 ) 

310 ) 

311 datasetPtc.covariancesSqrtWeights[ampName] = np.append( 

312 datasetPtc.covariancesSqrtWeights[ampName].ravel(), 

313 partialPtcDataset.covariancesSqrtWeights[ampName].ravel() 

314 ).reshape( 

315 ( 

316 len(datasetPtc.rawExpTimes[ampName]), 

317 datasetPtc.covMatrixSide, 

318 datasetPtc.covMatrixSide, 

319 ) 

320 ) 

321 

322 # Apply min/max masking. 

323 rawMean = partialPtcDataset.rawMeans[ampName][0] 

324 rawVar = partialPtcDataset.rawVars[ampName][0] 

325 expIdMask = partialPtcDataset.expIdMask[ampName][0] 

326 if (rawMean <= minMeanSignalDict[ampName]) or (rawMean >= maxMeanSignalDict[ampName]) \ 

327 or not np.isfinite(rawMean) or not np.isfinite(rawVar): 

328 expIdMask = False 

329 

330 kspValue = partialPtcDataset.kspValues[ampName][0] 

331 if not self.config.doLegacyTurnoffAndOutlierSelection and \ 

332 kspValue < self.config.ksTestMinPvalue: 

333 expIdMask = False 

334 

335 datasetPtc.expIdMask[ampName] = np.append(datasetPtc.expIdMask[ampName], expIdMask) 

336 

337 # Sort arrays that are filled so far in the final dataset by 

338 # rawMeans index. 

339 # First compute the mean across all the amps to make sure that they are 

340 # all sorted the same way. 

341 detectorMeans = np.zeros(len(datasetPtc.inputExpIdPairs[ampNames[0]])) 

342 

343 for i in range(len(detectorMeans)): 

344 arr = np.array([datasetPtc.rawMeans[ampName][i] for ampName in ampNames]) 

345 good, = (np.isfinite(arr)).nonzero() 

346 if good.size == 0: 

347 detectorMeans[i] = np.nan 

348 else: 

349 detectorMeans[i] = np.mean(arr[good]) 

350 

351 index = np.argsort(detectorMeans) 

352 

353 for ampName in ampNames: 

354 datasetPtc.inputExpIdPairs[ampName] = np.array( 

355 datasetPtc.inputExpIdPairs[ampName] 

356 )[index].tolist() 

357 datasetPtc.rawExpTimes[ampName] = datasetPtc.rawExpTimes[ampName][index] 

358 datasetPtc.rawMeans[ampName] = datasetPtc.rawMeans[ampName][index] 

359 datasetPtc.rawVars[ampName] = datasetPtc.rawVars[ampName][index] 

360 datasetPtc.expIdMask[ampName] = datasetPtc.expIdMask[ampName][index] 

361 datasetPtc.covariances[ampName] = datasetPtc.covariances[ampName][index] 

362 datasetPtc.covariancesSqrtWeights[ampName] = datasetPtc.covariancesSqrtWeights[ampName][index] 

363 

364 if self.config.ptcFitType == "FULLCOVARIANCE": 

365 # Fit the measured covariances vs mean signal to 

366 # the Astier+19 full model (Eq. 20). Before that 

367 # do a preliminary fit to the variance (C_00) vs mean 

368 # signal (mu) curve using the EXPAPPROXIMATION model 

369 # (Eq. 16 in Astier+19) in order to 

370 # get the flat pairs that are masked. The 

371 # points at these fluxes will also be masked when 

372 # calculating the other elements of the covariance 

373 # matrix, C_ij, i!=j). 

374 

375 # Preliminary fit, usign a temp dataset to get the mask 

376 tempDatasetPtc = copy.copy(datasetPtc) 

377 tempDatasetPtc.ptcFitType = "EXPAPPROXIMATION" 

378 tempDatasetPtc = self.fitMeasurementsToModel(tempDatasetPtc) 

379 

380 # "FULLCOVARIANCE", using the mask obtained from the 

381 # previous fit. 

382 for ampName in datasetPtc.ampNames: 

383 datasetPtc.expIdMask[ampName] = tempDatasetPtc.expIdMask[ampName] 

384 datasetPtc.fitType = "FULLCOVARIANCE" 

385 datasetPtc = self.fitMeasurementsToModel(datasetPtc) 

386 # The other options are: self.config.ptcFitType in 

387 # ("EXPAPPROXIMATION", "POLYNOMIAL") 

388 else: 

389 # Fit the PTC to a polynomial or to Astier+19 exponential 

390 # approximation (Eq. 16). Fill up 

391 # PhotonTransferCurveDataset object. 

392 datasetPtc = self.fitMeasurementsToModel(datasetPtc) 

393 

394 if camera: 

395 detector = camera[detId] 

396 else: 

397 detector = None 

398 datasetPtc.updateMetadataFromExposures(inputCovariances) 

399 datasetPtc.updateMetadata(setDate=True, camera=camera, detector=detector) 

400 

401 return pipeBase.Struct( 

402 outputPtcDataset=datasetPtc, 

403 ) 

404 

405 def fitMeasurementsToModel(self, dataset): 

406 """Fit the measured covariances vs mean signal to a 

407 polynomial or one of the models in Astier+19 

408 (Eq. 16 or Eq.20). 

409 

410 Parameters 

411 ---------- 

412 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset` 

413 The dataset containing information such as the means, 

414 (co)variances, and exposure times. 

415 

416 Returns 

417 ------- 

418 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset` 

419 This is the same dataset as the input parameter, however, 

420 it has been modified to include information such as the 

421 fit vectors and the fit parameters. See the class 

422 `PhotonTransferCurveDatase`. 

423 """ 

424 fitType = dataset.ptcFitType 

425 if fitType in ["FULLCOVARIANCE", ]: 

426 # This model uses the full covariance matrix in the fit. 

427 # The PTC is technically defined as variance vs signal, 

428 # with variance = Cov_00 

429 dataset = self.fitDataFullCovariance(dataset) 

430 elif fitType in ["POLYNOMIAL", "EXPAPPROXIMATION"]: 

431 # The PTC is technically defined as variance vs signal 

432 dataset = self.fitPtc(dataset) 

433 else: 

434 raise RuntimeError( 

435 f"Fitting option {fitType} not one of " 

436 "'POLYNOMIAL', 'EXPAPPROXIMATION', or 'FULLCOVARIANCE'" 

437 ) 

438 

439 return dataset 

440 

    def fitDataFullCovariance(self, dataset):
        """Fit measured flat covariances to the full model in
        Astier+19 (Eq. 20).

        Parameters
        ----------
        dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
            The dataset containing information such as the means,
            (co)variances, and exposure times.

        Returns
        -------
        dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
            This is the same dataset as the input parameter, however,
            it has been modified to include information such as the
            fit vectors and the fit parameters. See the class
            `PhotonTransferCurveDataset`.

        Notes
        -----
        The parameters of the full model for C_ij(mu) ("C_ij" and "mu"
        in ADU^2 and ADU, respectively) in Astier+19 (Eq. 20) are:

        - "a" coefficients (r by r matrix), units: 1/e
        - "b" coefficients (r by r matrix), units: 1/e
        - noise matrix (r by r matrix), units: e^2
        - gain, units: e/ADU

        "b" appears in Eq. 20 only through the "ab" combination, which
        is defined in this code as "c=ab".

        Total number of parameters: #entries(a) + #entries(c) + #entries(noise)
        + 1. This is equivalent to r^2 + r^2 + r^2 + 1, where "r" is the
        maximum lag considered for the covariances calculation, and the
        extra "1" is the gain. If "b" is 0, then "c" is 0, and len(pInit) will
        have r^2 fewer entries.
        """
        matrixSide = self.config.maximumRangeCovariancesAstier
        # Number of entries in each of the a, c, noise parameter matrices.
        lenParams = matrixSide*matrixSide

        for ampName in dataset.ampNames:
            lenInputTimes = len(dataset.rawExpTimes[ampName])
            # Not used when ptcFitType is 'FULLCOVARIANCE'
            dataset.ptcFitPars[ampName] = np.array([np.nan])
            dataset.ptcFitParsError[ampName] = np.array([np.nan])
            dataset.ptcFitChiSq[ampName] = np.nan

            if ampName in dataset.badAmps:
                # Bad amp
                # Entries need to have proper dimensions so read/write
                # with astropy.Table works.
                nanMatrix = np.full((matrixSide, matrixSide), np.nan)
                listNanMatrix = np.full((lenInputTimes, matrixSide, matrixSide), np.nan)
                dataset.covariancesModel[ampName] = listNanMatrix
                dataset.covariancesSqrtWeights[ampName] = listNanMatrix
                dataset.aMatrix[ampName] = nanMatrix
                dataset.bMatrix[ampName] = nanMatrix
                dataset.covariancesModelNoB[ampName] = listNanMatrix
                dataset.aMatrixNoB[ampName] = nanMatrix

                dataset.expIdMask[ampName] = np.repeat(False, lenInputTimes)
                dataset.gain[ampName] = np.nan
                dataset.gainErr[ampName] = np.nan
                dataset.noise[ampName] = np.nan
                dataset.noiseErr[ampName] = np.nan
                dataset.finalVars[ampName] = np.repeat(np.nan, lenInputTimes)
                dataset.finalModelVars[ampName] = np.repeat(np.nan, lenInputTimes)
                dataset.finalMeans[ampName] = np.repeat(np.nan, lenInputTimes)
                continue

            muAtAmp = dataset.rawMeans[ampName]
            maskAtAmp = dataset.expIdMask[ampName]
            # An empty mask means no prior selection was made: keep all points.
            if len(maskAtAmp) == 0:
                maskAtAmp = np.repeat(True, len(muAtAmp))

            muAtAmpMasked = muAtAmp[maskAtAmp]
            covAtAmp = dataset.covariances[ampName]
            # nan_to_num so NaN entries don't poison the least-squares fit.
            covAtAmpMasked = np.nan_to_num(covAtAmp)[maskAtAmp]
            covSqrtWeightsAtAmp = dataset.covariancesSqrtWeights[ampName]
            covSqrtWeightsAtAmpMasked = np.nan_to_num(covSqrtWeightsAtAmp)[maskAtAmp]

            # Initial fit, to approximate parameters, with c=0
            a0, c0, noise0, gain0 = self.initialFitFullCovariance(
                muAtAmpMasked,
                covAtAmpMasked,
                covSqrtWeightsAtAmpMasked
            )

            # Fit full model (Eq. 20 of Astier+19) and same model with
            # b=0 (c=0 in this code)
            # Parameter vector layout: [a (r^2), c (r^2), noise (r^2), gain].
            pInit = np.concatenate((a0.ravel(), c0.ravel(), noise0.ravel(), np.array(gain0)), axis=None)
            functionsDict = {'fullModel': self.funcFullCovarianceModel,
                             'fullModelNoB': self.funcFullCovarianceModelNoB}
            fitResults = {'fullModel': {'a': [], 'c': [], 'noise': [], 'gain': [], 'paramsErr': []},
                          'fullModelNoB': {'a': [], 'c': [], 'noise': [], 'gain': [], 'paramsErr': []}}
            for key in functionsDict:
                params, paramsErr, _ = fitLeastSq(pInit, muAtAmpMasked,
                                                  covAtAmpMasked.ravel(), functionsDict[key],
                                                  weightsY=covSqrtWeightsAtAmpMasked.ravel())
                # Unpack the flat parameter vector back into matrices.
                a = params[:lenParams].reshape((matrixSide, matrixSide))
                c = params[lenParams:2*lenParams].reshape((matrixSide, matrixSide))
                noise = params[2*lenParams:3*lenParams].reshape((matrixSide, matrixSide))
                gain = params[-1]

                fitResults[key]['a'] = a
                fitResults[key]['c'] = c
                fitResults[key]['noise'] = noise
                fitResults[key]['gain'] = gain
                fitResults[key]['paramsErr'] = paramsErr

            # Put the information in the PTC dataset

            # Not used when ptcFitType is 'FULLCOVARIANCE'
            dataset.ptcFitPars[ampName] = np.array([np.nan])
            dataset.ptcFitParsError[ampName] = np.array([np.nan])
            dataset.ptcFitChiSq[ampName] = np.nan

            # Save full covariances, covariances models, and their weights.
            # dataset.expIdMask is already full, but needs to be
            # converted to bool.
            dataset.expIdMask[ampName] = np.array(dataset.expIdMask[ampName], dtype=bool)
            dataset.covariances[ampName] = covAtAmp
            # We evaluate the covariance model everywhere, even the
            # masked amps.
            dataset.covariancesModel[ampName] = self.evalCovModel(muAtAmp,
                                                                  fitResults['fullModel']['a'],
                                                                  fitResults['fullModel']['c'],
                                                                  fitResults['fullModel']['noise'],
                                                                  fitResults['fullModel']['gain'])
            dataset.covariancesSqrtWeights[ampName] = covSqrtWeightsAtAmp
            dataset.aMatrix[ampName] = fitResults['fullModel']['a']
            # b is recovered from the fitted c = a*b combination.
            dataset.bMatrix[ampName] = fitResults['fullModel']['c']/fitResults['fullModel']['a']
            dataset.covariancesModelNoB[ampName] = self.evalCovModel(muAtAmp,
                                                                     fitResults['fullModelNoB']['a'],
                                                                     fitResults['fullModelNoB']['c'],
                                                                     fitResults['fullModelNoB']['noise'],
                                                                     fitResults['fullModelNoB']['gain'],
                                                                     setBtoZero=True)
            dataset.aMatrixNoB[ampName] = fitResults['fullModelNoB']['a']
            dataset.gain[ampName] = fitResults['fullModel']['gain']
            dataset.gainErr[ampName] = fitResults['fullModel']['paramsErr'][-1]
            # Readout noise is the zero-lag entry of the noise matrix (e^2).
            readoutNoise = fitResults['fullModel']['noise'][0][0]
            readoutNoiseSqrt = np.sqrt(np.fabs(readoutNoise))
            dataset.noise[ampName] = readoutNoise
            readoutNoiseSigma = fitResults['fullModel']['paramsErr'][2*lenParams]
            # Error propagation of sqrt(|noise|):
            # sigma(sqrt(x)) = 0.5*sigma(x)/sqrt(x).
            dataset.noiseErr[ampName] = 0.5*(readoutNoiseSigma/np.fabs(readoutNoise))*readoutNoiseSqrt
            dataset.finalVars[ampName] = covAtAmp[:, 0, 0]
            dataset.finalModelVars[ampName] = dataset.covariancesModel[ampName][:, 0, 0]
            dataset.finalMeans[ampName] = muAtAmp

        return dataset

592 

593 def initialFitFullCovariance(self, mu, cov, sqrtW): 

594 """ Performs a crude parabolic fit of the data in order to start 

595 the full fit close to the solution, setting b=0 (c=0) in Eq. 20 

596 of Astier+19. 

597 

598 Parameters 

599 ---------- 

600 mu : `numpy.array`, (N,) 

601 Signal `mu` (ADU) 

602 cov : `numpy.array`, (N, M, M) 

603 Covariance arrays of size `(M, M)` (with 

604 `M = config.maximumRangeCovariancesAstier`), 

605 indexed by mean signal `mu`. 

606 sqrtW : `numpy.array`, (N,) 

607 Covariance weights, defined as 1./sqrt(Variances) 

608 

609 Returns 

610 ------- 

611 a : `numpy.array`, (M, M) 

612 "a" parameter per flux in Eq. 20 of Astier+19. 

613 c : `numpy.array`, (M, M) 

614 "c"="ab" parameter per flux in Eq. 20 of Astier+19. 

615 noise : `numpy.array`, (M, M) 

616 "noise" parameter per flux in Eq. 20 of Astier+19. 

617 gain : `float` 

618 Amplifier gain (e/ADU) 

619 """ 

620 matrixSide = self.config.maximumRangeCovariancesAstier 

621 

622 # Initialize fit parameters 

623 a = np.zeros((matrixSide, matrixSide)) 

624 c = np.zeros((matrixSide, matrixSide)) 

625 noise = np.zeros((matrixSide, matrixSide)) 

626 gain = 1. 

627 

628 # iterate the fit to account for higher orders 

629 # the chi2 does not necessarily go down, so one could 

630 # stop when it increases 

631 oldChi2 = 1e30 

632 for _ in range(5): 

633 model = np.nan_to_num(self.evalCovModel(mu, a, c, noise, gain, setBtoZero=True)) 

634 # loop on lags 

635 for i in range(matrixSide): 

636 for j in range(matrixSide): 

637 # fit a parabola for a given lag 

638 parsFit = np.polyfit(mu, cov[:, i, j] - model[:, i, j], 

639 2, w=sqrtW[:, i, j]) 

640 # model equation (Eq. 20) in Astier+19, with c=a*b=0: 

641 a[i, j] += parsFit[0] 

642 noise[i, j] += parsFit[2] 

643 if(i + j == 0): 

644 gain = 1./(1/gain+parsFit[1]) 

645 weightedRes = (model - cov)*sqrtW 

646 chi2 = (weightedRes.flatten()**2).sum() 

647 if chi2 > oldChi2: 

648 break 

649 oldChi2 = chi2 

650 

651 return a, c, noise, gain 

652 

653 def funcFullCovarianceModel(self, params, x): 

654 """Model to fit covariances from flat fields; Equation 20 of 

655 Astier+19. 

656 

657 Parameters 

658 ---------- 

659 params : `list` 

660 Parameters of the model: aMatrix, CMatrix, noiseMatrix, 

661 gain (e/ADU). 

662 x : `numpy.array`, (N,) 

663 Signal `mu` (ADU) 

664 

665 Returns 

666 ------- 

667 y : `numpy.array`, (N,) 

668 Covariance matrix. 

669 """ 

670 matrixSide = self.config.maximumRangeCovariancesAstier 

671 lenParams = matrixSide*matrixSide 

672 aMatrix = params[:lenParams].reshape((matrixSide, matrixSide)) 

673 cMatrix = params[lenParams:2*lenParams].reshape((matrixSide, matrixSide)) 

674 noiseMatrix = params[2*lenParams:3*lenParams].reshape((matrixSide, matrixSide)) 

675 gain = params[-1] 

676 

677 return self.evalCovModel(x, aMatrix, cMatrix, noiseMatrix, gain).flatten() 

678 

679 def funcFullCovarianceModelNoB(self, params, x): 

680 """Model to fit covariances from flat fields; Equation 20 of 

681 Astier+19, with b=0 (equivalent to c=a*b=0 in this code). 

682 

683 Parameters 

684 ---------- 

685 params : `list` 

686 Parameters of the model: aMatrix, CMatrix, noiseMatrix, 

687 gain (e/ADU). 

688 x : `numpy.array`, (N,) 

689 Signal mu (ADU) 

690 

691 Returns 

692 ------- 

693 y : `numpy.array`, (N,) 

694 Covariance matrix. 

695 """ 

696 matrixSide = self.config.maximumRangeCovariancesAstier 

697 lenParams = matrixSide*matrixSide 

698 aMatrix = params[:lenParams].reshape((matrixSide, matrixSide)) 

699 cMatrix = params[lenParams:2*lenParams].reshape((matrixSide, matrixSide)) 

700 noiseMatrix = params[2*lenParams:3*lenParams].reshape((matrixSide, matrixSide)) 

701 gain = params[-1] 

702 

703 return self.evalCovModel(x, aMatrix, cMatrix, noiseMatrix, gain, setBtoZero=True).flatten() 

704 

    def evalCovModel(self, mu, aMatrix, cMatrix, noiseMatrix, gain, setBtoZero=False):
        """Computes full covariances model (Eq. 20 of Astier+19).

        Parameters
        ----------
        mu : `numpy.array`, (N,)
            List of mean signals.
        aMatrix : `numpy.array`, (M, M)
            "a" parameter per flux in Eq. 20 of Astier+19.
        cMatrix : `numpy.array`, (M, M)
            "c"="ab" parameter per flux in Eq. 20 of Astier+19.
        noiseMatrix : `numpy.array`, (M, M)
            "noise" parameter per flux in Eq. 20 of Astier+19.
        gain : `float`
            Amplifier gain (e/ADU)
        setBtoZero : `bool`, optional
            Set "b" parameter in full model (see Astier+19) to zero.

        Returns
        -------
        covModel : `numpy.array`, (N, M, M)
            Covariances model.

        Notes
        -----
        By default, computes the covModel for the mu's stored(self.mu).
        Returns cov[Nmu, M, M]. The variance for the PTC is
        cov[:, 0, 0]. mu and cov are in ADUs and ADUs squared. To use
        electrons for both, the gain should be set to 1. This routine
        implements the model in Astier+19 (1905.08677).
        The parameters of the full model for C_ij(mu) ("C_ij" and "mu"
        in ADU^2 and ADU, respectively) in Astier+19 (Eq. 20) are:

        - "a" coefficients (M by M matrix), units: 1/e
        - "b" coefficients (M by M matrix), units: 1/e
        - noise matrix (M by M matrix), units: e^2
        - gain, units: e/ADU

        "b" appears in Eq. 20 only through the "ab" combination, which
        is defined in this code as "c=ab".
        """
        matrixSide = self.config.maximumRangeCovariancesAstier
        sa = (matrixSide, matrixSide)
        # pad a with zeros and symmetrize, so the convolutions below do
        # not suffer from edge effects at the matrix boundary.
        aEnlarged = np.zeros((int(sa[0]*1.5)+1, int(sa[1]*1.5)+1))
        aEnlarged[0:sa[0], 0:sa[1]] = aMatrix
        aSym = symmetrize(aEnlarged)
        # pad c with zeros and symmetrize
        cEnlarged = np.zeros((int(sa[0]*1.5)+1, int(sa[1]*1.5)+1))
        cEnlarged[0:sa[0], 0:sa[1]] = cMatrix
        cSym = symmetrize(cEnlarged)
        # Discrete convolutions realize the (a*a), (a*a*a), and (a*c)
        # terms of Eq. 20.
        a2 = fftconvolve(aSym, aSym, mode='same')
        a3 = fftconvolve(a2, aSym, mode='same')
        ac = fftconvolve(aSym, cSym, mode='same')
        # Locate the zero-lag center of the symmetrized array to crop
        # the convolved matrices back to (matrixSide, matrixSide).
        (xc, yc) = np.unravel_index(np.abs(aSym).argmax(), a2.shape)

        # Add a leading flux axis for broadcasting against mu.
        a1 = aMatrix[np.newaxis, :, :]
        a2 = a2[np.newaxis, xc:xc + matrixSide, yc:yc + matrixSide]
        a3 = a3[np.newaxis, xc:xc + matrixSide, yc:yc + matrixSide]
        ac = ac[np.newaxis, xc:xc + matrixSide, yc:yc + matrixSide]
        c1 = cMatrix[np.newaxis, ::]

        # assumes that mu is 1d
        bigMu = mu[:, np.newaxis, np.newaxis]*gain
        # c(=a*b in Astier+19) also has a contribution to the last
        # term, that is absent for now.
        if setBtoZero:
            c1 = np.zeros_like(c1)
            ac = np.zeros_like(ac)
        # Polynomial-in-mu expansion of Eq. 20, converted back to ADU^2
        # via the 1/gain^2 factors.
        covModel = (bigMu/(gain*gain)*(a1*bigMu+2./3.*(bigMu*bigMu)*(a2 + c1)
                    + (1./3.*a3 + 5./6.*ac)*(bigMu*bigMu*bigMu)) + noiseMatrix[np.newaxis, :, :]/gain**2)
        # add the Poisson term, and the read out noise (variance)
        covModel[:, 0, 0] += mu/gain

        return covModel

780 

781 # EXPAPPROXIMATION and POLYNOMIAL fit methods 

782 @staticmethod 

783 def _initialParsForPolynomial(order): 

784 assert(order >= 2) 

785 pars = np.zeros(order, dtype=float) 

786 pars[0] = 10 

787 pars[1] = 1 

788 pars[2:] = 0.0001 

789 return pars 

790 

791 @staticmethod 

792 def _boundsForPolynomial(initialPars, lowers=[], uppers=[]): 

793 if not len(lowers): 

794 lowers = [np.NINF for p in initialPars] 

795 if not len(uppers): 

796 uppers = [np.inf for p in initialPars] 

797 lowers[1] = 0 # no negative gains 

798 return (lowers, uppers) 

799 

800 @staticmethod 

801 def _boundsForAstier(initialPars, lowers=[], uppers=[]): 

802 if not len(lowers): 

803 lowers = [np.NINF for p in initialPars] 

804 if not len(uppers): 

805 uppers = [np.inf for p in initialPars] 

806 return (lowers, uppers) 

807 

808 @staticmethod 

809 def _getInitialGoodPoints(means, variances, minVarPivotSearch, consecutivePointsVarDecreases): 

810 """Return a boolean array to mask bad points. 

811 

812 Parameters 

813 ---------- 

814 means : `numpy.array` 

815 Input array with mean signal values. 

816 variances : `numpy.array` 

817 Input array with variances at each mean value. 

818 minVarPivotSearch : `float` 

819 The variance (in ADU^2), above which, the point 

820 of decreasing variance should be sought. 

821 consecutivePointsVarDecreases : `int` 

822 Required number of consecutive points/fluxes 

823 in the PTC where the variance 

824 decreases in order to find a first 

825 estimate of the PTC turn-off. 

826 

827 Returns 

828 ------ 

829 goodPoints : `numpy.array` [`bool`] 

830 Boolean array to select good (`True`) and bad (`False`) 

831 points. 

832 

833 Notes 

834 ----- 

835 Eliminate points beyond which the variance decreases. 

836 """ 

837 goodPoints = np.ones_like(means, dtype=bool) 

838 # Variances are sorted and should monotonically increase 

839 pivotList = np.where(np.array(np.diff(variances)) < 0)[0] 

840 if len(pivotList) > 0: 

841 # For small values, sometimes the variance decreases slightly 

842 # Only look when var > self.config.minVarPivotSearch 

843 pivotList = [p for p in pivotList if variances[p] > minVarPivotSearch] 

844 # Require that the varince decreases during 

845 # consecutivePointsVarDecreases 

846 # consecutive points. This will give a first 

847 # estimate of the PTC turn-off, which 

848 # may be updated (reduced) further in the code. 

849 if len(pivotList) > 1: 

850 # enumerate(pivotList) creates tuples (index, value), for 

851 # each value in pivotList. The lambda function subtracts 

852 # each value from the index. 

853 # groupby groups elements by equal key value. 

854 for k, g in groupby(enumerate(pivotList), lambda x: x[0]-x[1]): 

855 group = (map(itemgetter(1), g)) 

856 # Form groups of consecute values from pivotList 

857 group = list(map(int, group)) 

858 # values in pivotList are indices where np.diff(variances) 

859 # is negative, i.e., where the variance starts decreasing. 

860 # Find the first group of consecutive numbers when 

861 # variance decreases. 

862 if len(group) >= consecutivePointsVarDecreases: 

863 pivotIndex = np.min(group) 

864 goodPoints[pivotIndex+1:] = False 

865 break 

866 

867 # Finally, we filter out any infinities or NaNs. 

868 goodPoints[(~np.isfinite(means)) | (~np.isfinite(variances))] = False 

869 

870 return goodPoints 

871 

872 def _makeZeroSafe(self, array, substituteValue=1e-9): 

873 """""" 

874 array = np.array(array) 

875 nBad = Counter(np.ravel(array))[0] 

876 if nBad == 0: 

877 return array 

878 

879 index, = np.where(array == 0) 

880 if len(index): 

881 msg = f"Found {nBad} zeros in array at elements {index}" 

882 self.log.warning(msg) 

883 

884 array[index] = substituteValue 

885 

886 return array 

887 

def fitPtc(self, dataset):
    """Fit the photon transfer curve to a polynomial or to the
    Astier+19 approximation (Eq. 16).

    Fit the photon transfer curve with either a polynomial of
    the order specified in the task config, or using the
    exponential approximation in Astier+19 (Eq. 16).

    Sigma clipping is performed iteratively for the fit, as
    well as an initial clipping of data points that are more
    than `config.initialNonLinearityExclusionThreshold` away
    from lying on a straight line. This other step is necessary
    because the photon transfer curve turns over catastrophically
    at very high flux (because saturation
    drops the variance to ~0) and these far outliers cause the
    initial fit to fail, meaning the sigma cannot be calculated
    to perform the sigma-clipping.

    Parameters
    ----------
    dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
        The dataset containing the means, variances and
        exposure times.

    Returns
    -------
    dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
        This is the same dataset as the input parameter, however,
        it has been modified to include information such as the
        fit vectors and the fit parameters. See the class
        `PhotonTransferCurveDataset`.

    Raises
    ------
    RuntimeError
        Raised if dataset.ptcFitType is None or empty.
    """
    if dataset.ptcFitType:
        ptcFitType = dataset.ptcFitType
    else:
        # NOTE(review): "None of empty" looks like a typo for
        # "None or empty" — confirm before changing a runtime string.
        raise RuntimeError("ptcFitType is None of empty in PTC dataset.")
    # Pre-fill all covariance-model entries with NaNs; this fit type
    # (POLYNOMIAL/EXPAPPROXIMATION) does not produce them.
    matrixSide = self.config.maximumRangeCovariancesAstier
    nanMatrix = np.empty((matrixSide, matrixSide))
    nanMatrix[:] = np.nan

    for amp in dataset.ampNames:
        lenInputTimes = len(dataset.rawExpTimes[amp])
        listNanMatrix = np.empty((lenInputTimes, matrixSide, matrixSide))
        listNanMatrix[:] = np.nan

        dataset.covariancesModel[amp] = listNanMatrix
        dataset.aMatrix[amp] = nanMatrix
        dataset.bMatrix[amp] = nanMatrix
        dataset.covariancesModelNoB[amp] = listNanMatrix
        dataset.aMatrixNoB[amp] = nanMatrix

    def errFunc(p, x, y):
        # Residuals for least_squares; ptcFunc is bound below,
        # per amplifier, depending on the fit type (late binding).
        return ptcFunc(p, x) - y

    sigmaCutPtcOutliers = self.config.sigmaCutPtcOutliers
    maxIterationsPtcOutliers = self.config.maxIterationsPtcOutliers

    for i, ampName in enumerate(dataset.ampNames):
        timeVecOriginal = dataset.rawExpTimes[ampName]
        meanVecOriginal = dataset.rawMeans[ampName]
        varVecOriginal = dataset.rawVars[ampName]
        # Guard against zero variances before dividing by them below.
        varVecOriginal = self._makeZeroSafe(varVecOriginal)

        if self.config.doLegacyTurnoffAndOutlierSelection:
            # Discard points when the variance starts to decrease after two
            # consecutive signal levels
            goodPoints = self._getInitialGoodPoints(meanVecOriginal, varVecOriginal,
                                                    self.config.minVarPivotSearch,
                                                    self.config.consecutivePointsVarDecreases)
        else:
            goodPoints = dataset.expIdMask[ampName]

        # Check if all points are bad from the 'cpExtractPtcTask'
        initialExpIdMask = dataset.expIdMask[ampName]

        if not (goodPoints.any() and initialExpIdMask.any()):
            msg = (f"SERIOUS: All points in goodPoints: {goodPoints} or "
                   f"in initialExpIdMask: {initialExpIdMask} are bad."
                   f"Setting {ampName} to BAD.")
            self.log.warning(msg)
            # Fill entries with NaNs
            self.fillBadAmp(dataset, ptcFitType, ampName)
            continue

        # Save the point where the variance starts decreasing as the
        # PTC turnoff point
        ptcTurnoff = meanVecOriginal[goodPoints][-1]
        dataset.ptcTurnoff[ampName] = ptcTurnoff

        mask = goodPoints

        if ptcFitType == 'EXPAPPROXIMATION':
            ptcFunc = funcAstier
            parsIniPtc = [-1e-9, 1.0, 10.]  # a00, gain, noise^2
            # lowers and uppers obtained from BOT data studies by
            # C. Lage (UC Davis, 11/2020).
            if self.config.binSize > 1:
                bounds = self._boundsForAstier(parsIniPtc)
            else:
                bounds = self._boundsForAstier(parsIniPtc, lowers=[-1e-4, 0.5, -2000],
                                               uppers=[1e-4, 2.5, 2000])
        if ptcFitType == 'POLYNOMIAL':
            ptcFunc = funcPolynomial
            parsIniPtc = self._initialParsForPolynomial(self.config.polynomialFitDegree + 1)
            bounds = self._boundsForPolynomial(parsIniPtc)
        # NOTE(review): if ptcFitType is neither of the two values above,
        # ptcFunc/parsIniPtc/bounds are undefined here and the code below
        # would raise NameError — presumably config validation prevents
        # that; confirm.

        if self.config.doLegacyTurnoffAndOutlierSelection:
            # Before bootstrap fit, do an iterative fit to get rid of
            # outliers. This further process of outlier rejection be
            # skipped if self.config.maxIterationsPtcOutliers = 0.
            # We already did some initial outlier rejection above in
            # self._getInitialGoodPoints.
            count = 1
            newMask = np.ones_like(meanVecOriginal, dtype=bool)
            pars = parsIniPtc
            while count <= maxIterationsPtcOutliers:
                # Note that application of the mask actually shrinks the
                # array to size rather than setting elements to zero (as we
                # want) so always update mask itself and re-apply to the
                # original data.
                meanTempVec = meanVecOriginal[mask]
                varTempVec = varVecOriginal[mask]
                res = least_squares(errFunc, parsIniPtc, bounds=bounds, args=(meanTempVec, varTempVec))
                pars = res.x

                # change this to the original from the temp because
                # the masks are ANDed meaning once a point is masked
                # it's always masked, and the masks must always be the
                # same length for broadcasting
                sigResids = (varVecOriginal - ptcFunc(pars, meanVecOriginal))/np.sqrt(varVecOriginal)
                newMask = np.array([True if np.abs(r) < sigmaCutPtcOutliers else False
                                    for r in sigResids])
                mask = mask & newMask
                if not (mask.any() and newMask.any()):
                    msg = (f"SERIOUS: All points in either mask: {mask} or newMask: {newMask} are bad. "
                           f"Setting {ampName} to BAD.")
                    self.log.warning(msg)
                    # Fill entries with NaNs
                    self.fillBadAmp(dataset, ptcFitType, ampName)
                    break
                nDroppedTotal = Counter(mask)[False]
                self.log.debug("Iteration %d: discarded %d points in total for %s",
                               count, nDroppedTotal, ampName)
                count += 1
                # objects should never shrink
                assert (len(mask) == len(timeVecOriginal) == len(meanVecOriginal) == len(varVecOriginal))
        else:
            # Single fit on the already-selected points; no further
            # iterative outlier rejection in the non-legacy path.
            pars = parsIniPtc
            meanTempVec = meanVecOriginal[mask]
            varTempVec = varVecOriginal[mask]
            res = least_squares(
                errFunc,
                parsIniPtc,
                bounds=bounds,
                args=(meanVecOriginal[mask], varVecOriginal[mask]),
            )
            pars = res.x
            newMask = mask

        if not (mask.any() and newMask.any()):
            # Amp was marked bad inside the loop above; skip it.
            continue
        dataset.expIdMask[ampName] = np.array(dataset.expIdMask[ampName])
        # store the final mask
        if len(dataset.expIdMask[ampName]):
            dataset.expIdMask[ampName] &= mask  # bitwise_and if there is already a mask
        else:
            dataset.expIdMask[ampName] = mask
        # In case there was a previous mask stored
        mask = dataset.expIdMask[ampName]
        parsIniPtc = pars
        meanVecFinal = meanVecOriginal[mask]
        varVecFinal = varVecOriginal[mask]

        if Counter(mask)[False] > 0:
            self.log.info("Number of points discarded in PTC of amplifier %s:"
                          " %d out of %d", ampName, Counter(mask)[False], len(meanVecOriginal))

        if (len(meanVecFinal) < len(parsIniPtc)):
            msg = (f"SERIOUS: Not enough data points ({len(meanVecFinal)}) compared to the number of "
                   f"parameters of the PTC model({len(parsIniPtc)}). Setting {ampName} to BAD.")
            self.log.warning(msg)
            # Fill entries with NaNs
            self.fillBadAmp(dataset, ptcFitType, ampName)
            continue
        # Fit the PTC.
        # The variance of the variance is Var(v)=2*v^2/Npix. This is
        # already calculated in `makeCovArray` of CpPtcExtract.
        # dataset.covariancesSqrtWeights[ampName][:,0,0]
        # has 1/sqrt(Var(v)).
        weightsY = dataset.covariancesSqrtWeights[ampName][:, 0, 0][mask]
        if self.config.doFitBootstrap:
            parsFit, parsFitErr, reducedChiSqPtc = fitBootstrap(parsIniPtc, meanVecFinal,
                                                                varVecFinal, ptcFunc,
                                                                weightsY=weightsY)
        else:
            parsFit, parsFitErr, reducedChiSqPtc = fitLeastSq(parsIniPtc, meanVecFinal,
                                                              varVecFinal, ptcFunc,
                                                              weightsY=weightsY)
        dataset.ptcFitPars[ampName] = parsFit
        dataset.ptcFitParsError[ampName] = parsFitErr
        dataset.ptcFitChiSq[ampName] = reducedChiSqPtc

        # Masked-out points are kept in the final vectors as NaN so the
        # arrays stay aligned with the raw inputs.
        dataset.finalVars[ampName] = varVecOriginal
        dataset.finalVars[ampName][~mask] = np.nan
        dataset.finalModelVars[ampName] = ptcFunc(parsFit, meanVecOriginal)
        dataset.finalModelVars[ampName][~mask] = np.nan
        dataset.finalMeans[ampName] = meanVecOriginal
        dataset.finalMeans[ampName][~mask] = np.nan

        if ptcFitType == 'EXPAPPROXIMATION':
            # In Astier+19 Eq. 16, parsFit = [a00, gain, noise^2].
            ptcGain = parsFit[1]
            ptcGainErr = parsFitErr[1]
            ptcNoise = np.sqrt(np.fabs(parsFit[2]))
            # Error propagation for noise = sqrt(|parsFit[2]|).
            ptcNoiseErr = 0.5*(parsFitErr[2]/np.fabs(parsFit[2]))*np.sqrt(np.fabs(parsFit[2]))
        if ptcFitType == 'POLYNOMIAL':
            # For a polynomial PTC, the linear coefficient is 1/gain and
            # the constant term is the noise in ADU^2.
            ptcGain = 1./parsFit[1]
            ptcGainErr = np.fabs(1./parsFit[1])*(parsFitErr[1]/parsFit[1])
            ptcNoise = np.sqrt(np.fabs(parsFit[0]))*ptcGain
            ptcNoiseErr = (0.5*(parsFitErr[0]/np.fabs(parsFit[0]))*(np.sqrt(np.fabs(parsFit[0]))))*ptcGain
        dataset.gain[ampName] = ptcGain
        dataset.gainErr[ampName] = ptcGainErr
        dataset.noise[ampName] = ptcNoise
        dataset.noiseErr[ampName] = ptcNoiseErr

    # NOTE(review): these two tail guards look odd — the first rewrites
    # ptcFitType only when it is already non-empty (a no-op value-wise),
    # and the second assigns [] only when badAmps is already empty.
    # Possibly the conditions were meant to be inverted; confirm intent.
    if not len(dataset.ptcFitType) == 0:
        dataset.ptcFitType = ptcFitType
    if len(dataset.badAmps) == 0:
        dataset.badAmps = []

    return dataset

1123 

def fillBadAmp(self, dataset, ptcFitType, ampName):
    """Fill the dataset with NaNs if there are not enough
    good points.

    Parameters
    ----------
    dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
        The dataset containing the means, variances and
        exposure times.
    ptcFitType : {'POLYNOMIAL', 'EXPAPPROXIMATION'}
        Fit a 'POLYNOMIAL' (degree: 'polynomialFitDegree') or
        'EXPAPPROXIMATION' (Eq. 16 of Astier+19) to the PTC.
    ampName : `str`
        Amplifier name.
    """
    # Vector lengths match the number of input exposure pairs;
    # the parameter count depends on the fit type.
    nPoints = len(dataset.rawExpTimes[ampName])
    if ptcFitType in ["POLYNOMIAL", ]:
        nPars = self.config.polynomialFitDegree + 1
    else:
        nPars = 3

    dataset.badAmps.append(ampName)
    dataset.expIdMask[ampName] = np.repeat(False, nPoints)

    # Scalar per-amp quantities all become NaN.
    for scalarEntry in (dataset.gain, dataset.gainErr,
                        dataset.noise, dataset.noiseErr,
                        dataset.ptcFitChiSq, dataset.ptcTurnoff):
        scalarEntry[ampName] = np.nan

    # Fit parameters and their errors become NaN vectors of the
    # appropriate length for the fit type.
    dataset.ptcFitPars[ampName] = np.repeat(np.nan, nPars)
    dataset.ptcFitParsError[ampName] = np.repeat(np.nan, nPars)

    # Per-exposure vectors become NaN vectors of the input length.
    for vectorEntry in (dataset.finalVars, dataset.finalModelVars,
                        dataset.finalMeans):
        vectorEntry[ampName] = np.repeat(np.nan, nPoints)

    return