Hide keyboard shortcuts

Hot-keys on this page

r m x p   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

1# This file is part of cp_pipe. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (https://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# This program is free software: you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation, either version 3 of the License, or 

12# (at your option) any later version. 

13# 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18# 

19# You should have received a copy of the GNU General Public License 

20# along with this program. If not, see <https://www.gnu.org/licenses/>. 

21# 

22 

23__all__ = ['MeasurePhotonTransferCurveTask', 

24 'MeasurePhotonTransferCurveTaskConfig', 

25 'PhotonTransferCurveDataset'] 

26 

27import numpy as np 

28import matplotlib.pyplot as plt 

29from sqlite3 import OperationalError 

30from collections import Counter 

31from dataclasses import dataclass 

32 

33import lsst.afw.math as afwMath 

34import lsst.pex.config as pexConfig 

35import lsst.pipe.base as pipeBase 

36from .utils import (NonexistentDatasetTaskDataIdContainer, PairedVisitListTaskRunner, 

37 checkExpLengthEqual, fitLeastSq, fitBootstrap, funcPolynomial, funcAstier) 

38from scipy.optimize import least_squares 

39 

40from lsst.ip.isr.linearize import Linearizer 

41import datetime 

42 

43from .astierCovPtcUtils import (fftSize, CovFft, computeCovDirect, fitData) 

44 

45 

46class MeasurePhotonTransferCurveTaskConfig(pexConfig.Config): 

47 """Config class for photon transfer curve measurement task""" 

48 ccdKey = pexConfig.Field( 

49 dtype=str, 

50 doc="The key by which to pull a detector from a dataId, e.g. 'ccd' or 'detector'.", 

51 default='ccd', 

52 ) 

53 ptcFitType = pexConfig.ChoiceField( 

54 dtype=str, 

55 doc="Fit PTC to approximation in Astier+19 (Equation 16) or to a polynomial.", 

56 default="POLYNOMIAL", 

57 allowed={ 

58 "POLYNOMIAL": "n-degree polynomial (use 'polynomialFitDegree' to set 'n').", 

59 "EXPAPPROXIMATION": "Approximation in Astier+19 (Eq. 16).", 

60 "FULLCOVARIANCE": "Full covariances model in Astier+19 (Eq. 20)" 

61 } 

62 ) 

63 maximumRangeCovariancesAstier = pexConfig.Field( 

64 dtype=int, 

65 doc="Maximum range of covariances as in Astier+19", 

66 default=8, 

67 ) 

68 covAstierRealSpace = pexConfig.Field( 

69 dtype=bool, 

70 doc="Calculate covariances in real space or via FFT? (see appendix A of Astier+19).", 

71 default=False, 

72 ) 

73 polynomialFitDegree = pexConfig.Field( 

74 dtype=int, 

75 doc="Degree of polynomial to fit the PTC, when 'ptcFitType'=POLYNOMIAL.", 

76 default=3, 

77 ) 

78 doCreateLinearizer = pexConfig.Field( 

79 dtype=bool, 

80 doc="Calculate non-linearity and persist linearizer?", 

81 default=False, 

82 ) 

83 linearizerType = pexConfig.ChoiceField( 

84 dtype=str, 

85 doc="Linearizer type, if doCreateLinearizer=True", 

86 default="LINEARIZEPOLYNOMIAL", 

87 allowed={ 

88 "LINEARIZEPOLYNOMIAL": "n-degree polynomial (use 'polynomialFitDegreeNonLinearity' to set 'n').", 

89 "LINEARIZESQUARED": "c0 quadratic coefficient derived from coefficients of polynomiual fit", 

90 "LOOKUPTABLE": "Loouk table formed from linear part of polynomial fit." 

91 } 

92 ) 

93 polynomialFitDegreeNonLinearity = pexConfig.Field( 

94 dtype=int, 

95 doc="If doCreateLinearizer, degree of polynomial to fit the meanSignal vs exposureTime" + 

96 " curve to produce the table for LinearizeLookupTable.", 

97 default=3, 

98 ) 

99 binSize = pexConfig.Field( 

100 dtype=int, 

101 doc="Bin the image by this factor in both dimensions.", 

102 default=1, 

103 ) 

104 minMeanSignal = pexConfig.Field( 

105 dtype=float, 

106 doc="Minimum value (inclusive) of mean signal (in DN) above which to consider.", 

107 default=0, 

108 ) 

109 maxMeanSignal = pexConfig.Field( 

110 dtype=float, 

111 doc="Maximum value (inclusive) of mean signal (in DN) below which to consider.", 

112 default=9e6, 

113 ) 

114 initialNonLinearityExclusionThresholdPositive = pexConfig.RangeField( 

115 dtype=float, 

116 doc="Initially exclude data points with a variance that are more than a factor of this from being" 

117 " linear in the positive direction, from the PTC fit. Note that these points will also be" 

118 " excluded from the non-linearity fit. This is done before the iterative outlier rejection," 

119 " to allow an accurate determination of the sigmas for said iterative fit.", 

120 default=0.12, 

121 min=0.0, 

122 max=1.0, 

123 ) 

124 initialNonLinearityExclusionThresholdNegative = pexConfig.RangeField( 

125 dtype=float, 

126 doc="Initially exclude data points with a variance that are more than a factor of this from being" 

127 " linear in the negative direction, from the PTC fit. Note that these points will also be" 

128 " excluded from the non-linearity fit. This is done before the iterative outlier rejection," 

129 " to allow an accurate determination of the sigmas for said iterative fit.", 

130 default=0.25, 

131 min=0.0, 

132 max=1.0, 

133 ) 

134 sigmaCutPtcOutliers = pexConfig.Field( 

135 dtype=float, 

136 doc="Sigma cut for outlier rejection in PTC.", 

137 default=5.0, 

138 ) 

139 nSigmaClipPtc = pexConfig.Field( 

140 dtype=float, 

141 doc="Sigma cut for afwMath.StatisticsControl()", 

142 default=5.5, 

143 ) 

144 nIterSigmaClipPtc = pexConfig.Field( 

145 dtype=int, 

146 doc="Number of sigma-clipping iterations for afwMath.StatisticsControl()", 

147 default=1, 

148 ) 

149 maxIterationsPtcOutliers = pexConfig.Field( 

150 dtype=int, 

151 doc="Maximum number of iterations for outlier rejection in PTC.", 

152 default=2, 

153 ) 

154 doFitBootstrap = pexConfig.Field( 

155 dtype=bool, 

156 doc="Use bootstrap for the PTC fit parameters and errors?.", 

157 default=False, 

158 ) 

159 maxAduForLookupTableLinearizer = pexConfig.Field( 

160 dtype=int, 

161 doc="Maximum DN value for the LookupTable linearizer.", 

162 default=2**18, 

163 ) 

164 instrumentName = pexConfig.Field( 

165 dtype=str, 

166 doc="Instrument name.", 

167 default='', 

168 ) 

169 

170 

171@dataclass 

172class LinearityResidualsAndLinearizersDataset: 

173 """A simple class to hold the output from the 

174 `calculateLinearityResidualAndLinearizers` function. 

175 """ 

176 # Normalized coefficients for polynomial NL correction 

177 polynomialLinearizerCoefficients: list 

178 # Normalized coefficient for quadratic polynomial NL correction (c0) 

179 quadraticPolynomialLinearizerCoefficient: float 

180 # LUT array row for the amplifier at hand 

181 linearizerTableRow: list 

182 meanSignalVsTimePolyFitPars: list 

183 meanSignalVsTimePolyFitParsErr: list 

184 meanSignalVsTimePolyFitReducedChiSq: float 

185 

186 

187class PhotonTransferCurveDataset: 

188 """A simple class to hold the output data from the PTC task. 

189 

190 The dataset is made up of a dictionary for each item, keyed by the 

191 amplifiers' names, which much be supplied at construction time. 

192 

193 New items cannot be added to the class to save accidentally saving to the 

194 wrong property, and the class can be frozen if desired. 

195 

196 inputVisitPairs records the visits used to produce the data. 

197 When fitPtc() or fitCovariancesAstier() is run, a mask is built up, which is by definition 

198 always the same length as inputVisitPairs, rawExpTimes, rawMeans 

199 and rawVars, and is a list of bools, which are incrementally set to False 

200 as points are discarded from the fits. 

201 

202 PTC fit parameters for polynomials are stored in a list in ascending order 

203 of polynomial term, i.e. par[0]*x^0 + par[1]*x + par[2]*x^2 etc 

204 with the length of the list corresponding to the order of the polynomial 

205 plus one. 

206 

207 Parameters 

208 ---------- 

209 ampNames : `list` 

210 List with the names of the amplifiers of the detector at hand. 

211 

212 ptcFitType : `str` 

213 Type of model fitted to the PTC: "POLYNOMIAL", "EXPAPPROXIMATION", or "FULLCOVARIANCE". 

214 

215 Returns 

216 ------- 

217 `lsst.cp.pipe.ptc.PhotonTransferCurveDataset` 

218 Output dataset from MeasurePhotonTransferCurveTask. 

219 """ 

220 

221 def __init__(self, ampNames, ptcFitType): 

222 # add items to __dict__ directly because __setattr__ is overridden 

223 

224 # instance variables 

225 self.__dict__["ptcFitType"] = ptcFitType 

226 self.__dict__["ampNames"] = ampNames 

227 self.__dict__["badAmps"] = [] 

228 

229 # raw data variables 

230 # visitMask is the mask produced after outlier rejection. The mask produced by "FULLCOVARIANCE" 

231 # may differ from the one produced in the other two PTC fit types. 

232 self.__dict__["inputVisitPairs"] = {ampName: [] for ampName in ampNames} 

233 self.__dict__["visitMask"] = {ampName: [] for ampName in ampNames} 

234 self.__dict__["rawExpTimes"] = {ampName: [] for ampName in ampNames} 

235 self.__dict__["rawMeans"] = {ampName: [] for ampName in ampNames} 

236 self.__dict__["rawVars"] = {ampName: [] for ampName in ampNames} 

237 

238 # Gain and noise 

239 self.__dict__["gain"] = {ampName: -1. for ampName in ampNames} 

240 self.__dict__["gainErr"] = {ampName: -1. for ampName in ampNames} 

241 self.__dict__["noise"] = {ampName: -1. for ampName in ampNames} 

242 self.__dict__["noiseErr"] = {ampName: -1. for ampName in ampNames} 

243 

244 # if ptcFitTye in ["POLYNOMIAL", "EXPAPPROXIMATION"] 

245 # fit information 

246 self.__dict__["ptcFitPars"] = {ampName: [] for ampName in ampNames} 

247 self.__dict__["ptcFitParsError"] = {ampName: [] for ampName in ampNames} 

248 self.__dict__["ptcFitReducedChiSquared"] = {ampName: [] for ampName in ampNames} 

249 

250 # if ptcFitTye in ["FULLCOVARIANCE"] 

251 # "covariancesTuple" is a numpy recarray with entries of the form 

252 # ['mu', 'i', 'j', 'var', 'cov', 'npix', 'ext', 'expTime', 'ampName'] 

253 # "covariancesFits" has CovFit objects that fit the measured covariances to Eq. 20 of Astier+19. 

254 # In "covariancesFitsWithNoB", "b"=0 in the model described by Eq. 20 of Astier+19. 

255 self.__dict__["covariancesTuple"] = {ampName: [] for ampName in ampNames} 

256 self.__dict__["covariancesFitsWithNoB"] = {ampName: [] for ampName in ampNames} 

257 self.__dict__["covariancesFits"] = {ampName: [] for ampName in ampNames} 

258 self.__dict__["aMatrix"] = {ampName: [] for ampName in ampNames} 

259 self.__dict__["bMatrix"] = {ampName: [] for ampName in ampNames} 

260 

261 # "final" means that the "raw" vectors above had "visitMask" applied. 

262 self.__dict__["finalVars"] = {ampName: [] for ampName in ampNames} 

263 self.__dict__["finalModelVars"] = {ampName: [] for ampName in ampNames} 

264 self.__dict__["finalMeans"] = {ampName: [] for ampName in ampNames} 

265 

266 def __setattr__(self, attribute, value): 

267 """Protect class attributes""" 

268 if attribute not in self.__dict__: 

269 raise AttributeError(f"{attribute} is not already a member of PhotonTransferCurveDataset, which" 

270 " does not support setting of new attributes.") 

271 else: 

272 self.__dict__[attribute] = value 

273 

274 def getVisitsUsed(self, ampName): 

275 """Get the visits used, i.e. not discarded, for a given amp. 

276 

277 If no mask has been created yet, all visits are returned. 

278 """ 

279 if len(self.visitMask[ampName]) == 0: 

280 return self.inputVisitPairs[ampName] 

281 

282 # if the mask exists it had better be the same length as the visitPairs 

283 assert len(self.visitMask[ampName]) == len(self.inputVisitPairs[ampName]) 

284 

285 pairs = self.inputVisitPairs[ampName] 

286 mask = self.visitMask[ampName] 

287 # cast to bool required because numpy 

288 return [(v1, v2) for ((v1, v2), m) in zip(pairs, mask) if bool(m) is True] 

289 

290 def getGoodAmps(self): 

291 return [amp for amp in self.ampNames if amp not in self.badAmps] 

292 

293 

294class MeasurePhotonTransferCurveTask(pipeBase.CmdLineTask): 

295 """A class to calculate, fit, and plot a PTC from a set of flat pairs. 

296 

297 The Photon Transfer Curve (var(signal) vs mean(signal)) is a standard tool 

298 used in astronomical detectors characterization (e.g., Janesick 2001, 

299 Janesick 2007). If ptcFitType is "EXPAPPROXIMATION" or "POLYNOMIAL", this task calculates the 

300 PTC from a series of pairs of flat-field images; each pair taken at identical exposure 

301 times. The difference image of each pair is formed to eliminate fixed pattern noise, 

302 and then the variance of the difference image and the mean of the average image 

303 are used to produce the PTC. An n-degree polynomial or the approximation in Equation 

304 16 of Astier+19 ("The Shape of the Photon Transfer Curve of CCD sensors", 

305 arXiv:1905.08677) can be fitted to the PTC curve. These models include 

306 parameters such as the gain (e/DN) and readout noise. 

307 

308 Linearizers to correct for signal-chain non-linearity are also calculated. 

309 The `Linearizer` class, in general, can support per-amp linearizers, but in this 

310 task this is not supported. 

311 

312 If ptcFitType is "FULLCOVARIANCE", the covariances of the difference images are calculated via the 

313 DFT methods described in Astier+19 and the variances for the PTC are given by the cov[0,0] elements 

314 at each signal level. The full model in Equation 20 of Astier+19 is fit to the PTC to get the gain 

315 and the noise. 

316 

317 Parameters 

318 ---------- 

319 

320 *args: `list` 

321 Positional arguments passed to the Task constructor. None used at this 

322 time. 

323 **kwargs: `dict` 

324 Keyword arguments passed on to the Task constructor. None used at this 

325 time. 

326 

327 """ 

328 

329 RunnerClass = PairedVisitListTaskRunner 

330 ConfigClass = MeasurePhotonTransferCurveTaskConfig 

331 _DefaultName = "measurePhotonTransferCurve" 

332 

333 def __init__(self, *args, **kwargs): 

334 pipeBase.CmdLineTask.__init__(self, *args, **kwargs) 

335 plt.interactive(False) # stop windows popping up when plotting. When headless, use 'agg' backend too 

336 self.config.validate() 

337 self.config.freeze() 

338 

339 @classmethod 

340 def _makeArgumentParser(cls): 

341 """Augment argument parser for the MeasurePhotonTransferCurveTask.""" 

342 parser = pipeBase.ArgumentParser(name=cls._DefaultName) 

343 parser.add_argument("--visit-pairs", dest="visitPairs", nargs="*", 

344 help="Visit pairs to use. Each pair must be of the form INT,INT e.g. 123,456") 

345 parser.add_id_argument("--id", datasetType="photonTransferCurveDataset", 

346 ContainerClass=NonexistentDatasetTaskDataIdContainer, 

347 help="The ccds to use, e.g. --id ccd=0..100") 

348 return parser 

349 

350 @pipeBase.timeMethod 

351 def runDataRef(self, dataRef, visitPairs): 

352 """Run the Photon Transfer Curve (PTC) measurement task. 

353 

354 For a dataRef (which is each detector here), 

355 and given a list of visit pairs (postISR) at different exposure times, 

356 measure the PTC. 

357 

358 Parameters 

359 ---------- 

360 dataRef : list of lsst.daf.persistence.ButlerDataRef 

361 dataRef for the detector for the visits to be fit. 

362 

363 visitPairs : `iterable` of `tuple` of `int` 

364 Pairs of visit numbers to be processed together 

365 """ 

366 

367 # setup necessary objects 

368 detNum = dataRef.dataId[self.config.ccdKey] 

369 detector = dataRef.get('camera')[dataRef.dataId[self.config.ccdKey]] 

370 # expand some missing fields that we need for lsstCam. This is a work-around 

371 # for Gen2 problems that I (RHL) don't feel like solving. The calibs pipelines 

372 # (which inherit from CalibTask) use addMissingKeys() to do basically the same thing 

373 # 

374 # Basically, the butler's trying to look up the fields in `raw_visit` which won't work 

375 for name in dataRef.getButler().getKeys('bias'): 

376 if name not in dataRef.dataId: 

377 try: 

378 dataRef.dataId[name] = \ 

379 dataRef.getButler().queryMetadata('raw', [name], detector=detNum)[0] 

380 except OperationalError: 

381 pass 

382 

383 amps = detector.getAmplifiers() 

384 ampNames = [amp.getName() for amp in amps] 

385 datasetPtc = PhotonTransferCurveDataset(ampNames, self.config.ptcFitType) 

386 self.log.info('Measuring PTC using %s visits for detector %s' % (visitPairs, detector.getId())) 

387 

388 tupleRecords = [] 

389 allTags = [] 

390 for (v1, v2) in visitPairs: 

391 # Get postISR exposures. 

392 dataRef.dataId['expId'] = v1 

393 exp1 = dataRef.get("postISRCCD", immediate=True) 

394 dataRef.dataId['expId'] = v2 

395 exp2 = dataRef.get("postISRCCD", immediate=True) 

396 del dataRef.dataId['expId'] 

397 

398 checkExpLengthEqual(exp1, exp2, v1, v2, raiseWithMessage=True) 

399 expTime = exp1.getInfo().getVisitInfo().getExposureTime() 

400 

401 tupleRows = [] 

402 for ampNumber, amp in enumerate(detector): 

403 ampName = amp.getName() 

404 # covAstier: (i, j, var (cov[0,0]), cov, npix) 

405 doRealSpace = self.config.covAstierRealSpace 

406 muDiff, varDiff, covAstier = self.measureMeanVarCov(exp1, exp2, region=amp.getBBox(), 

407 covAstierRealSpace=doRealSpace) 

408 

409 datasetPtc.rawExpTimes[ampName].append(expTime) 

410 datasetPtc.rawMeans[ampName].append(muDiff) 

411 datasetPtc.rawVars[ampName].append(varDiff) 

412 datasetPtc.inputVisitPairs[ampName].append((v1, v2)) 

413 

414 tupleRows += [(muDiff, ) + covRow + (ampNumber, expTime, ampName) for covRow in covAstier] 

415 tags = ['mu', 'i', 'j', 'var', 'cov', 'npix', 'ext', 'expTime', 'ampName'] 

416 allTags += tags 

417 tupleRecords += tupleRows 

418 covariancesWithTags = np.core.records.fromrecords(tupleRecords, names=allTags) 

419 

420 if self.config.ptcFitType in ["FULLCOVARIANCE", ]: 

421 # Calculate covariances and fit them, including the PTC, to Astier+19 full model (Eq. 20) 

422 datasetPtc = self.fitCovariancesAstier(datasetPtc, covariancesWithTags) 

423 elif self.config.ptcFitType in ["EXPAPPROXIMATION", "POLYNOMIAL"]: 

424 # Fit the PTC to a polynomial or to Astier+19 exponential approximation (Eq. 16) 

425 # Fill up PhotonTransferCurveDataset object. 

426 datasetPtc = self.fitPtc(datasetPtc, self.config.ptcFitType) 

427 

428 # Fit a poynomial to calculate non-linearity and persist linearizer. 

429 if self.config.doCreateLinearizer: 

430 numberAmps = len(amps) 

431 numberAduValues = self.config.maxAduForLookupTableLinearizer 

432 lookupTableArray = np.zeros((numberAmps, numberAduValues), dtype=np.float32) 

433 

434 # Fit (non)linearity of signal vs time curve. 

435 # Fill up PhotonTransferCurveDataset object. 

436 # Fill up array for LUT linearizer (tableArray). 

437 # Produce coefficients for Polynomial ans Squared linearizers. 

438 # Build linearizer objects. 

439 linearizer = self.fitNonLinearityAndBuildLinearizers(datasetPtc, detector, 

440 tableArray=lookupTableArray, 

441 log=self.log) 

442 

443 if self.config.linearizerType == "LINEARIZEPOLYNOMIAL": 

444 linDataType = 'linearizePolynomial' 

445 linMsg = "polynomial (coefficients for a polynomial correction)." 

446 elif self.config.linearizerType == "LINEARIZESQUARED": 

447 linDataType = 'linearizePolynomial' 

448 linMsg = "squared (c0, derived from k_i coefficients of a polynomial fit)." 

449 elif self.config.linearizerType == "LOOKUPTABLE": 

450 linDataType = 'linearizePolynomial' 

451 linMsg = "lookup table (linear component of polynomial fit)." 

452 else: 

453 raise RuntimeError("Invalid config.linearizerType {selg.config.linearizerType}. " 

454 "Supported: 'LOOKUPTABLE', 'LINEARIZESQUARED', or 'LINEARIZEPOLYNOMIAL'") 

455 

456 butler = dataRef.getButler() 

457 self.log.info(f"Writing linearizer: \n {linMsg}") 

458 

459 detName = detector.getName() 

460 now = datetime.datetime.utcnow() 

461 calibDate = now.strftime("%Y-%m-%d") 

462 

463 butler.put(linearizer, datasetType=linDataType, dataId={'detector': detNum, 

464 'detectorName': detName, 'calibDate': calibDate}) 

465 

466 self.log.info(f"Writing PTC data to {dataRef.getUri(write=True)}") 

467 dataRef.put(datasetPtc, datasetType="photonTransferCurveDataset") 

468 

469 return pipeBase.Struct(exitStatus=0) 

470 

471 def fitCovariancesAstier(self, dataset, covariancesWithTagsArray): 

472 """Fit measured flat covariances to full model in Astier+19. 

473 

474 Parameters 

475 ---------- 

476 dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset` 

477 The dataset containing information such as the means, variances and exposure times. 

478 

479 covariancesWithTagsArray : `numpy.recarray` 

480 Tuple with at least (mu, cov, var, i, j, npix), where: 

481 mu : 0.5*(m1 + m2), where: 

482 mu1: mean value of flat1 

483 mu2: mean value of flat2 

484 cov: covariance value at lag(i, j) 

485 var: variance(covariance value at lag(0, 0)) 

486 i: lag dimension 

487 j: lag dimension 

488 npix: number of pixels used for covariance calculation. 

489 

490 Returns 

491 ------- 

492 dataset: `lsst.cp.pipe.ptc.PhotonTransferCurveDataset` 

493 This is the same dataset as the input paramter, however, it has been modified 

494 to include information such as the fit vectors and the fit parameters. See 

495 the class `PhotonTransferCurveDatase`. 

496 """ 

497 

498 covFits, covFitsNoB = fitData(covariancesWithTagsArray, maxMu=self.config.maxMeanSignal, 

499 r=self.config.maximumRangeCovariancesAstier) 

500 

501 dataset.covariancesTuple = covariancesWithTagsArray 

502 dataset.covariancesFits = covFits 

503 dataset.covariancesFitsWithNoB = covFitsNoB 

504 dataset = self.getOutputPtcDataCovAstier(dataset, covFits) 

505 

506 return dataset 

507 

508 def getOutputPtcDataCovAstier(self, dataset, covFits): 

509 """Get output data for PhotonTransferCurveCovAstierDataset from CovFit objects. 

510 

511 Parameters 

512 ---------- 

513 dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset` 

514 The dataset containing information such as the means, variances and exposure times. 

515 

516 covFits: `dict` 

517 Dictionary of CovFit objects, with amp names as keys. 

518 

519 Returns 

520 ------- 

521 dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset` 

522 This is the same dataset as the input paramter, however, it has been modified 

523 to include extra information such as the mask 1D array, gains, reoudout noise, measured signal, 

524 measured variance, modeled variance, a, and b coefficient matrices (see Astier+19) per amplifier. 

525 See the class `PhotonTransferCurveDatase`. 

526 """ 

527 

528 for i, amp in enumerate(covFits): 

529 fit = covFits[amp] 

530 meanVecFinal, varVecFinal, varVecModel, wc = fit.getNormalizedFitData(0, 0, divideByMu=False) 

531 gain = fit.getGain() 

532 dataset.visitMask[amp] = fit.getMaskVar() 

533 dataset.gain[amp] = gain 

534 dataset.gainErr[amp] = fit.getGainErr() 

535 dataset.noise[amp] = np.sqrt(np.fabs(fit.getRon())) 

536 dataset.noiseErr[amp] = fit.getRonErr() 

537 dataset.finalVars[amp].append(varVecFinal/(gain**2)) 

538 dataset.finalModelVars[amp].append(varVecModel/(gain**2)) 

539 dataset.finalMeans[amp].append(meanVecFinal/gain) 

540 dataset.aMatrix[amp].append(fit.getA()) 

541 dataset.bMatrix[amp].append(fit.getB()) 

542 

543 return dataset 

544 

545 def measureMeanVarCov(self, exposure1, exposure2, region=None, covAstierRealSpace=False): 

546 """Calculate the mean of each of two exposures and the variance and covariance of their difference. 

547 

548 The variance is calculated via afwMath, and the covariance via the methods in Astier+19 (appendix A). 

549 In theory, var = covariance[0,0]. This should be validated, and in the future, we may decide to just 

550 keep one (covariance). 

551 

552 Parameters 

553 ---------- 

554 exposure1 : `lsst.afw.image.exposure.exposure.ExposureF` 

555 First exposure of flat field pair. 

556 

557 exposure2 : `lsst.afw.image.exposure.exposure.ExposureF` 

558 Second exposure of flat field pair. 

559 

560 region : `lsst.geom.Box2I`, optional 

561 Region of each exposure where to perform the calculations (e.g, an amplifier). 

562 

563 covAstierRealSpace : `bool`, optional 

564 Should the covariannces in Astier+19 be calculated in real space or via FFT? 

565 See Appendix A of Astier+19. 

566 

567 Returns 

568 ------- 

569 mu : `float` 

570 0.5*(mu1 + mu2), where mu1, and mu2 are the clipped means of the regions in 

571 both exposures. 

572 

573 varDiff : `float` 

574 Half of the clipped variance of the difference of the regions inthe two input 

575 exposures. 

576 

577 covDiffAstier : `list` 

578 List with tuples of the form (dx, dy, var, cov, npix), where: 

579 dx : `int` 

580 Lag in x 

581 dy : `int` 

582 Lag in y 

583 var : `float` 

584 Variance at (dx, dy). 

585 cov : `float` 

586 Covariance at (dx, dy). 

587 nPix : `int` 

588 Number of pixel pairs used to evaluate var and cov. 

589 """ 

590 

591 if region is not None: 

592 im1Area = exposure1.maskedImage[region] 

593 im2Area = exposure2.maskedImage[region] 

594 else: 

595 im1Area = exposure1.maskedImage 

596 im2Area = exposure2.maskedImage 

597 

598 im1Area = afwMath.binImage(im1Area, self.config.binSize) 

599 im2Area = afwMath.binImage(im2Area, self.config.binSize) 

600 

601 statsCtrl = afwMath.StatisticsControl() 

602 statsCtrl.setNumSigmaClip(self.config.nSigmaClipPtc) 

603 statsCtrl.setNumIter(self.config.nIterSigmaClipPtc) 

604 # Clipped mean of images; then average of mean. 

605 mu1 = afwMath.makeStatistics(im1Area, afwMath.MEANCLIP, statsCtrl).getValue() 

606 mu2 = afwMath.makeStatistics(im2Area, afwMath.MEANCLIP, statsCtrl).getValue() 

607 mu = 0.5*(mu1 + mu2) 

608 

609 # Take difference of pairs 

610 # symmetric formula: diff = (mu2*im1-mu1*im2)/(0.5*(mu1+mu2)) 

611 temp = im2Area.clone() 

612 temp *= mu1 

613 diffIm = im1Area.clone() 

614 diffIm *= mu2 

615 diffIm -= temp 

616 diffIm /= mu 

617 

618 varDiff = 0.5*(afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, statsCtrl).getValue()) 

619 

620 # Get the mask and identify good pixels as '1', and the rest as '0'. 

621 w1 = np.where(im1Area.getMask().getArray() == 0, 1, 0) 

622 w2 = np.where(im2Area.getMask().getArray() == 0, 1, 0) 

623 

624 w12 = w1*w2 

625 wDiff = np.where(diffIm.getMask().getArray() == 0, 1, 0) 

626 w = w12*wDiff 

627 

628 maxRangeCov = self.config.maximumRangeCovariancesAstier 

629 if covAstierRealSpace: 

630 covDiffAstier = computeCovDirect(diffIm.getImage().getArray(), w, maxRangeCov) 

631 else: 

632 shapeDiff = diffIm.getImage().getArray().shape 

633 fftShape = (fftSize(shapeDiff[0] + maxRangeCov), fftSize(shapeDiff[1]+maxRangeCov)) 

634 c = CovFft(diffIm.getImage().getArray(), w, fftShape, maxRangeCov) 

635 covDiffAstier = c.reportCovFft(maxRangeCov) 

636 

637 return mu, varDiff, covDiffAstier 

638 

639 def computeCovDirect(self, diffImage, weightImage, maxRange): 

640 """Compute covariances of diffImage in real space. 

641 

642 For lags larger than ~25, it is slower than the FFT way. 

643 Taken from https://github.com/PierreAstier/bfptc/ 

644 

645 Parameters 

646 ---------- 

647 diffImage : `numpy.array` 

648 Image to compute the covariance of. 

649 

650 weightImage : `numpy.array` 

651 Weight image of diffImage (1's and 0's for good and bad pixels, respectively). 

652 

653 maxRange : `int` 

654 Last index of the covariance to be computed. 

655 

656 Returns 

657 ------- 

658 outList : `list` 

659 List with tuples of the form (dx, dy, var, cov, npix), where: 

660 dx : `int` 

661 Lag in x 

662 dy : `int` 

663 Lag in y 

664 var : `float` 

665 Variance at (dx, dy). 

666 cov : `float` 

667 Covariance at (dx, dy). 

668 nPix : `int` 

669 Number of pixel pairs used to evaluate var and cov. 

670 """ 

671 outList = [] 

672 var = 0 

673 # (dy,dx) = (0,0) has to be first 

674 for dy in range(maxRange + 1): 

675 for dx in range(0, maxRange + 1): 

676 if (dx*dy > 0): 

677 cov1, nPix1 = self.covDirectValue(diffImage, weightImage, dx, dy) 

678 cov2, nPix2 = self.covDirectValue(diffImage, weightImage, dx, -dy) 

679 cov = 0.5*(cov1 + cov2) 

680 nPix = nPix1 + nPix2 

681 else: 

682 cov, nPix = self.covDirectValue(diffImage, weightImage, dx, dy) 

683 if (dx == 0 and dy == 0): 

684 var = cov 

685 outList.append((dx, dy, var, cov, nPix)) 

686 

687 return outList 

688 

689 def covDirectValue(self, diffImage, weightImage, dx, dy): 

690 """Compute covariances of diffImage in real space at lag (dx, dy). 

691 

692 Taken from https://github.com/PierreAstier/bfptc/ (c.f., appendix of Astier+19). 

693 

694 Parameters 

695 ---------- 

696 diffImage : `numpy.array` 

697 Image to compute the covariance of. 

698 

699 weightImage : `numpy.array` 

700 Weight image of diffImage (1's and 0's for good and bad pixels, respectively). 

701 

702 dx : `int` 

703 Lag in x. 

704 

705 dy : `int` 

706 Lag in y. 

707 

708 Returns 

709 ------- 

710 cov : `float` 

711 Covariance at (dx, dy) 

712 

713 nPix : `int` 

714 Number of pixel pairs used to evaluate var and cov. 

715 """ 

716 (nCols, nRows) = diffImage.shape 

717 # switching both signs does not change anything: 

718 # it just swaps im1 and im2 below 

719 if (dx < 0): 

720 (dx, dy) = (-dx, -dy) 

721 # now, we have dx >0. We have to distinguish two cases 

722 # depending on the sign of dy 

723 if dy >= 0: 

724 im1 = diffImage[dy:, dx:] 

725 w1 = weightImage[dy:, dx:] 

726 im2 = diffImage[:nCols - dy, :nRows - dx] 

727 w2 = weightImage[:nCols - dy, :nRows - dx] 

728 else: 

729 im1 = diffImage[:nCols + dy, dx:] 

730 w1 = weightImage[:nCols + dy, dx:] 

731 im2 = diffImage[-dy:, :nRows - dx] 

732 w2 = weightImage[-dy:, :nRows - dx] 

733 # use the same mask for all 3 calculations 

734 wAll = w1*w2 

735 # do not use mean() because weightImage=0 pixels would then count 

736 nPix = wAll.sum() 

737 im1TimesW = im1*wAll 

738 s1 = im1TimesW.sum()/nPix 

739 s2 = (im2*wAll).sum()/nPix 

740 p = (im1TimesW*im2).sum()/nPix 

741 cov = p - s1*s2 

742 

743 return cov, nPix 

744 

745 def fitNonLinearityAndBuildLinearizers(self, datasetPtc, detector, tableArray=None, log=None): 

746 """Fit non-linearity function and build linearizer objects. 

747 

748 Parameters 

749 ---------- 

750 datasePtct : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset` 

751 The dataset containing information such as the means, variances and exposure times. 

752 nLinearity 

753 

754 detector : `lsst.afw.cameraGeom.Detector` 

755 Detector object. 

756 

757 tableArray : `np.array`, optional 

758 Optional. Look-up table array with size rows=nAmps and columns=DN values. 

759 It will be modified in-place if supplied. 

760 

761 log : `lsst.log.Log`, optional 

762 Logger to handle messages. 

763 

764 Returns 

765 ------- 

766 linearizer : `lsst.ip.isr.Linearizer` 

767 Linearizer object 

768 """ 

769 

770 # Fit NonLinearity 

771 datasetNonLinearity = self.fitNonLinearity(datasetPtc, tableArray=tableArray) 

772 

773 # Produce linearizer 

774 now = datetime.datetime.utcnow() 

775 calibDate = now.strftime("%Y-%m-%d") 

776 linType = self.config.linearizerType 

777 

778 if linType == "LOOKUPTABLE": 

779 tableArray = tableArray 

780 else: 

781 tableArray = None 

782 

783 linearizer = self.buildLinearizerObject(datasetNonLinearity, detector, calibDate, linType, 

784 instruName=self.config.instrumentName, 

785 tableArray=tableArray, 

786 log=log) 

787 

788 return linearizer 

789 

790 def fitNonLinearity(self, datasetPtc, tableArray=None): 

791 """Fit a polynomial to signal vs effective time curve to calculate linearity and residuals. 

792 

793 Parameters 

794 ---------- 

795 datasetPtc : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset` 

796 The dataset containing the means, variances and exposure times. 

797 

798 tableArray : `np.array` 

799 Optional. Look-up table array with size rows=nAmps and columns=DN values. 

800 It will be modified in-place if supplied. 

801 

802 Returns 

803 ------- 

804 datasetNonLinearity : `dict` 

805 Dictionary of `lsst.cp.pipe.ptc.LinearityResidualsAndLinearizersDataset` 

806 dataclasses. Each one holds the output of `calculateLinearityResidualAndLinearizers` per 

807 amplifier. 

808 """ 

809 datasetNonLinearity = {ampName: [] for ampName in datasetPtc.ampNames} 

810 for i, ampName in enumerate(datasetPtc.ampNames): 

811 # If a mask is not found, use all points. 

812 if (len(datasetPtc.visitMask[ampName]) == 0): 

813 self.log.warn(f"Mask not found for {ampName} in non-linearity fit. Using all points.") 

814 mask = np.repeat(True, len(datasetPtc.rawExpTimes[ampName])) 

815 else: 

816 mask = datasetPtc.visitMask[ampName] 

817 

818 timeVecFinal = np.array(datasetPtc.rawExpTimes[ampName])[mask] 

819 meanVecFinal = np.array(datasetPtc.rawMeans[ampName])[mask] 

820 

821 # Non-linearity residuals (NL of mean vs time curve): percentage, and fit to a quadratic function 

822 # In this case, len(parsIniNonLinearity) = 3 indicates that we want a quadratic fit 

823 datasetLinRes = self.calculateLinearityResidualAndLinearizers(timeVecFinal, meanVecFinal) 

824 

825 # LinearizerLookupTable 

826 if tableArray is not None: 

827 tableArray[i, :] = datasetLinRes.linearizerTableRow 

828 

829 datasetNonLinearity[ampName] = datasetLinRes 

830 

831 return datasetNonLinearity 

832 

833 def calculateLinearityResidualAndLinearizers(self, exposureTimeVector, meanSignalVector): 

834 """Calculate linearity residual and fit an n-order polynomial to the mean vs time curve 

835 to produce corrections (deviation from linear part of polynomial) for a particular amplifier 

836 to populate LinearizeLookupTable. 

837 Use the coefficients of this fit to calculate the correction coefficients for LinearizePolynomial 

838 and LinearizeSquared." 

839 

840 Parameters 

841 --------- 

842 

843 exposureTimeVector: `list` of `float` 

844 List of exposure times for each flat pair 

845 

846 meanSignalVector: `list` of `float` 

847 List of mean signal from diference image of flat pairs 

848 

849 Returns 

850 ------- 

851 dataset : `lsst.cp.pipe.ptc.LinearityResidualsAndLinearizersDataset` 

852 The dataset containing the fit parameters, the NL correction coefficients, and the 

853 LUT row for the amplifier at hand. 

854 

855 Notes 

856 ----- 

857 datase members: 

858 

859 dataset.polynomialLinearizerCoefficients : `list` of `float` 

860 Coefficients for LinearizePolynomial, where corrImage = uncorrImage + sum_i c_i uncorrImage^(2 + 

861 i). 

862 c_(j-2) = -k_j/(k_1^j) with units DN^(1-j) (c.f., Eq. 37 of 2003.05978). The units of k_j are 

863 DN/t^j, and they are fit from meanSignalVector = k0 + k1*exposureTimeVector + 

864 k2*exposureTimeVector^2 + ... + kn*exposureTimeVector^n, with 

865 n = "polynomialFitDegreeNonLinearity". k_0 and k_1 and degenerate with bias level and gain, 

866 and are not used by the non-linearity correction. Therefore, j = 2...n in the above expression 

867 (see `LinearizePolynomial` class in `linearize.py`.) 

868 

869 dataset.quadraticPolynomialLinearizerCoefficient : `float` 

870 Coefficient for LinearizeSquared, where corrImage = uncorrImage + c0*uncorrImage^2. 

871 c0 = -k2/(k1^2), where k1 and k2 are fit from 

872 meanSignalVector = k0 + k1*exposureTimeVector + k2*exposureTimeVector^2 +... 

873 + kn*exposureTimeVector^n, with n = "polynomialFitDegreeNonLinearity". 

874 

875 dataset.linearizerTableRow : `list` of `float` 

876 One dimensional array with deviation from linear part of n-order polynomial fit 

877 to mean vs time curve. This array will be one row (for the particular amplifier at hand) 

878 of the table array for LinearizeLookupTable. 

879 

880 dataset.meanSignalVsTimePolyFitPars : `list` of `float` 

881 Parameters from n-order polynomial fit to meanSignalVector vs exposureTimeVector. 

882 

883 dataset.meanSignalVsTimePolyFitParsErr : `list` of `float` 

884 Parameters from n-order polynomial fit to meanSignalVector vs exposureTimeVector. 

885 

886 dataset.meanSignalVsTimePolyFitReducedChiSq : `float` 

887 Reduced unweighted chi squared from polynomial fit to meanSignalVector vs exposureTimeVector. 

888 """ 

889 

890 # Lookup table linearizer 

891 parsIniNonLinearity = self._initialParsForPolynomial(self.config.polynomialFitDegreeNonLinearity + 1) 

892 if self.config.doFitBootstrap: 

893 parsFit, parsFitErr, reducedChiSquaredNonLinearityFit = fitBootstrap(parsIniNonLinearity, 

894 exposureTimeVector, 

895 meanSignalVector, 

896 funcPolynomial) 

897 else: 

898 parsFit, parsFitErr, reducedChiSquaredNonLinearityFit = fitLeastSq(parsIniNonLinearity, 

899 exposureTimeVector, 

900 meanSignalVector, 

901 funcPolynomial) 

902 

903 # LinearizeLookupTable: 

904 # Use linear part to get time at wich signal is maxAduForLookupTableLinearizer DN 

905 tMax = (self.config.maxAduForLookupTableLinearizer - parsFit[0])/parsFit[1] 

906 timeRange = np.linspace(0, tMax, self.config.maxAduForLookupTableLinearizer) 

907 signalIdeal = parsFit[0] + parsFit[1]*timeRange 

908 signalUncorrected = funcPolynomial(parsFit, timeRange) 

909 linearizerTableRow = signalIdeal - signalUncorrected # LinearizerLookupTable has corrections 

910 # LinearizePolynomial and LinearizeSquared: 

911 # Check that magnitude of higher order (>= 3) coefficents of the polyFit are small, 

912 # i.e., less than threshold = 1e-10 (typical quadratic and cubic coefficents are ~1e-6 

913 # and ~1e-12). 

914 k1 = parsFit[1] 

915 polynomialLinearizerCoefficients = [] 

916 for i, coefficient in enumerate(parsFit): 

917 c = -coefficient/(k1**i) 

918 polynomialLinearizerCoefficients.append(c) 

919 if np.fabs(c) > 1e-10: 

920 msg = f"Coefficient {c} in polynomial fit larger than threshold 1e-10." 

921 self.log.warn(msg) 

922 # Coefficient for LinearizedSquared. Called "c0" in linearize.py 

923 c0 = polynomialLinearizerCoefficients[2] 

924 

925 dataset = LinearityResidualsAndLinearizersDataset([], None, [], [], [], None) 

926 dataset.polynomialLinearizerCoefficients = polynomialLinearizerCoefficients 

927 dataset.quadraticPolynomialLinearizerCoefficient = c0 

928 dataset.linearizerTableRow = linearizerTableRow 

929 dataset.meanSignalVsTimePolyFitPars = parsFit 

930 dataset.meanSignalVsTimePolyFitParsErr = parsFitErr 

931 dataset.meanSignalVsTimePolyFitReducedChiSq = reducedChiSquaredNonLinearityFit 

932 

933 return dataset 

934 

935 def buildLinearizerObject(self, datasetNonLinearity, detector, calibDate, linearizerType, instruName='', 

936 tableArray=None, log=None): 

937 """Build linearizer object to persist. 

938 

939 Parameters 

940 ---------- 

941 datasetNonLinearity : `dict` 

942 Dictionary of `lsst.cp.pipe.ptc.LinearityResidualsAndLinearizersDataset` objects. 

943 

944 detector : `lsst.afw.cameraGeom.Detector` 

945 Detector object 

946 

947 calibDate : `datetime.datetime` 

948 Calibration date 

949 

950 linearizerType : `str` 

951 'LOOKUPTABLE', 'LINEARIZESQUARED', or 'LINEARIZEPOLYNOMIAL' 

952 

953 instruName : `str`, optional 

954 Instrument name 

955 

956 tableArray : `np.array`, optional 

957 Look-up table array with size rows=nAmps and columns=DN values 

958 

959 log : `lsst.log.Log`, optional 

960 Logger to handle messages 

961 

962 Returns 

963 ------- 

964 linearizer : `lsst.ip.isr.Linearizer` 

965 Linearizer object 

966 """ 

967 detName = detector.getName() 

968 detNum = detector.getId() 

969 if linearizerType == "LOOKUPTABLE": 

970 if tableArray is not None: 

971 linearizer = Linearizer(detector=detector, table=tableArray, log=log) 

972 else: 

973 raise RuntimeError("tableArray must be provided when creating a LookupTable linearizer") 

974 elif linearizerType in ("LINEARIZESQUARED", "LINEARIZEPOLYNOMIAL"): 

975 linearizer = Linearizer(log=log) 

976 else: 

977 raise RuntimeError("Invalid linearizerType {linearizerType} to build a Linearizer object. " 

978 "Supported: 'LOOKUPTABLE', 'LINEARIZESQUARED', or 'LINEARIZEPOLYNOMIAL'") 

979 for i, amp in enumerate(detector.getAmplifiers()): 

980 ampName = amp.getName() 

981 datasetNonLinAmp = datasetNonLinearity[ampName] 

982 if linearizerType == "LOOKUPTABLE": 

983 linearizer.linearityCoeffs[ampName] = [i, 0] 

984 linearizer.linearityType[ampName] = "LookupTable" 

985 elif linearizerType == "LINEARIZESQUARED": 

986 linearizer.fitParams[ampName] = datasetNonLinAmp.meanSignalVsTimePolyFitPars 

987 linearizer.fitParamsErr[ampName] = datasetNonLinAmp.meanSignalVsTimePolyFitParsErr 

988 linearizer.linearityFitReducedChiSquared[ampName] = ( 

989 datasetNonLinAmp.meanSignalVsTimePolyFitReducedChiSq) 

990 linearizer.linearityCoeffs[ampName] = [ 

991 datasetNonLinAmp.quadraticPolynomialLinearizerCoefficient] 

992 linearizer.linearityType[ampName] = "Squared" 

993 elif linearizerType == "LINEARIZEPOLYNOMIAL": 

994 linearizer.fitParams[ampName] = datasetNonLinAmp.meanSignalVsTimePolyFitPars 

995 linearizer.fitParamsErr[ampName] = datasetNonLinAmp.meanSignalVsTimePolyFitParsErr 

996 linearizer.linearityFitReducedChiSquared[ampName] = ( 

997 datasetNonLinAmp.meanSignalVsTimePolyFitReducedChiSq) 

998 # Slice correction coefficients (starting at 2) for polynomial linearizer 

999 # (and squared linearizer above). The first and second are reduntant with 

1000 # the bias and gain, respectively, and are not used by LinearizerPolynomial. 

1001 polyLinCoeffs = np.array(datasetNonLinAmp.polynomialLinearizerCoefficients[2:]) 

1002 linearizer.linearityCoeffs[ampName] = polyLinCoeffs 

1003 linearizer.linearityType[ampName] = "Polynomial" 

1004 linearizer.linearityBBox[ampName] = amp.getBBox() 

1005 linearizer.validate() 

1006 calibId = f"detectorName={detName} detector={detNum} calibDate={calibDate} ccd={detNum} filter=NONE" 

1007 

1008 try: 

1009 raftName = detName.split("_")[0] 

1010 calibId += f" raftName={raftName}" 

1011 except Exception: 

1012 raftname = "NONE" 

1013 calibId += f" raftName={raftname}" 

1014 

1015 serial = detector.getSerial() 

1016 linearizer.updateMetadata(instrumentName=instruName, detectorId=f"{detNum}", 

1017 calibId=calibId, serial=serial, detectorName=f"{detName}") 

1018 

1019 return linearizer 

1020 

1021 @staticmethod 

1022 def _initialParsForPolynomial(order): 

1023 assert(order >= 2) 

1024 pars = np.zeros(order, dtype=np.float) 

1025 pars[0] = 10 

1026 pars[1] = 1 

1027 pars[2:] = 0.0001 

1028 return pars 

1029 

1030 @staticmethod 

1031 def _boundsForPolynomial(initialPars): 

1032 lowers = [np.NINF for p in initialPars] 

1033 uppers = [np.inf for p in initialPars] 

1034 lowers[1] = 0 # no negative gains 

1035 return (lowers, uppers) 

1036 

1037 @staticmethod 

1038 def _boundsForAstier(initialPars): 

1039 lowers = [np.NINF for p in initialPars] 

1040 uppers = [np.inf for p in initialPars] 

1041 return (lowers, uppers) 

1042 

1043 @staticmethod 

1044 def _getInitialGoodPoints(means, variances, maxDeviationPositive, maxDeviationNegative): 

1045 """Return a boolean array to mask bad points. 

1046 

1047 A linear function has a constant ratio, so find the median 

1048 value of the ratios, and exclude the points that deviate 

1049 from that by more than a factor of maxDeviationPositive/negative. 

1050 Asymmetric deviations are supported as we expect the PTC to turn 

1051 down as the flux increases, but sometimes it anomalously turns 

1052 upwards just before turning over, which ruins the fits, so it 

1053 is wise to be stricter about restricting positive outliers than 

1054 negative ones. 

1055 

1056 Too high and points that are so bad that fit will fail will be included 

1057 Too low and the non-linear points will be excluded, biasing the NL fit.""" 

1058 ratios = [b/a for (a, b) in zip(means, variances)] 

1059 medianRatio = np.median(ratios) 

1060 ratioDeviations = [(r/medianRatio)-1 for r in ratios] 

1061 

1062 # so that it doesn't matter if the deviation is expressed as positive or negative 

1063 maxDeviationPositive = abs(maxDeviationPositive) 

1064 maxDeviationNegative = -1. * abs(maxDeviationNegative) 

1065 

1066 goodPoints = np.array([True if (r < maxDeviationPositive and r > maxDeviationNegative) 

1067 else False for r in ratioDeviations]) 

1068 return goodPoints 

1069 

1070 def _makeZeroSafe(self, array, warn=True, substituteValue=1e-9): 

1071 """""" 

1072 nBad = Counter(array)[0] 

1073 if nBad == 0: 

1074 return array 

1075 

1076 if warn: 

1077 msg = f"Found {nBad} zeros in array at elements {[x for x in np.where(array==0)[0]]}" 

1078 self.log.warn(msg) 

1079 

1080 array[array == 0] = substituteValue 

1081 return array 

1082 

1083 def fitPtc(self, dataset, ptcFitType): 

1084 """Fit the photon transfer curve to a polynimial or to Astier+19 approximation. 

1085 

1086 Fit the photon transfer curve with either a polynomial of the order 

1087 specified in the task config, or using the Astier approximation. 

1088 

1089 Sigma clipping is performed iteratively for the fit, as well as an 

1090 initial clipping of data points that are more than 

1091 config.initialNonLinearityExclusionThreshold away from lying on a 

1092 straight line. This other step is necessary because the photon transfer 

1093 curve turns over catastrophically at very high flux (because saturation 

1094 drops the variance to ~0) and these far outliers cause the initial fit 

1095 to fail, meaning the sigma cannot be calculated to perform the 

1096 sigma-clipping. 

1097 

1098 Parameters 

1099 ---------- 

1100 dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset` 

1101 The dataset containing the means, variances and exposure times 

1102 

1103 ptcFitType : `str` 

1104 Fit a 'POLYNOMIAL' (degree: 'polynomialFitDegree') or 

1105 'EXPAPPROXIMATION' (Eq. 16 of Astier+19) to the PTC 

1106 

1107 Returns 

1108 ------- 

1109 dataset: `lsst.cp.pipe.ptc.PhotonTransferCurveDataset` 

1110 This is the same dataset as the input paramter, however, it has been modified 

1111 to include information such as the fit vectors and the fit parameters. See 

1112 the class `PhotonTransferCurveDatase`. 

1113 """ 

1114 

1115 def errFunc(p, x, y): 

1116 return ptcFunc(p, x) - y 

1117 

1118 sigmaCutPtcOutliers = self.config.sigmaCutPtcOutliers 

1119 maxIterationsPtcOutliers = self.config.maxIterationsPtcOutliers 

1120 

1121 for i, ampName in enumerate(dataset.ampNames): 

1122 timeVecOriginal = np.array(dataset.rawExpTimes[ampName]) 

1123 meanVecOriginal = np.array(dataset.rawMeans[ampName]) 

1124 varVecOriginal = np.array(dataset.rawVars[ampName]) 

1125 varVecOriginal = self._makeZeroSafe(varVecOriginal) 

1126 

1127 mask = ((meanVecOriginal >= self.config.minMeanSignal) & 

1128 (meanVecOriginal <= self.config.maxMeanSignal)) 

1129 

1130 goodPoints = self._getInitialGoodPoints(meanVecOriginal, varVecOriginal, 

1131 self.config.initialNonLinearityExclusionThresholdPositive, 

1132 self.config.initialNonLinearityExclusionThresholdNegative) 

1133 mask = mask & goodPoints 

1134 

1135 if ptcFitType == 'EXPAPPROXIMATION': 

1136 ptcFunc = funcAstier 

1137 parsIniPtc = [-1e-9, 1.0, 10.] # a00, gain, noise 

1138 bounds = self._boundsForAstier(parsIniPtc) 

1139 if ptcFitType == 'POLYNOMIAL': 

1140 ptcFunc = funcPolynomial 

1141 parsIniPtc = self._initialParsForPolynomial(self.config.polynomialFitDegree + 1) 

1142 bounds = self._boundsForPolynomial(parsIniPtc) 

1143 

1144 # Before bootstrap fit, do an iterative fit to get rid of outliers 

1145 count = 1 

1146 while count <= maxIterationsPtcOutliers: 

1147 # Note that application of the mask actually shrinks the array 

1148 # to size rather than setting elements to zero (as we want) so 

1149 # always update mask itself and re-apply to the original data 

1150 meanTempVec = meanVecOriginal[mask] 

1151 varTempVec = varVecOriginal[mask] 

1152 res = least_squares(errFunc, parsIniPtc, bounds=bounds, args=(meanTempVec, varTempVec)) 

1153 pars = res.x 

1154 

1155 # change this to the original from the temp because the masks are ANDed 

1156 # meaning once a point is masked it's always masked, and the masks must 

1157 # always be the same length for broadcasting 

1158 sigResids = (varVecOriginal - ptcFunc(pars, meanVecOriginal))/np.sqrt(varVecOriginal) 

1159 newMask = np.array([True if np.abs(r) < sigmaCutPtcOutliers else False for r in sigResids]) 

1160 mask = mask & newMask 

1161 

1162 nDroppedTotal = Counter(mask)[False] 

1163 self.log.debug(f"Iteration {count}: discarded {nDroppedTotal} points in total for {ampName}") 

1164 count += 1 

1165 # objects should never shrink 

1166 assert (len(mask) == len(timeVecOriginal) == len(meanVecOriginal) == len(varVecOriginal)) 

1167 

1168 dataset.visitMask[ampName] = mask # store the final mask 

1169 parsIniPtc = pars 

1170 meanVecFinal = meanVecOriginal[mask] 

1171 varVecFinal = varVecOriginal[mask] 

1172 

1173 if Counter(mask)[False] > 0: 

1174 self.log.info((f"Number of points discarded in PTC of amplifier {ampName}:" + 

1175 f" {Counter(mask)[False]} out of {len(meanVecOriginal)}")) 

1176 

1177 if (len(meanVecFinal) < len(parsIniPtc)): 

1178 msg = (f"\nSERIOUS: Not enough data points ({len(meanVecFinal)}) compared to the number of" 

1179 f"parameters of the PTC model({len(parsIniPtc)}). Setting {ampName} to BAD.") 

1180 self.log.warn(msg) 

1181 # The first and second parameters of initial fit are discarded (bias and gain) 

1182 # for the final NL coefficients 

1183 dataset.badAmps.append(ampName) 

1184 dataset.gain[ampName] = np.nan 

1185 dataset.gainErr[ampName] = np.nan 

1186 dataset.noise[ampName] = np.nan 

1187 dataset.noiseErr[ampName] = np.nan 

1188 dataset.ptcFitPars[ampName] = np.nan 

1189 dataset.ptcFitParsError[ampName] = np.nan 

1190 dataset.ptcFitReducedChiSquared[ampName] = np.nan 

1191 continue 

1192 

1193 # Fit the PTC 

1194 if self.config.doFitBootstrap: 

1195 parsFit, parsFitErr, reducedChiSqPtc = fitBootstrap(parsIniPtc, meanVecFinal, 

1196 varVecFinal, ptcFunc) 

1197 else: 

1198 parsFit, parsFitErr, reducedChiSqPtc = fitLeastSq(parsIniPtc, meanVecFinal, 

1199 varVecFinal, ptcFunc) 

1200 dataset.ptcFitPars[ampName] = parsFit 

1201 dataset.ptcFitParsError[ampName] = parsFitErr 

1202 dataset.ptcFitReducedChiSquared[ampName] = reducedChiSqPtc 

1203 

1204 if ptcFitType == 'EXPAPPROXIMATION': 

1205 ptcGain = parsFit[1] 

1206 ptcGainErr = parsFitErr[1] 

1207 ptcNoise = np.sqrt(np.fabs(parsFit[2])) 

1208 ptcNoiseErr = 0.5*(parsFitErr[2]/np.fabs(parsFit[2]))*np.sqrt(np.fabs(parsFit[2])) 

1209 if ptcFitType == 'POLYNOMIAL': 

1210 ptcGain = 1./parsFit[1] 

1211 ptcGainErr = np.fabs(1./parsFit[1])*(parsFitErr[1]/parsFit[1]) 

1212 ptcNoise = np.sqrt(np.fabs(parsFit[0]))*ptcGain 

1213 ptcNoiseErr = (0.5*(parsFitErr[0]/np.fabs(parsFit[0]))*(np.sqrt(np.fabs(parsFit[0]))))*ptcGain 

1214 dataset.gain[ampName] = ptcGain 

1215 dataset.gainErr[ampName] = ptcGainErr 

1216 dataset.noise[ampName] = ptcNoise 

1217 dataset.noiseErr[ampName] = ptcNoiseErr 

1218 if not len(dataset.ptcFitType) == 0: 

1219 dataset.ptcFitType = ptcFitType 

1220 

1221 return dataset