
# This file is part of cp_pipe.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
#

__all__ = ['MeasurePhotonTransferCurveTask',
           'MeasurePhotonTransferCurveTaskConfig',
           'PhotonTransferCurveDataset']

import numpy as np
import matplotlib.pyplot as plt
from sqlite3 import OperationalError
from collections import Counter
from dataclasses import dataclass

import lsst.afw.math as afwMath
import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
from .utils import (NonexistentDatasetTaskDataIdContainer, PairedVisitListTaskRunner,
                    checkExpLengthEqual, fitLeastSq, fitBootstrap, funcPolynomial, funcAstier)
from scipy.optimize import least_squares

from lsst.ip.isr.linearize import Linearizer
import datetime

from .astierCovPtcUtils import (fftSize, CovFft, computeCovDirect, fitData)


class MeasurePhotonTransferCurveTaskConfig(pexConfig.Config):
    """Config class for photon transfer curve measurement task"""
    ccdKey = pexConfig.Field(
        dtype=str,
        doc="The key by which to pull a detector from a dataId, e.g. 'ccd' or 'detector'.",
        default='ccd',
    )
    ptcFitType = pexConfig.ChoiceField(
        dtype=str,
        doc="Fit PTC to approximation in Astier+19 (Equation 16) or to a polynomial.",
        default="POLYNOMIAL",
        allowed={
            "POLYNOMIAL": "n-degree polynomial (use 'polynomialFitDegree' to set 'n').",
            "EXPAPPROXIMATION": "Approximation in Astier+19 (Eq. 16).",
            "FULLCOVARIANCE": "Full covariances model in Astier+19 (Eq. 20)."
        }
    )
    maximumRangeCovariancesAstier = pexConfig.Field(
        dtype=int,
        doc="Maximum range of covariances as in Astier+19.",
        default=8,
    )
    covAstierRealSpace = pexConfig.Field(
        dtype=bool,
        doc="Calculate covariances in real space or via FFT? (see appendix A of Astier+19).",
        default=False,
    )
    polynomialFitDegree = pexConfig.Field(
        dtype=int,
        doc="Degree of polynomial to fit the PTC, when 'ptcFitType'=POLYNOMIAL.",
        default=3,
    )
    doCreateLinearizer = pexConfig.Field(
        dtype=bool,
        doc="Calculate non-linearity and persist linearizer?",
        default=False,
    )
    linearizerType = pexConfig.ChoiceField(
        dtype=str,
        doc="Linearizer type, if doCreateLinearizer=True.",
        default="LINEARIZEPOLYNOMIAL",
        allowed={
            "LINEARIZEPOLYNOMIAL": "n-degree polynomial (use 'polynomialFitDegreeNonLinearity' to set 'n').",
            "LINEARIZESQUARED": "c0 quadratic coefficient derived from coefficients of polynomial fit.",
            "LOOKUPTABLE": "Lookup table formed from linear part of polynomial fit."
        }
    )
    polynomialFitDegreeNonLinearity = pexConfig.Field(
        dtype=int,
        doc="If doCreateLinearizer, degree of polynomial to fit the meanSignal vs exposureTime" +
            " curve to produce the table for LinearizeLookupTable.",
        default=3,
    )
    binSize = pexConfig.Field(
        dtype=int,
        doc="Bin the image by this factor in both dimensions.",
        default=1,
    )
    minMeanSignal = pexConfig.Field(
        dtype=float,
        doc="Minimum value (inclusive) of mean signal (in DN) above which to consider.",
        default=0,
    )
    maxMeanSignal = pexConfig.Field(
        dtype=float,
        doc="Maximum value (inclusive) of mean signal (in DN) below which to consider.",
        default=9e6,
    )
    initialNonLinearityExclusionThresholdPositive = pexConfig.RangeField(
        dtype=float,
        doc="Initially exclude data points with a variance that are more than a factor of this from being"
            " linear in the positive direction, from the PTC fit. Note that these points will also be"
            " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
            " to allow an accurate determination of the sigmas for said iterative fit.",
        default=0.12,
        min=0.0,
        max=1.0,
    )
    initialNonLinearityExclusionThresholdNegative = pexConfig.RangeField(
        dtype=float,
        doc="Initially exclude data points with a variance that are more than a factor of this from being"
            " linear in the negative direction, from the PTC fit. Note that these points will also be"
            " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
            " to allow an accurate determination of the sigmas for said iterative fit.",
        default=0.25,
        min=0.0,
        max=1.0,
    )
    sigmaCutPtcOutliers = pexConfig.Field(
        dtype=float,
        doc="Sigma cut for outlier rejection in PTC.",
        default=5.0,
    )
    maskNameList = pexConfig.ListField(
        dtype=str,
        doc="Mask list to exclude from statistics calculations.",
        default=['DETECTED', 'BAD', 'NO_DATA'],
    )
    nSigmaClipPtc = pexConfig.Field(
        dtype=float,
        doc="Sigma cut for afwMath.StatisticsControl().",
        default=5.5,
    )
    nIterSigmaClipPtc = pexConfig.Field(
        dtype=int,
        doc="Number of sigma-clipping iterations for afwMath.StatisticsControl().",
        default=1,
    )
    maxIterationsPtcOutliers = pexConfig.Field(
        dtype=int,
        doc="Maximum number of iterations for outlier rejection in PTC.",
        default=2,
    )
    doFitBootstrap = pexConfig.Field(
        dtype=bool,
        doc="Use bootstrap for the PTC fit parameters and errors?",
        default=False,
    )
    maxAduForLookupTableLinearizer = pexConfig.Field(
        dtype=int,
        doc="Maximum DN value for the LookupTable linearizer.",
        default=2**18,
    )
    instrumentName = pexConfig.Field(
        dtype=str,
        doc="Instrument name.",
        default='',
    )
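
# Example (illustrative sketch, not part of the original module): a minimal config
# override file for this task, assuming the usual pex_config override mechanism.
# The values shown are made up for illustration.
#
#     config.ptcFitType = "EXPAPPROXIMATION"
#     config.maximumRangeCovariancesAstier = 8
#     config.doCreateLinearizer = True
#     config.linearizerType = "LINEARIZEPOLYNOMIAL"
#     config.minMeanSignal = 500     # DN
#     config.maxMeanSignal = 1.4e5   # DN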


@dataclass
class LinearityResidualsAndLinearizersDataset:
    """A simple class to hold the output from the
    `calculateLinearityResidualAndLinearizers` function.
    """
    # Normalized coefficients for polynomial NL correction
    polynomialLinearizerCoefficients: list
    # Normalized coefficient for quadratic polynomial NL correction (c0)
    quadraticPolynomialLinearizerCoefficient: float
    # LUT array row for the amplifier at hand
    linearizerTableRow: list
    meanSignalVsTimePolyFitPars: list
    meanSignalVsTimePolyFitParsErr: list
    meanSignalVsTimePolyFitReducedChiSq: float


class PhotonTransferCurveDataset:
    """A simple class to hold the output data from the PTC task.

    The dataset is made up of a dictionary for each item, keyed by the
    amplifiers' names, which must be supplied at construction time.

    New items cannot be added to the class, to prevent accidentally setting
    the wrong property, and the class can be frozen if desired.

    inputVisitPairs records the visits used to produce the data.
    When fitPtc() or fitCovariancesAstier() is run, a mask is built up, which is by definition
    always the same length as inputVisitPairs, rawExpTimes, rawMeans
    and rawVars, and is a list of bools, which are incrementally set to False
    as points are discarded from the fits.

    PTC fit parameters for polynomials are stored in a list in ascending order
    of polynomial term, i.e. par[0]*x^0 + par[1]*x + par[2]*x^2 etc.,
    with the length of the list corresponding to the order of the polynomial
    plus one.

    Parameters
    ----------
    ampNames : `list`
        List with the names of the amplifiers of the detector at hand.

    ptcFitType : `str`
        Type of model fitted to the PTC: "POLYNOMIAL", "EXPAPPROXIMATION", or "FULLCOVARIANCE".

    Returns
    -------
    `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
        Output dataset from MeasurePhotonTransferCurveTask.
    """

    def __init__(self, ampNames, ptcFitType):
        # add items to __dict__ directly because __setattr__ is overridden

        # instance variables
        self.__dict__["ptcFitType"] = ptcFitType
        self.__dict__["ampNames"] = ampNames
        self.__dict__["badAmps"] = []

        # raw data variables
        # visitMask is the mask produced after outlier rejection. The mask produced by "FULLCOVARIANCE"
        # may differ from the one produced in the other two PTC fit types.
        self.__dict__["inputVisitPairs"] = {ampName: [] for ampName in ampNames}
        self.__dict__["visitMask"] = {ampName: [] for ampName in ampNames}
        self.__dict__["rawExpTimes"] = {ampName: [] for ampName in ampNames}
        self.__dict__["rawMeans"] = {ampName: [] for ampName in ampNames}
        self.__dict__["rawVars"] = {ampName: [] for ampName in ampNames}

        # Gain and noise
        self.__dict__["gain"] = {ampName: -1. for ampName in ampNames}
        self.__dict__["gainErr"] = {ampName: -1. for ampName in ampNames}
        self.__dict__["noise"] = {ampName: -1. for ampName in ampNames}
        self.__dict__["noiseErr"] = {ampName: -1. for ampName in ampNames}

        # if ptcFitType in ["POLYNOMIAL", "EXPAPPROXIMATION"]
        # fit information
        self.__dict__["ptcFitPars"] = {ampName: [] for ampName in ampNames}
        self.__dict__["ptcFitParsError"] = {ampName: [] for ampName in ampNames}
        self.__dict__["ptcFitReducedChiSquared"] = {ampName: [] for ampName in ampNames}

        # if ptcFitType in ["FULLCOVARIANCE"]
        # "covariancesTuple" is a numpy recarray with entries of the form
        # ['mu', 'i', 'j', 'var', 'cov', 'npix', 'ext', 'expTime', 'ampName']
        # "covariancesFits" has CovFit objects that fit the measured covariances to Eq. 20 of Astier+19.
        # In "covariancesFitsWithNoB", "b"=0 in the model described by Eq. 20 of Astier+19.
        self.__dict__["covariancesTuple"] = {ampName: [] for ampName in ampNames}
        self.__dict__["covariancesFitsWithNoB"] = {ampName: [] for ampName in ampNames}
        self.__dict__["covariancesFits"] = {ampName: [] for ampName in ampNames}
        self.__dict__["aMatrix"] = {ampName: [] for ampName in ampNames}
        self.__dict__["bMatrix"] = {ampName: [] for ampName in ampNames}

        # "final" means that the "raw" vectors above had "visitMask" applied.
        self.__dict__["finalVars"] = {ampName: [] for ampName in ampNames}
        self.__dict__["finalModelVars"] = {ampName: [] for ampName in ampNames}
        self.__dict__["finalMeans"] = {ampName: [] for ampName in ampNames}

    def __setattr__(self, attribute, value):
        """Protect class attributes"""
        if attribute not in self.__dict__:
            raise AttributeError(f"{attribute} is not already a member of PhotonTransferCurveDataset, which"
                                 " does not support setting of new attributes.")
        else:
            self.__dict__[attribute] = value

    def getVisitsUsed(self, ampName):
        """Get the visits used, i.e. not discarded, for a given amp.

        If no mask has been created yet, all visits are returned.
        """
        if len(self.visitMask[ampName]) == 0:
            return self.inputVisitPairs[ampName]

        # if the mask exists it had better be the same length as the visitPairs
        assert len(self.visitMask[ampName]) == len(self.inputVisitPairs[ampName])

        pairs = self.inputVisitPairs[ampName]
        mask = self.visitMask[ampName]
        # cast to bool required because numpy
        return [(v1, v2) for ((v1, v2), m) in zip(pairs, mask) if bool(m) is True]

    def getGoodAmps(self):
        return [amp for amp in self.ampNames if amp not in self.badAmps]
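
# Example (illustrative sketch, not part of the original module): constructing the
# dataset and reading back per-amplifier results. The amp names and numbers are
# hypothetical.
#
#     dataset = PhotonTransferCurveDataset(['C00', 'C01'], ptcFitType="POLYNOMIAL")
#     dataset.rawMeans['C00'].append(1000.0)    # mean of a flat pair (DN)
#     dataset.rawVars['C00'].append(710.0)      # variance of the difference image (DN^2)
#     gain = dataset.gain['C00']                # e/DN, filled in by fitPtc()
#     pairs = dataset.getVisitsUsed('C00')      # pairs surviving outlier rejection
#     dataset.newField = 1                      # raises AttributeError (see __setattr__)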


class MeasurePhotonTransferCurveTask(pipeBase.CmdLineTask):
    """A class to calculate, fit, and plot a PTC from a set of flat pairs.

    The Photon Transfer Curve (var(signal) vs mean(signal)) is a standard tool
    used in astronomical detector characterization (e.g., Janesick 2001,
    Janesick 2007). If ptcFitType is "EXPAPPROXIMATION" or "POLYNOMIAL", this task calculates the
    PTC from a series of pairs of flat-field images, each pair taken at identical exposure
    times. The difference image of each pair is formed to eliminate fixed pattern noise,
    and then the variance of the difference image and the mean of the average image
    are used to produce the PTC. An n-degree polynomial or the approximation in Equation
    16 of Astier+19 ("The Shape of the Photon Transfer Curve of CCD sensors",
    arXiv:1905.08677) can be fitted to the PTC curve. These models include
    parameters such as the gain (e/DN) and readout noise.

    Linearizers to correct for signal-chain non-linearity are also calculated.
    The `Linearizer` class, in general, can support per-amp linearizers, but in this
    task this is not supported.

    If ptcFitType is "FULLCOVARIANCE", the covariances of the difference images are calculated via the
    DFT methods described in Astier+19 and the variances for the PTC are given by the cov[0,0] elements
    at each signal level. The full model in Equation 20 of Astier+19 is fit to the PTC to get the gain
    and the noise.

    Parameters
    ----------
    *args : `list`
        Positional arguments passed to the Task constructor. None used at this
        time.
    **kwargs : `dict`
        Keyword arguments passed on to the Task constructor. None used at this
        time.
    """
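    # For reference (a sketch added here, not in the original docstring): writing the mean
    # signal as mu (DN), the gain as g (e/DN), and the read noise as n (e), the POLYNOMIAL
    # model is var(mu) = p0 + p1*mu + p2*mu^2 + ..., with g ~ 1/p1 and n ~ sqrt(|p0|)*g
    # (see `fitPtc` below). The EXPAPPROXIMATION model (Eq. 16 of Astier+19, implemented by
    # `funcAstier` in .utils) is assumed to take the form
    #     var(mu) = 1/(2*a00*g^2) * (exp(2*a00*g*mu) - 1) + n^2/g^2,
    # where a00 is the leading brighter-fatter coefficient.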

    RunnerClass = PairedVisitListTaskRunner
    ConfigClass = MeasurePhotonTransferCurveTaskConfig
    _DefaultName = "measurePhotonTransferCurve"

    def __init__(self, *args, **kwargs):
        pipeBase.CmdLineTask.__init__(self, *args, **kwargs)
        plt.interactive(False)  # stop windows popping up when plotting. When headless, use 'agg' backend too
        self.config.validate()
        self.config.freeze()

    @classmethod
    def _makeArgumentParser(cls):
        """Augment argument parser for the MeasurePhotonTransferCurveTask."""
        parser = pipeBase.ArgumentParser(name=cls._DefaultName)
        parser.add_argument("--visit-pairs", dest="visitPairs", nargs="*",
                            help="Visit pairs to use. Each pair must be of the form INT,INT e.g. 123,456")
        parser.add_id_argument("--id", datasetType="photonTransferCurveDataset",
                               ContainerClass=NonexistentDatasetTaskDataIdContainer,
                               help="The ccds to use, e.g. --id ccd=0..100")
        return parser

    @pipeBase.timeMethod
    def runDataRef(self, dataRef, visitPairs):
        """Run the Photon Transfer Curve (PTC) measurement task.

        For a dataRef (which is each detector here),
        and given a list of visit pairs (postISR) at different exposure times,
        measure the PTC.

        Parameters
        ----------
        dataRef : `list` of `lsst.daf.persistence.ButlerDataRef`
            dataRef for the detector for the visits to be fit.

        visitPairs : `iterable` of `tuple` of `int`
            Pairs of visit numbers to be processed together.
        """

        # setup necessary objects
        detNum = dataRef.dataId[self.config.ccdKey]
        detector = dataRef.get('camera')[dataRef.dataId[self.config.ccdKey]]
        # expand some missing fields that we need for lsstCam. This is a work-around
        # for Gen2 problems that I (RHL) don't feel like solving. The calibs pipelines
        # (which inherit from CalibTask) use addMissingKeys() to do basically the same thing
        #
        # Basically, the butler's trying to look up the fields in `raw_visit` which won't work
        for name in dataRef.getButler().getKeys('bias'):
            if name not in dataRef.dataId:
                try:
                    dataRef.dataId[name] = \
                        dataRef.getButler().queryMetadata('raw', [name], detector=detNum)[0]
                except OperationalError:
                    pass

        amps = detector.getAmplifiers()
        ampNames = [amp.getName() for amp in amps]
        datasetPtc = PhotonTransferCurveDataset(ampNames, self.config.ptcFitType)
        self.log.info('Measuring PTC using %s visits for detector %s' % (visitPairs, detector.getId()))

        tupleRecords = []
        allTags = []
        for (v1, v2) in visitPairs:
            # Get postISR exposures.
            dataRef.dataId['expId'] = v1
            exp1 = dataRef.get("postISRCCD", immediate=True)
            dataRef.dataId['expId'] = v2
            exp2 = dataRef.get("postISRCCD", immediate=True)
            del dataRef.dataId['expId']

            checkExpLengthEqual(exp1, exp2, v1, v2, raiseWithMessage=True)
            expTime = exp1.getInfo().getVisitInfo().getExposureTime()

            tupleRows = []
            for ampNumber, amp in enumerate(detector):
                ampName = amp.getName()
                # covAstier: (i, j, var (cov[0,0]), cov, npix)
                doRealSpace = self.config.covAstierRealSpace
                muDiff, varDiff, covAstier = self.measureMeanVarCov(exp1, exp2, region=amp.getBBox(),
                                                                    covAstierRealSpace=doRealSpace)

                datasetPtc.rawExpTimes[ampName].append(expTime)
                datasetPtc.rawMeans[ampName].append(muDiff)
                datasetPtc.rawVars[ampName].append(varDiff)
                datasetPtc.inputVisitPairs[ampName].append((v1, v2))

                tupleRows += [(muDiff, ) + covRow + (ampNumber, expTime, ampName) for covRow in covAstier]
            tags = ['mu', 'i', 'j', 'var', 'cov', 'npix', 'ext', 'expTime', 'ampName']
            allTags += tags
            tupleRecords += tupleRows
        covariancesWithTags = np.core.records.fromrecords(tupleRecords, names=allTags)

        if self.config.ptcFitType in ["FULLCOVARIANCE", ]:
            # Calculate covariances and fit them, including the PTC, to Astier+19 full model (Eq. 20)
            datasetPtc = self.fitCovariancesAstier(datasetPtc, covariancesWithTags)
        elif self.config.ptcFitType in ["EXPAPPROXIMATION", "POLYNOMIAL"]:
            # Fit the PTC to a polynomial or to Astier+19 exponential approximation (Eq. 16)
            # Fill up PhotonTransferCurveDataset object.
            datasetPtc = self.fitPtc(datasetPtc, self.config.ptcFitType)

        # Fit a polynomial to calculate non-linearity and persist linearizer.
        if self.config.doCreateLinearizer:
            numberAmps = len(amps)
            numberAduValues = self.config.maxAduForLookupTableLinearizer
            lookupTableArray = np.zeros((numberAmps, numberAduValues), dtype=np.float32)

            # Fit (non)linearity of signal vs time curve.
            # Fill up PhotonTransferCurveDataset object.
            # Fill up array for LUT linearizer (tableArray).
            # Produce coefficients for Polynomial and Squared linearizers.
            # Build linearizer objects.
            linearizer = self.fitNonLinearityAndBuildLinearizers(datasetPtc, detector,
                                                                 tableArray=lookupTableArray,
                                                                 log=self.log)

            if self.config.linearizerType == "LINEARIZEPOLYNOMIAL":
                linDataType = 'linearizePolynomial'
                linMsg = "polynomial (coefficients for a polynomial correction)."
            elif self.config.linearizerType == "LINEARIZESQUARED":
                linDataType = 'linearizePolynomial'
                linMsg = "squared (c0, derived from k_i coefficients of a polynomial fit)."
            elif self.config.linearizerType == "LOOKUPTABLE":
                linDataType = 'linearizePolynomial'
                linMsg = "lookup table (linear component of polynomial fit)."
            else:
                raise RuntimeError(f"Invalid config.linearizerType {self.config.linearizerType}. "
                                   "Supported: 'LOOKUPTABLE', 'LINEARIZESQUARED', or 'LINEARIZEPOLYNOMIAL'")

            butler = dataRef.getButler()
            self.log.info(f"Writing linearizer: \n {linMsg}")

            detName = detector.getName()
            now = datetime.datetime.utcnow()
            calibDate = now.strftime("%Y-%m-%d")

            butler.put(linearizer, datasetType=linDataType, dataId={'detector': detNum,
                       'detectorName': detName, 'calibDate': calibDate})

        self.log.info(f"Writing PTC data to {dataRef.getUri(write=True)}")
        dataRef.put(datasetPtc, datasetType="photonTransferCurveDataset")

        return pipeBase.Struct(exitStatus=0)
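
    # Example (hedged sketch, not from the original source): a typical Gen2 command-line
    # invocation, assuming the usual driver-script naming for this CmdLineTask and a
    # hypothetical repository path REPO. The --visit-pairs and --id arguments are the
    # ones defined in `_makeArgumentParser` above.
    #
    #     measurePhotonTransferCurve.py REPO --rerun ptc \
    #         --id ccd=0 \
    #         --visit-pairs 123,124 223,224 323,324 \
    #         -c ptcFitType=EXPAPPROXIMATION doCreateLinearizer=True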

    def fitCovariancesAstier(self, dataset, covariancesWithTagsArray):
        """Fit measured flat covariances to full model in Astier+19.

        Parameters
        ----------
        dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
            The dataset containing information such as the means, variances and exposure times.

        covariancesWithTagsArray : `numpy.recarray`
            Tuple with at least (mu, cov, var, i, j, npix), where:
            mu : 0.5*(mu1 + mu2), where:
                mu1: mean value of flat1
                mu2: mean value of flat2
            cov: covariance value at lag (i, j)
            var: variance (covariance value at lag (0, 0))
            i: lag dimension
            j: lag dimension
            npix: number of pixels used for covariance calculation.

        Returns
        -------
        dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
            This is the same dataset as the input parameter; however, it has been modified
            to include information such as the fit vectors and the fit parameters. See
            the class `PhotonTransferCurveDataset`.
        """

        covFits, covFitsNoB = fitData(covariancesWithTagsArray, maxMu=self.config.maxMeanSignal,
                                      r=self.config.maximumRangeCovariancesAstier)

        dataset.covariancesTuple = covariancesWithTagsArray
        dataset.covariancesFits = covFits
        dataset.covariancesFitsWithNoB = covFitsNoB
        dataset = self.getOutputPtcDataCovAstier(dataset, covFits)

        return dataset

    def getOutputPtcDataCovAstier(self, dataset, covFits):
        """Get output data for PhotonTransferCurveCovAstierDataset from CovFit objects.

        Parameters
        ----------
        dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
            The dataset containing information such as the means, variances and exposure times.

        covFits : `dict`
            Dictionary of CovFit objects, with amp names as keys.

        Returns
        -------
        dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
            This is the same dataset as the input parameter; however, it has been modified
            to include extra information such as the mask 1D array, gains, readout noise, measured signal,
            measured variance, modeled variance, and a and b coefficient matrices (see Astier+19) per
            amplifier. See the class `PhotonTransferCurveDataset`.
        """

        for i, amp in enumerate(covFits):
            fit = covFits[amp]
            meanVecFinal, varVecFinal, varVecModel, wc = fit.getNormalizedFitData(0, 0, divideByMu=False)
            gain = fit.getGain()
            dataset.visitMask[amp] = fit.getMaskVar()
            dataset.gain[amp] = gain
            dataset.gainErr[amp] = fit.getGainErr()
            dataset.noise[amp] = np.sqrt(np.fabs(fit.getRon()))
            dataset.noiseErr[amp] = fit.getRonErr()
            dataset.finalVars[amp].append(varVecFinal/(gain**2))
            dataset.finalModelVars[amp].append(varVecModel/(gain**2))
            dataset.finalMeans[amp].append(meanVecFinal/gain)
            dataset.aMatrix[amp].append(fit.getA())
            dataset.bMatrix[amp].append(fit.getB())

        return dataset
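
    # Illustrative note (not in the original source): after the FULLCOVARIANCE path has
    # run, the per-amplifier results can be read back from the dataset, e.g.
    #
    #     means = datasetPtc.finalMeans['C00'][0]    # masked mean-signal vector
    #     model = datasetPtc.finalModelVars['C00'][0]
    #     a00 = datasetPtc.aMatrix['C00'][0][0, 0]   # leading "a" coefficient of Eq. 20
    #
    # where 'C00' is a hypothetical amplifier name.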

    def measureMeanVarCov(self, exposure1, exposure2, region=None, covAstierRealSpace=False):
        """Calculate the mean of each of two exposures and the variance and covariance of their difference.

        The variance is calculated via afwMath, and the covariance via the methods in Astier+19 (appendix A).
        In theory, var = covariance[0,0]. This should be validated, and in the future, we may decide to just
        keep one (covariance).

        Parameters
        ----------
        exposure1 : `lsst.afw.image.exposure.exposure.ExposureF`
            First exposure of flat field pair.

        exposure2 : `lsst.afw.image.exposure.exposure.ExposureF`
            Second exposure of flat field pair.

        region : `lsst.geom.Box2I`, optional
            Region of each exposure where to perform the calculations (e.g., an amplifier).

        covAstierRealSpace : `bool`, optional
            Should the covariances in Astier+19 be calculated in real space or via FFT?
            See Appendix A of Astier+19.

        Returns
        -------
        mu : `float`
            0.5*(mu1 + mu2), where mu1 and mu2 are the clipped means of the regions in
            both exposures.

        varDiff : `float`
            Half of the clipped variance of the difference of the regions in the two input
            exposures.

        covDiffAstier : `list`
            List with tuples of the form (dx, dy, var, cov, npix), where:
                dx : `int`
                    Lag in x.
                dy : `int`
                    Lag in y.
                var : `float`
                    Variance at (dx, dy).
                cov : `float`
                    Covariance at (dx, dy).
                nPix : `int`
                    Number of pixel pairs used to evaluate var and cov.
        """

        if region is not None:
            im1Area = exposure1.maskedImage[region]
            im2Area = exposure2.maskedImage[region]
        else:
            im1Area = exposure1.maskedImage
            im2Area = exposure2.maskedImage

        im1Area = afwMath.binImage(im1Area, self.config.binSize)
        im2Area = afwMath.binImage(im2Area, self.config.binSize)

        im1MaskVal = exposure1.getMask().getPlaneBitMask(self.config.maskNameList)
        im1StatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
                                                 self.config.nIterSigmaClipPtc,
                                                 im1MaskVal)
        im1StatsCtrl.setAndMask(im1MaskVal)

        im2MaskVal = exposure2.getMask().getPlaneBitMask(self.config.maskNameList)
        im2StatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
                                                 self.config.nIterSigmaClipPtc,
                                                 im2MaskVal)
        im2StatsCtrl.setAndMask(im2MaskVal)

        # Clipped mean of images; then average of mean.
        mu1 = afwMath.makeStatistics(im1Area, afwMath.MEANCLIP, im1StatsCtrl).getValue()
        mu2 = afwMath.makeStatistics(im2Area, afwMath.MEANCLIP, im2StatsCtrl).getValue()
        mu = 0.5*(mu1 + mu2)

        # Take difference of pairs
        # symmetric formula: diff = (mu2*im1-mu1*im2)/(0.5*(mu1+mu2))
        temp = im2Area.clone()
        temp *= mu1
        diffIm = im1Area.clone()
        diffIm *= mu2
        diffIm -= temp
        diffIm /= mu

        diffImMaskVal = diffIm.getMask().getPlaneBitMask(self.config.maskNameList)
        diffImStatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
                                                    self.config.nIterSigmaClipPtc,
                                                    diffImMaskVal)
        diffImStatsCtrl.setAndMask(diffImMaskVal)

        varDiff = 0.5*(afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, diffImStatsCtrl).getValue())

        # Get the mask and identify good pixels as '1', and the rest as '0'.
        w1 = np.where(im1Area.getMask().getArray() == 0, 1, 0)
        w2 = np.where(im2Area.getMask().getArray() == 0, 1, 0)

        w12 = w1*w2
        wDiff = np.where(diffIm.getMask().getArray() == 0, 1, 0)
        w = w12*wDiff

        maxRangeCov = self.config.maximumRangeCovariancesAstier
        if covAstierRealSpace:
            covDiffAstier = computeCovDirect(diffIm.getImage().getArray(), w, maxRangeCov)
        else:
            shapeDiff = diffIm.getImage().getArray().shape
            fftShape = (fftSize(shapeDiff[0] + maxRangeCov), fftSize(shapeDiff[1] + maxRangeCov))
            c = CovFft(diffIm.getImage().getArray(), w, fftShape, maxRangeCov)
            covDiffAstier = c.reportCovFft(maxRangeCov)

        return mu, varDiff, covDiffAstier
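
    # Worked sketch (the numbers are made up for illustration): for two flats with clipped
    # means mu1 and mu2, the scaled difference used above,
    #     diff = (mu2*im1 - mu1*im2) / (0.5*(mu1 + mu2)),
    # removes the common illumination pattern, and half of its clipped variance estimates
    # the per-image variance at mu = 0.5*(mu1 + mu2). For ideal Poisson flats at
    # mu ~ 10000 DN with a gain of 1 e/DN, varDiff should therefore come out near
    # 10000 DN^2.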

    def computeCovDirect(self, diffImage, weightImage, maxRange):
        """Compute covariances of diffImage in real space.

        For lags larger than ~25, it is slower than the FFT way.
        Taken from https://github.com/PierreAstier/bfptc/

        Parameters
        ----------
        diffImage : `numpy.array`
            Image to compute the covariance of.

        weightImage : `numpy.array`
            Weight image of diffImage (1's and 0's for good and bad pixels, respectively).

        maxRange : `int`
            Last index of the covariance to be computed.

        Returns
        -------
        outList : `list`
            List with tuples of the form (dx, dy, var, cov, npix), where:
                dx : `int`
                    Lag in x.
                dy : `int`
                    Lag in y.
                var : `float`
                    Variance at (dx, dy).
                cov : `float`
                    Covariance at (dx, dy).
                nPix : `int`
                    Number of pixel pairs used to evaluate var and cov.
        """
        outList = []
        var = 0
        # (dy, dx) = (0, 0) has to be first
        for dy in range(maxRange + 1):
            for dx in range(0, maxRange + 1):
                if (dx*dy > 0):
                    cov1, nPix1 = self.covDirectValue(diffImage, weightImage, dx, dy)
                    cov2, nPix2 = self.covDirectValue(diffImage, weightImage, dx, -dy)
                    cov = 0.5*(cov1 + cov2)
                    nPix = nPix1 + nPix2
                else:
                    cov, nPix = self.covDirectValue(diffImage, weightImage, dx, dy)
                if (dx == 0 and dy == 0):
                    var = cov
                outList.append((dx, dy, var, cov, nPix))

        return outList

    def covDirectValue(self, diffImage, weightImage, dx, dy):
        """Compute covariances of diffImage in real space at lag (dx, dy).

        Taken from https://github.com/PierreAstier/bfptc/ (cf. appendix of Astier+19).

        Parameters
        ----------
        diffImage : `numpy.array`
            Image to compute the covariance of.

        weightImage : `numpy.array`
            Weight image of diffImage (1's and 0's for good and bad pixels, respectively).

        dx : `int`
            Lag in x.

        dy : `int`
            Lag in y.

        Returns
        -------
        cov : `float`
            Covariance at (dx, dy).

        nPix : `int`
            Number of pixel pairs used to evaluate var and cov.
        """
        (nCols, nRows) = diffImage.shape
        # switching both signs does not change anything:
        # it just swaps im1 and im2 below
        if (dx < 0):
            (dx, dy) = (-dx, -dy)
        # now, we have dx > 0. We have to distinguish two cases
        # depending on the sign of dy
        if dy >= 0:
            im1 = diffImage[dy:, dx:]
            w1 = weightImage[dy:, dx:]
            im2 = diffImage[:nCols - dy, :nRows - dx]
            w2 = weightImage[:nCols - dy, :nRows - dx]
        else:
            im1 = diffImage[:nCols + dy, dx:]
            w1 = weightImage[:nCols + dy, dx:]
            im2 = diffImage[-dy:, :nRows - dx]
            w2 = weightImage[-dy:, :nRows - dx]
        # use the same mask for all 3 calculations
        wAll = w1*w2
        # do not use mean() because weightImage=0 pixels would then count
        nPix = wAll.sum()
        im1TimesW = im1*wAll
        s1 = im1TimesW.sum()/nPix
        s2 = (im2*wAll).sum()/nPix
        p = (im1TimesW*im2).sum()/nPix
        cov = p - s1*s2

        return cov, nPix
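
    # Minimal standalone sketch (hypothetical example, not part of the task): the lag
    # (dx, dy) covariance computed above is the weighted version of
    #
    #     import numpy as np
    #     rng = np.random.default_rng(0)
    #     im = rng.normal(size=(200, 200))
    #     dx, dy = 1, 2
    #     im1, im2 = im[dy:, dx:], im[:-dy, :-dx]
    #     cov = (im1 * im2).mean() - im1.mean() * im2.mean()  # ~0 for white noise
    #
    # with the weight image restricting all three averages to unmasked pixel pairs.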

    def fitNonLinearityAndBuildLinearizers(self, datasetPtc, detector, tableArray=None, log=None):
        """Fit non-linearity function and build linearizer objects.

        Parameters
        ----------
        datasetPtc : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
            The dataset containing information such as the means, variances and exposure times.

        detector : `lsst.afw.cameraGeom.Detector`
            Detector object.

        tableArray : `np.array`, optional
            Look-up table array with size rows=nAmps and columns=DN values.
            It will be modified in-place if supplied.

        log : `lsst.log.Log`, optional
            Logger to handle messages.

        Returns
        -------
        linearizer : `lsst.ip.isr.Linearizer`
            Linearizer object.
        """

        # Fit NonLinearity
        datasetNonLinearity = self.fitNonLinearity(datasetPtc, tableArray=tableArray)

        # Produce linearizer
        now = datetime.datetime.utcnow()
        calibDate = now.strftime("%Y-%m-%d")
        linType = self.config.linearizerType

        # Only the lookup-table linearizer needs the table array.
        if linType != "LOOKUPTABLE":
            tableArray = None

        linearizer = self.buildLinearizerObject(datasetNonLinearity, detector, calibDate, linType,
                                                instruName=self.config.instrumentName,
                                                tableArray=tableArray,
                                                log=log)

        return linearizer

    def fitNonLinearity(self, datasetPtc, tableArray=None):
        """Fit a polynomial to the signal vs effective time curve to calculate linearity and residuals.

        Parameters
        ----------
        datasetPtc : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
            The dataset containing the means, variances and exposure times.

        tableArray : `np.array`, optional
            Look-up table array with size rows=nAmps and columns=DN values.
            It will be modified in-place if supplied.

        Returns
        -------
        datasetNonLinearity : `dict`
            Dictionary of `lsst.cp.pipe.ptc.LinearityResidualsAndLinearizersDataset`
            dataclasses. Each one holds the output of `calculateLinearityResidualAndLinearizers` per
            amplifier.
        """
        datasetNonLinearity = {ampName: [] for ampName in datasetPtc.ampNames}
        for i, ampName in enumerate(datasetPtc.ampNames):
            # If a mask is not found, use all points.
            if (len(datasetPtc.visitMask[ampName]) == 0):
                self.log.warn(f"Mask not found for {ampName} in non-linearity fit. Using all points.")
                mask = np.repeat(True, len(datasetPtc.rawExpTimes[ampName]))
            else:
                mask = datasetPtc.visitMask[ampName]

            timeVecFinal = np.array(datasetPtc.rawExpTimes[ampName])[mask]
            meanVecFinal = np.array(datasetPtc.rawMeans[ampName])[mask]

            # Non-linearity residuals (NL of mean vs time curve): percentage, and fit to a quadratic function
            # In this case, len(parsIniNonLinearity) = 3 indicates that we want a quadratic fit
            datasetLinRes = self.calculateLinearityResidualAndLinearizers(timeVecFinal, meanVecFinal)

            # LinearizerLookupTable
            if tableArray is not None:
                tableArray[i, :] = datasetLinRes.linearizerTableRow

            datasetNonLinearity[ampName] = datasetLinRes

        return datasetNonLinearity

    def calculateLinearityResidualAndLinearizers(self, exposureTimeVector, meanSignalVector):
        """Calculate the linearity residual and fit an n-order polynomial to the mean vs time curve
        to produce corrections (deviation from linear part of polynomial) for a particular amplifier
        to populate LinearizeLookupTable.
        Use the coefficients of this fit to calculate the correction coefficients for LinearizePolynomial
        and LinearizeSquared.

        Parameters
        ----------
        exposureTimeVector : `list` of `float`
            List of exposure times for each flat pair.

        meanSignalVector : `list` of `float`
            List of mean signal from difference image of flat pairs.

        Returns
        -------
        dataset : `lsst.cp.pipe.ptc.LinearityResidualsAndLinearizersDataset`
            The dataset containing the fit parameters, the NL correction coefficients, and the
            LUT row for the amplifier at hand.

        Notes
        -----
        Dataset members:

        dataset.polynomialLinearizerCoefficients : `list` of `float`
            Coefficients for LinearizePolynomial, where corrImage = uncorrImage + sum_i c_i uncorrImage^(2 +
            i).
            c_(j-2) = -k_j/(k_1^j) with units DN^(1-j) (cf. Eq. 37 of 2003.05978). The units of k_j are
            DN/t^j, and they are fit from meanSignalVector = k0 + k1*exposureTimeVector +
            k2*exposureTimeVector^2 + ... + kn*exposureTimeVector^n, with
            n = "polynomialFitDegreeNonLinearity". k_0 and k_1 are degenerate with bias level and gain,
            and are not used by the non-linearity correction. Therefore, j = 2...n in the above expression
            (see `LinearizePolynomial` class in `linearize.py`.)

        dataset.quadraticPolynomialLinearizerCoefficient : `float`
            Coefficient for LinearizeSquared, where corrImage = uncorrImage + c0*uncorrImage^2.
            c0 = -k2/(k1^2), where k1 and k2 are fit from
            meanSignalVector = k0 + k1*exposureTimeVector + k2*exposureTimeVector^2 +...
            + kn*exposureTimeVector^n, with n = "polynomialFitDegreeNonLinearity".

        dataset.linearizerTableRow : `list` of `float`
            One-dimensional array with deviation from linear part of n-order polynomial fit
            to mean vs time curve. This array will be one row (for the particular amplifier at hand)
            of the table array for LinearizeLookupTable.

        dataset.meanSignalVsTimePolyFitPars : `list` of `float`
            Parameters from n-order polynomial fit to meanSignalVector vs exposureTimeVector.

        dataset.meanSignalVsTimePolyFitParsErr : `list` of `float`
            Parameter errors from the n-order polynomial fit to meanSignalVector vs exposureTimeVector.

        dataset.meanSignalVsTimePolyFitReducedChiSq : `float`
            Reduced unweighted chi squared from polynomial fit to meanSignalVector vs exposureTimeVector.
        """

        # Lookup table linearizer
        parsIniNonLinearity = self._initialParsForPolynomial(self.config.polynomialFitDegreeNonLinearity + 1)
        if self.config.doFitBootstrap:
            parsFit, parsFitErr, reducedChiSquaredNonLinearityFit = fitBootstrap(parsIniNonLinearity,
                                                                                 exposureTimeVector,
                                                                                 meanSignalVector,
                                                                                 funcPolynomial)
        else:
            parsFit, parsFitErr, reducedChiSquaredNonLinearityFit = fitLeastSq(parsIniNonLinearity,
                                                                               exposureTimeVector,
                                                                               meanSignalVector,
                                                                               funcPolynomial)

        # LinearizeLookupTable:
        # Use linear part to get time at which signal is maxAduForLookupTableLinearizer DN
        tMax = (self.config.maxAduForLookupTableLinearizer - parsFit[0])/parsFit[1]
        timeRange = np.linspace(0, tMax, self.config.maxAduForLookupTableLinearizer)
        signalIdeal = parsFit[0] + parsFit[1]*timeRange
        signalUncorrected = funcPolynomial(parsFit, timeRange)
        linearizerTableRow = signalIdeal - signalUncorrected  # LinearizerLookupTable has corrections

        # LinearizePolynomial and LinearizeSquared:
        # Check that magnitude of higher order (>= 3) coefficients of the polyFit are small,
        # i.e., less than threshold = 1e-10 (typical quadratic and cubic coefficients are ~1e-6
        # and ~1e-12).
        k1 = parsFit[1]
        polynomialLinearizerCoefficients = []
        for i, coefficient in enumerate(parsFit):
            c = -coefficient/(k1**i)
            polynomialLinearizerCoefficients.append(c)
            if np.fabs(c) > 1e-10:
                msg = f"Coefficient {c} in polynomial fit larger than threshold 1e-10."
                self.log.warn(msg)
        # Coefficient for LinearizeSquared. Called "c0" in linearize.py
        c0 = polynomialLinearizerCoefficients[2]

        dataset = LinearityResidualsAndLinearizersDataset([], None, [], [], [], None)
        dataset.polynomialLinearizerCoefficients = polynomialLinearizerCoefficients
        dataset.quadraticPolynomialLinearizerCoefficient = c0
        dataset.linearizerTableRow = linearizerTableRow
        dataset.meanSignalVsTimePolyFitPars = parsFit
        dataset.meanSignalVsTimePolyFitParsErr = parsFitErr
        dataset.meanSignalVsTimePolyFitReducedChiSq = reducedChiSquaredNonLinearityFit

        return dataset
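
    # Worked example (illustrative numbers): if the mean-vs-time fit gives
    # k1 = 1000.0 DN/s and k2 = -1e-3 DN/s^2, the quadratic correction coefficient is
    #     c0 = -k2 / k1**2 = 1e-9 DN^-1,
    # i.e. roughly a 0.1% correction at 1e6 DN, consistent with the c_(j-2) = -k_j/k_1^j
    # relation documented above.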

    def buildLinearizerObject(self, datasetNonLinearity, detector, calibDate, linearizerType, instruName='',
                              tableArray=None, log=None):
        """Build linearizer object to persist.

        Parameters
        ----------
        datasetNonLinearity : `dict`
            Dictionary of `lsst.cp.pipe.ptc.LinearityResidualsAndLinearizersDataset` objects.

        detector : `lsst.afw.cameraGeom.Detector`
            Detector object.

        calibDate : `datetime.datetime`
            Calibration date.

        linearizerType : `str`
            'LOOKUPTABLE', 'LINEARIZESQUARED', or 'LINEARIZEPOLYNOMIAL'.

        instruName : `str`, optional
            Instrument name.

        tableArray : `np.array`, optional
            Look-up table array with size rows=nAmps and columns=DN values.

        log : `lsst.log.Log`, optional
            Logger to handle messages.

        Returns
        -------
        linearizer : `lsst.ip.isr.Linearizer`
            Linearizer object.
        """
        detName = detector.getName()
        detNum = detector.getId()
        if linearizerType == "LOOKUPTABLE":
            if tableArray is not None:
                linearizer = Linearizer(detector=detector, table=tableArray, log=log)
            else:
                raise RuntimeError("tableArray must be provided when creating a LookupTable linearizer")
        elif linearizerType in ("LINEARIZESQUARED", "LINEARIZEPOLYNOMIAL"):
            linearizer = Linearizer(log=log)
        else:
            raise RuntimeError(f"Invalid linearizerType {linearizerType} to build a Linearizer object. "
                               "Supported: 'LOOKUPTABLE', 'LINEARIZESQUARED', or 'LINEARIZEPOLYNOMIAL'")
        for i, amp in enumerate(detector.getAmplifiers()):
            ampName = amp.getName()
            datasetNonLinAmp = datasetNonLinearity[ampName]
            if linearizerType == "LOOKUPTABLE":
                linearizer.linearityCoeffs[ampName] = [i, 0]
                linearizer.linearityType[ampName] = "LookupTable"
            elif linearizerType == "LINEARIZESQUARED":
                linearizer.fitParams[ampName] = datasetNonLinAmp.meanSignalVsTimePolyFitPars
                linearizer.fitParamsErr[ampName] = datasetNonLinAmp.meanSignalVsTimePolyFitParsErr
                linearizer.linearityFitReducedChiSquared[ampName] = (
                    datasetNonLinAmp.meanSignalVsTimePolyFitReducedChiSq)
                linearizer.linearityCoeffs[ampName] = [
                    datasetNonLinAmp.quadraticPolynomialLinearizerCoefficient]
                linearizer.linearityType[ampName] = "Squared"
            elif linearizerType == "LINEARIZEPOLYNOMIAL":
                linearizer.fitParams[ampName] = datasetNonLinAmp.meanSignalVsTimePolyFitPars
                linearizer.fitParamsErr[ampName] = datasetNonLinAmp.meanSignalVsTimePolyFitParsErr
                linearizer.linearityFitReducedChiSquared[ampName] = (
                    datasetNonLinAmp.meanSignalVsTimePolyFitReducedChiSq)
                # Slice correction coefficients (starting at 2) for polynomial linearizer
                # (and squared linearizer above). The first and second are redundant with
                # the bias and gain, respectively, and are not used by LinearizePolynomial.
                polyLinCoeffs = np.array(datasetNonLinAmp.polynomialLinearizerCoefficients[2:])
                linearizer.linearityCoeffs[ampName] = polyLinCoeffs
                linearizer.linearityType[ampName] = "Polynomial"
            linearizer.linearityBBox[ampName] = amp.getBBox()
        linearizer.validate()
        calibId = f"detectorName={detName} detector={detNum} calibDate={calibDate} ccd={detNum} filter=NONE"

        try:
            raftName = detName.split("_")[0]
            calibId += f" raftName={raftName}"
        except Exception:
            raftName = "NONE"
            calibId += f" raftName={raftName}"

        serial = detector.getSerial()
        linearizer.updateMetadata(instrumentName=instruName, detectorId=f"{detNum}",
                                  calibId=calibId, serial=serial, detectorName=f"{detName}")

        return linearizer
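
    # Illustrative note (a sketch of how the persisted coefficients are meant to be used,
    # following the docstrings above; not code from this module): for the "Polynomial"
    # type, the stored linearityCoeffs c_i correct an image as
    #     corrImage = uncorrImage + sum_i c_i * uncorrImage**(2 + i),
    # while the "Squared" type keeps only the single c0 term, and "LookupTable" stores
    # the (row, offset) indices into the table filled by `fitNonLinearity`.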

    @staticmethod
    def _initialParsForPolynomial(order):
        assert order >= 2
        pars = np.zeros(order, dtype=np.float64)
        pars[0] = 10
        pars[1] = 1
        pars[2:] = 0.0001
        return pars

    @staticmethod
    def _boundsForPolynomial(initialPars):
        lowers = [-np.inf for p in initialPars]
        uppers = [np.inf for p in initialPars]
        lowers[1] = 0  # no negative gains
        return (lowers, uppers)

    @staticmethod
    def _boundsForAstier(initialPars):
        lowers = [-np.inf for p in initialPars]
        uppers = [np.inf for p in initialPars]
        return (lowers, uppers)

    @staticmethod
    def _getInitialGoodPoints(means, variances, maxDeviationPositive, maxDeviationNegative):
        """Return a boolean array to mask bad points.

        A linear function has a constant ratio, so find the median
        value of the ratios, and exclude the points that deviate
        from that by more than a factor of maxDeviationPositive/negative.
        Asymmetric deviations are supported as we expect the PTC to turn
        down as the flux increases, but sometimes it anomalously turns
        upwards just before turning over, which ruins the fits, so it
        is wise to be stricter about restricting positive outliers than
        negative ones.

        If the thresholds are too high, points so bad that the fit will fail are
        included; if they are too low, genuinely non-linear points are excluded,
        biasing the NL fit.
        """
        ratios = [b/a for (a, b) in zip(means, variances)]
        medianRatio = np.median(ratios)
        ratioDeviations = [(r/medianRatio)-1 for r in ratios]

        # so that it doesn't matter if the deviation is expressed as positive or negative
        maxDeviationPositive = abs(maxDeviationPositive)
        maxDeviationNegative = -1. * abs(maxDeviationNegative)

        goodPoints = np.array([True if (r < maxDeviationPositive and r > maxDeviationNegative)
                               else False for r in ratioDeviations])
        return goodPoints
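
    # Toy example (values invented for illustration): with means = [1, 2, 3, 4] and
    # variances = [1.0, 2.0, 3.0, 2.0], the var/mean ratios are [1.0, 1.0, 1.0, 0.5],
    # the median ratio is 1.0, and the deviations are [0, 0, 0, -0.5]; with the default
    # thresholds (+0.12, -0.25) the last point is flagged as bad, so the returned mask
    # is [True, True, True, False].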

    def _makeZeroSafe(self, array, warn=True, substituteValue=1e-9):
        """Replace zeros in `array` with `substituteValue`, warning if any were found."""
        nBad = Counter(array)[0]
        if nBad == 0:
            return array

        if warn:
            msg = f"Found {nBad} zeros in array at elements {[x for x in np.where(array==0)[0]]}"
            self.log.warn(msg)

        array[array == 0] = substituteValue
        return array

    def fitPtc(self, dataset, ptcFitType):
        """Fit the photon transfer curve to a polynomial or to the Astier+19 approximation.

        Fit the photon transfer curve with either a polynomial of the order
        specified in the task config, or using the Astier approximation.

        Sigma clipping is performed iteratively for the fit, as well as an
        initial clipping of data points that are more than
        config.initialNonLinearityExclusionThreshold away from lying on a
        straight line. This other step is necessary because the photon transfer
        curve turns over catastrophically at very high flux (because saturation
        drops the variance to ~0) and these far outliers cause the initial fit
        to fail, meaning the sigma cannot be calculated to perform the
        sigma-clipping.

        Parameters
        ----------
        dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
            The dataset containing the means, variances and exposure times.

        ptcFitType : `str`
            Fit a 'POLYNOMIAL' (degree: 'polynomialFitDegree') or
            'EXPAPPROXIMATION' (Eq. 16 of Astier+19) to the PTC.

        Returns
        -------
        dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
            This is the same dataset as the input parameter; however, it has been modified
            to include information such as the fit vectors and the fit parameters. See
            the class `PhotonTransferCurveDataset`.
        """

        def errFunc(p, x, y):
            return ptcFunc(p, x) - y

        sigmaCutPtcOutliers = self.config.sigmaCutPtcOutliers
        maxIterationsPtcOutliers = self.config.maxIterationsPtcOutliers

        for i, ampName in enumerate(dataset.ampNames):
            timeVecOriginal = np.array(dataset.rawExpTimes[ampName])
            meanVecOriginal = np.array(dataset.rawMeans[ampName])
            varVecOriginal = np.array(dataset.rawVars[ampName])
            varVecOriginal = self._makeZeroSafe(varVecOriginal)

            mask = ((meanVecOriginal >= self.config.minMeanSignal) &
                    (meanVecOriginal <= self.config.maxMeanSignal))

            goodPoints = self._getInitialGoodPoints(meanVecOriginal, varVecOriginal,
                                                    self.config.initialNonLinearityExclusionThresholdPositive,
                                                    self.config.initialNonLinearityExclusionThresholdNegative)
            mask = mask & goodPoints

            if ptcFitType == 'EXPAPPROXIMATION':
                ptcFunc = funcAstier
                parsIniPtc = [-1e-9, 1.0, 10.]  # a00, gain, noise
                bounds = self._boundsForAstier(parsIniPtc)
            if ptcFitType == 'POLYNOMIAL':
                ptcFunc = funcPolynomial
                parsIniPtc = self._initialParsForPolynomial(self.config.polynomialFitDegree + 1)
                bounds = self._boundsForPolynomial(parsIniPtc)

            # Before bootstrap fit, do an iterative fit to get rid of outliers
            count = 1
            while count <= maxIterationsPtcOutliers:
                # Note that application of the mask actually shrinks the array
                # to size rather than setting elements to zero (as we want) so
                # always update mask itself and re-apply to the original data
                meanTempVec = meanVecOriginal[mask]
                varTempVec = varVecOriginal[mask]
                res = least_squares(errFunc, parsIniPtc, bounds=bounds, args=(meanTempVec, varTempVec))
                pars = res.x

                # change this to the original from the temp because the masks are ANDed
                # meaning once a point is masked it's always masked, and the masks must
                # always be the same length for broadcasting
                sigResids = (varVecOriginal - ptcFunc(pars, meanVecOriginal))/np.sqrt(varVecOriginal)
                newMask = np.array([True if np.abs(r) < sigmaCutPtcOutliers else False for r in sigResids])
                mask = mask & newMask

                nDroppedTotal = Counter(mask)[False]
                self.log.debug(f"Iteration {count}: discarded {nDroppedTotal} points in total for {ampName}")
                count += 1
                # objects should never shrink
                assert (len(mask) == len(timeVecOriginal) == len(meanVecOriginal) == len(varVecOriginal))

            dataset.visitMask[ampName] = mask  # store the final mask
            parsIniPtc = pars
            meanVecFinal = meanVecOriginal[mask]
            varVecFinal = varVecOriginal[mask]

            if Counter(mask)[False] > 0:
                self.log.info((f"Number of points discarded in PTC of amplifier {ampName}:" +
                               f" {Counter(mask)[False]} out of {len(meanVecOriginal)}"))

            if (len(meanVecFinal) < len(parsIniPtc)):
                msg = (f"\nSERIOUS: Not enough data points ({len(meanVecFinal)}) compared to the number of "
                       f"parameters of the PTC model ({len(parsIniPtc)}). Setting {ampName} to BAD.")
                self.log.warn(msg)
                # The first and second parameters of initial fit are discarded (bias and gain)
                # for the final NL coefficients
                dataset.badAmps.append(ampName)
                dataset.gain[ampName] = np.nan
                dataset.gainErr[ampName] = np.nan
                dataset.noise[ampName] = np.nan
                dataset.noiseErr[ampName] = np.nan
                dataset.ptcFitPars[ampName] = np.nan
                dataset.ptcFitParsError[ampName] = np.nan
                dataset.ptcFitReducedChiSquared[ampName] = np.nan
                continue

            # Fit the PTC
            if self.config.doFitBootstrap:
                parsFit, parsFitErr, reducedChiSqPtc = fitBootstrap(parsIniPtc, meanVecFinal,
                                                                    varVecFinal, ptcFunc)
            else:
                parsFit, parsFitErr, reducedChiSqPtc = fitLeastSq(parsIniPtc, meanVecFinal,
                                                                  varVecFinal, ptcFunc)
            dataset.ptcFitPars[ampName] = parsFit
            dataset.ptcFitParsError[ampName] = parsFitErr
            dataset.ptcFitReducedChiSquared[ampName] = reducedChiSqPtc

            if ptcFitType == 'EXPAPPROXIMATION':
                ptcGain = parsFit[1]
                ptcGainErr = parsFitErr[1]
                ptcNoise = np.sqrt(np.fabs(parsFit[2]))
                ptcNoiseErr = 0.5*(parsFitErr[2]/np.fabs(parsFit[2]))*np.sqrt(np.fabs(parsFit[2]))
            if ptcFitType == 'POLYNOMIAL':
                ptcGain = 1./parsFit[1]
                ptcGainErr = np.fabs(1./parsFit[1])*(parsFitErr[1]/parsFit[1])
                ptcNoise = np.sqrt(np.fabs(parsFit[0]))*ptcGain
                ptcNoiseErr = (0.5*(parsFitErr[0]/np.fabs(parsFit[0]))*(np.sqrt(np.fabs(parsFit[0]))))*ptcGain
            dataset.gain[ampName] = ptcGain
            dataset.gainErr[ampName] = ptcGainErr
            dataset.noise[ampName] = ptcNoise
            dataset.noiseErr[ampName] = ptcNoiseErr
        if not len(dataset.ptcFitType) == 0:
            dataset.ptcFitType = ptcFitType

        return dataset
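
# Worked example (illustrative numbers, not from real data): for a POLYNOMIAL fit with
# parsFit = [25.0, 0.7, -1e-6], the relations used in fitPtc give
#     gain  = 1/parsFit[1]                ~ 1.43 e/DN
#     noise = sqrt(abs(parsFit[0]))*gain  ~ 7.1 electrons
# For an EXPAPPROXIMATION fit, the gain is parsFit[1] directly and the noise is
# sqrt(abs(parsFit[2])).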