# This file is part of cp_pipe.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter

import lsst.afw.math as afwMath
import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
from .utils import (fitLeastSq, fitBootstrap, funcPolynomial, funcAstier)
from scipy.optimize import least_squares

import datetime

from .astierCovPtcUtils import (fftSize, CovFft, computeCovDirect, fitData)
from .linearity import LinearitySolveTask
from .photodiode import getBOTphotodiodeData

from lsst.pipe.tasks.getRepositoryData import DataRefListRunner

__all__ = ['MeasurePhotonTransferCurveTask',
           'MeasurePhotonTransferCurveTaskConfig',
           'PhotonTransferCurveDataset']


class MeasurePhotonTransferCurveTaskConfig(pexConfig.Config):
    """Config class for the photon transfer curve measurement task."""
    ccdKey = pexConfig.Field(
        dtype=str,
        doc="The key by which to pull a detector from a dataId, e.g. 'ccd' or 'detector'.",
        default='ccd',
    )
    ptcFitType = pexConfig.ChoiceField(
        dtype=str,
        doc="Fit PTC to approximation in Astier+19 (Equation 16) or to a polynomial.",
        default="POLYNOMIAL",
        allowed={
            "POLYNOMIAL": "n-degree polynomial (use 'polynomialFitDegree' to set 'n').",
            "EXPAPPROXIMATION": "Approximation in Astier+19 (Eq. 16).",
            "FULLCOVARIANCE": "Full covariances model in Astier+19 (Eq. 20).",
        }
    )
    sigmaClipFullFitCovariancesAstier = pexConfig.Field(
        dtype=float,
        doc="Sigma clip for the full model fit when ptcFitType is FULLCOVARIANCE.",
        default=5.0,
    )
    maxIterFullFitCovariancesAstier = pexConfig.Field(
        dtype=int,
        doc="Maximum number of iterations in the full model fit when ptcFitType is FULLCOVARIANCE.",
        default=3,
    )
    maximumRangeCovariancesAstier = pexConfig.Field(
        dtype=int,
        doc="Maximum range of covariances as in Astier+19.",
        default=8,
    )
    covAstierRealSpace = pexConfig.Field(
        dtype=bool,
        doc="Calculate covariances in real space or via FFT? (see appendix A of Astier+19).",
        default=False,
    )
    polynomialFitDegree = pexConfig.Field(
        dtype=int,
        doc="Degree of polynomial to fit the PTC, when 'ptcFitType'=POLYNOMIAL.",
        default=3,
    )
    linearity = pexConfig.ConfigurableField(
        target=LinearitySolveTask,
        doc="Task to solve the linearity.",
    )

    doCreateLinearizer = pexConfig.Field(
        dtype=bool,
        doc="Calculate non-linearity and persist the linearizer?",
        default=False,
    )

    binSize = pexConfig.Field(
        dtype=int,
        doc="Bin the image by this factor in both dimensions.",
        default=1,
    )
    minMeanSignal = pexConfig.Field(
        dtype=float,
        doc="Minimum value (inclusive) of mean signal (in DN) to consider.",
        default=0,
    )
    maxMeanSignal = pexConfig.Field(
        dtype=float,
        doc="Maximum value (inclusive) of mean signal (in DN) to consider.",
        default=9e6,
    )
    initialNonLinearityExclusionThresholdPositive = pexConfig.RangeField(
        dtype=float,
        doc="Initially exclude data points with a variance that are more than a factor of this from being"
            " linear in the positive direction, from the PTC fit. Note that these points will also be"
            " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
            " to allow an accurate determination of the sigmas for said iterative fit.",
        default=0.12,
        min=0.0,
        max=1.0,
    )
    initialNonLinearityExclusionThresholdNegative = pexConfig.RangeField(
        dtype=float,
        doc="Initially exclude data points with a variance that are more than a factor of this from being"
            " linear in the negative direction, from the PTC fit. Note that these points will also be"
            " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
            " to allow an accurate determination of the sigmas for said iterative fit.",
        default=0.25,
        min=0.0,
        max=1.0,
    )
    sigmaCutPtcOutliers = pexConfig.Field(
        dtype=float,
        doc="Sigma cut for outlier rejection in PTC.",
        default=5.0,
    )
    maskNameList = pexConfig.ListField(
        dtype=str,
        doc="Mask list to exclude from statistics calculations.",
        default=['SUSPECT', 'BAD', 'NO_DATA'],
    )
    nSigmaClipPtc = pexConfig.Field(
        dtype=float,
        doc="Sigma cut for afwMath.StatisticsControl().",
        default=5.5,
    )
    nIterSigmaClipPtc = pexConfig.Field(
        dtype=int,
        doc="Number of sigma-clipping iterations for afwMath.StatisticsControl().",
        default=1,
    )
    maxIterationsPtcOutliers = pexConfig.Field(
        dtype=int,
        doc="Maximum number of iterations for outlier rejection in PTC.",
        default=2,
    )
    doFitBootstrap = pexConfig.Field(
        dtype=bool,
        doc="Use bootstrap for the PTC fit parameters and errors?",
        default=False,
    )
    doPhotodiode = pexConfig.Field(
        dtype=bool,
        doc="Apply a correction based on the photodiode readings if available?",
        default=True,
    )
    photodiodeDataPath = pexConfig.Field(
        dtype=str,
        doc="Gen2 only: path to the photodiode data files.",
        default="",
    )
    instrumentName = pexConfig.Field(
        dtype=str,
        doc="Instrument name.",
        default='',
    )
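# A hypothetical config override for the fields defined above (for example, a file
# passed to the command line with "--configfile"); the values are illustrative only
# and are not defaults recommended by this module.
#
#     config.ptcFitType = "EXPAPPROXIMATION"
#     config.minMeanSignal = 500.0
#     config.maxMeanSignal = 80000.0
#     config.doCreateLinearizer = True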

class PhotonTransferCurveDataset:
    """A simple class to hold the output data from the PTC task.

    The dataset is made up of a dictionary for each item, keyed by the
    amplifiers' names, which must be supplied at construction time.

    New items cannot be added to the class, to guard against accidentally
    saving to the wrong property, and the class can be frozen if desired.

    inputExpIdPairs records the exposures used to produce the data.
    When fitPtc() or fitCovariancesAstier() is run, a mask is built up, which is by definition
    always the same length as inputExpIdPairs, rawExpTimes, rawMeans
    and rawVars, and is a list of bools, which are incrementally set to False
    as points are discarded from the fits.

    PTC fit parameters for polynomials are stored in a list in ascending order
    of polynomial term, i.e. par[0]*x^0 + par[1]*x + par[2]*x^2 etc.,
    with the length of the list corresponding to the order of the polynomial
    plus one.

    Parameters
    ----------
    ampNames : `list`
        List with the names of the amplifiers of the detector at hand.

    ptcFitType : `str`
        Type of model fitted to the PTC: "POLYNOMIAL", "EXPAPPROXIMATION", or "FULLCOVARIANCE".

    Returns
    -------
    `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
        Output dataset from MeasurePhotonTransferCurveTask.
    """
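
    # A minimal sketch (hypothetical amplifier name and values, not taken from this
    # module) of how the stored fit results are meant to be read back, following the
    # parameter ordering described above:
    #
    #     dataset = PhotonTransferCurveDataset(["C00"], "POLYNOMIAL")
    #     # ... filled in by MeasurePhotonTransferCurveTask.fitPtc() ...
    #     pars = dataset.ptcFitPars["C00"]          # [p0, p1, p2, ...]
    #     mu = 1000.0                               # mean signal in DN
    #     varModel = sum(p*mu**i for i, p in enumerate(pars))  # p0 + p1*mu + p2*mu**2 + ...
    #     gain = dataset.gain["C00"]                # gain (e/DN) from the fit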


    def __init__(self, ampNames, ptcFitType):
        # add items to __dict__ directly because __setattr__ is overridden

        # instance variables
        self.__dict__["ptcFitType"] = ptcFitType
        self.__dict__["ampNames"] = ampNames
        self.__dict__["badAmps"] = []

        # raw data variables
        # expIdMask is the mask produced after outlier rejection. The mask produced by "FULLCOVARIANCE"
        # may differ from the one produced in the other two PTC fit types.
        self.__dict__["inputExpIdPairs"] = {ampName: [] for ampName in ampNames}
        self.__dict__["expIdMask"] = {ampName: [] for ampName in ampNames}
        self.__dict__["rawExpTimes"] = {ampName: [] for ampName in ampNames}
        self.__dict__["rawMeans"] = {ampName: [] for ampName in ampNames}
        self.__dict__["rawVars"] = {ampName: [] for ampName in ampNames}
        self.__dict__["photoCharge"] = {ampName: [] for ampName in ampNames}

        # Gain and noise
        self.__dict__["gain"] = {ampName: -1. for ampName in ampNames}
        self.__dict__["gainErr"] = {ampName: -1. for ampName in ampNames}
        self.__dict__["noise"] = {ampName: -1. for ampName in ampNames}
        self.__dict__["noiseErr"] = {ampName: -1. for ampName in ampNames}

        # if ptcFitType in ["POLYNOMIAL", "EXPAPPROXIMATION"]
        # fit information
        self.__dict__["ptcFitPars"] = {ampName: [] for ampName in ampNames}
        self.__dict__["ptcFitParsError"] = {ampName: [] for ampName in ampNames}
        self.__dict__["ptcFitReducedChiSquared"] = {ampName: [] for ampName in ampNames}

        # if ptcFitType in ["FULLCOVARIANCE"]
        # "covariancesTuple" is a numpy recarray with entries of the form
        # ['mu', 'i', 'j', 'var', 'cov', 'npix', 'ext', 'expTime', 'ampName']
        # "covariancesFits" has CovFit objects that fit the measured covariances to Eq. 20 of Astier+19.
        # In "covariancesFitsWithNoB", "b"=0 in the model described by Eq. 20 of Astier+19.
        self.__dict__["covariancesTuple"] = {ampName: [] for ampName in ampNames}
        self.__dict__["covariancesFitsWithNoB"] = {ampName: [] for ampName in ampNames}
        self.__dict__["covariancesFits"] = {ampName: [] for ampName in ampNames}
        self.__dict__["aMatrix"] = {ampName: [] for ampName in ampNames}
        self.__dict__["bMatrix"] = {ampName: [] for ampName in ampNames}

        # "final" means that the "raw" vectors above had "expIdMask" applied.
        self.__dict__["finalVars"] = {ampName: [] for ampName in ampNames}
        self.__dict__["finalModelVars"] = {ampName: [] for ampName in ampNames}
        self.__dict__["finalMeans"] = {ampName: [] for ampName in ampNames}

    def __setattr__(self, attribute, value):
        """Protect class attributes"""
        if attribute not in self.__dict__:
            raise AttributeError(f"{attribute} is not already a member of PhotonTransferCurveDataset, which"
                                 " does not support setting of new attributes.")
        else:
            self.__dict__[attribute] = value

    def getExpIdsUsed(self, ampName):
        """Get the exposures used, i.e. not discarded, for a given amp.

        If no mask has been created yet, all exposures are returned.
        """
        if len(self.expIdMask[ampName]) == 0:
            return self.inputExpIdPairs[ampName]

        # if the mask exists it had better be the same length as the expIdPairs
        assert len(self.expIdMask[ampName]) == len(self.inputExpIdPairs[ampName])

        pairs = self.inputExpIdPairs[ampName]
        mask = self.expIdMask[ampName]
        # cast to bool required because numpy
        return [(exp1, exp2) for ((exp1, exp2), m) in zip(pairs, mask) if bool(m) is True]
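
    # A toy illustration (values are hypothetical) of the mask semantics used by
    # getExpIdsUsed(): pairs whose mask entry is False were discarded by the fit.
    #
    #     pairs = [(101, 102), (103, 104), (105, 106)]
    #     mask = [True, False, True]
    #     # kept -> [(101, 102), (105, 106)]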


    def getGoodAmps(self):
        """Return the list of amps that have not been flagged as bad."""
        return [amp for amp in self.ampNames if amp not in self.badAmps]


class MeasurePhotonTransferCurveTask(pipeBase.CmdLineTask):
    """A class to calculate, fit, and plot a PTC from a set of flat pairs.

    The Photon Transfer Curve (var(signal) vs mean(signal)) is a standard tool
    used in astronomical detector characterization (e.g., Janesick 2001,
    Janesick 2007). If ptcFitType is "EXPAPPROXIMATION" or "POLYNOMIAL", this task calculates the
    PTC from a series of pairs of flat-field images, where the two images in each pair are taken
    at the same exposure time. The difference image of each pair is formed to eliminate fixed
    pattern noise, and then the variance of the difference image and the mean of the average image
    are used to produce the PTC. An n-degree polynomial or the approximation in Equation
    16 of Astier+19 ("The Shape of the Photon Transfer Curve of CCD sensors",
    arXiv:1905.08677) can be fitted to the PTC. These models include
    parameters such as the gain (e/DN) and readout noise.

    Linearizers to correct for signal-chain non-linearity are also calculated.
    The `Linearizer` class can, in general, support per-amp linearizers, but this
    task does not use that capability.

    If ptcFitType is "FULLCOVARIANCE", the covariances of the difference images are calculated via the
    DFT methods described in Astier+19 and the variances for the PTC are given by the cov[0,0] elements
    at each signal level. The full model in Equation 20 of Astier+19 is fit to the PTC to get the gain
    and the noise.

    Parameters
    ----------
    *args: `list`
        Positional arguments passed to the Task constructor. None used at this
        time.
    **kwargs: `dict`
        Keyword arguments passed on to the Task constructor. None used at this
        time.
    """
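
    # A sketch of how this Gen2 CmdLineTask is typically run from the command line
    # (the repository path, rerun name, and dataId values below are placeholders, not
    # taken from this file; the exact dataId keys depend on the obs package and on
    # the "ccdKey" config field):
    #
    #     measurePhotonTransferCurve.py REPO --rerun ptc \
    #         --id detector=0 \
    #         -c ptcFitType=EXPAPPROXIMATION doCreateLinearizer=True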


    RunnerClass = DataRefListRunner
    ConfigClass = MeasurePhotonTransferCurveTaskConfig
    _DefaultName = "measurePhotonTransferCurve"

    def __init__(self, *args, **kwargs):
        pipeBase.CmdLineTask.__init__(self, *args, **kwargs)
        self.makeSubtask("linearity")
        plt.interactive(False)  # stop windows popping up when plotting. When headless, use 'agg' backend too
        self.config.validate()
        self.config.freeze()

    @pipeBase.timeMethod
    def runDataRef(self, dataRefList):
        """Run the Photon Transfer Curve (PTC) measurement task.

        For a dataRef (which is each detector here),
        and given a list of exposure pairs (postISR) at different exposure times,
        measure the PTC.

        Parameters
        ----------
        dataRefList : `list` [`lsst.daf.persistence.ButlerDataRef`]
            Data references for exposures for detectors to process.
        """
        if len(dataRefList) < 2:
            raise RuntimeError("Insufficient inputs to combine.")

        # setup necessary objects
        dataRef = dataRefList[0]

        detNum = dataRef.dataId[self.config.ccdKey]
        camera = dataRef.get('camera')
        detector = camera[dataRef.dataId[self.config.ccdKey]]

        amps = detector.getAmplifiers()
        ampNames = [amp.getName() for amp in amps]
        datasetPtc = PhotonTransferCurveDataset(ampNames, self.config.ptcFitType)

        # Get the pairs of flats, indexed by expTime
        expPairs = self.makePairs(dataRefList)
        expIds = []
        for (exp1, exp2) in expPairs.values():
            id1 = exp1.getInfo().getVisitInfo().getExposureId()
            id2 = exp2.getInfo().getVisitInfo().getExposureId()
            expIds.append((id1, id2))
        self.log.info(f"Measuring PTC using exposure pairs {expIds} for detector {detector.getId()}")

        # get photodiode data early so that logic can be put in to only use the
        # data if all files are found, as partial corrections are not possible
        # or at least require significant logic to deal with
        if self.config.doPhotodiode:
            for (expId1, expId2) in expIds:
                charges = [-1, -1]  # necessary to have a not-found value to keep lists in step
                for i, expId in enumerate([expId1, expId2]):
                    # //1000 is a Gen2 only hack, working around the fact an
                    # exposure's ID is not the same as the expId in the
                    # registry. Currently expId is concatenated with the
                    # zero-padded detector ID. This will all go away in Gen3.
                    dataRef.dataId['expId'] = expId//1000
                    if self.config.photodiodeDataPath:
                        photodiodeData = getBOTphotodiodeData(dataRef, self.config.photodiodeDataPath)
                    else:
                        photodiodeData = getBOTphotodiodeData(dataRef)
                    if photodiodeData:  # default path stored in function def to keep task clean
                        charges[i] = photodiodeData.getCharge()
                    else:
                        # full expId (not //1000) here, as that encodes the
                        # detector number and so is fully qualified
                        self.log.warn(f"No photodiode data found for {expId}")

                for ampName in ampNames:
                    datasetPtc.photoCharge[ampName].append((charges[0], charges[1]))

        tupleRecords = []
        allTags = []
        for expTime, (exp1, exp2) in expPairs.items():
            expId1 = exp1.getInfo().getVisitInfo().getExposureId()
            expId2 = exp2.getInfo().getVisitInfo().getExposureId()
            tupleRows = []
            nAmpsNan = 0
            for ampNumber, amp in enumerate(detector):
                ampName = amp.getName()
                # covAstier: (i, j, var (cov[0,0]), cov, npix)
                doRealSpace = self.config.covAstierRealSpace
                muDiff, varDiff, covAstier = self.measureMeanVarCov(exp1, exp2, region=amp.getBBox(),
                                                                    covAstierRealSpace=doRealSpace)
                if np.isnan(muDiff) or np.isnan(varDiff) or (covAstier is None):
                    msg = (f"NaN mean or var, or None cov in amp {ampNumber} in exposure pair {expId1},"
                           f" {expId2} of detector {detNum}.")
                    self.log.warn(msg)
                    nAmpsNan += 1
                    continue
                tags = ['mu', 'i', 'j', 'var', 'cov', 'npix', 'ext', 'expTime', 'ampName']
                if (muDiff <= self.config.minMeanSignal) or (muDiff >= self.config.maxMeanSignal):
                    continue
                datasetPtc.rawExpTimes[ampName].append(expTime)
                datasetPtc.rawMeans[ampName].append(muDiff)
                datasetPtc.rawVars[ampName].append(varDiff)
                datasetPtc.inputExpIdPairs[ampName].append((expId1, expId2))

                tupleRows += [(muDiff, ) + covRow + (ampNumber, expTime, ampName) for covRow in covAstier]
            if nAmpsNan == len(ampNames):
                msg = f"NaN mean in all amps of exposure pair {expId1}, {expId2} of detector {detNum}."
                self.log.warn(msg)
                continue
            allTags += tags
            tupleRecords += tupleRows
        covariancesWithTags = np.core.records.fromrecords(tupleRecords, names=allTags)

        if self.config.ptcFitType in ["FULLCOVARIANCE", ]:
            # Calculate covariances and fit them, including the PTC, to Astier+19 full model (Eq. 20)
            datasetPtc = self.fitCovariancesAstier(datasetPtc, covariancesWithTags)
        elif self.config.ptcFitType in ["EXPAPPROXIMATION", "POLYNOMIAL"]:
            # Fit the PTC to a polynomial or to the Astier+19 exponential approximation (Eq. 16).
            # Fill up the PhotonTransferCurveDataset object.
            datasetPtc = self.fitPtc(datasetPtc, self.config.ptcFitType)

        # Fit a polynomial to calculate non-linearity and persist the linearizer.
        if self.config.doCreateLinearizer:
            # Fit (non)linearity of signal vs time curve.
            # Fill up PhotonTransferCurveDataset object.
            # Fill up array for LUT linearizer (tableArray).
            # Produce coefficients for Polynomial and Squared linearizers.
            # Build linearizer objects.
            dimensions = {'camera': camera.getName(), 'detector': detector.getId()}
            linearityResults = self.linearity.run(datasetPtc, camera, dimensions)
            linearizer = linearityResults.outputLinearizer

            butler = dataRef.getButler()
            self.log.info("Writing linearizer:")

            detName = detector.getName()
            now = datetime.datetime.utcnow()
            calibDate = now.strftime("%Y-%m-%d")

            butler.put(linearizer, datasetType='Linearizer',
                       dataId={'detector': detNum, 'detectorName': detName, 'calibDate': calibDate})
        self.log.info("Writing PTC data.")
        dataRef.put(datasetPtc, datasetType="photonTransferCurveDataset")

        return pipeBase.Struct(exitStatus=0)

    def makePairs(self, dataRefList):
        """Produce a dictionary of flat pairs indexed by exposure time.

        Parameters
        ----------
        dataRefList : `list` [`lsst.daf.persistence.ButlerDataRef`]
            Data references for exposures for detectors to process.

        Returns
        -------
        flatPairs : `dict` [`float`, `lsst.afw.image.exposure.exposure.ExposureF`]
            Dictionary that groups flat-field exposures that have the same exposure time (seconds).

        Notes
        -----
        We use the difference of one pair of flat-field images taken at the same exposure time when
        calculating the PTC to reduce Fixed Pattern Noise. If there are more than two flat-field images
        with the same exposure time, the first two are kept and the rest discarded.
        """

        # Organize exposures by observation date.
        expDict = {}
        for dataRef in dataRefList:
            try:
                tempFlat = dataRef.get("postISRCCD")
            except RuntimeError:
                self.log.warn("postISR exposure could not be retrieved. Ignoring flat.")
                continue
            expDate = tempFlat.getInfo().getVisitInfo().getDate().get()
            expDict.setdefault(expDate, tempFlat)
        sortedExps = {k: expDict[k] for k in sorted(expDict)}

        flatPairs = {}
        for exp in sortedExps:
            tempFlat = sortedExps[exp]
            expTime = tempFlat.getInfo().getVisitInfo().getExposureTime()
            listAtExpTime = flatPairs.setdefault(expTime, [])
            if len(listAtExpTime) < 2:
                listAtExpTime.append(tempFlat)
            else:
                self.log.warn(f"More than 2 exposures found at expTime {expTime}. "
                              "Only the first two will be used; dropping the rest.")

        # Iterate over a copy so that entries can be removed safely.
        for (key, value) in list(flatPairs.items()):
            if len(value) < 2:
                flatPairs.pop(key)
                self.log.warn(f"Only one exposure found at expTime {key}. Dropping exposure {value}.")
        return flatPairs

    def fitCovariancesAstier(self, dataset, covariancesWithTagsArray):
        """Fit measured flat covariances to the full model in Astier+19.

        Parameters
        ----------
        dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
            The dataset containing information such as the means, variances and exposure times.

        covariancesWithTagsArray : `numpy.recarray`
            Tuple with at least (mu, cov, var, i, j, npix), where:
            mu : 0.5*(mu1 + mu2), where mu1 and mu2 are the mean values of flat1 and flat2.
            cov : covariance value at lag (i, j).
            var : variance (covariance value at lag (0, 0)).
            i : lag dimension.
            j : lag dimension.
            npix : number of pixels used for covariance calculation.

        Returns
        -------
        dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
            This is the same dataset as the input parameter, however, it has been modified
            to include information such as the fit vectors and the fit parameters. See
            the class `PhotonTransferCurveDataset`.
        """

        covFits, covFitsNoB = fitData(covariancesWithTagsArray, maxMu=self.config.maxMeanSignal,
                                      r=self.config.maximumRangeCovariancesAstier,
                                      nSigmaFullFit=self.config.sigmaClipFullFitCovariancesAstier,
                                      maxIterFullFit=self.config.maxIterFullFitCovariancesAstier)

        dataset.covariancesTuple = covariancesWithTagsArray
        dataset.covariancesFits = covFits
        dataset.covariancesFitsWithNoB = covFitsNoB
        dataset = self.getOutputPtcDataCovAstier(dataset, covFits)

        return dataset

    def getOutputPtcDataCovAstier(self, dataset, covFits):
        """Get output data for the PhotonTransferCurveDataset from CovFit objects.

        Parameters
        ----------
        dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
            The dataset containing information such as the means, variances and exposure times.

        covFits : `dict`
            Dictionary of CovFit objects, with amp names as keys.

        Returns
        -------
        dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
            This is the same dataset as the input parameter, however, it has been modified
            to include extra information such as the mask 1D array, gains, readout noise, measured signal,
            measured variance, modeled variance, and the a and b coefficient matrices (see Astier+19)
            per amplifier. See the class `PhotonTransferCurveDataset`.
        """

        for i, amp in enumerate(covFits):
            fit = covFits[amp]
            (meanVecFinal, varVecFinal, varVecModel,
                wc, varMask) = fit.getFitData(0, 0, divideByMu=False, returnMasked=True)
            gain = fit.getGain()
            dataset.expIdMask[amp] = varMask
            dataset.gain[amp] = gain
            dataset.gainErr[amp] = fit.getGainErr()
            dataset.noise[amp] = np.sqrt(np.fabs(fit.getRon()))
            dataset.noiseErr[amp] = fit.getRonErr()
            dataset.finalVars[amp].append(varVecFinal/(gain**2))
            dataset.finalModelVars[amp].append(varVecModel/(gain**2))
            dataset.finalMeans[amp].append(meanVecFinal/gain)
            dataset.aMatrix[amp].append(fit.getA())
            dataset.bMatrix[amp].append(fit.getB())

        return dataset

    def measureMeanVarCov(self, exposure1, exposure2, region=None, covAstierRealSpace=False):
        """Calculate the mean of each of two exposures and the variance and covariance of their difference.

        The variance is calculated via afwMath, and the covariance via the methods in Astier+19
        (appendix A). In theory, var = covariance[0,0]. This should be validated, and in the future,
        we may decide to just keep one (covariance).

        Parameters
        ----------
        exposure1 : `lsst.afw.image.exposure.exposure.ExposureF`
            First exposure of flat field pair.

        exposure2 : `lsst.afw.image.exposure.exposure.ExposureF`
            Second exposure of flat field pair.

        region : `lsst.geom.Box2I`, optional
            Region of each exposure in which to perform the calculations (e.g., an amplifier).

        covAstierRealSpace : `bool`, optional
            Should the covariances in Astier+19 be calculated in real space or via FFT?
            See Appendix A of Astier+19.

        Returns
        -------
        mu : `float` or `NaN`
            0.5*(mu1 + mu2), where mu1 and mu2 are the clipped means of the regions in
            both exposures. If either mu1 or mu2 are NaN's, the returned value is NaN.

        varDiff : `float` or `NaN`
            Half of the clipped variance of the difference of the regions in the two input
            exposures. If either mu1 or mu2 are NaN's, the returned value is NaN.

        covDiffAstier : `list` or `NaN`
            List with tuples of the form (dx, dy, var, cov, npix), where:
                dx : `int`
                    Lag in x.
                dy : `int`
                    Lag in y.
                var : `float`
                    Variance at (dx, dy).
                cov : `float`
                    Covariance at (dx, dy).
                nPix : `int`
                    Number of pixel pairs used to evaluate var and cov.
            If either mu1 or mu2 are NaN's, the returned value is NaN.
        """

        if region is not None:
            im1Area = exposure1.maskedImage[region]
            im2Area = exposure2.maskedImage[region]
        else:
            im1Area = exposure1.maskedImage
            im2Area = exposure2.maskedImage

        if self.config.binSize > 1:
            im1Area = afwMath.binImage(im1Area, self.config.binSize)
            im2Area = afwMath.binImage(im2Area, self.config.binSize)

        im1MaskVal = exposure1.getMask().getPlaneBitMask(self.config.maskNameList)
        im1StatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
                                                 self.config.nIterSigmaClipPtc,
                                                 im1MaskVal)
        im1StatsCtrl.setNanSafe(True)
        im1StatsCtrl.setAndMask(im1MaskVal)

        im2MaskVal = exposure2.getMask().getPlaneBitMask(self.config.maskNameList)
        im2StatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
                                                 self.config.nIterSigmaClipPtc,
                                                 im2MaskVal)
        im2StatsCtrl.setNanSafe(True)
        im2StatsCtrl.setAndMask(im2MaskVal)

        # Clipped mean of images; then average of mean.
        mu1 = afwMath.makeStatistics(im1Area, afwMath.MEANCLIP, im1StatsCtrl).getValue()
        mu2 = afwMath.makeStatistics(im2Area, afwMath.MEANCLIP, im2StatsCtrl).getValue()
        if np.isnan(mu1) or np.isnan(mu2):
            return np.nan, np.nan, None
        mu = 0.5*(mu1 + mu2)

        # Take difference of pairs
        # symmetric formula: diff = (mu2*im1 - mu1*im2)/(0.5*(mu1 + mu2))
        temp = im2Area.clone()
        temp *= mu1
        diffIm = im1Area.clone()
        diffIm *= mu2
        diffIm -= temp
        diffIm /= mu

        diffImMaskVal = diffIm.getMask().getPlaneBitMask(self.config.maskNameList)
        diffImStatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
                                                    self.config.nIterSigmaClipPtc,
                                                    diffImMaskVal)
        diffImStatsCtrl.setNanSafe(True)
        diffImStatsCtrl.setAndMask(diffImMaskVal)

        varDiff = 0.5*(afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, diffImStatsCtrl).getValue())

        # Get the mask and identify good pixels as '1', and the rest as '0'.
        w1 = np.where(im1Area.getMask().getArray() == 0, 1, 0)
        w2 = np.where(im2Area.getMask().getArray() == 0, 1, 0)

        w12 = w1*w2
        wDiff = np.where(diffIm.getMask().getArray() == 0, 1, 0)
        w = w12*wDiff

        maxRangeCov = self.config.maximumRangeCovariancesAstier
        if covAstierRealSpace:
            covDiffAstier = computeCovDirect(diffIm.getImage().getArray(), w, maxRangeCov)
        else:
            shapeDiff = diffIm.getImage().getArray().shape
            fftShape = (fftSize(shapeDiff[0] + maxRangeCov), fftSize(shapeDiff[1] + maxRangeCov))
            c = CovFft(diffIm.getImage().getArray(), w, fftShape, maxRangeCov)
            covDiffAstier = c.reportCovFft(maxRangeCov)

        return mu, varDiff, covDiffAstier
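
    # A standalone toy check (synthetic data, not part of the task) of the statistic
    # computed above: for two flats with the same mean and independent Poisson noise,
    # the symmetric difference cancels the common illumination pattern, and half the
    # variance of the difference recovers the single-image variance (~mean, for unit gain).
    #
    #     import numpy as np
    #     rng = np.random.default_rng(0)
    #     muTrue = 1000.0
    #     im1 = rng.poisson(muTrue, size=(500, 500)).astype(float)
    #     im2 = rng.poisson(muTrue, size=(500, 500)).astype(float)
    #     mu1, mu2 = im1.mean(), im2.mean()
    #     diff = (mu2*im1 - mu1*im2)/(0.5*(mu1 + mu2))
    #     print(0.5*diff.var())   # ~1000, i.e. var(signal) ~ mean(signal)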

    def computeCovDirect(self, diffImage, weightImage, maxRange):
        """Compute covariances of diffImage in real space.

        For lags larger than ~25, it is slower than the FFT way.
        Taken from https://github.com/PierreAstier/bfptc/

        Parameters
        ----------
        diffImage : `numpy.array`
            Image to compute the covariance of.

        weightImage : `numpy.array`
            Weight image of diffImage (1's and 0's for good and bad pixels, respectively).

        maxRange : `int`
            Last index of the covariance to be computed.

        Returns
        -------
        outList : `list`
            List with tuples of the form (dx, dy, var, cov, npix), where:
                dx : `int`
                    Lag in x.
                dy : `int`
                    Lag in y.
                var : `float`
                    Variance at (dx, dy).
                cov : `float`
                    Covariance at (dx, dy).
                nPix : `int`
                    Number of pixel pairs used to evaluate var and cov.
        """
        outList = []
        var = 0
        # (dy, dx) = (0, 0) has to be first
        for dy in range(maxRange + 1):
            for dx in range(0, maxRange + 1):
                if (dx*dy > 0):
                    cov1, nPix1 = self.covDirectValue(diffImage, weightImage, dx, dy)
                    cov2, nPix2 = self.covDirectValue(diffImage, weightImage, dx, -dy)
                    cov = 0.5*(cov1 + cov2)
                    nPix = nPix1 + nPix2
                else:
                    cov, nPix = self.covDirectValue(diffImage, weightImage, dx, dy)
                if (dx == 0 and dy == 0):
                    var = cov
                outList.append((dx, dy, var, cov, nPix))

        return outList

    def covDirectValue(self, diffImage, weightImage, dx, dy):
        """Compute covariances of diffImage in real space at lag (dx, dy).

        Taken from https://github.com/PierreAstier/bfptc/ (cf. appendix of Astier+19).

        Parameters
        ----------
        diffImage : `numpy.array`
            Image to compute the covariance of.

        weightImage : `numpy.array`
            Weight image of diffImage (1's and 0's for good and bad pixels, respectively).

        dx : `int`
            Lag in x.

        dy : `int`
            Lag in y.

        Returns
        -------
        cov : `float`
            Covariance at (dx, dy).

        nPix : `int`
            Number of pixel pairs used to evaluate var and cov.
        """
        (nCols, nRows) = diffImage.shape
        # switching both signs does not change anything:
        # it just swaps im1 and im2 below
        if (dx < 0):
            (dx, dy) = (-dx, -dy)
        # now, we have dx >= 0. We have to distinguish two cases
        # depending on the sign of dy
        if dy >= 0:
            im1 = diffImage[dy:, dx:]
            w1 = weightImage[dy:, dx:]
            im2 = diffImage[:nCols - dy, :nRows - dx]
            w2 = weightImage[:nCols - dy, :nRows - dx]
        else:
            im1 = diffImage[:nCols + dy, dx:]
            w1 = weightImage[:nCols + dy, dx:]
            im2 = diffImage[-dy:, :nRows - dx]
            w2 = weightImage[-dy:, :nRows - dx]
        # use the same mask for all 3 calculations
        wAll = w1*w2
        # do not use mean() because weightImage=0 pixels would then count
        nPix = wAll.sum()
        im1TimesW = im1*wAll
        s1 = im1TimesW.sum()/nPix
        s2 = (im2*wAll).sum()/nPix
        p = (im1TimesW*im2).sum()/nPix
        cov = p - s1*s2

        return cov, nPix
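
    # A toy cross-check (synthetic data, not part of the task) of the weighted
    # estimator above: with a weight image of all ones, cov reduces to the usual
    # sample covariance between the image and a shifted copy of itself,
    # cov = <im1*im2> - <im1><im2>.
    #
    #     import numpy as np
    #     rng = np.random.default_rng(1)
    #     img = rng.normal(size=(100, 100))
    #     dx, dy = 2, 3
    #     im1, im2 = img[dy:, dx:], img[:-dy, :-dx]
    #     cov = (im1*im2).mean() - im1.mean()*im2.mean()   # ~0 for white noise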

    @staticmethod
    def _initialParsForPolynomial(order):
        assert(order >= 2)
        pars = np.zeros(order, dtype=np.float64)
        pars[0] = 10
        pars[1] = 1
        pars[2:] = 0.0001
        return pars

    @staticmethod
    def _boundsForPolynomial(initialPars):
        lowers = [np.NINF for p in initialPars]
        uppers = [np.inf for p in initialPars]
        lowers[1] = 0  # no negative gains
        return (lowers, uppers)

    @staticmethod
    def _boundsForAstier(initialPars):
        lowers = [np.NINF for p in initialPars]
        uppers = [np.inf for p in initialPars]
        return (lowers, uppers)

    @staticmethod
    def _getInitialGoodPoints(means, variances, maxDeviationPositive, maxDeviationNegative):
        """Return a boolean array to mask bad points.

        A linear function has a constant ratio, so find the median
        value of the ratios, and exclude the points that deviate
        from that by more than a factor of maxDeviationPositive/negative.
        Asymmetric deviations are supported as we expect the PTC to turn
        down as the flux increases, but sometimes it anomalously turns
        upwards just before turning over, which ruins the fits, so it
        is wise to be stricter about restricting positive outliers than
        negative ones.

        Too high, and points that are so bad that the fit will fail will be included;
        too low, and the non-linear points will be excluded, biasing the NL fit.
        """
        ratios = [b/a for (a, b) in zip(means, variances)]
        medianRatio = np.median(ratios)
        ratioDeviations = [(r/medianRatio) - 1 for r in ratios]

        # so that it doesn't matter if the deviation is expressed as positive or negative
        maxDeviationPositive = abs(maxDeviationPositive)
        maxDeviationNegative = -1. * abs(maxDeviationNegative)

        goodPoints = np.array([True if (r < maxDeviationPositive and r > maxDeviationNegative)
                               else False for r in ratioDeviations])
        return goodPoints
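
    # A toy illustration (numbers are hypothetical) of the ratio cut above: for a
    # linear PTC, var/mean is roughly constant, so points whose var/mean deviates
    # from the median ratio by more than the configured fractions are flagged.
    #
    #     means = [100., 200., 300., 400.]
    #     variances = [50., 100., 151., 30.]   # last point has turned over
    #     # _getInitialGoodPoints(means, variances, 0.12, 0.25)
    #     # -> array([ True,  True,  True, False])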

    def _makeZeroSafe(self, array, warn=True, substituteValue=1e-9):
        """Replace exact zeros in the array with a small substitute value to avoid division by zero."""
        nBad = Counter(array)[0]
        if nBad == 0:
            return array

        if warn:
            msg = f"Found {nBad} zeros in array at elements {[x for x in np.where(array==0)[0]]}"
            self.log.warn(msg)

        array[array == 0] = substituteValue
        return array

    def fitPtc(self, dataset, ptcFitType):
        """Fit the photon transfer curve to a polynomial or to the Astier+19 approximation.

        Fit the photon transfer curve with either a polynomial of the order
        specified in the task config, or using the Astier approximation.

        Sigma clipping is performed iteratively for the fit, as well as an
        initial clipping of data points that are more than
        config.initialNonLinearityExclusionThreshold away from lying on a
        straight line. This initial clipping is necessary because the photon transfer
        curve turns over catastrophically at very high flux (because saturation
        drops the variance to ~0) and these far outliers cause the initial fit
        to fail, meaning the sigma cannot be calculated to perform the
        sigma-clipping.

        Parameters
        ----------
        dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
            The dataset containing the means, variances and exposure times.

        ptcFitType : `str`
            Fit a 'POLYNOMIAL' (degree: 'polynomialFitDegree') or
            'EXPAPPROXIMATION' (Eq. 16 of Astier+19) to the PTC.

        Returns
        -------
        dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
            This is the same dataset as the input parameter, however, it has been modified
            to include information such as the fit vectors and the fit parameters. See
            the class `PhotonTransferCurveDataset`.
        """

        def errFunc(p, x, y):
            return ptcFunc(p, x) - y

        sigmaCutPtcOutliers = self.config.sigmaCutPtcOutliers
        maxIterationsPtcOutliers = self.config.maxIterationsPtcOutliers

        for i, ampName in enumerate(dataset.ampNames):
            timeVecOriginal = np.array(dataset.rawExpTimes[ampName])
            meanVecOriginal = np.array(dataset.rawMeans[ampName])
            varVecOriginal = np.array(dataset.rawVars[ampName])
            varVecOriginal = self._makeZeroSafe(varVecOriginal)

            mask = ((meanVecOriginal >= self.config.minMeanSignal) &
                    (meanVecOriginal <= self.config.maxMeanSignal))

            goodPoints = self._getInitialGoodPoints(meanVecOriginal, varVecOriginal,
                                                    self.config.initialNonLinearityExclusionThresholdPositive,
                                                    self.config.initialNonLinearityExclusionThresholdNegative)
            if not (mask.any() and goodPoints.any()):
                msg = (f"\nSERIOUS: All points in either mask: {mask} or goodPoints: {goodPoints} are bad."
                       f" Setting {ampName} to BAD.")
                self.log.warn(msg)
                # The first and second parameters of initial fit are discarded (bias and gain)
                # for the final NL coefficients
                dataset.badAmps.append(ampName)
                dataset.gain[ampName] = np.nan
                dataset.gainErr[ampName] = np.nan
                dataset.noise[ampName] = np.nan
                dataset.noiseErr[ampName] = np.nan
                dataset.ptcFitPars[ampName] = np.nan
                dataset.ptcFitParsError[ampName] = np.nan
                dataset.ptcFitReducedChiSquared[ampName] = np.nan
                continue

            mask = mask & goodPoints

            if ptcFitType == 'EXPAPPROXIMATION':
                ptcFunc = funcAstier
                parsIniPtc = [-1e-9, 1.0, 10.]  # a00, gain, noise
                bounds = self._boundsForAstier(parsIniPtc)
            if ptcFitType == 'POLYNOMIAL':
                ptcFunc = funcPolynomial
                parsIniPtc = self._initialParsForPolynomial(self.config.polynomialFitDegree + 1)
                bounds = self._boundsForPolynomial(parsIniPtc)

            # Before bootstrap fit, do an iterative fit to get rid of outliers
            count = 1
            while count <= maxIterationsPtcOutliers:
                # Note that application of the mask actually shrinks the array
                # to size rather than setting elements to zero (as we want) so
                # always update mask itself and re-apply to the original data
                meanTempVec = meanVecOriginal[mask]
                varTempVec = varVecOriginal[mask]
                res = least_squares(errFunc, parsIniPtc, bounds=bounds, args=(meanTempVec, varTempVec))
                pars = res.x

                # change this to the original from the temp because the masks are ANDed
                # meaning once a point is masked it's always masked, and the masks must
                # always be the same length for broadcasting
                sigResids = (varVecOriginal - ptcFunc(pars, meanVecOriginal))/np.sqrt(varVecOriginal)
                newMask = np.array([True if np.abs(r) < sigmaCutPtcOutliers else False for r in sigResids])
                mask = mask & newMask
                if not (mask.any() and newMask.any()):
                    msg = (f"\nSERIOUS: All points in either mask: {mask} or newMask: {newMask} are bad. "
                           f"Setting {ampName} to BAD.")
                    self.log.warn(msg)
                    # The first and second parameters of initial fit are discarded (bias and gain)
                    # for the final NL coefficients
                    dataset.badAmps.append(ampName)
                    dataset.gain[ampName] = np.nan
                    dataset.gainErr[ampName] = np.nan
                    dataset.noise[ampName] = np.nan
                    dataset.noiseErr[ampName] = np.nan
                    dataset.ptcFitPars[ampName] = np.nan
                    dataset.ptcFitParsError[ampName] = np.nan
                    dataset.ptcFitReducedChiSquared[ampName] = np.nan
                    break
                nDroppedTotal = Counter(mask)[False]
                self.log.debug(f"Iteration {count}: discarded {nDroppedTotal} points in total for {ampName}")
                count += 1
                # objects should never shrink
                assert (len(mask) == len(timeVecOriginal) == len(meanVecOriginal) == len(varVecOriginal))

            if not (mask.any() and newMask.any()):
                continue
            dataset.expIdMask[ampName] = mask  # store the final mask
            parsIniPtc = pars
            meanVecFinal = meanVecOriginal[mask]
            varVecFinal = varVecOriginal[mask]

            if Counter(mask)[False] > 0:
                self.log.info((f"Number of points discarded in PTC of amplifier {ampName}:" +
                               f" {Counter(mask)[False]} out of {len(meanVecOriginal)}"))

            if (len(meanVecFinal) < len(parsIniPtc)):
                msg = (f"\nSERIOUS: Not enough data points ({len(meanVecFinal)}) compared to the number of"
                       f" parameters of the PTC model ({len(parsIniPtc)}). Setting {ampName} to BAD.")
                self.log.warn(msg)
                # The first and second parameters of initial fit are discarded (bias and gain)
                # for the final NL coefficients
                dataset.badAmps.append(ampName)
                dataset.gain[ampName] = np.nan
                dataset.gainErr[ampName] = np.nan
                dataset.noise[ampName] = np.nan
                dataset.noiseErr[ampName] = np.nan
                dataset.ptcFitPars[ampName] = np.nan
                dataset.ptcFitParsError[ampName] = np.nan
                dataset.ptcFitReducedChiSquared[ampName] = np.nan
                continue

            # Fit the PTC
            if self.config.doFitBootstrap:
                parsFit, parsFitErr, reducedChiSqPtc = fitBootstrap(parsIniPtc, meanVecFinal,
                                                                    varVecFinal, ptcFunc,
                                                                    weightsY=1./np.sqrt(varVecFinal))
            else:
                parsFit, parsFitErr, reducedChiSqPtc = fitLeastSq(parsIniPtc, meanVecFinal,
                                                                  varVecFinal, ptcFunc,
                                                                  weightsY=1./np.sqrt(varVecFinal))
            dataset.ptcFitPars[ampName] = parsFit
            dataset.ptcFitParsError[ampName] = parsFitErr
            dataset.ptcFitReducedChiSquared[ampName] = reducedChiSqPtc

            if ptcFitType == 'EXPAPPROXIMATION':
                ptcGain = parsFit[1]
                ptcGainErr = parsFitErr[1]
                ptcNoise = np.sqrt(np.fabs(parsFit[2]))
                ptcNoiseErr = 0.5*(parsFitErr[2]/np.fabs(parsFit[2]))*np.sqrt(np.fabs(parsFit[2]))
            if ptcFitType == 'POLYNOMIAL':
                ptcGain = 1./parsFit[1]
                ptcGainErr = np.fabs(1./parsFit[1])*(parsFitErr[1]/parsFit[1])
                ptcNoise = np.sqrt(np.fabs(parsFit[0]))*ptcGain
                ptcNoiseErr = (0.5*(parsFitErr[0]/np.fabs(parsFit[0]))*(np.sqrt(np.fabs(parsFit[0]))))*ptcGain
            dataset.gain[ampName] = ptcGain
            dataset.gainErr[ampName] = ptcGainErr
            dataset.noise[ampName] = ptcNoise
            dataset.noiseErr[ampName] = ptcNoiseErr
        if not len(dataset.ptcFitType) == 0:
            dataset.ptcFitType = ptcFitType

        return dataset