1# This file is part of cp_pipe. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (https://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# This program is free software: you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation, either version 3 of the License, or 

12# (at your option) any later version. 

13# 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18# 

19# You should have received a copy of the GNU General Public License 

20# along with this program. If not, see <https://www.gnu.org/licenses/>. 

21# 

22 

23__all__ = ['PlotPhotonTransferCurveTask'] 

24 

25import numpy as np 

26import matplotlib.pyplot as plt 

27import matplotlib as mpl 

28from matplotlib import gridspec 

29import os 

30from matplotlib.backends.backend_pdf import PdfPages 

31 

32import lsst.ip.isr as isr 

33import lsst.pex.config as pexConfig 

34import lsst.pipe.base as pipeBase 

35 

36from .utils import (funcAstier, funcPolynomial, NonexistentDatasetTaskDataIdContainer, 

37 calculateWeightedReducedChi2) 

38from matplotlib.ticker import MaxNLocator 

39 

40from .astierCovPtcFit import computeApproximateAcoeffs 

41from .astierCovPtcUtils import getFitDataFromCovariances 

42 

43from lsst.ip.isr import PhotonTransferCurveDataset 

44 

45 

46class PlotPhotonTransferCurveTaskConfig(pexConfig.Config): 

47 """Config class for photon transfer curve measurement task""" 

48 datasetFileName = pexConfig.Field( 

49 dtype=str, 

50 doc="datasetPtc file name (pkl)", 

51 default="", 

52 ) 

53 linearizerFileName = pexConfig.Field( 

54 dtype=str, 

55 doc="linearizer file name (fits)", 

56 default="", 

57 ) 

58 ccdKey = pexConfig.Field( 

59 dtype=str, 

60 doc="The key by which to pull a detector from a dataId, e.g. 'ccd' or 'detector'.", 

61 default='detector', 

62 ) 

63 signalElectronsRelativeA = pexConfig.Field( 

64 dtype=float, 

65 doc="Signal value for relative systematic bias between different methods of estimating a_ij " 

66 "(Fig. 15 of Astier+19).", 

67 default=75000, 

68 ) 

69 plotNormalizedCovariancesNumberOfBins = pexConfig.Field( 

70 dtype=int, 

71 doc="Number of bins in `plotNormalizedCovariancesNumber` function " 

72 "(Fig. 8, 10., of Astier+19).", 

73 default=10, 

74 ) 

75 

76 
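# Example (illustrative sketch, not part of the task itself): overriding this
# configuration in Python before constructing the task. The file name below is
# hypothetical.
# >>> config = PlotPhotonTransferCurveTaskConfig()
# >>> config.datasetFileName = "ptcDataset-det000.fits"
# >>> config.plotNormalizedCovariancesNumberOfBins = 20
# >>> task = PlotPhotonTransferCurveTask(config=config)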

77class PlotPhotonTransferCurveTask(pipeBase.CmdLineTask): 

78 """A class to plot the dataset from MeasurePhotonTransferCurveTask. 

79 

80 Parameters 

81 ---------- 

82 

83 *args: `list` 

84 Positional arguments passed to the Task constructor. None used at this 

85 time. 

86 **kwargs: `dict` 

87 Keyword arguments passed on to the Task constructor. None used at this 

88 time. 

89 

90 """ 

91 

92 ConfigClass = PlotPhotonTransferCurveTaskConfig 

93 _DefaultName = "plotPhotonTransferCurve" 

94 

95 def __init__(self, *args, **kwargs): 

96 pipeBase.CmdLineTask.__init__(self, *args, **kwargs) 

97 plt.interactive(False) # stop windows popping up when plotting. When headless, use 'agg' backend too 

98 self.config.validate() 

99 self.config.freeze() 

100 

101 @classmethod 

102 def _makeArgumentParser(cls): 

103 """Augment argument parser for the MeasurePhotonTransferCurveTask.""" 

104 parser = pipeBase.ArgumentParser(name=cls._DefaultName) 

105 parser.add_id_argument("--id", datasetType="photonTransferCurveDataset", 

106 ContainerClass=NonexistentDatasetTaskDataIdContainer, 

107 help="The ccds to use, e.g. --id ccd=0..100") 

108 return parser 

109 

110 @pipeBase.timeMethod 

111 def runDataRef(self, dataRef): 

112 """Run the Photon Transfer Curve (PTC) plotting measurement task. 

113 

114 Parameters 

115 ---------- 

116 dataRef : `lsst.daf.persistence.ButlerDataRef`

117 Data reference for the detector whose PTC dataset will be plotted.

118 """ 

119 

120 datasetFile = self.config.datasetFileName 

121 datasetPtc = PhotonTransferCurveDataset.readFits(datasetFile) 

122 

123 dirname = dataRef.getUri(datasetType='cpPipePlotRoot', write=True) 

124 if not os.path.exists(dirname): 

125 os.makedirs(dirname) 

126 

127 detNum = dataRef.dataId[self.config.ccdKey] 

128 filename = f"PTC_det{detNum}.pdf" 

129 filenameFull = os.path.join(dirname, filename) 

130 

131 if self.config.linearizerFileName: 

132 linearizer = isr.linearize.Linearizer.readFits(self.config.linearizerFileName) 

133 else: 

134 linearizer = None 

135 self.run(filenameFull, datasetPtc, linearizer=linearizer, log=self.log) 

136 

137 return pipeBase.Struct(exitStatus=0) 

138 

139 def run(self, filenameFull, datasetPtc, linearizer=None, log=None): 

140 """Make the plots for the PTC task""" 

141 ptcFitType = datasetPtc.ptcFitType 

142 with PdfPages(filenameFull) as pdfPages: 

143 if ptcFitType in ["FULLCOVARIANCE", ]: 

144 self.covAstierMakeAllPlots(datasetPtc, pdfPages, log=log) 

145 elif ptcFitType in ["EXPAPPROXIMATION", "POLYNOMIAL"]: 

146 self._plotStandardPtc(datasetPtc, ptcFitType, pdfPages) 

147 else: 

148 raise RuntimeError(f"The input dataset had an invalid dataset.ptcFitType: {ptcFitType}. \n" + 

149 "Options: 'FULLCOVARIANCE', EXPAPPROXIMATION, or 'POLYNOMIAL'.") 

150 if linearizer: 

151 self._plotLinearizer(datasetPtc, linearizer, pdfPages) 

152 

153 return 

154 
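# Example (illustrative sketch): making the plots directly from a PTC dataset
# written by MeasurePhotonTransferCurveTask, bypassing runDataRef. The file
# names are hypothetical.
# >>> from lsst.ip.isr import PhotonTransferCurveDataset
# >>> datasetPtc = PhotonTransferCurveDataset.readFits("ptcDataset-det000.fits")
# >>> task = PlotPhotonTransferCurveTask()
# >>> task.run("PTC_det000.pdf", datasetPtc, linearizer=None)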

155 def covAstierMakeAllPlots(self, dataset, pdfPages, 

156 log=None): 

157 """Make plots for MeasurePhotonTransferCurve task when doCovariancesAstier=True. 

158 

159 This function calls other functions that mostly reproduce the plots in Astier+19.

160 Most of the code is ported from Pierre Astier's repository https://github.com/PierreAstier/bfptc 

161 

162 Parameters 

163 ---------- 

164 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset` 

165 The dataset containing the necessary information to produce the plots. 

166 

167 pdfPages: `matplotlib.backends.backend_pdf.PdfPages` 

168 PDF file where the plots will be saved. 

169 

170 log : `lsst.log.Log`, optional 

171 Logger to handle messages 

172 """ 

173 mu = dataset.rawMeans 

174 expIdMask = dataset.expIdMask 

175 # dictionaries with ampNames as keys 

176 fullCovs = dataset.covariances 

177 fullCovsModel = dataset.covariancesModel 

178 fullCovWeights = dataset.covariancesSqrtWeights 

179 aDict = dataset.aMatrix 

180 bDict = dataset.bMatrix 

181 fullCovsNoB = dataset.covariancesNoB 

182 fullCovsModelNoB = dataset.covariancesModelNoB 

183 fullCovWeightsNoB = dataset.covariancesSqrtWeightsNoB 

184 aDictNoB = dataset.aMatrixNoB 

185 gainDict = dataset.gain 

186 noiseDict = dataset.noise 

187 

188 self.plotCovariances(mu, fullCovs, fullCovsModel, fullCovWeights, fullCovsNoB, fullCovsModelNoB, 

189 fullCovWeightsNoB, gainDict, noiseDict, aDict, bDict, expIdMask, pdfPages) 

190 self.plotNormalizedCovariances(0, 0, mu, fullCovs, fullCovsModel, fullCovWeights, fullCovsNoB, 

191 fullCovsModelNoB, fullCovWeightsNoB, expIdMask, pdfPages, 

192 offset=0.01, topPlot=True, 

193 numberOfBins=self.config.plotNormalizedCovariancesNumberOfBins, 

194 log=log) 

195 self.plotNormalizedCovariances(0, 1, mu, fullCovs, fullCovsModel, fullCovWeights, fullCovsNoB, 

196 fullCovsModelNoB, fullCovWeightsNoB, expIdMask, pdfPages, 

197 numberOfBins=self.config.plotNormalizedCovariancesNumberOfBins, 

198 log=log) 

199 self.plotNormalizedCovariances(1, 0, mu, fullCovs, fullCovsModel, fullCovWeights, fullCovsNoB, 

200 fullCovsModelNoB, fullCovWeightsNoB, expIdMask, pdfPages, 

201 numberOfBins=self.config.plotNormalizedCovariancesNumberOfBins, 

202 log=log) 

203 self.plot_a_b(aDict, bDict, pdfPages) 

204 self.ab_vs_dist(aDict, bDict, pdfPages, bRange=4) 

205 self.plotAcoeffsSum(aDict, bDict, pdfPages) 

206 self.plotRelativeBiasACoeffs(aDict, aDictNoB, fullCovsModel, fullCovsModelNoB, 

207 self.config.signalElectronsRelativeA, gainDict, pdfPages, maxr=4) 

208 

209 return 

210 

211 @staticmethod 

212 def plotCovariances(mu, covs, covsModel, covsWeights, covsNoB, covsModelNoB, covsWeightsNoB, 

213 gainDict, noiseDict, aDict, bDict, expIdMask, pdfPages): 

214 """Plot covariances and models: Cov00, Cov10, Cov01. 

215 

216 Figs. 6 and 7 of Astier+19 

217 

218 Parameters 

219 ---------- 

220 mu : `dict`, [`str`, `list`] 

221 Dictionary keyed by amp name with mean signal values. 

222 

223 covs : `dict`, [`str`, `list`] 

224 Dictionary keyed by amp names containing a list of measured covariances per mean flux.

225 

226 covsModel : `dict`, [`str`, `list`] 

227 Dictionary keyed by amp names containing the covariance model (Eq. 20 of Astier+19) per mean flux.

228 

229 covsWeights : `dict`, [`str`, `list`] 

230 Dictionary keyed by amp names containing the sqrt. of the covariance weights.

231 

232 covsNoB : `dict`, [`str`, `list`] 

233 Dictionary keyed by amp names containing a list of measured covariances per mean flux ('b'=0 in

234 Astier+19). 

235 

236 covsModelNoB : `dict`, [`str`, `list`] 

237 Dictionary keyed by amp names containing covariances model (with 'b'=0 in Eq. 20 of Astier+19) 

238 per mean flux. 

239 

240 covsWeightsNoB : `dict`, [`str`, `list`] 

241 Dictionary keyed by amp names containing sqrt. of covariances weights ('b' = 0 in Eq. 20 of 

242 Astier+19). 

243 

244 gainDict : `dict`, [`str`, `float`] 

245 Dictionary keyed by amp names containing the gains in e-/ADU. 

246 

247 noiseDict : `dict`, [`str`, `float`] 

248 Dictionary keyed by amp names containing the rms readout noise in e-.

249 

250 aDict : `dict`, [`str`, `numpy.array`] 

251 Dictionary keyed by amp names containing 'a' coefficients (Eq. 20 of Astier+19). 

252 

253 bDict : `dict`, [`str`, `numpy.array`] 

254 Dictionary keyed by amp names containing 'b' coefficients (Eq. 20 of Astier+19). 

255 

256 expIdMask : `dict`, [`str`, `list`] 

257 Dictionary keyed by amp names containing the mask over exposure pairs used in the fit.

258 

259 pdfPages: `matplotlib.backends.backend_pdf.PdfPages` 

260 PDF file where the plots will be saved. 

261 """ 

262 

263 legendFontSize = 6.5 

264 labelFontSize = 7 

265 titleFontSize = 9 

266 supTitleFontSize = 18 

267 markerSize = 25 

268 

269 nAmps = len(covs) 

270 if nAmps == 2: 

271 nRows, nCols = 2, 1 

272 nRows = np.sqrt(nAmps) 

273 mantissa, _ = np.modf(nRows) 

274 if mantissa > 0: 

275 nRows = int(nRows) + 1 

276 nCols = nRows 

277 else: 

278 nRows = int(nRows) 

279 nCols = nRows 

280 
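# Worked example of the grid sizing above: 16 amplifiers give sqrt(16) = 4
# with no fractional part, hence a 4x4 grid; 8 amplifiers give sqrt(8) ~ 2.83,
# which rounds up to a 3x3 grid with one unused panel.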

281 f, ax = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10)) 

282 f2, ax2 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10)) 

283 fResCov00, axResCov00 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', 

284 figsize=(13, 10)) 

285 fCov01, axCov01 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10)) 

286 fCov10, axCov10 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10)) 

287 

288 assert(len(covsModel) == nAmps) 

289 assert(len(covsWeights) == nAmps) 

290 

291 assert(len(covsNoB) == nAmps) 

292 assert(len(covsModelNoB) == nAmps) 

293 assert(len(covsWeightsNoB) == nAmps) 

294 

295 for i, (amp, a, a2, aResVar, a3, a4) in enumerate(zip(covs, ax.flatten(), 

296 ax2.flatten(), axResCov00.flatten(), 

297 axCov01.flatten(), axCov10.flatten())): 

298 

299 muAmp, cov, model, weight = mu[amp], covs[amp], covsModel[amp], covsWeights[amp] 

300 if not np.isnan(np.array(cov)).all(): # If all the entries are np.nan, this is a bad amp. 

301 aCoeffs, bCoeffs = np.array(aDict[amp]), np.array(bDict[amp]) 

302 gain, noise = gainDict[amp], noiseDict[amp] 

303 mask = expIdMask[amp] 

304 
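# Lag (0, 0) of the covariance matrix is the variance, so this first call
# recovers the standard PTC (variance vs. mean signal) and its model.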

305 (meanVecOriginal, varVecOriginal, varVecModelOriginal, 

306 weightsOriginal, _) = getFitDataFromCovariances(0, 0, muAmp, cov, model, weight) 

307 meanVecFinal, varVecFinal = meanVecOriginal[mask], varVecOriginal[mask] 

308 varVecModelFinal = varVecModelOriginal[mask] 

309 meanVecOutliers = meanVecOriginal[np.invert(mask)] 

310 varVecOutliers = varVecOriginal[np.invert(mask)] 

311 varWeightsFinal = weightsOriginal[mask] 

312 # Get weighted reduced chi2 

313 chi2FullModelVar = calculateWeightedReducedChi2(varVecFinal, varVecModelFinal, 

314 varWeightsFinal, len(meanVecFinal), 4) 

315 

316 (meanVecOrigCov01, varVecOrigCov01, varVecModelOrigCov01, 

317 _, _) = getFitDataFromCovariances(0, 1, muAmp, cov, model, weight)

318 meanVecFinalCov01, varVecFinalCov01 = meanVecOrigCov01[mask], varVecOrigCov01[mask] 

319 varVecModelFinalCov01 = varVecModelOrigCov01[mask] 

320 meanVecOutliersCov01 = meanVecOrigCov01[np.invert(mask)] 

321 varVecOutliersCov01 = varVecOrigCov01[np.invert(mask)] 

322 

323 (meanVecOrigCov10, varVecOrigCov10, varVecModelOrigCov10, 

324 _, _) = getFitDataFromCovariances(1, 0, muAmp, cov, model, weight) 

325 meanVecFinalCov10, varVecFinalCov10 = meanVecOrigCov10[mask], varVecOrigCov10[mask] 

326 varVecModelFinalCov10 = varVecModelOrigCov10[mask] 

327 meanVecOutliersCov10 = meanVecOrigCov10[np.invert(mask)] 

328 varVecOutliersCov10 = varVecOrigCov10[np.invert(mask)] 

329 

330 # Quadratic fit for residuals below

331 par2 = np.polyfit(meanVecFinal, varVecFinal, 2, w=varWeightsFinal) 

332 varModelFinalQuadratic = np.polyval(par2, meanVecFinal) 

333 chi2QuadModelVar = calculateWeightedReducedChi2(varVecFinal, varModelFinalQuadratic, 

334 varWeightsFinal, len(meanVecFinal), 3) 

335 

336 # fit with no 'b' coefficient (c = a*b in Eq. 20 of Astier+19) 

337 covNoB, modelNoB, weightNoB = covsNoB[amp], covsModelNoB[amp], covsWeightsNoB[amp] 

338 (meanVecOriginalNoB, varVecOriginalNoB, varVecModelOriginalNoB, 

339 varWeightsOriginalNoB, _) = getFitDataFromCovariances(0, 0, muAmp, covNoB, modelNoB, 

340 weightNoB) 

341 

342 meanVecFinalNoB, varVecFinalNoB, varVecModelFinalNoB, varWeightsFinalNoB = ( 

343 meanVecOriginalNoB[mask], varVecOriginalNoB[mask], varVecModelOriginalNoB[mask], 

344 varWeightsOriginalNoB[mask]) 

345 

346 chi2FullModelNoBVar = calculateWeightedReducedChi2(varVecFinalNoB, varVecModelFinalNoB, 

347 varWeightsFinalNoB, len(meanVecFinalNoB), 

348 3) 

349 stringLegend = (f"Gain: {gain:.4} e/ADU \n" + 

350 f"Noise: {noise:.4} e \n" + 

351 r"$a_{00}$: %.3e 1/e"%aCoeffs[0, 0] + 

352 "\n" + r"$b_{00}$: %.3e 1/e"%bCoeffs[0, 0] + 

353 f"\nLast in fit: {meanVecFinal[-1]:.7} ADU ") 

354 minMeanVecFinal = np.nanmin(meanVecFinal) 

355 maxMeanVecFinal = np.nanmax(meanVecFinal) 

356 deltaXlim = maxMeanVecFinal - minMeanVecFinal 

357 

358 a.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize) 

359 a.set_ylabel(r'Variance (ADU$^2$)', fontsize=labelFontSize) 

360 a.tick_params(labelsize=11) 

361 a.set_xscale('linear', fontsize=labelFontSize) 

362 a.set_yscale('linear', fontsize=labelFontSize) 

363 a.scatter(meanVecFinal, varVecFinal, c='blue', marker='o', s=markerSize) 

364 a.scatter(meanVecOutliers, varVecOutliers, c='magenta', marker='s', s=markerSize) 

365 a.plot(meanVecFinal, varVecModelFinal, color='red', linestyle='-')

366 a.text(0.03, 0.7, stringLegend, transform=a.transAxes, fontsize=legendFontSize) 

367 a.set_title(amp, fontsize=titleFontSize) 

368 a.set_xlim([minMeanVecFinal - 0.2*deltaXlim, maxMeanVecFinal + 0.2*deltaXlim]) 

369 

370 # Same as above, but in log-scale 

371 a2.set_xlabel(r'Mean Signal ($\mu$, ADU)', fontsize=labelFontSize) 

372 a2.set_ylabel(r'Variance (ADU$^2$)', fontsize=labelFontSize) 

373 a2.tick_params(labelsize=11) 

374 a2.set_xscale('log') 

375 a2.set_yscale('log') 

376 a2.plot(meanVecFinal, varVecModelFinal, color='red', linestyle='-')

377 a2.scatter(meanVecFinal, varVecFinal, c='blue', marker='o', s=markerSize) 

378 a2.scatter(meanVecOutliers, varVecOutliers, c='magenta', marker='s', s=markerSize) 

379 a2.text(0.03, 0.7, stringLegend, transform=a2.transAxes, fontsize=legendFontSize) 

380 a2.set_title(amp, fontsize=titleFontSize) 

381 a2.set_xlim([minMeanVecFinal, maxMeanVecFinal]) 

382 

383 # Residuals var - model 

384 aResVar.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize) 

385 aResVar.set_ylabel(r'Residuals (ADU$^2$)', fontsize=labelFontSize) 

386 aResVar.tick_params(labelsize=11) 

387 aResVar.set_xscale('linear', fontsize=labelFontSize) 

388 aResVar.set_yscale('linear', fontsize=labelFontSize) 

389 aResVar.plot(meanVecFinal, varVecFinal - varVecModelFinal, color='blue', linestyle='-',

390 label=r'Full fit ($\chi_{\rm{red}}^2$: %g)'%chi2FullModelVar)

391 aResVar.plot(meanVecFinal, varVecFinal - varModelFinalQuadratic, color='red', linestyle='-',

392 label=r'Quadratic fit ($\chi_{\rm{red}}^2$: %g)'%chi2QuadModelVar)

393 aResVar.plot(meanVecFinalNoB, varVecFinalNoB - varVecModelFinalNoB, color='green',

394 linestyle='-',

395 label=r'Full fit (b=0) ($\chi_{\rm{red}}^2$: %g)'%chi2FullModelNoBVar) 

396 aResVar.axhline(color='black') 

397 aResVar.set_title(amp, fontsize=titleFontSize) 

398 aResVar.set_xlim([minMeanVecFinal - 0.2*deltaXlim, maxMeanVecFinal + 0.2*deltaXlim]) 

399 aResVar.legend(fontsize=7) 

400 

401 a3.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize) 

402 a3.set_ylabel(r'Cov01 (ADU$^2$)', fontsize=labelFontSize) 

403 a3.tick_params(labelsize=11) 

404 a3.set_xscale('linear', fontsize=labelFontSize) 

405 a3.set_yscale('linear', fontsize=labelFontSize) 

406 a3.scatter(meanVecFinalCov01, varVecFinalCov01, c='blue', marker='o', s=markerSize) 

407 a3.scatter(meanVecOutliersCov01, varVecOutliersCov01, c='magenta', marker='s', s=markerSize) 

408 a3.plot(meanVecFinalCov01, varVecModelFinalCov01, color='red', linestyle='-')

409 a3.set_title(amp, fontsize=titleFontSize) 

410 a3.set_xlim([minMeanVecFinal - 0.2*deltaXlim, maxMeanVecFinal + 0.2*deltaXlim]) 

411 

412 a4.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize) 

413 a4.set_ylabel(r'Cov10 (ADU$^2$)', fontsize=labelFontSize) 

414 a4.tick_params(labelsize=11) 

415 a4.set_xscale('linear', fontsize=labelFontSize) 

416 a4.set_yscale('linear', fontsize=labelFontSize) 

417 a4.scatter(meanVecFinalCov10, varVecFinalCov10, c='blue', marker='o', s=markerSize) 

418 a4.scatter(meanVecOutliersCov10, varVecOutliersCov10, c='magenta', marker='s', s=markerSize) 

419 a4.plot(meanVecFinalCov10, varVecModelFinalCov10, color='red', linestyle='-')

420 a4.set_title(amp, fontsize=titleFontSize) 

421 a4.set_xlim([minMeanVecFinal - 0.2*deltaXlim, maxMeanVecFinal + 0.2*deltaXlim]) 

422 

423 else: 

424 a.set_title(f"{amp} (BAD)", fontsize=titleFontSize) 

425 a2.set_title(f"{amp} (BAD)", fontsize=titleFontSize) 

426 a3.set_title(f"{amp} (BAD)", fontsize=titleFontSize) 

427 a4.set_title(f"{amp} (BAD)", fontsize=titleFontSize) 

428 

429 f.suptitle("PTC from covariances as in Astier+19 \n Fit: Eq. 20, Astier+19", 

430 fontsize=supTitleFontSize) 

431 pdfPages.savefig(f) 

432 f2.suptitle("PTC from covariances as in Astier+19 (log-log) \n Fit: Eq. 20, Astier+19", 

433 fontsize=supTitleFontSize) 

434 pdfPages.savefig(f2) 

435 fResCov00.suptitle("Residuals (data-model) for Cov00 (Var)", fontsize=supTitleFontSize) 

436 pdfPages.savefig(fResCov00) 

437 fCov01.suptitle("Cov01 as in Astier+19 (nearest parallel neighbor covariance) \n" + 

438 " Fit: Eq. 20, Astier+19", fontsize=supTitleFontSize) 

439 pdfPages.savefig(fCov01) 

440 fCov10.suptitle("Cov10 as in Astier+19 (nearest serial neighbor covariance) \n" + 

441 "Fit: Eq. 20, Astier+19", fontsize=supTitleFontSize) 

442 pdfPages.savefig(fCov10) 

443 

444 return 

445 

446 def plotNormalizedCovariances(self, i, j, inputMu, covs, covsModel, covsWeights, covsNoB, covsModelNoB, 

447 covsWeightsNoB, expIdMask, pdfPages, offset=0.004, 

448 numberOfBins=10, plotData=True, topPlot=False, log=None): 

449 """Plot C_ij/mu vs mu. 

450 

451 Figs. 8, 10, and 11 of Astier+19 

452 

453 Parameters 

454 ---------- 

455 i : `int` 

456 Covariance lag

457 

458 j : `int` 

459 Covariance lag 

460 

461 inputMu : `dict`, [`str`, `list`] 

462 Dictionary keyed by amp name with mean signal values. 

463 

464 covs : `dict`, [`str`, `list`] 

465 Dictionary keyed by amp names containing a list of measured covariances per mean flux.

466 

467 covsModel : `dict`, [`str`, `list`] 

468 Dictionary keyed by amp names containing the covariance model (Eq. 20 of Astier+19) per mean flux.

469 

470 covsWeights : `dict`, [`str`, `list`] 

471 Dictionary keyed by amp names containing the sqrt. of the covariance weights.

472 

473 covsNoB : `dict`, [`str`, `list`] 

474 Dictionary keyed by amp names containing a list of measured covariances per mean flux ('b'=0 in

475 Astier+19). 

476 

477 covsModelNoB : `dict`, [`str`, `list`] 

478 Dictionary keyed by amp names containing covariances model (with 'b'=0 in Eq. 20 of Astier+19) 

479 per mean flux. 

480 

481 covsWeightsNoB : `dict`, [`str`, `list`] 

482 Dictionary keyed by amp names containing sqrt. of covariances weights ('b' = 0 in Eq. 20 of 

483 Astier+19). 

484 

485 expIdMask : `dict`, [`str`, `list`] 

486 Dictionary keyed by amp names containing the mask over exposure pairs used in the fit.

487 

488 pdfPages: `matplotlib.backends.backend_pdf.PdfPages` 

489 PDF file where the plots will be saved. 

490 

491 offset : `float`, optional 

492 Constant offset factor to plot covariances in the same panel (so they don't overlap).

493 

494 numberOfBins : `int`, optional 

495 Number of bins for top and bottom plot. 

496 

497 plotData : `bool`, optional 

498 Plot the data points? 

499 

500 topPlot : `bool`, optional 

501 Plot the top plot with the covariances, and the bottom plot with the model residuals? 

502 

503 log : `lsst.log.Log`, optional 

504 Logger to handle messages. 

505 """ 

506 if (not topPlot): 

507 fig = plt.figure(figsize=(8, 10)) 

508 gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1]) 

509 gs.update(hspace=0) 

510 ax0 = plt.subplot(gs[0]) 

511 plt.setp(ax0.get_xticklabels(), visible=False) 

512 else: 

513 fig = plt.figure(figsize=(8, 8)) 

514 ax0 = plt.subplot(111) 

515 ax0.ticklabel_format(style='sci', axis='x', scilimits=(0, 0)) 

516 ax0.tick_params(axis='both', labelsize='x-large') 

517 mue, rese, wce = [], [], [] 

518 mueNoB, reseNoB, wceNoB = [], [], [] 

519 for counter, amp in enumerate(covs): 

520 mask = expIdMask[amp] 

521 

522 muAmp, fullCov, fullCovModel, fullCovWeight = (inputMu[amp], covs[amp], covsModel[amp], 

523 covsWeights[amp]) 

524 if len(fullCov) == 0: 

525 continue 

526 mu, cov, model, weightCov, _ = getFitDataFromCovariances(i, j, muAmp, fullCov, fullCovModel, 

527 fullCovWeight, divideByMu=True) 

528 

529 mu, cov, model, weightCov = mu[mask], cov[mask], model[mask], weightCov[mask] 

530 

531 mue += list(mu) 

532 rese += list(cov - model) 

533 wce += list(weightCov) 

534 

535 fullCovNoB, fullCovModelNoB, fullCovWeightNoB = (covsNoB[amp], covsModelNoB[amp], 

536 covsWeightsNoB[amp]) 

537 if len(fullCovNoB) == 0: 

538 continue 

539 (muNoB, covNoB, modelNoB, 

540 weightCovNoB, _) = getFitDataFromCovariances(i, j, muAmp, fullCovNoB, fullCovModelNoB, 

541 fullCovWeightNoB, divideByMu=True) 

542 

543 muNoB, covNoB, modelNoB, weightCovNoB = (muNoB[mask], covNoB[mask], modelNoB[mask], 

544 weightCovNoB[mask]) 

545 

546 mueNoB += list(muNoB) 

547 reseNoB += list(covNoB - modelNoB) 

548 wceNoB += list(weightCovNoB) 

549 

550 # the corresponding fit 

551 fit_curve, = plt.plot(mu, model + counter*offset, '-', linewidth=4.0) 

552 # bin plot. len(mu) = no binning 

553 gind = self.indexForBins(mu, numberOfBins) 

554 

555 xb, yb, wyb, sigyb = self.binData(mu, cov, gind, weightCov) 

556 plt.errorbar(xb, yb+counter*offset, yerr=sigyb, marker='o', linestyle='none', markersize=6.5, 

557 color=fit_curve.get_color(), label=f"{amp} (N: {len(mu)})") 

558 # plot the data 

559 if plotData: 

560 points, = plt.plot(mu, cov + counter*offset, '.', color=fit_curve.get_color()) 

561 plt.legend(loc='upper right', fontsize=8) 

562 # end loop on amps 

563 mue = np.array(mue) 

564 rese = np.array(rese) 

565 wce = np.array(wce) 

566 mueNoB = np.array(mueNoB) 

567 reseNoB = np.array(reseNoB) 

568 wceNoB = np.array(wceNoB) 

569 

570 plt.xlabel(r"$\mu (el)$", fontsize='x-large') 

571 plt.ylabel(r"$Cov_{%d%d}/\mu + Cst (el)$"%(i, j), fontsize='x-large')

572 if (not topPlot): 

573 gind = self.indexForBins(mue, numberOfBins) 

574 xb, yb, wyb, sigyb = self.binData(mue, rese, gind, wce) 

575 

576 ax1 = plt.subplot(gs[1], sharex=ax0) 

577 ax1.errorbar(xb, yb, yerr=sigyb, marker='o', linestyle='none', label='Full fit') 

578 gindNoB = self.indexForBins(mueNoB, numberOfBins) 

579 xb2, yb2, wyb2, sigyb2 = self.binData(mueNoB, reseNoB, gindNoB, wceNoB) 

580 

581 ax1.errorbar(xb2, yb2, yerr=sigyb2, marker='o', linestyle='none', label='b = 0') 

582 ax1.tick_params(axis='both', labelsize='x-large') 

583 plt.legend(loc='upper left', fontsize='large') 

584 # horizontal line at zero 

585 plt.plot(xb, [0]*len(xb), '--', color='k') 

586 plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0)) 

587 plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0)) 

588 plt.xlabel(r'$\mu (el)$', fontsize='x-large') 

589 plt.ylabel(r'$Cov_{%d%d}/\mu$ - model (el)'%(i, j), fontsize='x-large')

590 plt.tight_layout() 

591 plt.suptitle(f"Nbins: {numberOfBins}") 

592 # overlapping y labels: 

593 fig.canvas.draw() 

594 labels0 = [item.get_text() for item in ax0.get_yticklabels()] 

595 labels0[0] = u'' 

596 ax0.set_yticklabels(labels0) 

597 pdfPages.savefig(fig) 

598 

599 return 

600 

601 @staticmethod 

602 def plot_a_b(aDict, bDict, pdfPages, bRange=3): 

603 """Fig. 12 of Astier+19 

604 

605 Color display of the fitted 'a' and 'b' arrays, averaged over channels.

606 

607 Parameters 

608 ---------- 

609 aDict : `dict`, [`numpy.array`] 

610 Dictionary keyed by amp names containing the fitted 'a' coefficients from the model 

611 in Eq. 20 of Astier+19 (if `ptcFitType` is `FULLCOVARIANCE`). 

612 

613 bDict : `dict`, [`numpy.array`] 

614 Dictionary keyed by amp names containing the fitted 'b' coefficients from the model 

615 in Eq. 20 of Astier+19 (if `ptcFitType` is `FULLCOVARIANCE`). 

616 

617 pdfPages: `matplotlib.backends.backend_pdf.PdfPages` 

618 PDF file where the plots will be saved. 

619 

620 bRange : `int` 

621 Maximum lag for b arrays. 

622 """ 

623 a, b = [], [] 

624 for amp in aDict: 

625 if np.isnan(aDict[amp]).all(): 

626 continue 

627 a.append(aDict[amp]) 

628 b.append(bDict[amp]) 

629 a = np.array(a).mean(axis=0) 

630 b = np.array(b).mean(axis=0) 

631 fig = plt.figure(figsize=(7, 11)) 

632 ax0 = fig.add_subplot(2, 1, 1) 

633 im0 = ax0.imshow(np.abs(a.transpose()), origin='lower', norm=mpl.colors.LogNorm()) 

634 ax0.tick_params(axis='both', labelsize='x-large') 

635 ax0.set_title(r'$|a|$', fontsize='x-large') 

636 ax0.xaxis.set_ticks_position('bottom') 

637 cb0 = plt.colorbar(im0) 

638 cb0.ax.tick_params(labelsize='x-large') 

639 

640 ax1 = fig.add_subplot(2, 1, 2) 

641 ax1.tick_params(axis='both', labelsize='x-large') 

642 ax1.yaxis.set_major_locator(MaxNLocator(integer=True)) 

643 ax1.xaxis.set_major_locator(MaxNLocator(integer=True)) 

644 im1 = ax1.imshow(1e6*b[:bRange, :bRange].transpose(), origin='lower') 

645 cb1 = plt.colorbar(im1) 

646 cb1.ax.tick_params(labelsize='x-large') 

647 ax1.set_title(r'$b \times 10^6$', fontsize='x-large') 

648 ax1.xaxis.set_ticks_position('bottom') 

649 plt.tight_layout() 

650 pdfPages.savefig(fig) 

651 

652 return 

653 

654 @staticmethod 

655 def ab_vs_dist(aDict, bDict, pdfPages, bRange=4): 

656 """Fig. 13 of Astier+19. 

657 

658 Values of the fitted 'a' and 'b' arrays, averaged over amplifiers, as a function of distance.

659 

660 Parameters 

661 ---------- 

662 aDict : `dict`, [`numpy.array`] 

663 Dictionary keyed by amp names containing the fitted 'a' coefficients from the model 

664 in Eq. 20 of Astier+19 (if `ptcFitType` is `FULLCOVARIANCE`). 

665 

666 bDict : `dict`, [`numpy.array`] 

667 Dictionary keyed by amp names containing the fitted 'b' coefficients from the model 

668 in Eq. 20 of Astier+19 (if `ptcFitType` is `FULLCOVARIANCE`). 

669 

670 pdfPages: `matplotlib.backends.backend_pdf.PdfPages` 

671 PDF file where the plots will be saved. 

672 

673 bRange : `int` 

674 Maximum lag for b arrays. 

675 """ 

676 assert (len(aDict) == len(bDict)) 

677 a = [] 

678 for amp in aDict: 

679 if np.isnan(aDict[amp]).all(): 

680 continue 

681 a.append(aDict[amp]) 

682 a = np.array(a) 

683 y = a.mean(axis=0) 

684 sy = a.std(axis=0)/np.sqrt(len(aDict)) 

685 i, j = np.indices(y.shape) 

686 upper = (i >= j).ravel() 

687 r = np.sqrt(i**2 + j**2).ravel() 

688 y = y.ravel() 

689 sy = sy.ravel() 

690 fig = plt.figure(figsize=(6, 9)) 

691 ax = fig.add_subplot(211) 

692 ax.set_xlim([0.5, r.max()+1]) 

693 ax.errorbar(r[upper], y[upper], yerr=sy[upper], marker='o', linestyle='none', color='b', 

694 label='$i>=j$') 

695 ax.errorbar(r[~upper], y[~upper], yerr=sy[~upper], marker='o', linestyle='none', color='r', 

696 label='$i<j$') 

697 ax.legend(loc='upper center', fontsize='x-large') 

698 ax.set_xlabel(r'$\sqrt{i^2+j^2}$', fontsize='x-large') 

699 ax.set_ylabel(r'$a_{ij}$', fontsize='x-large') 

700 ax.set_yscale('log') 

701 ax.tick_params(axis='both', labelsize='x-large') 

702 

703 # 

704 axb = fig.add_subplot(212) 

705 b = [] 

706 for amp in bDict: 

707 if np.isnan(bDict[amp]).all(): 

708 continue 

709 b.append(bDict[amp]) 

710 b = np.array(b) 

711 yb = b.mean(axis=0) 

712 syb = b.std(axis=0)/np.sqrt(len(bDict)) 

713 ib, jb = np.indices(yb.shape) 

714 upper = (ib >= jb).ravel()

715 rb = np.sqrt(ib**2 + jb**2).ravel()

716 yb = yb.ravel() 

717 syb = syb.ravel() 

718 xmin = -0.2 

719 xmax = bRange 

720 axb.set_xlim([xmin, xmax+0.2]) 

721 cutu = (rb > xmin) & (rb < xmax) & (upper)

722 cutl = (rb > xmin) & (rb < xmax) & (~upper)

723 axb.errorbar(rb[cutu], yb[cutu], yerr=syb[cutu], marker='o', linestyle='none', color='b', 

724 label='$i>=j$') 

725 axb.errorbar(rb[cutl], yb[cutl], yerr=syb[cutl], marker='o', linestyle='none', color='r', 

726 label='$i<j$') 

727 plt.legend(loc='upper center', fontsize='x-large') 

728 axb.set_xlabel(r'$\sqrt{i^2+j^2}$', fontsize='x-large') 

729 axb.set_ylabel(r'$b_{ij}$', fontsize='x-large') 

730 axb.ticklabel_format(style='sci', axis='y', scilimits=(0, 0)) 

731 axb.tick_params(axis='both', labelsize='x-large') 

732 plt.tight_layout() 

733 pdfPages.savefig(fig) 

734 

735 return 

736 

737 @staticmethod 

738 def plotAcoeffsSum(aDict, bDict, pdfPages): 

739 """Fig. 14. of Astier+19 

740 

741 Cumulative sum of a_ij as a function of maximum separation. This plot displays the average over 

742 channels. 

743 

744 Parameters 

745 ---------- 

746 aDict : `dict`, [`numpy.array`] 

747 Dictionary keyed by amp names containing the fitted 'a' coefficients from the model 

748 in Eq. 20 of Astier+19 (if `ptcFitType` is `FULLCOVARIANCE`). 

749 

750 bDict : `dict`, [`numpy.array`] 

751 Dictionary keyed by amp names containing the fitted 'b' coefficients from the model 

752 in Eq. 20 of Astier+19 (if `ptcFitType` is `FULLCOVARIANCE`). 

753 

754 pdfPages: `matplotlib.backends.backend_pdf.PdfPages` 

755 PDF file where the plots will be saved. 

756 """ 

757 assert (len(aDict) == len(bDict)) 

758 a, b = [], [] 

759 for amp in aDict: 

760 if np.isnan(aDict[amp]).all() or np.isnan(bDict[amp]).all(): 

761 continue 

762 a.append(aDict[amp]) 

763 b.append(bDict[amp]) 

764 a = np.array(a).mean(axis=0) 

765 b = np.array(b).mean(axis=0) 

766 fig = plt.figure(figsize=(7, 6)) 

767 w = 4*np.ones_like(a) 

768 w[0, 1:] = 2 

769 w[1:, 0] = 2 

770 w[0, 0] = 1 
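# The weights above account for only one quadrant of the symmetric a matrix
# being stored: off-axis coefficients (i, j > 0) appear four times in the
# full plane, coefficients on an axis twice, and a_00 only once.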

771 wa = w*a 

772 indices = range(1, a.shape[0]+1) 

773 sums = [wa[0:n, 0:n].sum() for n in indices] 

774 ax = plt.subplot(111) 

775 ax.plot(indices, sums/sums[0], 'o', color='b') 

776 ax.set_yscale('log') 

777 ax.set_xlim(indices[0]-0.5, indices[-1]+0.5) 

778 ax.set_ylim(None, 1.2) 

779 ax.set_ylabel(r'$[\sum_{|i|<n\ &\ |j|<n} a_{ij}] / |a_{00}|$', fontsize='x-large') 

780 ax.set_xlabel('n', fontsize='x-large') 

781 ax.tick_params(axis='both', labelsize='x-large') 

782 plt.tight_layout() 

783 pdfPages.savefig(fig) 

784 

785 return 

786 

787 @staticmethod 

788 def plotRelativeBiasACoeffs(aDict, aDictNoB, fullCovsModel, fullCovsModelNoB, signalElectrons, 

789 gainDict, pdfPages, maxr=None): 

790 """Fig. 15 in Astier+19. 

791 

792 Illustrates systematic bias from estimating 'a' 

793 coefficients from the slope of correlations as opposed to the 

794 full model in Astier+19. 

795 

796 Parameters 

797 ---------- 

798 aDict: `dict` 

799 Dictionary of 'a' matrices (Eq. 20, Astier+19), with amp names as keys. 

800 

801 aDictNoB: `dict` 

802 Dictionary of 'a' matrices ('b'= 0 in Eq. 20, Astier+19), with amp names as keys. 

803 

804 fullCovsModel : `dict`, [`str`, `list`] 

805 Dictionary keyed by amp names containing covariances model per mean flux. 

806 

807 fullCovsModelNoB : `dict`, [`str`, `list`] 

808 Dictionary keyed by amp names containing covariances model (with 'b'=0 in Eq. 20 of 

809 Astier+19) per mean flux. 

810 

811 signalElectrons : `float` 

812 Signal at which to evaluate the a_ij coefficients. 

813 

814 gainDict : `dict`, [`str`, `float`]

815 Dictionary keyed by amp names with the gains in e-/ADU.

816 

817 pdfPages: `matplotlib.backends.backend_pdf.PdfPages`

818 PDF file where the plots will be saved.

819 

820 maxr : `int`, optional 

821 Maximum lag. 

822 """ 

823 

824 fig = plt.figure(figsize=(7, 11)) 

825 title = [f"'a' relative bias at {signalElectrons} e", "'a' relative bias (b=0)"] 

826 data = [(aDict, fullCovsModel), (aDictNoB, fullCovsModelNoB)] 

827 

828 for k, pair in enumerate(data): 

829 diffs = [] 

830 amean = [] 

831 for amp in pair[0]: 

832 covModel = pair[1][amp] 

833 if np.isnan(covModel).all(): 

834 continue 

835 aOld = computeApproximateAcoeffs(covModel, signalElectrons, gainDict[amp]) 

836 a = pair[0][amp] 

837 amean.append(a) 

838 diffs.append((aOld-a)) 

839 amean = np.array(amean).mean(axis=0) 

840 diff = np.array(diffs).mean(axis=0) 

841 diff = diff/amean 

842 diff = diff[:] 

843 # The difference should be close to zero 

844 diff[0, 0] = 0 

845 if maxr is None: 

846 maxr = diff.shape[0] 

847 diff = diff[:maxr, :maxr] 

848 ax0 = fig.add_subplot(2, 1, k+1) 

849 im0 = ax0.imshow(diff.transpose(), origin='lower') 

850 ax0.yaxis.set_major_locator(MaxNLocator(integer=True)) 

851 ax0.xaxis.set_major_locator(MaxNLocator(integer=True)) 

852 ax0.tick_params(axis='both', labelsize='x-large') 

853 plt.colorbar(im0) 

854 ax0.set_title(title[k]) 

855 

856 plt.tight_layout() 

857 pdfPages.savefig(fig) 

858 

859 return 

860 

861 def _plotStandardPtc(self, dataset, ptcFitType, pdfPages): 

862 """Plot PTC, var/signal vs signal, linearity, and linearity residual per amplifier. 

863 

864 Parameters 

865 ---------- 

866 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset` 

867 The dataset containing the means, variances, exposure times, and mask. 

868 

869 ptcFitType : `str` 

870 Type of the model fit to the PTC. Options: 'EXPAPPROXIMATION' or 'POLYNOMIAL'.

871 

872 pdfPages: `matplotlib.backends.backend_pdf.PdfPages` 

873 PDF file where the plots will be saved. 

874 """ 

875 

876 if ptcFitType == 'EXPAPPROXIMATION': 

877 ptcFunc = funcAstier 

878 stringTitle = (r"Var = $\frac{1}{2g^2a_{00}}(\exp (2a_{00} \mu g) - 1) + \frac{n_{00}}{g^2}$ ") 

879 elif ptcFitType == 'POLYNOMIAL': 

880 ptcFunc = funcPolynomial 

881 for key in dataset.ptcFitPars: 

882 deg = len(dataset.ptcFitPars[key]) - 1 

883 break 

884 stringTitle = r"Polynomial (degree: %g)" % (deg) 

885 else: 

886 raise RuntimeError(f"The input dataset had an invalid dataset.ptcFitType: {ptcFitType}. \n" + 

887 "Options: 'FULLCOVARIANCE', EXPAPPROXIMATION, or 'POLYNOMIAL'.") 

888 
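# Restating the EXPAPPROXIMATION model shown in stringTitle above,
#     Var(mu) = 1/(2 g^2 a00) * (exp(2 a00 g mu) - 1) + n00 / g^2,
# here is a minimal sketch of a direct evaluation (illustrative only; the task
# itself uses funcAstier from .utils, and the parameter ordering
# [a00, gain, noise^2] is inferred from the legend code below):
# >>> import numpy as np
# >>> def expApproxVar(pars, mu):
# ...     a00, gain, noiseSq = pars
# ...     return (np.exp(2.*a00*mu*gain) - 1.)/(2.*gain*gain*a00) + noiseSq/(gain*gain)
# >>> expApproxVar([-1.2e-6, 1.5, 25.], np.array([1000., 50000.]))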

889 legendFontSize = 6.5 

890 labelFontSize = 8 

891 titleFontSize = 9 

892 supTitleFontSize = 18 

893 markerSize = 25 

894 

895 # General determination of the size of the plot grid 

896 nAmps = len(dataset.ampNames) 

897 if nAmps == 2: 

898 nRows, nCols = 2, 1 

899 nRows = np.sqrt(nAmps) 

900 mantissa, _ = np.modf(nRows) 

901 if mantissa > 0: 

902 nRows = int(nRows) + 1 

903 nCols = nRows 

904 else: 

905 nRows = int(nRows) 

906 nCols = nRows 

907 

908 f, ax = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10)) 

909 f2, ax2 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10)) 

910 f3, ax3 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10)) 

911 

912 for i, (amp, a, a2, a3) in enumerate(zip(dataset.ampNames, ax.flatten(), ax2.flatten(), 

913 ax3.flatten())): 

914 meanVecOriginal = np.array(dataset.rawMeans[amp]) 

915 varVecOriginal = np.array(dataset.rawVars[amp]) 

916 mask = np.array(dataset.expIdMask[amp]) 

917 if np.isnan(mask[0]): # If the mask is all NaNs, the whole amp is bad

918 a.set_title(f"{amp} (BAD)", fontsize=titleFontSize) 

919 a2.set_title(f"{amp} (BAD)", fontsize=titleFontSize) 

920 a3.set_title(f"{amp} (BAD)", fontsize=titleFontSize) 

921 continue 

922 else: 

923 mask = mask.astype(bool) 

924 meanVecFinal = meanVecOriginal[mask] 

925 varVecFinal = varVecOriginal[mask] 

926 meanVecOutliers = meanVecOriginal[np.invert(mask)] 

927 varVecOutliers = varVecOriginal[np.invert(mask)] 

928 pars, parsErr = np.array(dataset.ptcFitPars[amp]), np.array(dataset.ptcFitParsError[amp]) 

929 ptcRedChi2 = np.array(dataset.ptcFitChiSq[amp]) 

930 if ptcFitType == 'EXPAPPROXIMATION': 

931 if len(meanVecFinal): 

932 ptcA00, ptcA00error = pars[0], parsErr[0] 

933 ptcGain, ptcGainError = pars[1], parsErr[1] 

934 ptcNoise = np.sqrt((pars[2])) # pars[2] is in (e-)^2 

935 ptcNoiseAdu = ptcNoise*(1./ptcGain) 

936 ptcNoiseError = 0.5*(parsErr[2]/np.fabs(pars[2]))*np.sqrt(np.fabs(pars[2])) 

937 stringLegend = (f"a00: {ptcA00:.2e}+/-{ptcA00error:.2e} 1/e" 

938 f"\nGain: {ptcGain:.4}+/-{ptcGainError:.2e} e/ADU" 

939 f"\nNoise: {ptcNoise:.4}+/-{ptcNoiseError:.2e} e\n" 

940 r"$\chi^2_{\rm{red}}$: " + f"{ptcRedChi2:.4}" 

941 f"\nLast in fit: {meanVecFinal[-1]:.7} ADU ") 

942 

943 if ptcFitType == 'POLYNOMIAL': 

944 if len(meanVecFinal): 

945 ptcGain, ptcGainError = 1./pars[1], np.fabs(1./pars[1])*(parsErr[1]/pars[1]) 

946 ptcNoiseAdu = np.sqrt((pars[0])) # pars[0] is in ADU^2

947 ptcNoise = ptcNoiseAdu*ptcGain 

948 ptcNoiseError = (0.5*(parsErr[0]/np.fabs(pars[0]))*(np.sqrt(np.fabs(pars[0]))))*ptcGain 

949 stringLegend = (f"Gain: {ptcGain:.4}+/-{ptcGainError:.2e} e/ADU\n" 

950 f"Noise: {ptcNoise:.4}+/-{ptcNoiseError:.2e} e\n" 

951 r"$\chi^2_{\rm{red}}$: " + f"{ptcRedChi2:.4}" 

952 f"\nLast in fit: {meanVecFinal[-1]:.7} ADU ") 

953 

954 a.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize) 

955 a.set_ylabel(r'Variance (ADU$^2$)', fontsize=labelFontSize) 

956 a.tick_params(labelsize=11) 

957 a.set_xscale('linear', fontsize=labelFontSize) 

958 a.set_yscale('linear', fontsize=labelFontSize) 

959 

960 a2.set_xlabel(r'Mean Signal ($\mu$, ADU)', fontsize=labelFontSize) 

961 a2.set_ylabel(r'Variance (ADU$^2$)', fontsize=labelFontSize) 

962 a2.tick_params(labelsize=11) 

963 a2.set_xscale('log') 

964 a2.set_yscale('log') 

965 

966 a3.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize) 

967 a3.set_ylabel(r'Variance/$\mu$ (ADU)', fontsize=labelFontSize) 

968 a3.tick_params(labelsize=11) 

969 a3.set_xscale('log') 

970 a3.set_yscale('linear', fontsize=labelFontSize) 

971 

972 minMeanVecFinal = np.nanmin(meanVecFinal) 

973 maxMeanVecFinal = np.nanmax(meanVecFinal) 

974 meanVecFit = np.linspace(minMeanVecFinal, maxMeanVecFinal, 100*len(meanVecFinal)) 

975 minMeanVecOriginal = np.nanmin(meanVecOriginal) 

976 maxMeanVecOriginal = np.nanmax(meanVecOriginal) 

977 deltaXlim = maxMeanVecOriginal - minMeanVecOriginal 

978 a.plot(meanVecFit, ptcFunc(pars, meanVecFit), color='red') 

979 a.plot(meanVecFinal, ptcNoiseAdu**2 + (1./ptcGain)*meanVecFinal, color='green', 

980 linestyle='--') 

981 a.scatter(meanVecFinal, varVecFinal, c='blue', marker='o', s=markerSize) 

982 a.scatter(meanVecOutliers, varVecOutliers, c='magenta', marker='s', s=markerSize) 

983 a.text(0.03, 0.66, stringLegend, transform=a.transAxes, fontsize=legendFontSize) 

984 a.set_title(amp, fontsize=titleFontSize) 

985 a.set_xlim([minMeanVecOriginal - 0.2*deltaXlim, maxMeanVecOriginal + 0.2*deltaXlim]) 

986 

987 # Same, but in log-scale 

988 a2.plot(meanVecFit, ptcFunc(pars, meanVecFit), color='red') 

989 a2.scatter(meanVecFinal, varVecFinal, c='blue', marker='o', s=markerSize) 

990 a2.scatter(meanVecOutliers, varVecOutliers, c='magenta', marker='s', s=markerSize) 

991 a2.text(0.03, 0.66, stringLegend, transform=a2.transAxes, fontsize=legendFontSize) 

992 a2.set_title(amp, fontsize=titleFontSize) 

993 a2.set_xlim([minMeanVecOriginal, maxMeanVecOriginal]) 

994 

995 # Var/mu vs mu 

996 a3.plot(meanVecFit, ptcFunc(pars, meanVecFit)/meanVecFit, color='red') 

997 a3.scatter(meanVecFinal, varVecFinal/meanVecFinal, c='blue', marker='o', s=markerSize) 

998 a3.scatter(meanVecOutliers, varVecOutliers/meanVecOutliers, c='magenta', marker='s', 

999 s=markerSize) 

1000 a3.text(0.05, 0.1, stringLegend, transform=a3.transAxes, fontsize=legendFontSize) 

1001 a3.set_title(amp, fontsize=titleFontSize) 

1002 a3.set_xlim([minMeanVecOriginal - 0.2*deltaXlim, maxMeanVecOriginal + 0.2*deltaXlim]) 

1003 

1004 f.suptitle("PTC \n Fit: " + stringTitle, fontsize=supTitleFontSize) 

1005 pdfPages.savefig(f) 

1006 f2.suptitle("PTC (log-log)", fontsize=supTitleFontSize) 

1007 pdfPages.savefig(f2) 

1008 f3.suptitle(r"Var/$\mu$", fontsize=supTitleFontSize) 

1009 pdfPages.savefig(f3) 

1010 

1011 return 

1012 

1013 def _plotLinearizer(self, dataset, linearizer, pdfPages): 

1014 """Plot linearity and linearity residual per amplifier 

1015 

1016 Parameters 

1017 ---------- 

1018 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset` 

1019 The dataset containing the means, variances, exposure times, and mask. 

1020 

1021 linearizer : `lsst.ip.isr.Linearizer` 

1022 Linearizer object.

pdfPages : `matplotlib.backends.backend_pdf.PdfPages`

PDF file where the plots will be saved.

1023 """

1024 legendFontSize = 7 

1025 labelFontSize = 7 

1026 titleFontSize = 9 

1027 supTitleFontSize = 18 

1028 

1029 # General determination of the size of the plot grid 

1030 nAmps = len(dataset.ampNames) 

1031 if nAmps == 2: 

1032 nRows, nCols = 2, 1 

1033 nRows = np.sqrt(nAmps) 

1034 mantissa, _ = np.modf(nRows) 

1035 if mantissa > 0: 

1036 nRows = int(nRows) + 1 

1037 nCols = nRows 

1038 else: 

1039 nRows = int(nRows) 

1040 nCols = nRows 

1041 

1042 # Plot mean vs time (f1), and fractional residuals (f2) 

1043 f, ax = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10)) 

1044 f2, ax2 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10)) 

1045 for i, (amp, a, a2) in enumerate(zip(dataset.ampNames, ax.flatten(), ax2.flatten())): 

1046 mask = np.array(dataset.expIdMask[amp])

1047 if np.isnan(mask[0]): 

1048 a.set_title(f"{amp} (BAD)", fontsize=titleFontSize) 

1049 a2.set_title(f"{amp} (BAD)", fontsize=titleFontSize) 

1050 continue 

1051 else: 

1052 mask = mask.astype(bool) 

1053 meanVecFinal = np.array(dataset.rawMeans[amp])[mask] 

1054 timeVecFinal = np.array(dataset.rawExpTimes[amp])[mask] 

1055 

1056 a.set_xlabel('Time (sec)', fontsize=labelFontSize) 

1057 a.set_ylabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize) 

1058 a.tick_params(labelsize=labelFontSize) 

1059 a.set_xscale('linear', fontsize=labelFontSize) 

1060 a.set_yscale('linear', fontsize=labelFontSize) 

1061 

1062 a2.axhline(y=0, color='k') 

1063 a2.axvline(x=0, color='k', linestyle='-') 

1064 a2.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize) 

1065 a2.set_ylabel('Fractional nonlinearity (%)', fontsize=labelFontSize) 

1066 a2.tick_params(labelsize=labelFontSize) 

1067 a2.set_xscale('linear', fontsize=labelFontSize) 

1068 a2.set_yscale('linear', fontsize=labelFontSize) 

1069 

1070 pars, parsErr = linearizer.fitParams[amp], linearizer.fitParamsErr[amp] 

1071 k0, k0Error = pars[0], parsErr[0] 

1072 k1, k1Error = pars[1], parsErr[1] 

1073 k2, k2Error = pars[2], parsErr[2] 

1074 linRedChi2 = linearizer.fitChiSq[amp] 

1075 stringLegend = (f"k0: {k0:.4}+/-{k0Error:.2e} ADU\nk1: {k1:.4}+/-{k1Error:.2e} ADU/t" 

1076 f"\nk2: {k2:.2e}+/-{k2Error:.2e} ADU/t^2\n" 

1077 r"$\chi^2_{\rm{red}}$: " + f"{linRedChi2:.4}") 

1078 a.scatter(timeVecFinal, meanVecFinal) 

1079 a.plot(timeVecFinal, funcPolynomial(pars, timeVecFinal), color='red') 

1080 a.text(0.03, 0.75, stringLegend, transform=a.transAxes, fontsize=legendFontSize) 

1081 a.set_title(f"{amp}", fontsize=titleFontSize) 

1082 
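# Fractional nonlinearity, in percent: the deviation of the measured mean
# signal from the linear part of the fit, 100*(k0 + k1*t - mu)/(k0 + k1*t),
# matching the expression quoted in the f2 suptitle below.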

1083 linearPart = k0 + k1*timeVecFinal 

1084 fracLinRes = 100*(linearPart - meanVecFinal)/linearPart 

1085 a2.plot(meanVecFinal, fracLinRes, c='g') 

1086 a2.set_title(f"{amp}", fontsize=titleFontSize) 

1087 

1088 f.suptitle("Linearity \n Fit: Polynomial (degree: %g)" 

1089 % (len(pars)-1), 

1090 fontsize=supTitleFontSize) 

1091 f2.suptitle(r"Fractional NL residual" + "\n" + 

1092 r"$100\times \frac{(k_0 + k_1*Time-\mu)}{k_0+k_1*Time}$", 

1093 fontsize=supTitleFontSize) 

1094 pdfPages.savefig(f) 

1095 pdfPages.savefig(f2) 

1096 

1097 @staticmethod 

1098 def findGroups(x, maxDiff): 

1099 """Group data into bins, with at most maxDiff distance between bins. 

1100 

1101 Parameters 

1102 ---------- 

1103 x: `list` 

1104 Data to bin. 

1105 

1106 maxDiff: `int` 

1107 Maximum distance between a value and the running mean of its bin.

1108 

1109 Returns 

1110 ------- 

1111 index: `list` 

1112 Bin indices. 

1113 """ 

1114 ix = np.argsort(x) 

1115 xsort = np.sort(x) 

1116 index = np.zeros_like(x, dtype=np.int32) 

1117 xc = xsort[0] 

1118 group = 0 

1119 ng = 1 

1120 

1121 for i in range(1, len(ix)): 

1122 xval = xsort[i] 

1123 if (xval - xc < maxDiff): 

1124 xc = (ng*xc + xval)/(ng+1) 

1125 ng += 1 

1126 index[ix[i]] = group 

1127 else: 

1128 group += 1 

1129 ng = 1 

1130 index[ix[i]] = group 

1131 xc = xval 

1132 

1133 return index 

1134 
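# Example worked through the loop above: for x = [1., 2., 10., 11.] and
# maxDiff = 5, the values 1 and 2 fall in group 0 (running mean 1.5),
# 10 starts group 1 and 11 joins it, so findGroups returns [0, 0, 1, 1].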

1135 @staticmethod 

1136 def indexForBins(x, nBins): 

1137 """Builds an index with regular binning. The result can be fed into binData. 

1138 

1139 Parameters 

1140 ---------- 

1141 x: `numpy.array` 

1142 Data to bin. 

1143 nBins: `int` 

1144 Number of bins.

1145 

1146 Returns 

1147 ------- 

1148 np.digitize(x, bins): `numpy.array` 

1149 Bin indices. 

1150 """ 

1151 

1152 bins = np.linspace(x.min(), x.max() + abs(x.max() * 1e-7), nBins + 1) 

1153 return np.digitize(x, bins) 

1154 

1155 @staticmethod 

1156 def binData(x, y, binIndex, wy=None): 

1157 """Bin data (usually for display purposes). 

1158 

1159 Parameters

1160 ----------

1161 x: `numpy.array` 

1162 Data to bin. 

1163 

1164 y: `numpy.array` 

1165 Data to bin. 

1166 

1167 binIndex: `list`

1168 Bin number of each datum. 

1169 

1170 wy: `numpy.array` 

1171 Inverse rms of each datum to use when averaging (the actual weight is wy**2). 

1172 

1173 Returns

1174 -------

1175 

1176 xbin: `numpy.array` 

1177 Binned data in x. 

1178 

1179 ybin: `numpy.array` 

1180 Binned data in y. 

1181 

1182 wybin: `numpy.array` 

1183 Binned weights in y, computed from wy's in each bin. 

1184 

1185 sybin: `numpy.array` 

1186 Uncertainty on the bin average, considering actual scatter, and ignoring weights. 

1187 """ 

1188 

1189 if wy is None: 

1190 wy = np.ones_like(x) 

1191 binIndexSet = set(binIndex) 

1192 w2 = wy*wy 

1193 xw2 = x*(w2) 

1194 xbin = np.array([xw2[binIndex == i].sum()/w2[binIndex == i].sum() for i in binIndexSet]) 

1195 

1196 yw2 = y*w2 

1197 ybin = np.array([yw2[binIndex == i].sum()/w2[binIndex == i].sum() for i in binIndexSet]) 

1198 

1199 wybin = np.sqrt(np.array([w2[binIndex == i].sum() for i in binIndexSet])) 

1200 sybin = np.array([y[binIndex == i].std()/np.sqrt(np.array([binIndex == i]).sum()) 

1201 for i in binIndexSet]) 

1202 

1203 return xbin, ybin, wybin, sybin
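# Example (illustrative sketch): binning noisy PTC-like data for display with
# the two static helpers above. The numbers are made up.
# >>> import numpy as np
# >>> x = np.linspace(100., 50000., 60)
# >>> y = 0.8*x + np.random.normal(scale=20., size=x.size)
# >>> gind = PlotPhotonTransferCurveTask.indexForBins(x, 10)
# >>> xb, yb, wyb, syb = PlotPhotonTransferCurveTask.binData(x, y, gind)
# xb and yb are the (weighted) bin averages; wyb is the combined weight per
# bin and syb the scatter-based uncertainty on each bin average.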