
# This file is part of cp_pipe.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#


__all__ = ['PlotPhotonTransferCurveTask']

import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import gridspec
import os
from matplotlib.backends.backend_pdf import PdfPages

import lsst.ip.isr as isr

from lsst.cp.pipe.utils import (funcAstier, funcPolynomial,
                                calculateWeightedReducedChi2)
from matplotlib.ticker import MaxNLocator
from lsst.cp.pipe.ptc.astierCovPtcUtils import getFitDataFromCovariances
from lsst.ip.isr import PhotonTransferCurveDataset
import lsst.log as lsstLog


class PlotPhotonTransferCurveTask():
    """A class to plot the dataset from MeasurePhotonTransferCurveTask.

    Parameters
    ----------
    datasetFilename : `str`
        datasetPtc (lsst.ip.isr.PhotonTransferCurveDataset) file
        name (fits).

    linearizerFileName : `str`, optional
        linearizer (isr.linearize.Linearizer) file
        name (fits).

    outDir : `str`, optional
        Path to the output directory where the final PDF will
        be placed.

    detNum : `int`, optional
        Detector number.

    signalElectronsRelativeA : `float`, optional
        Signal value for relative systematic bias between different
        methods of estimating a_ij (Fig. 15 of Astier+19).

    plotNormalizedCovariancesNumberOfBins : `float`, optional
        Number of bins in `plotNormalizedCovariancesNumber` function
        (Figs. 8 and 10 of Astier+19).

    Notes
    -----
    The plotting code in this file is almost identical to the code in
    `plotPtcGen2.py`. If further changes are implemented in this file,
    `plotPtcGen2.py` needs to be updated accordingly, and vice versa.
    The file `plotPtcGen2.py` helps with maintaining backwards
    compatibility with gen2 as we transition to gen3; the code
    duplication is meant to last only for a few months from now
    (Jan, 2021). At that point only this file, `plotPtc.py`, will
    remain.
    """


    def __init__(self, datasetFilename, linearizerFileName=None,
                 outDir='.', detNum=999, signalElectronsRelativeA=75000,
                 plotNormalizedCovariancesNumberOfBins=10):
        self.datasetFilename = datasetFilename
        self.linearizerFileName = linearizerFileName
        self.detNum = detNum
        self.signalElectronsRelativeA = signalElectronsRelativeA
        self.plotNormalizedCovariancesNumberOfBins = plotNormalizedCovariancesNumberOfBins
        self.outDir = outDir


    def runDataRef(self):
        """Run the Photon Transfer Curve (PTC) plotting measurement task.
        """

        datasetFile = self.datasetFilename
        datasetPtc = PhotonTransferCurveDataset.readFits(datasetFile)

        dirname = self.outDir
        if not os.path.exists(dirname):
            os.makedirs(dirname)

        detNum = self.detNum
        filename = f"PTC_det{detNum}.pdf"
        filenameFull = os.path.join(dirname, filename)

        if self.linearizerFileName:
            linearizer = isr.linearize.Linearizer.readFits(self.linearizerFileName)
        else:
            linearizer = None
        self.run(filenameFull, datasetPtc, linearizer=linearizer, log=lsstLog)

        return


    def run(self, filenameFull, datasetPtc, linearizer=None, log=None):
        """Make the plots for the PTC task."""
        ptcFitType = datasetPtc.ptcFitType
        with PdfPages(filenameFull) as pdfPages:
            if ptcFitType in ["FULLCOVARIANCE", ]:
                self.covAstierMakeAllPlots(datasetPtc, pdfPages, log=log)
            elif ptcFitType in ["EXPAPPROXIMATION", "POLYNOMIAL"]:
                self._plotStandardPtc(datasetPtc, ptcFitType, pdfPages)
            else:
                raise RuntimeError(f"The input dataset had an invalid dataset.ptcFitType: {ptcFitType}. \n"
                                   "Options: 'FULLCOVARIANCE', 'EXPAPPROXIMATION', or 'POLYNOMIAL'.")
            if linearizer:
                self._plotLinearizer(datasetPtc, linearizer, pdfPages)

        return


    def covAstierMakeAllPlots(self, dataset, pdfPages,
                              log=None):
        """Make plots for MeasurePhotonTransferCurve task when doCovariancesAstier=True.

        This function calls other functions that mostly reproduce the plots in Astier+19.
        Most of the code is ported from Pierre Astier's repository https://github.com/PierreAstier/bfptc

        Parameters
        ----------
        dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
            The dataset containing the necessary information to produce the plots.

        pdfPages : `matplotlib.backends.backend_pdf.PdfPages`
            PDF file where the plots will be saved.

        log : `lsst.log.Log`, optional
            Logger to handle messages.
        """

        mu = dataset.finalMeans
        # dictionaries with ampNames as keys
        fullCovs = dataset.covariances
        fullCovsModel = dataset.covariancesModel
        fullCovWeights = dataset.covariancesSqrtWeights
        aDict = dataset.aMatrix
        bDict = dataset.bMatrix
        fullCovsNoB = dataset.covariances
        fullCovsModelNoB = dataset.covariancesModelNoB
        fullCovWeightsNoB = dataset.covariancesSqrtWeights
        aDictNoB = dataset.aMatrixNoB
        gainDict = dataset.gain
        noiseDict = dataset.noise

        self.plotCovariances(mu, fullCovs, fullCovsModel, fullCovWeights, fullCovsNoB, fullCovsModelNoB,
                             fullCovWeightsNoB, gainDict, noiseDict, aDict, bDict, pdfPages)
        self.plotNormalizedCovariances(0, 0, mu, fullCovs, fullCovsModel, fullCovWeights, fullCovsNoB,
                                       fullCovsModelNoB, fullCovWeightsNoB, pdfPages,
                                       offset=0.01, topPlot=True,
                                       numberOfBins=self.plotNormalizedCovariancesNumberOfBins,
                                       log=log)
        self.plotNormalizedCovariances(0, 1, mu, fullCovs, fullCovsModel, fullCovWeights, fullCovsNoB,
                                       fullCovsModelNoB, fullCovWeightsNoB, pdfPages,
                                       numberOfBins=self.plotNormalizedCovariancesNumberOfBins,
                                       log=log)
        self.plotNormalizedCovariances(1, 0, mu, fullCovs, fullCovsModel, fullCovWeights, fullCovsNoB,
                                       fullCovsModelNoB, fullCovWeightsNoB, pdfPages,
                                       numberOfBins=self.plotNormalizedCovariancesNumberOfBins,
                                       log=log)
        self.plot_a_b(aDict, bDict, pdfPages)
        self.ab_vs_dist(aDict, bDict, pdfPages, bRange=4)
        self.plotAcoeffsSum(aDict, bDict, pdfPages)
        self.plotRelativeBiasACoeffs(aDict, aDictNoB, fullCovsModel, fullCovsModelNoB,
                                     self.signalElectronsRelativeA, gainDict, pdfPages, maxr=4)

        return


    @staticmethod
    def plotCovariances(mu, covs, covsModel, covsWeights, covsNoB, covsModelNoB, covsWeightsNoB,
                        gainDict, noiseDict, aDict, bDict, pdfPages):
        """Plot covariances and models: Cov00, Cov10, Cov01.

        Figs. 6 and 7 of Astier+19

        Parameters
        ----------
        mu : `dict`, [`str`, `list`]
            Dictionary keyed by amp name with mean signal values.

        covs : `dict`, [`str`, `list`]
            Dictionary keyed by amp names containing a list of measured covariances per mean flux.

        covsModel : `dict`, [`str`, `list`]
            Dictionary keyed by amp names containing covariances model (Eq. 20 of Astier+19) per mean flux.

        covsWeights : `dict`, [`str`, `list`]
            Dictionary keyed by amp names containing sqrt. of covariances weights.

        covsNoB : `dict`, [`str`, `list`]
            Dictionary keyed by amp names containing a list of measured covariances per mean flux ('b'=0 in
            Astier+19).

        covsModelNoB : `dict`, [`str`, `list`]
            Dictionary keyed by amp names containing covariances model (with 'b'=0 in Eq. 20 of Astier+19)
            per mean flux.

        covsWeightsNoB : `dict`, [`str`, `list`]
            Dictionary keyed by amp names containing sqrt. of covariances weights ('b' = 0 in Eq. 20 of
            Astier+19).

        gainDict : `dict`, [`str`, `float`]
            Dictionary keyed by amp names containing the gains in e-/ADU.

        noiseDict : `dict`, [`str`, `float`]
            Dictionary keyed by amp names containing the rms readout noise in e-.

        aDict : `dict`, [`str`, `numpy.array`]
            Dictionary keyed by amp names containing 'a' coefficients (Eq. 20 of Astier+19).

        bDict : `dict`, [`str`, `numpy.array`]
            Dictionary keyed by amp names containing 'b' coefficients (Eq. 20 of Astier+19).

        pdfPages : `matplotlib.backends.backend_pdf.PdfPages`
            PDF file where the plots will be saved.
        """
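        # As used below and in plotRelativeBiasACoeffs, each per-amp entry of
        # covs/covsModel (and their NoB counterparts) is expected to be
        # indexable as [mean-flux point][i][j]; shapes are not checked here.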


        legendFontSize = 6.5
        labelFontSize = 7
        titleFontSize = 9
        supTitleFontSize = 18
        markerSize = 25

        nAmps = len(covs)
        if nAmps == 2:
            nRows, nCols = 2, 1
        nRows = np.sqrt(nAmps)
        mantissa, _ = np.modf(nRows)
        if mantissa > 0:
            nRows = int(nRows) + 1
            nCols = nRows
        else:
            nRows = int(nRows)
            nCols = nRows

        f, ax = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
        f2, ax2 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
        fResCov00, axResCov00 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row',
                                             figsize=(13, 10))
        fCov01, axCov01 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
        fCov10, axCov10 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))

        assert(len(covsModel) == nAmps)
        assert(len(covsWeights) == nAmps)

        assert(len(covsNoB) == nAmps)
        assert(len(covsModelNoB) == nAmps)
        assert(len(covsWeightsNoB) == nAmps)


        for i, (amp, a, a2, aResVar, a3, a4) in enumerate(zip(covs, ax.flatten(),
                                                              ax2.flatten(), axResCov00.flatten(),
                                                              axCov01.flatten(), axCov10.flatten())):

            muAmp, cov, model, weight = mu[amp], covs[amp], covsModel[amp], covsWeights[amp]
            if not np.isnan(np.array(cov)).all():  # If all the entries are np.nan, this is a bad amp.
                aCoeffs, bCoeffs = np.array(aDict[amp]), np.array(bDict[amp])
                gain, noise = gainDict[amp], noiseDict[amp]
                (meanVecFinal, varVecFinal, varVecModelFinal,
                 varWeightsFinal, _) = getFitDataFromCovariances(0, 0, muAmp, cov, model, weight,
                                                                 returnMasked=True)

                # Get weighted reduced chi2
                chi2FullModelVar = calculateWeightedReducedChi2(varVecFinal, varVecModelFinal,
                                                                varWeightsFinal, len(meanVecFinal), 4)


                (meanVecFinalCov01, varVecFinalCov01, varVecModelFinalCov01,
                 _, _) = getFitDataFromCovariances(0, 1, muAmp, cov, model, weight, returnMasked=True)

                (meanVecFinalCov10, varVecFinalCov10, varVecModelFinalCov10,
                 _, _) = getFitDataFromCovariances(1, 0, muAmp, cov, model, weight, returnMasked=True)


                # quadratic fit for residuals below
                par2 = np.polyfit(meanVecFinal, varVecFinal, 2, w=varWeightsFinal)
                varModelFinalQuadratic = np.polyval(par2, meanVecFinal)
                chi2QuadModelVar = calculateWeightedReducedChi2(varVecFinal, varModelFinalQuadratic,
                                                                varWeightsFinal, len(meanVecFinal), 3)

                # fit with no 'b' coefficient (c = a*b in Eq. 20 of Astier+19)
                covNoB, modelNoB, weightNoB = covsNoB[amp], covsModelNoB[amp], covsWeightsNoB[amp]
                (meanVecFinalNoB, varVecFinalNoB, varVecModelFinalNoB,
                 varWeightsFinalNoB, _) = getFitDataFromCovariances(0, 0, muAmp, covNoB, modelNoB,
                                                                    weightNoB, returnMasked=True)

                chi2FullModelNoBVar = calculateWeightedReducedChi2(varVecFinalNoB, varVecModelFinalNoB,
                                                                   varWeightsFinalNoB, len(meanVecFinalNoB),
                                                                   3)
                stringLegend = (f"Gain: {gain:.4} e/ADU \n"
                                f"Noise: {noise:.4} e \n"
                                + r"$a_{00}$: %.3e 1/e"%aCoeffs[0, 0] + "\n"
                                + r"$b_{00}$: %.3e 1/e"%bCoeffs[0, 0]
                                + f"\nLast in fit: {meanVecFinal[-1]:.7} ADU ")
                minMeanVecFinal = np.nanmin(meanVecFinal)
                maxMeanVecFinal = np.nanmax(meanVecFinal)
                deltaXlim = maxMeanVecFinal - minMeanVecFinal

                a.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
                a.set_ylabel(r'Variance (ADU$^2$)', fontsize=labelFontSize)
                a.tick_params(labelsize=11)
                a.set_xscale('linear')
                a.set_yscale('linear')
                a.scatter(meanVecFinal, varVecFinal, c='blue', marker='o', s=markerSize)
                a.plot(meanVecFinal, varVecModelFinal, color='red', linestyle='-')
                a.text(0.03, 0.7, stringLegend, transform=a.transAxes, fontsize=legendFontSize)
                a.set_title(amp, fontsize=titleFontSize)
                a.set_xlim([minMeanVecFinal - 0.2*deltaXlim, maxMeanVecFinal + 0.2*deltaXlim])

                # Same as above, but in log-scale
                a2.set_xlabel(r'Mean Signal ($\mu$, ADU)', fontsize=labelFontSize)
                a2.set_ylabel(r'Variance (ADU$^2$)', fontsize=labelFontSize)
                a2.tick_params(labelsize=11)
                a2.set_xscale('log')
                a2.set_yscale('log')
                a2.plot(meanVecFinal, varVecModelFinal, color='red', linestyle='-')
                a2.scatter(meanVecFinal, varVecFinal, c='blue', marker='o', s=markerSize)
                a2.text(0.03, 0.7, stringLegend, transform=a2.transAxes, fontsize=legendFontSize)
                a2.set_title(amp, fontsize=titleFontSize)
                a2.set_xlim([minMeanVecFinal, maxMeanVecFinal])

                # Residuals var - model
                aResVar.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
                aResVar.set_ylabel(r'Residuals (ADU$^2$)', fontsize=labelFontSize)
                aResVar.tick_params(labelsize=11)
                aResVar.set_xscale('linear')
                aResVar.set_yscale('linear')
                aResVar.plot(meanVecFinal, varVecFinal - varVecModelFinal, color='blue', linestyle='-',
                             label=r'Full fit ($\chi_{\rm{red}}^2$: %g)'%chi2FullModelVar)
                aResVar.plot(meanVecFinal, varVecFinal - varModelFinalQuadratic, color='red', linestyle='-',
                             label=r'Quadratic fit ($\chi_{\rm{red}}^2$: %g)'%chi2QuadModelVar)
                aResVar.plot(meanVecFinalNoB, varVecFinalNoB - varVecModelFinalNoB, color='green',
                             linestyle='-',
                             label=r'Full fit (b=0) ($\chi_{\rm{red}}^2$: %g)'%chi2FullModelNoBVar)
                aResVar.axhline(color='black')
                aResVar.set_title(amp, fontsize=titleFontSize)
                aResVar.set_xlim([minMeanVecFinal - 0.2*deltaXlim, maxMeanVecFinal + 0.2*deltaXlim])
                aResVar.legend(fontsize=7)

                a3.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
                a3.set_ylabel(r'Cov01 (ADU$^2$)', fontsize=labelFontSize)
                a3.tick_params(labelsize=11)
                a3.set_xscale('linear')
                a3.set_yscale('linear')
                a3.scatter(meanVecFinalCov01, varVecFinalCov01, c='blue', marker='o', s=markerSize)
                a3.plot(meanVecFinalCov01, varVecModelFinalCov01, color='red', linestyle='-')
                a3.set_title(amp, fontsize=titleFontSize)
                a3.set_xlim([minMeanVecFinal - 0.2*deltaXlim, maxMeanVecFinal + 0.2*deltaXlim])

                a4.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
                a4.set_ylabel(r'Cov10 (ADU$^2$)', fontsize=labelFontSize)
                a4.tick_params(labelsize=11)
                a4.set_xscale('linear')
                a4.set_yscale('linear')
                a4.scatter(meanVecFinalCov10, varVecFinalCov10, c='blue', marker='o', s=markerSize)
                a4.plot(meanVecFinalCov10, varVecModelFinalCov10, color='red', linestyle='-')
                a4.set_title(amp, fontsize=titleFontSize)
                a4.set_xlim([minMeanVecFinal - 0.2*deltaXlim, maxMeanVecFinal + 0.2*deltaXlim])

            else:
                a.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
                a2.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
                a3.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
                a4.set_title(f"{amp} (BAD)", fontsize=titleFontSize)


        f.suptitle("PTC from covariances as in Astier+19 \n Fit: Eq. 20, Astier+19",
                   fontsize=supTitleFontSize)
        pdfPages.savefig(f)
        f2.suptitle("PTC from covariances as in Astier+19 (log-log) \n Fit: Eq. 20, Astier+19",
                    fontsize=supTitleFontSize)
        pdfPages.savefig(f2)
        fResCov00.suptitle("Residuals (data-model) for Cov00 (Var)", fontsize=supTitleFontSize)
        pdfPages.savefig(fResCov00)
        fCov01.suptitle("Cov01 as in Astier+19 (nearest parallel neighbor covariance) \n"
                        " Fit: Eq. 20, Astier+19", fontsize=supTitleFontSize)
        pdfPages.savefig(fCov01)
        fCov10.suptitle("Cov10 as in Astier+19 (nearest serial neighbor covariance) \n"
                        "Fit: Eq. 20, Astier+19", fontsize=supTitleFontSize)
        pdfPages.savefig(fCov10)

        return


    def plotNormalizedCovariances(self, i, j, inputMu, covs, covsModel, covsWeights, covsNoB, covsModelNoB,
                                  covsWeightsNoB, pdfPages, offset=0.004,
                                  numberOfBins=10, plotData=True, topPlot=False, log=None):
        """Plot C_ij/mu vs mu.

        Figs. 8, 10, and 11 of Astier+19

        Parameters
        ----------
        i : `int`
            Covariance lag.

        j : `int`
            Covariance lag.

        inputMu : `dict`, [`str`, `list`]
            Dictionary keyed by amp name with mean signal values.

        covs : `dict`, [`str`, `list`]
            Dictionary keyed by amp names containing a list of measured covariances per mean flux.

        covsModel : `dict`, [`str`, `list`]
            Dictionary keyed by amp names containing covariances model (Eq. 20 of Astier+19) per mean flux.

        covsWeights : `dict`, [`str`, `list`]
            Dictionary keyed by amp names containing sqrt. of covariances weights.

        covsNoB : `dict`, [`str`, `list`]
            Dictionary keyed by amp names containing a list of measured covariances per mean flux ('b'=0 in
            Astier+19).

        covsModelNoB : `dict`, [`str`, `list`]
            Dictionary keyed by amp names containing covariances model (with 'b'=0 in Eq. 20 of Astier+19)
            per mean flux.

        covsWeightsNoB : `dict`, [`str`, `list`]
            Dictionary keyed by amp names containing sqrt. of covariances weights ('b' = 0 in Eq. 20 of
            Astier+19).

        pdfPages : `matplotlib.backends.backend_pdf.PdfPages`
            PDF file where the plots will be saved.

        offset : `float`, optional
            Constant offset factor to plot covariances in same panel (so they don't overlap).

        numberOfBins : `int`, optional
            Number of bins for top and bottom plot.

        plotData : `bool`, optional
            Plot the data points?

        topPlot : `bool`, optional
            Plot the top plot with the covariances, and the bottom plot with the model residuals?

        log : `lsst.log.Log`, optional
            Logger to handle messages.
        """

        if not topPlot:
            fig = plt.figure(figsize=(8, 10))
            gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
            gs.update(hspace=0)
            ax0 = plt.subplot(gs[0])
            plt.setp(ax0.get_xticklabels(), visible=False)
        else:
            fig = plt.figure(figsize=(8, 8))
            ax0 = plt.subplot(111)
            ax0.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
        ax0.tick_params(axis='both', labelsize='x-large')
        mue, rese, wce = [], [], []
        mueNoB, reseNoB, wceNoB = [], [], []
        for counter, amp in enumerate(covs):
            muAmp, fullCov, fullCovModel, fullCovWeight = (inputMu[amp], covs[amp], covsModel[amp],
                                                           covsWeights[amp])
            if len(fullCov) == 0:
                continue
            mu, cov, model, weightCov, _ = getFitDataFromCovariances(i, j, muAmp, fullCov, fullCovModel,
                                                                     fullCovWeight, divideByMu=True,
                                                                     returnMasked=True)

            mue += list(mu)
            rese += list(cov - model)
            wce += list(weightCov)

            fullCovNoB, fullCovModelNoB, fullCovWeightNoB = (covsNoB[amp], covsModelNoB[amp],
                                                             covsWeightsNoB[amp])
            if len(fullCovNoB) == 0:
                continue
            (muNoB, covNoB, modelNoB,
             weightCovNoB, _) = getFitDataFromCovariances(i, j, muAmp, fullCovNoB, fullCovModelNoB,
                                                          fullCovWeightNoB, divideByMu=True,
                                                          returnMasked=True)

            mueNoB += list(muNoB)
            reseNoB += list(covNoB - modelNoB)
            wceNoB += list(weightCovNoB)

            # the corresponding fit
            fit_curve, = plt.plot(mu, model + counter*offset, '-', linewidth=4.0)
            # bin plot. len(mu) = no binning
            gind = self.indexForBins(mu, numberOfBins)

            xb, yb, wyb, sigyb = self.binData(mu, cov, gind, weightCov)
            plt.errorbar(xb, yb+counter*offset, yerr=sigyb, marker='o', linestyle='none', markersize=6.5,
                         color=fit_curve.get_color(), label=f"{amp} (N: {len(mu)})")
            # plot the data
            if plotData:
                points, = plt.plot(mu, cov + counter*offset, '.', color=fit_curve.get_color())
            plt.legend(loc='upper right', fontsize=8)
        # end loop on amps
        mue = np.array(mue)
        rese = np.array(rese)
        wce = np.array(wce)
        mueNoB = np.array(mueNoB)
        reseNoB = np.array(reseNoB)
        wceNoB = np.array(wceNoB)

        plt.xlabel(r"$\mu (el)$", fontsize='x-large')
        plt.ylabel(r"$Cov{%d%d}/\mu + Cst (el)$"%(i, j), fontsize='x-large')
        if (not topPlot):
            gind = self.indexForBins(mue, numberOfBins)
            xb, yb, wyb, sigyb = self.binData(mue, rese, gind, wce)

            ax1 = plt.subplot(gs[1], sharex=ax0)
            ax1.errorbar(xb, yb, yerr=sigyb, marker='o', linestyle='none', label='Full fit')
            gindNoB = self.indexForBins(mueNoB, numberOfBins)
            xb2, yb2, wyb2, sigyb2 = self.binData(mueNoB, reseNoB, gindNoB, wceNoB)

            ax1.errorbar(xb2, yb2, yerr=sigyb2, marker='o', linestyle='none', label='b = 0')
            ax1.tick_params(axis='both', labelsize='x-large')
            plt.legend(loc='upper left', fontsize='large')
            # horizontal line at zero
            plt.plot(xb, [0]*len(xb), '--', color='k')
            plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
            plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
            plt.xlabel(r'$\mu (el)$', fontsize='x-large')
            plt.ylabel(r'$Cov{%d%d}/\mu$ -model (el)'%(i, j), fontsize='x-large')
        plt.tight_layout()
        plt.suptitle(f"Nbins: {numberOfBins}")
        # overlapping y labels:
        fig.canvas.draw()
        labels0 = [item.get_text() for item in ax0.get_yticklabels()]
        labels0[0] = ''
        ax0.set_yticklabels(labels0)
        pdfPages.savefig(fig)

        return


    @staticmethod
    def plot_a_b(aDict, bDict, pdfPages, bRange=3):
        """Fig. 12 of Astier+19

        Color display of the fitted 'a' and 'b' arrays, averaged over channels.

        Parameters
        ----------
        aDict : `dict`, [`numpy.array`]
            Dictionary keyed by amp names containing the fitted 'a' coefficients from the model
            in Eq. 20 of Astier+19 (if `ptcFitType` is `FULLCOVARIANCE`).

        bDict : `dict`, [`numpy.array`]
            Dictionary keyed by amp names containing the fitted 'b' coefficients from the model
            in Eq. 20 of Astier+19 (if `ptcFitType` is `FULLCOVARIANCE`).

        pdfPages : `matplotlib.backends.backend_pdf.PdfPages`
            PDF file where the plots will be saved.

        bRange : `int`, optional
            Maximum lag for b arrays.
        """

        a, b = [], []
        for amp in aDict:
            if np.isnan(aDict[amp]).all():
                continue
            a.append(aDict[amp])
            b.append(bDict[amp])
        a = np.array(a).mean(axis=0)
        b = np.array(b).mean(axis=0)
        fig = plt.figure(figsize=(7, 11))
        ax0 = fig.add_subplot(2, 1, 1)
        im0 = ax0.imshow(np.abs(a.transpose()), origin='lower', norm=mpl.colors.LogNorm())
        ax0.tick_params(axis='both', labelsize='x-large')
        ax0.set_title(r'$|a|$', fontsize='x-large')
        ax0.xaxis.set_ticks_position('bottom')
        cb0 = plt.colorbar(im0)
        cb0.ax.tick_params(labelsize='x-large')

        ax1 = fig.add_subplot(2, 1, 2)
        ax1.tick_params(axis='both', labelsize='x-large')
        ax1.yaxis.set_major_locator(MaxNLocator(integer=True))
        ax1.xaxis.set_major_locator(MaxNLocator(integer=True))
        im1 = ax1.imshow(1e6*b[:bRange, :bRange].transpose(), origin='lower')
        cb1 = plt.colorbar(im1)
        cb1.ax.tick_params(labelsize='x-large')
        ax1.set_title(r'$b \times 10^6$', fontsize='x-large')
        ax1.xaxis.set_ticks_position('bottom')
        plt.tight_layout()
        pdfPages.savefig(fig)

        return


    @staticmethod
    def ab_vs_dist(aDict, bDict, pdfPages, bRange=4):
        """Fig. 13 of Astier+19.

        Values of the fitted 'a' and 'b' arrays, averaged over amplifiers, as a function of distance.

        Parameters
        ----------
        aDict : `dict`, [`numpy.array`]
            Dictionary keyed by amp names containing the fitted 'a' coefficients from the model
            in Eq. 20 of Astier+19 (if `ptcFitType` is `FULLCOVARIANCE`).

        bDict : `dict`, [`numpy.array`]
            Dictionary keyed by amp names containing the fitted 'b' coefficients from the model
            in Eq. 20 of Astier+19 (if `ptcFitType` is `FULLCOVARIANCE`).

        pdfPages : `matplotlib.backends.backend_pdf.PdfPages`
            PDF file where the plots will be saved.

        bRange : `int`, optional
            Maximum lag for b arrays.
        """

        assert (len(aDict) == len(bDict))
        a = []
        for amp in aDict:
            if np.isnan(aDict[amp]).all():
                continue
            a.append(aDict[amp])
        a = np.array(a)
        y = a.mean(axis=0)
        sy = a.std(axis=0)/np.sqrt(len(aDict))
        i, j = np.indices(y.shape)
        upper = (i >= j).ravel()
        r = np.sqrt(i**2 + j**2).ravel()
        y = y.ravel()
        sy = sy.ravel()
        fig = plt.figure(figsize=(6, 9))
        ax = fig.add_subplot(211)
        ax.set_xlim([0.5, r.max()+1])
        ax.errorbar(r[upper], y[upper], yerr=sy[upper], marker='o', linestyle='none', color='b',
                    label='$i>=j$')
        ax.errorbar(r[~upper], y[~upper], yerr=sy[~upper], marker='o', linestyle='none', color='r',
                    label='$i<j$')
        ax.legend(loc='upper center', fontsize='x-large')
        ax.set_xlabel(r'$\sqrt{i^2+j^2}$', fontsize='x-large')
        ax.set_ylabel(r'$a_{ij}$', fontsize='x-large')
        ax.set_yscale('log')
        ax.tick_params(axis='both', labelsize='x-large')

        #
        axb = fig.add_subplot(212)
        b = []
        for amp in bDict:
            if np.isnan(bDict[amp]).all():
                continue
            b.append(bDict[amp])
        b = np.array(b)
        yb = b.mean(axis=0)
        syb = b.std(axis=0)/np.sqrt(len(bDict))
        ib, jb = np.indices(yb.shape)
        upper = (ib > jb).ravel()
        rb = np.sqrt(i**2 + j**2).ravel()
        yb = yb.ravel()
        syb = syb.ravel()
        xmin = -0.2
        xmax = bRange
        axb.set_xlim([xmin, xmax+0.2])
        cutu = (r > xmin) & (r < xmax) & (upper)
        cutl = (r > xmin) & (r < xmax) & (~upper)
        axb.errorbar(rb[cutu], yb[cutu], yerr=syb[cutu], marker='o', linestyle='none', color='b',
                     label='$i>=j$')
        axb.errorbar(rb[cutl], yb[cutl], yerr=syb[cutl], marker='o', linestyle='none', color='r',
                     label='$i<j$')
        plt.legend(loc='upper center', fontsize='x-large')
        axb.set_xlabel(r'$\sqrt{i^2+j^2}$', fontsize='x-large')
        axb.set_ylabel(r'$b_{ij}$', fontsize='x-large')
        axb.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
        axb.tick_params(axis='both', labelsize='x-large')
        plt.tight_layout()
        pdfPages.savefig(fig)

        return


    @staticmethod
    def plotAcoeffsSum(aDict, bDict, pdfPages):
        """Fig. 14 of Astier+19

        Cumulative sum of a_ij as a function of maximum separation. This plot displays the average over
        channels.

        Parameters
        ----------
        aDict : `dict`, [`numpy.array`]
            Dictionary keyed by amp names containing the fitted 'a' coefficients from the model
            in Eq. 20 of Astier+19 (if `ptcFitType` is `FULLCOVARIANCE`).

        bDict : `dict`, [`numpy.array`]
            Dictionary keyed by amp names containing the fitted 'b' coefficients from the model
            in Eq. 20 of Astier+19 (if `ptcFitType` is `FULLCOVARIANCE`).

        pdfPages : `matplotlib.backends.backend_pdf.PdfPages`
            PDF file where the plots will be saved.
        """

        assert (len(aDict) == len(bDict))
        a, b = [], []
        for amp in aDict:
            if np.isnan(aDict[amp]).all() or np.isnan(bDict[amp]).all():
                continue
            a.append(aDict[amp])
            b.append(bDict[amp])
        a = np.array(a).mean(axis=0)
        b = np.array(b).mean(axis=0)
        fig = plt.figure(figsize=(7, 6))
        w = 4*np.ones_like(a)
        w[0, 1:] = 2
        w[1:, 0] = 2
        w[0, 0] = 1
        wa = w*a
        indices = range(1, a.shape[0]+1)
        sums = [wa[0:n, 0:n].sum() for n in indices]
        ax = plt.subplot(111)
        ax.plot(indices, sums/sums[0], 'o', color='b')
        ax.set_yscale('log')
        ax.set_xlim(indices[0]-0.5, indices[-1]+0.5)
        ax.set_ylim(None, 1.2)
        ax.set_ylabel(r'$[\sum_{|i|<n\ &\ |j|<n} a_{ij}] / |a_{00}|$', fontsize='x-large')
        ax.set_xlabel('n', fontsize='x-large')
        ax.tick_params(axis='both', labelsize='x-large')
        plt.tight_layout()
        pdfPages.savefig(fig)

        return


    @staticmethod
    def plotRelativeBiasACoeffs(aDict, aDictNoB, fullCovsModel, fullCovsModelNoB, signalElectrons,
                                gainDict, pdfPages, maxr=None):
        """Fig. 15 in Astier+19.

        Illustrates systematic bias from estimating 'a'
        coefficients from the slope of correlations as opposed to the
        full model in Astier+19.

        Parameters
        ----------
        aDict : `dict`
            Dictionary of 'a' matrices (Eq. 20, Astier+19), with amp names as keys.

        aDictNoB : `dict`
            Dictionary of 'a' matrices ('b' = 0 in Eq. 20, Astier+19), with amp names as keys.

        fullCovsModel : `dict`, [`str`, `list`]
            Dictionary keyed by amp names containing covariances model per mean flux.

        fullCovsModelNoB : `dict`, [`str`, `list`]
            Dictionary keyed by amp names containing covariances model (with 'b'=0 in Eq. 20 of
            Astier+19) per mean flux.

        signalElectrons : `float`
            Signal at which to evaluate the a_ij coefficients.

        gainDict : `dict`, [`str`, `float`]
            Dictionary keyed by amp names with the gains in e-/ADU.

        pdfPages : `matplotlib.backends.backend_pdf.PdfPages`
            PDF file where the plots will be saved.

        maxr : `int`, optional
            Maximum lag.
        """


        fig = plt.figure(figsize=(7, 11))
        title = [f"'a' relative bias at {signalElectrons} e", "'a' relative bias (b=0)"]
        data = [(aDict, fullCovsModel), (aDictNoB, fullCovsModelNoB)]

        for k, pair in enumerate(data):
            diffs = []
            amean = []
            for amp in pair[0]:
                covModel = np.array(pair[1][amp])
                if np.isnan(covModel).all():
                    continue
                # Compute the "a" coefficients of the Antilogus+14 (1402.0725) model as in
                # Guyonnet+15 (1501.01577, eq. 16, the slope of cov/var at a given flux mu in
                # electrons). Eq. 16 of 1501.01577 is an approximation to the more complete
                # model in Astier+19 (1905.08677).
                var = covModel[0, 0, 0]  # ADU^2
                # For a result in electrons^-1, we have to use mu in electrons
                aOld = covModel[0, :, :]/(var*signalElectrons)
                a = pair[0][amp]
                amean.append(a)
                diffs.append((aOld-a))
            amean = np.array(amean).mean(axis=0)
            diff = np.array(diffs).mean(axis=0)
            diff = diff/amean
            diff = diff[:]
            # The difference should be close to zero
            diff[0, 0] = 0
            if maxr is None:
                maxr = diff.shape[0]
            diff = diff[:maxr, :maxr]
            ax0 = fig.add_subplot(2, 1, k+1)
            im0 = ax0.imshow(diff.transpose(), origin='lower')
            ax0.yaxis.set_major_locator(MaxNLocator(integer=True))
            ax0.xaxis.set_major_locator(MaxNLocator(integer=True))
            ax0.tick_params(axis='both', labelsize='x-large')
            plt.colorbar(im0)
            ax0.set_title(title[k])

        plt.tight_layout()
        pdfPages.savefig(fig)

        return


    def _plotStandardPtc(self, dataset, ptcFitType, pdfPages):
        """Plot PTC, var/signal vs signal, linearity, and linearity residual per amplifier.

        Parameters
        ----------
        dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
            The dataset containing the means, variances, exposure times, and mask.

        ptcFitType : `str`
            Type of the model fit to the PTC. Options: 'FULLCOVARIANCE', 'EXPAPPROXIMATION', or 'POLYNOMIAL'.

        pdfPages : `matplotlib.backends.backend_pdf.PdfPages`
            PDF file where the plots will be saved.
        """


        if ptcFitType == 'EXPAPPROXIMATION':
            ptcFunc = funcAstier
            stringTitle = (r"Var = $\frac{1}{2g^2a_{00}}(\exp (2a_{00} \mu g) - 1) + \frac{n_{00}}{g^2}$ ")
        elif ptcFitType == 'POLYNOMIAL':
            ptcFunc = funcPolynomial
            for key in dataset.ptcFitPars:
                deg = len(dataset.ptcFitPars[key]) - 1
                break
            stringTitle = r"Polynomial (degree: %g)" % (deg)
        else:
            raise RuntimeError(f"The input dataset had an invalid dataset.ptcFitType: {ptcFitType}. \n"
                               "Options: 'FULLCOVARIANCE', 'EXPAPPROXIMATION', or 'POLYNOMIAL'.")

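        # As used below: for 'EXPAPPROXIMATION' the fitted parameters are
        # ordered [a00, gain, noise^2 (in e-^2)]; for 'POLYNOMIAL' they are the
        # polynomial coefficients [c0, c1, ...], with c0 ~ noise^2 (in ADU^2)
        # and c1 ~ 1/gain.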

        legendFontSize = 6.5
        labelFontSize = 8
        titleFontSize = 9
        supTitleFontSize = 18
        markerSize = 25

        # General determination of the size of the plot grid
        nAmps = len(dataset.ampNames)
        if nAmps == 2:
            nRows, nCols = 2, 1
        nRows = np.sqrt(nAmps)
        mantissa, _ = np.modf(nRows)
        if mantissa > 0:
            nRows = int(nRows) + 1
            nCols = nRows
        else:
            nRows = int(nRows)
            nCols = nRows

        f, ax = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
        f2, ax2 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
        f3, ax3 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))

        for i, (amp, a, a2, a3) in enumerate(zip(dataset.ampNames, ax.flatten(), ax2.flatten(),
                                                 ax3.flatten())):

            meanVecOriginal = np.ravel(np.array(dataset.rawMeans[amp]))
            varVecOriginal = np.ravel(np.array(dataset.rawVars[amp]))
            mask = np.ravel(np.array(dataset.expIdMask[amp]))
            if np.isnan(mask[0]):  # All NaNs: the whole amp is bad.
                a.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
                a2.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
                a3.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
                continue
            else:
                mask = mask.astype(bool)
            meanVecFinal = meanVecOriginal[mask]
            varVecFinal = varVecOriginal[mask]
            meanVecOutliers = meanVecOriginal[np.invert(mask)]
            varVecOutliers = varVecOriginal[np.invert(mask)]
            pars, parsErr = np.array(dataset.ptcFitPars[amp]), np.array(dataset.ptcFitParsError[amp])
            ptcRedChi2 = dataset.ptcFitChiSq[amp]
            if ptcFitType == 'EXPAPPROXIMATION':
                if len(meanVecFinal):
                    ptcA00, ptcA00error = pars[0], parsErr[0]
                    ptcGain, ptcGainError = pars[1], parsErr[1]
                    ptcNoise = np.sqrt((pars[2]))  # pars[2] is in (e-)^2
                    ptcNoiseAdu = ptcNoise*(1./ptcGain)
                    ptcNoiseError = 0.5*(parsErr[2]/np.fabs(pars[2]))*np.sqrt(np.fabs(pars[2]))
                    stringLegend = (f"a00: {ptcA00:.2e}+/-{ptcA00error:.2e} 1/e"
                                    f"\nGain: {ptcGain:.4}+/-{ptcGainError:.2e} e/ADU"
                                    f"\nNoise: {ptcNoise:.4}+/-{ptcNoiseError:.2e} e\n"
                                    r"$\chi^2_{\rm{red}}$: " + f"{ptcRedChi2:.4}"
                                    f"\nLast in fit: {meanVecFinal[-1]:.7} ADU ")

            if ptcFitType == 'POLYNOMIAL':
                if len(meanVecFinal):
                    ptcGain, ptcGainError = 1./pars[1], np.fabs(1./pars[1])*(parsErr[1]/pars[1])
                    ptcNoiseAdu = np.sqrt((pars[0]))  # pars[0] is in ADU^2
                    ptcNoise = ptcNoiseAdu*ptcGain
                    ptcNoiseError = (0.5*(parsErr[0]/np.fabs(pars[0]))*(np.sqrt(np.fabs(pars[0]))))*ptcGain
                    stringLegend = (f"Gain: {ptcGain:.4}+/-{ptcGainError:.2e} e/ADU\n"
                                    f"Noise: {ptcNoise:.4}+/-{ptcNoiseError:.2e} e\n"
                                    r"$\chi^2_{\rm{red}}$: " + f"{ptcRedChi2:.4}"
                                    f"\nLast in fit: {meanVecFinal[-1]:.7} ADU ")

            a.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
            a.set_ylabel(r'Variance (ADU$^2$)', fontsize=labelFontSize)
            a.tick_params(labelsize=11)
            a.set_xscale('linear')
            a.set_yscale('linear')

            a2.set_xlabel(r'Mean Signal ($\mu$, ADU)', fontsize=labelFontSize)
            a2.set_ylabel(r'Variance (ADU$^2$)', fontsize=labelFontSize)
            a2.tick_params(labelsize=11)
            a2.set_xscale('log')
            a2.set_yscale('log')

            a3.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
            a3.set_ylabel(r'Variance/$\mu$ (ADU)', fontsize=labelFontSize)
            a3.tick_params(labelsize=11)
            a3.set_xscale('log')
            a3.set_yscale('linear')

            minMeanVecFinal = np.nanmin(meanVecFinal)
            maxMeanVecFinal = np.nanmax(meanVecFinal)
            meanVecFit = np.linspace(minMeanVecFinal, maxMeanVecFinal, 100*len(meanVecFinal))
            minMeanVecOriginal = np.nanmin(meanVecOriginal)
            maxMeanVecOriginal = np.nanmax(meanVecOriginal)
            deltaXlim = maxMeanVecOriginal - minMeanVecOriginal
            a.plot(meanVecFit, ptcFunc(pars, meanVecFit), color='red')
            a.plot(meanVecFinal, ptcNoiseAdu**2 + (1./ptcGain)*meanVecFinal, color='green',
                   linestyle='--')
            a.scatter(meanVecFinal, varVecFinal, c='blue', marker='o', s=markerSize)
            a.scatter(meanVecOutliers, varVecOutliers, c='magenta', marker='s', s=markerSize)
            a.text(0.03, 0.66, stringLegend, transform=a.transAxes, fontsize=legendFontSize)
            a.set_title(amp, fontsize=titleFontSize)
            a.set_xlim([minMeanVecOriginal - 0.2*deltaXlim, maxMeanVecOriginal + 0.2*deltaXlim])

            # Same, but in log-scale
            a2.plot(meanVecFit, ptcFunc(pars, meanVecFit), color='red')
            a2.scatter(meanVecFinal, varVecFinal, c='blue', marker='o', s=markerSize)
            a2.scatter(meanVecOutliers, varVecOutliers, c='magenta', marker='s', s=markerSize)
            a2.text(0.03, 0.66, stringLegend, transform=a2.transAxes, fontsize=legendFontSize)
            a2.set_title(amp, fontsize=titleFontSize)
            a2.set_xlim([minMeanVecOriginal, maxMeanVecOriginal])

            # Var/mu vs mu
            a3.plot(meanVecFit, ptcFunc(pars, meanVecFit)/meanVecFit, color='red')
            a3.scatter(meanVecFinal, varVecFinal/meanVecFinal, c='blue', marker='o', s=markerSize)
            a3.scatter(meanVecOutliers, varVecOutliers/meanVecOutliers, c='magenta', marker='s',
                       s=markerSize)
            a3.text(0.05, 0.1, stringLegend, transform=a3.transAxes, fontsize=legendFontSize)
            a3.set_title(amp, fontsize=titleFontSize)
            a3.set_xlim([minMeanVecOriginal - 0.2*deltaXlim, maxMeanVecOriginal + 0.2*deltaXlim])

        f.suptitle("PTC \n Fit: " + stringTitle, fontsize=supTitleFontSize)
        pdfPages.savefig(f)
        f2.suptitle("PTC (log-log)", fontsize=supTitleFontSize)
        pdfPages.savefig(f2)
        f3.suptitle(r"Var/$\mu$", fontsize=supTitleFontSize)
        pdfPages.savefig(f3)

        return


    def _plotLinearizer(self, dataset, linearizer, pdfPages):
        """Plot linearity and linearity residual per amplifier.

        Parameters
        ----------
        dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
            The dataset containing the means, variances, exposure times, and mask.

        linearizer : `lsst.ip.isr.Linearizer`
            Linearizer object.

        pdfPages : `matplotlib.backends.backend_pdf.PdfPages`
            PDF file where the plots will be saved.
        """

        legendFontSize = 7
        labelFontSize = 7
        titleFontSize = 9
        supTitleFontSize = 18

        # General determination of the size of the plot grid
        nAmps = len(dataset.ampNames)
        if nAmps == 2:
            nRows, nCols = 2, 1
        nRows = np.sqrt(nAmps)
        mantissa, _ = np.modf(nRows)
        if mantissa > 0:
            nRows = int(nRows) + 1
            nCols = nRows
        else:
            nRows = int(nRows)
            nCols = nRows

        # Plot mean vs time (f1), and fractional residuals (f2)
        f, ax = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
        f2, ax2 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
        for i, (amp, a, a2) in enumerate(zip(dataset.ampNames, ax.flatten(), ax2.flatten())):
            mask = dataset.expIdMask[amp]
            if np.isnan(mask[0]):
                a.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
                a2.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
                continue
            else:
                mask = mask.astype(bool)
            meanVecFinal = np.array(dataset.rawMeans[amp])[mask]
            timeVecFinal = np.array(dataset.rawExpTimes[amp])[mask]

            a.set_xlabel('Time (sec)', fontsize=labelFontSize)
            a.set_ylabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
            a.tick_params(labelsize=labelFontSize)
            a.set_xscale('linear')
            a.set_yscale('linear')

            a2.axhline(y=0, color='k')
            a2.axvline(x=0, color='k', linestyle='-')
            a2.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
            a2.set_ylabel('Fractional nonlinearity (%)', fontsize=labelFontSize)
            a2.tick_params(labelsize=labelFontSize)
            a2.set_xscale('linear')
            a2.set_yscale('linear')

            pars, parsErr = linearizer.fitParams[amp], linearizer.fitParamsErr[amp]
            k0, k0Error = pars[0], parsErr[0]
            k1, k1Error = pars[1], parsErr[1]
            k2, k2Error = pars[2], parsErr[2]
            linRedChi2 = linearizer.fitChiSq[amp]
            stringLegend = (f"k0: {k0:.4}+/-{k0Error:.2e} ADU\nk1: {k1:.4}+/-{k1Error:.2e} ADU/t"
                            f"\nk2: {k2:.2e}+/-{k2Error:.2e} ADU/t^2\n"
                            r"$\chi^2_{\rm{red}}$: " + f"{linRedChi2:.4}")
            a.scatter(timeVecFinal, meanVecFinal)
            a.plot(timeVecFinal, funcPolynomial(pars, timeVecFinal), color='red')
            a.text(0.03, 0.75, stringLegend, transform=a.transAxes, fontsize=legendFontSize)
            a.set_title(f"{amp}", fontsize=titleFontSize)

            linearPart = k0 + k1*timeVecFinal
            fracLinRes = 100*(linearPart - meanVecFinal)/linearPart
            a2.plot(meanVecFinal, fracLinRes, c='g')
            a2.set_title(f"{amp}", fontsize=titleFontSize)

        f.suptitle("Linearity \n Fit: Polynomial (degree: %g)"
                   % (len(pars)-1),
                   fontsize=supTitleFontSize)
        f2.suptitle(r"Fractional NL residual" "\n"
                    r"$100\times \frac{(k_0 + k_1*Time-\mu)}{k_0+k_1*Time}$",
                    fontsize=supTitleFontSize)
        pdfPages.savefig(f)
        pdfPages.savefig(f2)


    @staticmethod
    def findGroups(x, maxDiff):
        """Group data into bins; a new bin is started when a value lies more than maxDiff
        away from the running mean of the current bin.

        Parameters
        ----------
        x : `list`
            Data to bin.

        maxDiff : `int`
            Maximum distance between a value and the running mean of its bin.

        Returns
        -------
        index : `numpy.array`
            Bin indices.
        """

        ix = np.argsort(x)
        xsort = np.sort(x)
        index = np.zeros_like(x, dtype=np.int32)
        xc = xsort[0]
        group = 0
        ng = 1

        for i in range(1, len(ix)):
            xval = xsort[i]
            if (xval - xc < maxDiff):
                xc = (ng*xc + xval)/(ng+1)
                ng += 1
                index[ix[i]] = group
            else:
                group += 1
                ng = 1
                index[ix[i]] = group
                xc = xval

        return index
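        # For example, with x = [1.0, 1.2, 5.0, 5.1] and maxDiff = 1, the
        # values 1.0 and 1.2 stay within maxDiff of their running group mean,
        # as do 5.0 and 5.1, so the returned index is [0, 0, 1, 1].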


    @staticmethod
    def indexForBins(x, nBins):
        """Builds an index with regular binning. The result can be fed into binData.

        Parameters
        ----------
        x : `numpy.array`
            Data to bin.
        nBins : `int`
            Number of bins.

        Returns
        -------
        np.digitize(x, bins) : `numpy.array`
            Bin indices.
        """

        bins = np.linspace(x.min(), x.max() + abs(x.max() * 1e-7), nBins + 1)
        return np.digitize(x, bins)
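        # Illustrative sketch of how indexForBins feeds binData (variable names
        # here are placeholders):
        #
        #     gind = PlotPhotonTransferCurveTask.indexForBins(mu, nBins=10)
        #     xb, yb, wyb, sigyb = PlotPhotonTransferCurveTask.binData(mu, cov, gind)
        #
        # This groups mu into 10 equal-width bins and then averages mu and cov
        # within each occupied bin.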


    @staticmethod
    def binData(x, y, binIndex, wy=None):
        """Bin data (usually for display purposes).

        Parameters
        ----------
        x : `numpy.array`
            Data to bin.

        y : `numpy.array`
            Data to bin.

        binIndex : `list`
            Bin number of each datum.

        wy : `numpy.array`, optional
            Inverse rms of each datum to use when averaging (the actual weight is wy**2).

        Returns
        -------
        xbin : `numpy.array`
            Binned data in x.

        ybin : `numpy.array`
            Binned data in y.

        wybin : `numpy.array`
            Binned weights in y, computed from wy's in each bin.

        sybin : `numpy.array`
            Uncertainty on the bin average, considering actual scatter, and ignoring weights.
        """

        if wy is None:
            wy = np.ones_like(x)
        binIndexSet = set(binIndex)
        w2 = wy*wy
        xw2 = x*(w2)
        xbin = np.array([xw2[binIndex == i].sum()/w2[binIndex == i].sum() for i in binIndexSet])

        yw2 = y*w2
        ybin = np.array([yw2[binIndex == i].sum()/w2[binIndex == i].sum() for i in binIndexSet])

        wybin = np.sqrt(np.array([w2[binIndex == i].sum() for i in binIndexSet]))
        sybin = np.array([y[binIndex == i].std()/np.sqrt(np.array([binIndex == i]).sum())
                          for i in binIndexSet])

        return xbin, ybin, wybin, sybin
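

if __name__ == "__main__":
    # Minimal usage sketch: build the task from a PTC dataset file and write
    # the diagnostic PDF. The file name, output directory, and detector number
    # below are placeholders; point them at a real
    # `lsst.ip.isr.PhotonTransferCurveDataset` FITS file before running.
    task = PlotPhotonTransferCurveTask("ptcDataset.fits",
                                       linearizerFileName=None,
                                       outDir=".",
                                       detNum=999)
    task.runDataRef()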