Coverage for python/lsst/cp/pipe/ptc/plotPtc.py: 6%

594 statements  

1# This file is part of cp_pipe. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (https://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# This program is free software: you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation, either version 3 of the License, or 

12# (at your option) any later version. 

13# 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18# 

19# You should have received a copy of the GNU General Public License 

20# along with this program. If not, see <https://www.gnu.org/licenses/>. 

21# 

22 

23__all__ = ['PlotPhotonTransferCurveTask'] 

24 

25import logging 

26import numpy as np 

27import matplotlib.pyplot as plt 

28import matplotlib as mpl 

29from matplotlib import gridspec 

30import os 

31from matplotlib.backends.backend_pdf import PdfPages 

32 

33import lsst.ip.isr as isr 

34 

35from lsst.cp.pipe.utils import (funcAstier, funcPolynomial, 

36 calculateWeightedReducedChi2) 

37from matplotlib.ticker import MaxNLocator 

38from lsst.cp.pipe.ptc.astierCovPtcUtils import getFitDataFromCovariances 

39from lsst.ip.isr import PhotonTransferCurveDataset 

40 

41 

42class PlotPhotonTransferCurveTask(): 

43 """A class to plot the dataset from MeasurePhotonTransferCurveTask. 

44 

45 Parameters 

46 ---------- 

47 

48 datasetFilename : `str` 

49 datasetPtc (lsst.ip.isr.PhotonTransferCurveDataset) file 

50 name (fits). 

51 

52 linearizerFileName : `str`, optional 

53 linearizer (isr.linearize.Linearizer) file 

54 name (fits). 

55 

56 outDir : `str`, optional 

57 Path to the output directory where the final PDF will 

58 be placed. 

59 

60 detNum : `int`, optional 

61 Detector number. 

62 

63 signalElectronsRelativeA : `float`, optional 

64 Signal value for relative systematic bias between different 

65 methods of estimating a_ij (Fig. 15 of Astier+19). 

66 

67 plotNormalizedCovariancesNumberOfBins : `int`, optional 

68 Number of bins used in the `plotNormalizedCovariances` function 

69 (Figs. 8, 10, and 11 of Astier+19). 

70 

71 Notes 

72 ----- 

73 The plotting code in this file is almost identical to the code in 

74 `plotPtcGen2.py`. If further changes are implemented in this file, 

75 `plotPtcGen2.py` needs to be updated accordingly, and vice versa. 

76 The file `plotPtcGen2.py` helps with maintaining backwards 

77 compatibility with gen2 as we transition to gen3; the code 

78 duplication is meant to last for only a few months from now 

79 (Jan, 2021). At that point only this file, `plotPtc.py`, will 

80 remain. 

81 """ 

82 

83 def __init__(self, datasetFilename, linearizerFileName=None, 

84 outDir='.', detNum=999, signalElectronsRelativeA=75000, 

85 plotNormalizedCovariancesNumberOfBins=10): 

86 self.datasetFilename = datasetFilename 

87 self.linearizerFileName = linearizerFileName 

88 self.detNum = detNum 

89 self.signalElectronsRelativeA = signalElectronsRelativeA 

90 self.plotNormalizedCovariancesNumberOfBins = plotNormalizedCovariancesNumberOfBins 

91 self.outDir = outDir 

92 
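# A minimal usage sketch (added comment, not part of the original task code):
# construct the plotter from a PTC dataset FITS file written by
# MeasurePhotonTransferCurveTask and render the PDF. The file name and
# detector number below are hypothetical.
#
#     plotter = PlotPhotonTransferCurveTask("ptcDataset_det22.fits",
#                                           detNum=22, outDir="./ptc_plots")
#     plotter.runDataRef()  # writes ./ptc_plots/PTC_det22.pdf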

93 def runDataRef(self): 

94 """Run the Photon Transfer Curve (PTC) plotting measurement task. 

95 """ 

96 datasetFile = self.datasetFilename 

97 datasetPtc = PhotonTransferCurveDataset.readFits(datasetFile) 

98 

99 dirname = self.outDir 

100 if not os.path.exists(dirname): 

101 os.makedirs(dirname) 

102 

103 detNum = self.detNum 

104 filename = f"PTC_det{detNum}.pdf" 

105 filenameFull = os.path.join(dirname, filename) 

106 

107 if self.linearizerFileName: 

108 linearizer = isr.linearize.Linearizer.readFits(self.linearizerFileName) 

109 else: 

110 linearizer = None 

111 self.run(filenameFull, datasetPtc, linearizer=linearizer, log=logging.getLogger(__name__)) 

112 

113 return 

114 

115 def run(self, filenameFull, datasetPtc, linearizer=None, log=None): 

116 """Make the plots for the PTC task""" 

117 ptcFitType = datasetPtc.ptcFitType 

118 with PdfPages(filenameFull) as pdfPages: 

119 if ptcFitType in ["FULLCOVARIANCE", ]: 

120 self.covAstierMakeAllPlots(datasetPtc, pdfPages, log=log) 

121 elif ptcFitType in ["EXPAPPROXIMATION", "POLYNOMIAL"]: 

122 self._plotStandardPtc(datasetPtc, ptcFitType, pdfPages) 

123 else: 

124 raise RuntimeError(f"The input dataset had an invalid dataset.ptcFitType: {ptcFitType}. \n" 

125 "Options: 'FULLCOVARIANCE', EXPAPPROXIMATION, or 'POLYNOMIAL'.") 

126 if linearizer: 

127 self._plotLinearizer(datasetPtc, linearizer, pdfPages) 

128 

129 return 

130 
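# Hedged sketch (an assumption, not original code): `run` can also be called
# directly with an in-memory dataset, e.g. when the PTC dataset has already
# been read elsewhere. `datasetPtc` below is assumed to be a
# PhotonTransferCurveDataset with a valid `ptcFitType`; the constructor
# file name is unused in this path.
#
#     plotter = PlotPhotonTransferCurveTask("unused.fits")
#     plotter.run("/tmp/PTC_det22.pdf", datasetPtc, linearizer=None)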

131 def covAstierMakeAllPlots(self, dataset, pdfPages, 

132 log=None): 

133 """Make plots for MeasurePhotonTransferCurve task when 

134 doCovariancesAstier=True. 

135 

136 This function calls other functions that mostly reproduce the 

137 plots in Astier+19. Most of the code is ported from Pierre 

138 Astier's repository https://github.com/PierreAstier/bfptc 

139 

140 Parameters 

141 ---------- 

142 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset` 

143 The dataset containing the necessary information to 

144 produce the plots. 

145 

146 pdfPages : `matplotlib.backends.backend_pdf.PdfPages` 

147 PDF file where the plots will be saved. 

148 

149 log : `logging.Logger`, optional 

150 Logger to handle messages 

151 """ 

152 mu = dataset.finalMeans 

153 # dictionaries with ampNames as keys 

154 fullCovs = dataset.covariances 

155 fullCovsModel = dataset.covariancesModel 

156 fullCovWeights = dataset.covariancesSqrtWeights 

157 aDict = dataset.aMatrix 

158 bDict = dataset.bMatrix 

159 fullCovsNoB = dataset.covariances 

160 fullCovsModelNoB = dataset.covariancesModelNoB 

161 fullCovWeightsNoB = dataset.covariancesSqrtWeights 

162 aDictNoB = dataset.aMatrixNoB 

163 gainDict = dataset.gain 

164 noiseDict = dataset.noise 

165 

166 self.plotCovariances(mu, fullCovs, fullCovsModel, fullCovWeights, fullCovsNoB, fullCovsModelNoB, 

167 fullCovWeightsNoB, gainDict, noiseDict, aDict, bDict, pdfPages) 

168 self.plotNormalizedCovariances(0, 0, mu, fullCovs, fullCovsModel, fullCovWeights, fullCovsNoB, 

169 fullCovsModelNoB, fullCovWeightsNoB, pdfPages, 

170 offset=0.01, topPlot=True, 

171 numberOfBins=self.plotNormalizedCovariancesNumberOfBins, 

172 log=log) 

173 self.plotNormalizedCovariances(0, 1, mu, fullCovs, fullCovsModel, fullCovWeights, fullCovsNoB, 

174 fullCovsModelNoB, fullCovWeightsNoB, pdfPages, 

175 numberOfBins=self.plotNormalizedCovariancesNumberOfBins, 

176 log=log) 

177 self.plotNormalizedCovariances(1, 0, mu, fullCovs, fullCovsModel, fullCovWeights, fullCovsNoB, 

178 fullCovsModelNoB, fullCovWeightsNoB, pdfPages, 

179 numberOfBins=self.plotNormalizedCovariancesNumberOfBins, 

180 log=log) 

181 self.plot_a_b(aDict, bDict, pdfPages) 

182 self.ab_vs_dist(aDict, bDict, pdfPages, bRange=4) 

183 self.plotAcoeffsSum(aDict, bDict, pdfPages) 

184 self.plotRelativeBiasACoeffs(aDict, aDictNoB, fullCovsModel, fullCovsModelNoB, 

185 self.signalElectronsRelativeA, gainDict, pdfPages, maxr=4) 

186 

187 return 

188 

189 @staticmethod 

190 def plotCovariances(mu, covs, covsModel, covsWeights, covsNoB, covsModelNoB, covsWeightsNoB, 

191 gainDict, noiseDict, aDict, bDict, pdfPages): 

192 """Plot covariances and models: Cov00, Cov10, Cov01. 

193 

194 Figs. 6 and 7 of Astier+19 

195 

196 Parameters 

197 ---------- 

198 mu : `dict` [`str`, `list`] 

199 Dictionary keyed by amp name with mean signal values. 

200 

201 covs : `dict` [`str`, `list`] 

202 Dictionary keyed by amp names containing a list of measured 

203 covariances per mean flux. 

204 

205 covsModel : `dict` [`str`, `list`] 

206 Dictionary keyed by amp names containing the covariances 

207 model (Eq. 20 of Astier+19) per mean flux. 

208 

209 covsWeights : `dict` [`str`, `list`] 

210 Dictionary keyed by amp names containing the square roots of 

211 the covariance weights. 

212 

213 covsNoB : `dict` [`str`, `list`] 

214 Dictionary keyed by amp names containing a list of measured 

215 covariances per mean flux ('b'=0 in Astier+19). 

216 

217 covsModelNoB : `dict` [`str`, `list`] 

218 Dictionary keyed by amp names containing covariances model 

219 (with 'b'=0 in Eq. 20 of Astier+19) per mean flux. 

220 

221 covsWeightsNoB : `dict` [`str`, `list`] 

222 Dictionary keyed by amp names containing sqrt. of 

223 covariances weights ('b' = 0 in Eq. 20 of Astier+19). 

224 

225 gainDict : `dict` [`str`, `float`] 

226 Dictionary keyed by amp names containing the gains in e-/ADU. 

227 

228 noiseDict : `dict` [`str`, `float`] 

229 Dictionary keyed by amp names containing the rms readout 

230 noise in e-. 

231 

232 aDict : `dict` [`str`, `numpy.array`] 

233 Dictionary keyed by amp names containing 'a' coefficients 

234 (Eq. 20 of Astier+19). 

235 

236 bDict : `dict` [`str`, `numpy.array`] 

237 Dictionary keyed by amp names containing 'b' coefficients 

238 (Eq. 20 of Astier+19). 

239 

240 pdfPages : `matplotlib.backends.backend_pdf.PdfPages` 

241 PDF file where the plots will be saved. 

242 """ 

243 legendFontSize = 6.5 

244 labelFontSize = 7 

245 titleFontSize = 9 

246 supTitleFontSize = 18 

247 markerSize = 25 

248 

249 nAmps = len(covs) 

250 if nAmps == 2: 

251 nRows, nCols = 2, 1 

252 nRows = np.sqrt(nAmps) 

253 mantissa, _ = np.modf(nRows) 

254 if mantissa > 0: 

255 nRows = int(nRows) + 1 

256 nCols = nRows 

257 else: 

258 nRows = int(nRows) 

259 nCols = nRows 

260 
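# Added note on the grid sizing above: the number of panels per side is
# effectively ceil(sqrt(nAmps)), so e.g. nAmps = 16 gives a 4x4 grid and
# nAmps = 8 gives a 3x3 grid (with one unused panel).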

261 f, ax = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10)) 

262 f2, ax2 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10)) 

263 fResCov00, axResCov00 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', 

264 figsize=(13, 10)) 

265 fCov01, axCov01 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10)) 

266 fCov10, axCov10 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10)) 

267 

268 assert(len(covsModel) == nAmps) 

269 assert(len(covsWeights) == nAmps) 

270 

271 assert(len(covsNoB) == nAmps) 

272 assert(len(covsModelNoB) == nAmps) 

273 assert(len(covsWeightsNoB) == nAmps) 

274 

275 for i, (amp, a, a2, aResVar, a3, a4) in enumerate(zip(covs, ax.flatten(), 

276 ax2.flatten(), axResCov00.flatten(), 

277 axCov01.flatten(), axCov10.flatten())): 

278 

279 muAmp, cov, model, weight = mu[amp], covs[amp], covsModel[amp], covsWeights[amp] 

280 if not np.isnan(np.array(cov)).all(): # If all the entries are np.nan, this is a bad amp. 

281 aCoeffs, bCoeffs = np.array(aDict[amp]), np.array(bDict[amp]) 

282 gain, noise = gainDict[amp], noiseDict[amp] 

283 (meanVecFinal, varVecFinal, varVecModelFinal, 

284 varWeightsFinal, _) = getFitDataFromCovariances(0, 0, muAmp, cov, model, weight, 

285 returnMasked=True) 

286 

287 # Get weighted reduced chi2 

288 chi2FullModelVar = calculateWeightedReducedChi2(varVecFinal, varVecModelFinal, 

289 varWeightsFinal, len(meanVecFinal), 4) 

290 

291 (meanVecFinalCov01, varVecFinalCov01, varVecModelFinalCov01, 

292 _, _) = getFitDataFromCovariances(0, 1, muAmp, cov, model, weight, returnMasked=True) 

293 

294 (meanVecFinalCov10, varVecFinalCov10, varVecModelFinalCov10, 

295 _, _) = getFitDataFromCovariances(1, 0, muAmp, cov, model, weight, returnMasked=True) 

296 

297 # quadratic fit for residuals below 

298 par2 = np.polyfit(meanVecFinal, varVecFinal, 2, w=varWeightsFinal) 

299 varModelFinalQuadratic = np.polyval(par2, meanVecFinal) 

300 chi2QuadModelVar = calculateWeightedReducedChi2(varVecFinal, varModelFinalQuadratic, 

301 varWeightsFinal, len(meanVecFinal), 3) 

302 

303 # fit with no 'b' coefficient (c = a*b in Eq. 20 of Astier+19) 

304 covNoB, modelNoB, weightNoB = covsNoB[amp], covsModelNoB[amp], covsWeightsNoB[amp] 

305 (meanVecFinalNoB, varVecFinalNoB, varVecModelFinalNoB, 

306 varWeightsFinalNoB, _) = getFitDataFromCovariances(0, 0, muAmp, covNoB, modelNoB, 

307 weightNoB, returnMasked=True) 

308 

309 chi2FullModelNoBVar = calculateWeightedReducedChi2(varVecFinalNoB, varVecModelFinalNoB, 

310 varWeightsFinalNoB, len(meanVecFinalNoB), 

311 3) 

312 stringLegend = (f"Gain: {gain:.4} e/ADU \n" 

313 f"Noise: {noise:.4} e \n" 

314 + r"$a_{00}$: %.3e 1/e"%aCoeffs[0, 0] + "\n" 

315 + r"$b_{00}$: %.3e 1/e"%bCoeffs[0, 0] 

316 + f"\nLast in fit: {meanVecFinal[-1]:.7} ADU ") 

317 minMeanVecFinal = np.nanmin(meanVecFinal) 

318 maxMeanVecFinal = np.nanmax(meanVecFinal) 

319 deltaXlim = maxMeanVecFinal - minMeanVecFinal 

320 

321 a.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize) 

322 a.set_ylabel(r'Variance (ADU$^2$)', fontsize=labelFontSize) 

323 a.tick_params(labelsize=11) 

324 a.set_xscale('linear') 

325 a.set_yscale('linear') 

326 a.scatter(meanVecFinal, varVecFinal, c='blue', marker='o', s=markerSize) 

327 a.plot(meanVecFinal, varVecModelFinal, color='red', linestyle='-') 

328 a.text(0.03, 0.7, stringLegend, transform=a.transAxes, fontsize=legendFontSize) 

329 a.set_title(amp, fontsize=titleFontSize) 

330 a.set_xlim([minMeanVecFinal - 0.2*deltaXlim, maxMeanVecFinal + 0.2*deltaXlim]) 

331 

332 # Same as above, but in log-scale 

333 a2.set_xlabel(r'Mean Signal ($\mu$, ADU)', fontsize=labelFontSize) 

334 a2.set_ylabel(r'Variance (ADU$^2$)', fontsize=labelFontSize) 

335 a2.tick_params(labelsize=11) 

336 a2.set_xscale('log') 

337 a2.set_yscale('log') 

338 a2.plot(meanVecFinal, varVecModelFinal, color='red', linestyle='-') 

339 a2.scatter(meanVecFinal, varVecFinal, c='blue', marker='o', s=markerSize) 

340 a2.text(0.03, 0.7, stringLegend, transform=a2.transAxes, fontsize=legendFontSize) 

341 a2.set_title(amp, fontsize=titleFontSize) 

342 a2.set_xlim([minMeanVecFinal, maxMeanVecFinal]) 

343 

344 # Residuals var - model 

345 aResVar.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize) 

346 aResVar.set_ylabel(r'Residuals (ADU$^2$)', fontsize=labelFontSize) 

347 aResVar.tick_params(labelsize=11) 

348 aResVar.set_xscale('linear') 

349 aResVar.set_yscale('linear') 

350 aResVar.plot(meanVecFinal, varVecFinal - varVecModelFinal, color='blue', linestyle='-', 

351 label=r'Full fit ($\chi_{\rm{red}}^2$: %g)'%chi2FullModelVar) 

352 aResVar.plot(meanVecFinal, varVecFinal - varModelFinalQuadratic, color='red', linestyle='-', 

353 label=r'Quadratic fit ($\chi_{\rm{red}}^2$: %g)'%chi2QuadModelVar) 

354 aResVar.plot(meanVecFinalNoB, varVecFinalNoB - varVecModelFinalNoB, color='green', 

355 linestyle='-', 

356 label=r'Full fit (b=0) ($\chi_{\rm{red}}^2$: %g)'%chi2FullModelNoBVar) 

357 aResVar.axhline(color='black') 

358 aResVar.set_title(amp, fontsize=titleFontSize) 

359 aResVar.set_xlim([minMeanVecFinal - 0.2*deltaXlim, maxMeanVecFinal + 0.2*deltaXlim]) 

360 aResVar.legend(fontsize=7) 

361 

362 a3.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize) 

363 a3.set_ylabel(r'Cov01 (ADU$^2$)', fontsize=labelFontSize) 

364 a3.tick_params(labelsize=11) 

365 a3.set_xscale('linear') 

366 a3.set_yscale('linear') 

367 a3.scatter(meanVecFinalCov01, varVecFinalCov01, c='blue', marker='o', s=markerSize) 

368 a3.plot(meanVecFinalCov01, varVecModelFinalCov01, color='red', linestyle='-') 

369 a3.set_title(amp, fontsize=titleFontSize) 

370 a3.set_xlim([minMeanVecFinal - 0.2*deltaXlim, maxMeanVecFinal + 0.2*deltaXlim]) 

371 

372 a4.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize) 

373 a4.set_ylabel(r'Cov10 (ADU$^2$)', fontsize=labelFontSize) 

374 a4.tick_params(labelsize=11) 

375 a4.set_xscale('linear') 

376 a4.set_yscale('linear') 

377 a4.scatter(meanVecFinalCov10, varVecFinalCov10, c='blue', marker='o', s=markerSize) 

378 a4.plot(meanVecFinalCov10, varVecModelFinalCov10, color='red', linestyle='-') 

379 a4.set_title(amp, fontsize=titleFontSize) 

380 a4.set_xlim([minMeanVecFinal - 0.2*deltaXlim, maxMeanVecFinal + 0.2*deltaXlim]) 

381 

382 else: 

383 a.set_title(f"{amp} (BAD)", fontsize=titleFontSize) 

384 a2.set_title(f"{amp} (BAD)", fontsize=titleFontSize) 

385 a3.set_title(f"{amp} (BAD)", fontsize=titleFontSize) 

386 a4.set_title(f"{amp} (BAD)", fontsize=titleFontSize) 

387 

388 f.suptitle("PTC from covariances as in Astier+19 \n Fit: Eq. 20, Astier+19", 

389 fontsize=supTitleFontSize) 

390 pdfPages.savefig(f) 

391 f2.suptitle("PTC from covariances as in Astier+19 (log-log) \n Fit: Eq. 20, Astier+19", 

392 fontsize=supTitleFontSize) 

393 pdfPages.savefig(f2) 

394 fResCov00.suptitle("Residuals (data-model) for Cov00 (Var)", fontsize=supTitleFontSize) 

395 pdfPages.savefig(fResCov00) 

396 fCov01.suptitle("Cov01 as in Astier+19 (nearest parallel neighbor covariance) \n" 

397 " Fit: Eq. 20, Astier+19", fontsize=supTitleFontSize) 

398 pdfPages.savefig(fCov01) 

399 fCov10.suptitle("Cov10 as in Astier+19 (nearest serial neighbor covariance) \n" 

400 "Fit: Eq. 20, Astier+19", fontsize=supTitleFontSize) 

401 pdfPages.savefig(fCov10) 

402 

403 return 

404 

405 def plotNormalizedCovariances(self, i, j, inputMu, covs, covsModel, covsWeights, covsNoB, covsModelNoB, 

406 covsWeightsNoB, pdfPages, offset=0.004, 

407 numberOfBins=10, plotData=True, topPlot=False, log=None): 

408 """Plot C_ij/mu vs mu. 

409 

410 Figs. 8, 10, and 11 of Astier+19 

411 

412 Parameters 

413 ---------- 

414 i : `int` 

415 Covariance lag 

416 

417 j : `int` 

418 Covariance lag 

419 

420 inputMu : `dict` [`str`, `list`] 

421 Dictionary keyed by amp name with mean signal values. 

422 

423 covs : `dict` [`str`, `list`] 

424 Dictionary keyed by amp names containing a list of measured 

425 covariances per mean flux. 

426 

427 covsModel : `dict` [`str`, `list`] 

428 Dictionary keyed by amp names containing the covariances 

429 model (Eq. 20 of Astier+19) per mean flux. 

430 

431 covsWeights : `dict` [`str`, `list`] 

432 Dictionary keyed by amp names containing the square roots of 

433 the covariance weights. 

434 

435 covsNoB : `dict` [`str`, `list`] 

436 Dictionary keyed by amp names containing a list of measured 

437 covariances per mean flux ('b'=0 in Astier+19). 

438 

439 covsModelNoB : `dict` [`str`, `list`] 

440 Dictionary keyed by amp names containing covariances model 

441 (with 'b'=0 in Eq. 20 of Astier+19) per mean flux. 

442 

443 covsWeightsNoB : `dict` [`str`, `list`] 

444 Dictionary keyed by amp names containing sqrt. of 

445 covariances weights ('b' = 0 in Eq. 20 of Astier+19). 

446 


451 pdfPages : `matplotlib.backends.backend_pdf.PdfPages` 

452 PDF file where the plots will be saved. 

453 

454 offset : `float`, optional 

455 Constant offset factor to plot covariances in same panel 

456 (so they don't overlap). 

457 

458 numberOfBins : `int`, optional 

459 Number of bins for top and bottom plot. 

460 

461 plotData : `bool`, optional 

462 Plot the data points? 

463 

464 topPlot : `bool`, optional 

465 If `True`, plot only the top panel with the covariances; 

466 otherwise also plot the bottom panel with the model residuals. 

467 

468 log : `logging.Logger`, optional 

469 Logger to handle messages. 

470 """ 

471 if not topPlot: 

472 fig = plt.figure(figsize=(8, 10)) 

473 gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1]) 

474 gs.update(hspace=0) 

475 ax0 = plt.subplot(gs[0]) 

476 plt.setp(ax0.get_xticklabels(), visible=False) 

477 else: 

478 fig = plt.figure(figsize=(8, 8)) 

479 ax0 = plt.subplot(111) 

480 ax0.ticklabel_format(style='sci', axis='x', scilimits=(0, 0)) 

481 ax0.tick_params(axis='both', labelsize='x-large') 

482 mue, rese, wce = [], [], [] 

483 mueNoB, reseNoB, wceNoB = [], [], [] 

484 for counter, amp in enumerate(covs): 

485 muAmp, fullCov, fullCovModel, fullCovWeight = (inputMu[amp], covs[amp], covsModel[amp], 

486 covsWeights[amp]) 

487 if len(fullCov) == 0: 

488 continue 

489 mu, cov, model, weightCov, _ = getFitDataFromCovariances(i, j, muAmp, fullCov, fullCovModel, 

490 fullCovWeight, divideByMu=True, 

491 returnMasked=True) 

492 

493 mue += list(mu) 

494 rese += list(cov - model) 

495 wce += list(weightCov) 

496 

497 fullCovNoB, fullCovModelNoB, fullCovWeightNoB = (covsNoB[amp], covsModelNoB[amp], 

498 covsWeightsNoB[amp]) 

499 if len(fullCovNoB) == 0: 

500 continue 

501 (muNoB, covNoB, modelNoB, 

502 weightCovNoB, _) = getFitDataFromCovariances(i, j, muAmp, fullCovNoB, fullCovModelNoB, 

503 fullCovWeightNoB, divideByMu=True, 

504 returnMasked=True) 

505 

506 mueNoB += list(muNoB) 

507 reseNoB += list(covNoB - modelNoB) 

508 wceNoB += list(weightCovNoB) 

509 

510 # the corresponding fit 

511 fit_curve, = plt.plot(mu, model + counter*offset, '-', linewidth=4.0) 

512 # Binned plot; a bin count equal to len(mu) would mean no binning. 

513 gind = self.indexForBins(mu, numberOfBins) 

514 

515 xb, yb, wyb, sigyb = self.binData(mu, cov, gind, weightCov) 

516 plt.errorbar(xb, yb+counter*offset, yerr=sigyb, marker='o', linestyle='none', markersize=6.5, 

517 color=fit_curve.get_color(), label=f"{amp} (N: {len(mu)})") 

518 # plot the data 

519 if plotData: 

520 points, = plt.plot(mu, cov + counter*offset, '.', color=fit_curve.get_color()) 

521 plt.legend(loc='upper right', fontsize=8) 

522 # end loop on amps 

523 mue = np.array(mue) 

524 rese = np.array(rese) 

525 wce = np.array(wce) 

526 mueNoB = np.array(mueNoB) 

527 reseNoB = np.array(reseNoB) 

528 wceNoB = np.array(wceNoB) 

529 

530 plt.xlabel(r"$\mu (el)$", fontsize='x-large') 

531 plt.ylabel(r"$Cov{%d%d}/\mu + Cst (el)$"%(i, j), fontsize='x-large') 

532 if (not topPlot): 

533 gind = self.indexForBins(mue, numberOfBins) 

534 xb, yb, wyb, sigyb = self.binData(mue, rese, gind, wce) 

535 

536 ax1 = plt.subplot(gs[1], sharex=ax0) 

537 ax1.errorbar(xb, yb, yerr=sigyb, marker='o', linestyle='none', label='Full fit') 

538 gindNoB = self.indexForBins(mueNoB, numberOfBins) 

539 xb2, yb2, wyb2, sigyb2 = self.binData(mueNoB, reseNoB, gindNoB, wceNoB) 

540 

541 ax1.errorbar(xb2, yb2, yerr=sigyb2, marker='o', linestyle='none', label='b = 0') 

542 ax1.tick_params(axis='both', labelsize='x-large') 

543 plt.legend(loc='upper left', fontsize='large') 

544 # horizontal line at zero 

545 plt.plot(xb, [0]*len(xb), '--', color='k') 

546 plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0)) 

547 plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0)) 

548 plt.xlabel(r'$\mu (el)$', fontsize='x-large') 

549 plt.ylabel(r'$Cov{%d%d}/\mu$ -model (el)'%(i, j), fontsize='x-large') 

550 plt.tight_layout() 

551 plt.suptitle(f"Nbins: {numberOfBins}") 

552 # overlapping y labels: 

553 fig.canvas.draw() 

554 labels0 = [item.get_text() for item in ax0.get_yticklabels()] 

555 labels0[0] = u'' 

556 ax0.set_yticklabels(labels0) 

557 pdfPages.savefig(fig) 

558 

559 return 

560 

561 @staticmethod 

562 def plot_a_b(aDict, bDict, pdfPages, bRange=3): 

563 """Fig. 12 of Astier+19 

564 

565 Color display of the fitted 'a' and 'b' arrays, averaged over channels. 

566 

567 Parameters 

568 ---------- 

569 aDict : `dict` [`numpy.array`] 

570 Dictionary keyed by amp names containing the fitted 'a' 

571 coefficients from the model in Eq. 20 of Astier+19 (if 

572 `ptcFitType` is `FULLCOVARIANCE`). 

573 

574 bDict : `dict` [`numpy.array`] 

575 Dictionary keyed by amp names containing the fitted 'b' 

576 coefficients from the model in Eq. 20 of Astier+19 (if 

577 `ptcFitType` is `FULLCOVARIANCE`). 

578 

579 pdfPages : `matplotlib.backends.backend_pdf.PdfPages` 

580 PDF file where the plots will be saved. 

581 

582 bRange : `int` 

583 Maximum lag for b arrays. 

584 """ 

585 a, b = [], [] 

586 for amp in aDict: 

587 if np.isnan(aDict[amp]).all(): 

588 continue 

589 a.append(aDict[amp]) 

590 b.append(bDict[amp]) 

591 a = np.array(a).mean(axis=0) 

592 b = np.array(b).mean(axis=0) 

593 fig = plt.figure(figsize=(7, 11)) 

594 ax0 = fig.add_subplot(2, 1, 1) 

595 im0 = ax0.imshow(np.abs(a.transpose()), origin='lower', norm=mpl.colors.LogNorm()) 

596 ax0.tick_params(axis='both', labelsize='x-large') 

597 ax0.set_title(r'$|a|$', fontsize='x-large') 

598 ax0.xaxis.set_ticks_position('bottom') 

599 cb0 = plt.colorbar(im0) 

600 cb0.ax.tick_params(labelsize='x-large') 

601 

602 ax1 = fig.add_subplot(2, 1, 2) 

603 ax1.tick_params(axis='both', labelsize='x-large') 

604 ax1.yaxis.set_major_locator(MaxNLocator(integer=True)) 

605 ax1.xaxis.set_major_locator(MaxNLocator(integer=True)) 

606 im1 = ax1.imshow(1e6*b[:bRange, :bRange].transpose(), origin='lower') 

607 cb1 = plt.colorbar(im1) 

608 cb1.ax.tick_params(labelsize='x-large') 

609 ax1.set_title(r'$b \times 10^6$', fontsize='x-large') 

610 ax1.xaxis.set_ticks_position('bottom') 

611 plt.tight_layout() 

612 pdfPages.savefig(fig) 

613 

614 return 

615 

616 @staticmethod 

617 def ab_vs_dist(aDict, bDict, pdfPages, bRange=4): 

618 """Fig. 13 of Astier+19. 

619 

620 Values of the fitted 'a' and 'b' arrays, averaged over amplifiers, as a 

621 function of distance. 

622 

623 Parameters 

624 ---------- 

625 aDict : `dict` [`numpy.array`] 

626 Dictionary keyed by amp names containing the fitted 'a' 

627 coefficients from the model in Eq. 20 of Astier+19 (if 

628 `ptcFitType` is `FULLCOVARIANCE`). 

629 

630 bDict : `dict` [`numpy.array`] 

631 Dictionary keyed by amp names containing the fitted 'b' 

632 coefficients from the model in Eq. 20 of Astier+19 (if 

633 `ptcFitType` is `FULLCOVARIANCE`). 

634 

635 pdfPages : `matplotlib.backends.backend_pdf.PdfPages` 

636 PDF file where the plots will be saved. 

637 

638 bRange : `int` 

639 Maximum lag for b arrays. 

640 """ 

641 assert (len(aDict) == len(bDict)) 

642 a = [] 

643 for amp in aDict: 

644 if np.isnan(aDict[amp]).all(): 

645 continue 

646 a.append(aDict[amp]) 

647 a = np.array(a) 

648 y = a.mean(axis=0) 

649 sy = a.std(axis=0)/np.sqrt(len(aDict)) 

650 i, j = np.indices(y.shape) 

651 upper = (i >= j).ravel() 

652 r = np.sqrt(i**2 + j**2).ravel() 

653 y = y.ravel() 

654 sy = sy.ravel() 

655 fig = plt.figure(figsize=(6, 9)) 

656 ax = fig.add_subplot(211) 

657 ax.set_xlim([0.5, r.max()+1]) 

658 ax.errorbar(r[upper], y[upper], yerr=sy[upper], marker='o', linestyle='none', color='b', 

659 label='$i>=j$') 

660 ax.errorbar(r[~upper], y[~upper], yerr=sy[~upper], marker='o', linestyle='none', color='r', 

661 label='$i<j$') 

662 ax.legend(loc='upper center', fontsize='x-large') 

663 ax.set_xlabel(r'$\sqrt{i^2+j^2}$', fontsize='x-large') 

664 ax.set_ylabel(r'$a_{ij}$', fontsize='x-large') 

665 ax.set_yscale('log') 

666 ax.tick_params(axis='both', labelsize='x-large') 

667 

668 # 

669 axb = fig.add_subplot(212) 

670 b = [] 

671 for amp in bDict: 

672 if np.isnan(bDict[amp]).all(): 

673 continue 

674 b.append(bDict[amp]) 

675 b = np.array(b) 

676 yb = b.mean(axis=0) 

677 syb = b.std(axis=0)/np.sqrt(len(bDict)) 

678 ib, jb = np.indices(yb.shape) 

679 upper = (ib > jb).ravel() 

680 rb = np.sqrt(ib**2 + jb**2).ravel() 

681 yb = yb.ravel() 

682 syb = syb.ravel() 

683 xmin = -0.2 

684 xmax = bRange 

685 axb.set_xlim([xmin, xmax+0.2]) 

686 cutu = (rb > xmin) & (rb < xmax) & (upper) 

687 cutl = (rb > xmin) & (rb < xmax) & (~upper) 

688 axb.errorbar(rb[cutu], yb[cutu], yerr=syb[cutu], marker='o', linestyle='none', color='b', 

689 label='$i>=j$') 

690 axb.errorbar(rb[cutl], yb[cutl], yerr=syb[cutl], marker='o', linestyle='none', color='r', 

691 label='$i<j$') 

692 plt.legend(loc='upper center', fontsize='x-large') 

693 axb.set_xlabel(r'$\sqrt{i^2+j^2}$', fontsize='x-large') 

694 axb.set_ylabel(r'$b_{ij}$', fontsize='x-large') 

695 axb.ticklabel_format(style='sci', axis='y', scilimits=(0, 0)) 

696 axb.tick_params(axis='both', labelsize='x-large') 

697 plt.tight_layout() 

698 pdfPages.savefig(fig) 

699 

700 return 

701 

702 @staticmethod 

703 def plotAcoeffsSum(aDict, bDict, pdfPages): 

704 """Fig. 14. of Astier+19 

705 

706 Cumulative sum of a_ij as a function of maximum 

707 separation. This plot displays the average over channels. 

708 

709 Parameters 

710 ---------- 

711 aDict : `dict` [`numpy.array`] 

712 Dictionary keyed by amp names containing the fitted 'a' 

713 coefficients from the model in Eq. 20 of Astier+19 (if 

714 `ptcFitType` is `FULLCOVARIANCE`). 

715 

716 bDict : `dict` [`numpy.array`] 

717 Dictionary keyed by amp names containing the fitted 'b' 

718 coefficients from the model in Eq. 20 of Astier+19 (if 

719 `ptcFitType` is `FULLCOVARIANCE`). 

720 

721 pdfPages : `matplotlib.backends.backend_pdf.PdfPages` 

722 PDF file where the plots will be saved. 

723 """ 

724 assert (len(aDict) == len(bDict)) 

725 a, b = [], [] 

726 for amp in aDict: 

727 if np.isnan(aDict[amp]).all() or np.isnan(bDict[amp]).all(): 

728 continue 

729 a.append(aDict[amp]) 

730 b.append(bDict[amp]) 

731 a = np.array(a).mean(axis=0) 

732 b = np.array(b).mean(axis=0) 

733 fig = plt.figure(figsize=(7, 6)) 

734 w = 4*np.ones_like(a) 

735 w[0, 1:] = 2 

736 w[1:, 0] = 2 

737 w[0, 0] = 1 

738 wa = w*a 

739 indices = range(1, a.shape[0]+1) 

740 sums = [wa[0:n, 0:n].sum() for n in indices] 

741 ax = plt.subplot(111) 

742 ax.plot(indices, sums/sums[0], 'o', color='b') 

743 ax.set_yscale('log') 

744 ax.set_xlim(indices[0]-0.5, indices[-1]+0.5) 

745 ax.set_ylim(None, 1.2) 

746 ax.set_ylabel(r'$[\sum_{|i|<n\ &\ |j|<n} a_{ij}] / |a_{00}|$', fontsize='x-large') 

747 ax.set_xlabel('n', fontsize='x-large') 

748 ax.tick_params(axis='both', labelsize='x-large') 

749 plt.tight_layout() 

750 pdfPages.savefig(fig) 

751 

752 return 

753 

754 @staticmethod 

755 def plotRelativeBiasACoeffs(aDict, aDictNoB, fullCovsModel, fullCovsModelNoB, signalElectrons, 

756 gainDict, pdfPages, maxr=None): 

757 """Fig. 15 in Astier+19. 

758 

759 Illustrates systematic bias from estimating 'a' 

760 coefficients from the slope of correlations as opposed to the 

761 full model in Astier+19. 

762 

763 Parameters 

764 ---------- 

765 aDict : `dict` 

766 Dictionary of 'a' matrices (Eq. 20, Astier+19), with amp 

767 names as keys. 

768 

769 aDictNoB : `dict` 

770 Dictionary of 'a' matrices ('b'= 0 in Eq. 20, Astier+19), 

771 with amp names as keys. 

772 

773 fullCovsModel : `dict` [`str`, `list`] 

774 Dictionary keyed by amp names containing covariances model 

775 per mean flux. 

776 

777 fullCovsModelNoB : `dict` [`str`, `list`] 

778 Dictionary keyed by amp names containing covariances model 

779 (with 'b'=0 in Eq. 20 of Astier+19) per mean flux. 

780 

781 signalElectrons : `float` 

782 Signal at which to evaluate the a_ij coefficients. 

783 

784 pdfPages : `matplotlib.backends.backend_pdf.PdfPages` 

785 PDF file where the plots will be saved. 

786 

787 gainDict : `dict` [`str`, `float`] 

788 Dictionary keyed by amp names with the gains in e-/ADU. 

789 

790 maxr : `int`, optional 

791 Maximum lag. 

792 """ 

793 fig = plt.figure(figsize=(7, 11)) 

794 title = [f"'a' relative bias at {signalElectrons} e", "'a' relative bias (b=0)"] 

795 data = [(aDict, fullCovsModel), (aDictNoB, fullCovsModelNoB)] 

796 

797 for k, pair in enumerate(data): 

798 diffs = [] 

799 amean = [] 

800 for amp in pair[0]: 

801 covModel = np.array(pair[1][amp]) 

802 if np.isnan(covModel).all(): 

803 continue 

804 # Compute the "a" coefficients of the Antilogus+14 

805 # (1402.0725) model as in Guyonnet+15 (1501.01577, 

806 # eq. 16, the slope of cov/var at a given flux mu in 

807 # electrons). Eq. 16 of 1501.01577 is an approximation 

808 # to the more complete model in Astier+19 

809 # (1905.08677). 

810 var = covModel[0, 0, 0] # ADU^2 

811 # For a result in electrons^-1, we have to use mu in electrons 

812 aOld = covModel[0, :, :]/(var*signalElectrons) 

813 a = pair[0][amp] 

814 amean.append(a) 

815 diffs.append((aOld-a)) 

816 amean = np.array(amean).mean(axis=0) 

817 diff = np.array(diffs).mean(axis=0) 

818 diff = diff/amean 

819 diff = diff[:] 

820 # The difference should be close to zero 

821 diff[0, 0] = 0 

822 if maxr is None: 

823 maxr = diff.shape[0] 

824 diff = diff[:maxr, :maxr] 

825 ax0 = fig.add_subplot(2, 1, k+1) 

826 im0 = ax0.imshow(diff.transpose(), origin='lower') 

827 ax0.yaxis.set_major_locator(MaxNLocator(integer=True)) 

828 ax0.xaxis.set_major_locator(MaxNLocator(integer=True)) 

829 ax0.tick_params(axis='both', labelsize='x-large') 

830 plt.colorbar(im0) 

831 ax0.set_title(title[k]) 

832 

833 plt.tight_layout() 

834 pdfPages.savefig(fig) 

835 

836 return 

837 

838 def _plotStandardPtc(self, dataset, ptcFitType, pdfPages): 

839 """Plot PTC, var/signal vs signal, linearity, and linearity residual 

840 per amplifier. 

841 

842 Parameters 

843 ---------- 

844 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset` 

845 The dataset containing the means, variances, exposure 

846 times, and mask. 

847 

848 ptcFitType : `str` 

849 Type of the model fit to the PTC. Options: 

850 'EXPAPPROXIMATION' or 'POLYNOMIAL'. 

851 

852 pdfPages : `matplotlib.backends.backend_pdf.PdfPages` 

853 PDF file where the plots will be saved. 

854 """ 

855 if ptcFitType == 'EXPAPPROXIMATION': 

856 ptcFunc = funcAstier 

857 stringTitle = (r"Var = $\frac{1}{2g^2a_{00}}(\exp (2a_{00} \mu g) - 1) + \frac{n_{00}}{g^2}$ ") 

858 elif ptcFitType == 'POLYNOMIAL': 

859 ptcFunc = funcPolynomial 

860 for key in dataset.ptcFitPars: 

861 deg = len(dataset.ptcFitPars[key]) - 1 

862 break 

863 stringTitle = r"Polynomial (degree: %g)" % (deg) 

864 else: 

865 raise RuntimeError(f"The input dataset had an invalid dataset.ptcFitType: {ptcFitType}. \n" 

866 "Options: 'FULLCOVARIANCE', EXPAPPROXIMATION, or 'POLYNOMIAL'.") 

867 

868 legendFontSize = 6.5 

869 labelFontSize = 8 

870 titleFontSize = 9 

871 supTitleFontSize = 18 

872 markerSize = 25 

873 

874 # General determination of the size of the plot grid 

875 nAmps = len(dataset.ampNames) 

876 if nAmps == 2: 

877 nRows, nCols = 2, 1 

878 nRows = np.sqrt(nAmps) 

879 mantissa, _ = np.modf(nRows) 

880 if mantissa > 0: 

881 nRows = int(nRows) + 1 

882 nCols = nRows 

883 else: 

884 nRows = int(nRows) 

885 nCols = nRows 

886 

887 f, ax = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10)) 

888 f2, ax2 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10)) 

889 f3, ax3 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10)) 

890 

891 for i, (amp, a, a2, a3) in enumerate(zip(dataset.ampNames, ax.flatten(), ax2.flatten(), 

892 ax3.flatten())): 

893 meanVecOriginal = np.ravel(np.array(dataset.rawMeans[amp])) 

894 varVecOriginal = np.ravel(np.array(dataset.rawVars[amp])) 

895 mask = np.ravel(np.array(dataset.expIdMask[amp])) 

896 if np.sum(mask) == 0: # The whole amp is bad 

897 a.set_title(f"{amp} (BAD)", fontsize=titleFontSize) 

898 a2.set_title(f"{amp} (BAD)", fontsize=titleFontSize) 

899 a3.set_title(f"{amp} (BAD)", fontsize=titleFontSize) 

900 continue 

901 else: 

902 mask = mask.astype(bool) 

903 meanVecFinal = meanVecOriginal[mask] 

904 varVecFinal = varVecOriginal[mask] 

905 meanVecOutliers = meanVecOriginal[np.invert(mask)] 

906 varVecOutliers = varVecOriginal[np.invert(mask)] 

907 pars, parsErr = np.array(dataset.ptcFitPars[amp]), np.array(dataset.ptcFitParsError[amp]) 

908 ptcRedChi2 = dataset.ptcFitChiSq[amp] 

909 if ptcFitType == 'EXPAPPROXIMATION': 

910 if len(meanVecFinal): 

911 ptcA00, ptcA00error = pars[0], parsErr[0] 

912 ptcGain, ptcGainError = pars[1], parsErr[1] 

913 ptcNoise = np.sqrt((pars[2])) # pars[2] is in (e-)^2 

914 ptcNoiseAdu = ptcNoise*(1./ptcGain) 

915 ptcNoiseError = 0.5*(parsErr[2]/np.fabs(pars[2]))*np.sqrt(np.fabs(pars[2])) 

916 stringLegend = (f"a00: {ptcA00:.2e}+/-{ptcA00error:.2e} 1/e" 

917 f"\nGain: {ptcGain:.4}+/-{ptcGainError:.2e} e/ADU" 

918 f"\nNoise: {ptcNoise:.4}+/-{ptcNoiseError:.2e} e\n" 

919 r"$\chi^2_{\rm{red}}$: " + f"{ptcRedChi2:.4}" 

920 f"\nLast in fit: {meanVecFinal[-1]:.7} ADU ") 

921 

922 if ptcFitType == 'POLYNOMIAL': 

923 if len(meanVecFinal): 

924 ptcGain, ptcGainError = 1./pars[1], np.fabs(1./pars[1])*(parsErr[1]/pars[1]) 

925 ptcNoiseAdu = np.sqrt((pars[0])) # pars[0] is in ADU^2 

926 ptcNoise = ptcNoiseAdu*ptcGain 

927 ptcNoiseError = (0.5*(parsErr[0]/np.fabs(pars[0]))*(np.sqrt(np.fabs(pars[0]))))*ptcGain 

928 stringLegend = (f"Gain: {ptcGain:.4}+/-{ptcGainError:.2e} e/ADU\n" 

929 f"Noise: {ptcNoise:.4}+/-{ptcNoiseError:.2e} e\n" 

930 r"$\chi^2_{\rm{red}}$: " + f"{ptcRedChi2:.4}" 

931 f"\nLast in fit: {meanVecFinal[-1]:.7} ADU ") 

932 a.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize) 

933 a.set_ylabel(r'Variance (ADU$^2$)', fontsize=labelFontSize) 

934 a.tick_params(labelsize=11) 

935 a.set_xscale('linear') 

936 a.set_yscale('linear') 

937 

938 a2.set_xlabel(r'Mean Signal ($\mu$, ADU)', fontsize=labelFontSize) 

939 a2.set_ylabel(r'Variance (ADU$^2$)', fontsize=labelFontSize) 

940 a2.tick_params(labelsize=11) 

941 a2.set_xscale('log') 

942 a2.set_yscale('log') 

943 

944 a3.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize) 

945 a3.set_ylabel(r'Variance/$\mu$ (ADU)', fontsize=labelFontSize) 

946 a3.tick_params(labelsize=11) 

947 a3.set_xscale('log') 

948 a3.set_yscale('linear') 

949 minMeanVecFinal = np.nanmin(meanVecFinal) 

950 maxMeanVecFinal = np.nanmax(meanVecFinal) 

951 meanVecFit = np.linspace(minMeanVecFinal, maxMeanVecFinal, 100*len(meanVecFinal)) 

952 minMeanVecOriginal = np.nanmin(meanVecOriginal) 

953 maxMeanVecOriginal = np.nanmax(meanVecOriginal) 

954 deltaXlim = maxMeanVecOriginal - minMeanVecOriginal 

955 a.plot(meanVecFit, ptcFunc(pars, meanVecFit), color='red') 

956 a.plot(meanVecFinal, ptcNoiseAdu**2 + (1./ptcGain)*meanVecFinal, color='green', 

957 linestyle='--') 

958 a.scatter(meanVecFinal, varVecFinal, c='blue', marker='o', s=markerSize) 

959 a.scatter(meanVecOutliers, varVecOutliers, c='magenta', marker='s', s=markerSize) 

960 a.text(0.03, 0.66, stringLegend, transform=a.transAxes, fontsize=legendFontSize) 

961 a.set_title(amp, fontsize=titleFontSize) 

962 a.set_xlim([minMeanVecOriginal - 0.2*deltaXlim, maxMeanVecOriginal + 0.2*deltaXlim]) 

963 

964 # Same, but in log-scale 

965 a2.plot(meanVecFit, ptcFunc(pars, meanVecFit), color='red') 

966 a2.scatter(meanVecFinal, varVecFinal, c='blue', marker='o', s=markerSize) 

967 a2.scatter(meanVecOutliers, varVecOutliers, c='magenta', marker='s', s=markerSize) 

968 a2.text(0.03, 0.66, stringLegend, transform=a2.transAxes, fontsize=legendFontSize) 

969 a2.set_title(amp, fontsize=titleFontSize) 

970 a2.set_xlim([minMeanVecOriginal, maxMeanVecOriginal]) 

971 

972 # Var/mu vs mu 

973 a3.plot(meanVecFit, ptcFunc(pars, meanVecFit)/meanVecFit, color='red') 

974 a3.scatter(meanVecFinal, varVecFinal/meanVecFinal, c='blue', marker='o', s=markerSize) 

975 a3.scatter(meanVecOutliers, varVecOutliers/meanVecOutliers, c='magenta', marker='s', 

976 s=markerSize) 

977 a3.text(0.05, 0.1, stringLegend, transform=a3.transAxes, fontsize=legendFontSize) 

978 a3.set_title(amp, fontsize=titleFontSize) 

979 a3.set_xlim([minMeanVecOriginal - 0.2*deltaXlim, maxMeanVecOriginal + 0.2*deltaXlim]) 

980 f.suptitle("PTC \n Fit: " + stringTitle, fontsize=supTitleFontSize) 

981 pdfPages.savefig(f) 

982 f2.suptitle("PTC (log-log)", fontsize=supTitleFontSize) 

983 pdfPages.savefig(f2) 

984 f3.suptitle(r"Var/$\mu$", fontsize=supTitleFontSize) 

985 pdfPages.savefig(f3) 

986 

987 return 

988 
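# For reference, a sketch restating the models plotted in _plotStandardPtc
# above (assuming the same parameter ordering used in the legend code:
# pars = [a00, gain, noiseSq] for 'EXPAPPROXIMATION'); the symbols are
# illustrative, not a new API:
#
#     var = (np.exp(2.0*a00*mu*gain) - 1.0)/(2.0*a00*gain**2) \
#           + noiseSq/gain**2          # 'EXPAPPROXIMATION' (Eq. 16, Astier+19)
#
# while 'POLYNOMIAL' evaluates funcPolynomial(pars, mu), a polynomial in mu
# with pars[0] the constant term (noise in ADU^2) and pars[1] = 1/gain.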

989 def _plotLinearizer(self, dataset, linearizer, pdfPages): 

990 """Plot linearity and linearity residual per amplifier 

991 

992 Parameters 

993 ---------- 

994 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset` 

995 The dataset containing the means, variances, exposure 

996 times, and mask. 

997 

998 linearizer : `lsst.ip.isr.Linearizer` 

999 Linearizer object. 

pdfPages : `matplotlib.backends.backend_pdf.PdfPages` 
PDF file where the plots will be saved. 

1000 """ 

1001 legendFontSize = 7 

1002 labelFontSize = 7 

1003 titleFontSize = 9 

1004 supTitleFontSize = 18 

1005 

1006 # General determination of the size of the plot grid 

1007 nAmps = len(dataset.ampNames) 

1008 if nAmps == 2: 

1009 nRows, nCols = 2, 1 

1010 nRows = np.sqrt(nAmps) 

1011 mantissa, _ = np.modf(nRows) 

1012 if mantissa > 0: 

1013 nRows = int(nRows) + 1 

1014 nCols = nRows 

1015 else: 

1016 nRows = int(nRows) 

1017 nCols = nRows 

1018 

1019 # Plot mean vs time (f1), and fractional residuals (f2) 

1020 f, ax = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10)) 

1021 f2, ax2 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10)) 

1022 for i, (amp, a, a2) in enumerate(zip(dataset.ampNames, ax.flatten(), ax2.flatten())): 

1023 mask = np.array(dataset.expIdMask[amp]) 

1024 if np.sum(mask) == 0: # Bad amp 

1025 a.set_title(f"{amp} (BAD)", fontsize=titleFontSize) 

1026 a2.set_title(f"{amp} (BAD)", fontsize=titleFontSize) 

1027 continue 

1028 else: 

1029 mask = mask.astype(bool) 

1030 meanVecFinal = np.array(dataset.rawMeans[amp])[mask] 

1031 timeVecFinal = np.array(dataset.rawExpTimes[amp])[mask] 

1032 

1033 a.set_xlabel('Time (sec)', fontsize=labelFontSize) 

1034 a.set_ylabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize) 

1035 a.tick_params(labelsize=labelFontSize) 

1036 a.set_xscale('linear') 

1037 a.set_yscale('linear') 

1038 

1039 a2.axhline(y=0, color='k') 

1040 a2.axvline(x=0, color='k', linestyle='-') 

1041 a2.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize) 

1042 a2.set_ylabel('Fractional nonlinearity (%)', fontsize=labelFontSize) 

1043 a2.tick_params(labelsize=labelFontSize) 

1044 a2.set_xscale('linear') 

1045 a2.set_yscale('linear') 

1046 

1047 pars, parsErr = linearizer.fitParams[amp], linearizer.fitParamsErr[amp] 

1048 k0, k0Error = pars[0], parsErr[0] 

1049 k1, k1Error = pars[1], parsErr[1] 

1050 k2, k2Error = pars[2], parsErr[2] 

1051 linRedChi2 = linearizer.fitChiSq[amp] 

1052 stringLegend = (f"k0: {k0:.4}+/-{k0Error:.2e} ADU\nk1: {k1:.4}+/-{k1Error:.2e} ADU/t" 

1053 f"\nk2: {k2:.2e}+/-{k2Error:.2e} ADU/t^2\n" 

1054 r"$\chi^2_{\rm{red}}$: " + f"{linRedChi2:.4}") 

1055 a.scatter(timeVecFinal, meanVecFinal) 

1056 a.plot(timeVecFinal, funcPolynomial(pars, timeVecFinal), color='red') 

1057 a.text(0.03, 0.75, stringLegend, transform=a.transAxes, fontsize=legendFontSize) 

1058 a.set_title(f"{amp}", fontsize=titleFontSize) 

1059 

1060 linearPart = k0 + k1*timeVecFinal 

1061 fracLinRes = 100*(linearPart - meanVecFinal)/linearPart 

1062 a2.plot(meanVecFinal, fracLinRes, c='g') 

1063 a2.set_title(f"{amp}", fontsize=titleFontSize) 

1064 

1065 f.suptitle("Linearity \n Fit: Polynomial (degree: %g)" 

1066 % (len(pars)-1), 

1067 fontsize=supTitleFontSize) 

1068 f2.suptitle(r"Fractional NL residual" "\n" 

1069 r"$100\times \frac{(k_0 + k_1*Time-\mu)}{k_0+k_1*Time}$", 

1070 fontsize=supTitleFontSize) 

1071 pdfPages.savefig(f) 

1072 pdfPages.savefig(f2) 

1073 

1074 @staticmethod 

1075 def findGroups(x, maxDiff): 

1076 """Group data into bins, with at most maxDiff distance between bins. 

1077 

1078 Parameters 

1079 ---------- 

1080 x : `list` 

1081 Data to bin. 

1082 

1083 maxDiff : `int` 

1084 Maximum distance between a point and the running bin mean before a new bin is started. 

1085 

1086 Returns 

1087 ------- 

1088 index : `list` 

1089 Bin indices. 

1090 """ 

1091 ix = np.argsort(x) 

1092 xsort = np.sort(x) 

1093 index = np.zeros_like(x, dtype=np.int32) 

1094 xc = xsort[0] 

1095 group = 0 

1096 ng = 1 

1097 

1098 for i in range(1, len(ix)): 

1099 xval = xsort[i] 

1100 if (xval - xc < maxDiff): 

1101 xc = (ng*xc + xval)/(ng+1) 

1102 ng += 1 

1103 index[ix[i]] = group 

1104 else: 

1105 group += 1 

1106 ng = 1 

1107 index[ix[i]] = group 

1108 xc = xval 

1109 

1110 return index 

1111 
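# Small worked example for findGroups (added comment; the values are
# illustrative): points within maxDiff of the running group mean share a
# group index.
#
#     PlotPhotonTransferCurveTask.findGroups([1.0, 1.1, 5.0, 5.2], maxDiff=1)
#     # -> array([0, 0, 1, 1], dtype=int32)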

1112 @staticmethod 

1113 def indexForBins(x, nBins): 

1114 """Builds an index with regular binning. The result can be fed into 

1115 binData. 

1116 

1117 Parameters 

1118 ---------- 

1119 x : `numpy.array` 

1120 Data to bin. 

1121 nBins : `int` 

1122 Number of bins. 

1123 

1124 Returns 

1125 ------- 

1126 index : `numpy.array` 

1127 Bin indices. 

1128 """ 

1129 bins = np.linspace(x.min(), x.max() + abs(x.max() * 1e-7), nBins + 1) 

1130 return np.digitize(x, bins) 

1131 

1132 @staticmethod 

1133 def binData(x, y, binIndex, wy=None): 

1134 """Bin data (usually for display purposes). 

1135 

1136 Parameters 

1137 ---------- 

1138 x : `numpy.array` 

1139 Data to bin. 

1140 

1141 y : `numpy.array` 

1142 Data to bin. 

1143 

1144 binIndex : `list` 

1145 Bin number of each datum. 

1146 

1147 wy : `numpy.array`, optional 

1148 Inverse rms of each datum to use when averaging (the 

1149 actual weight is wy**2). 

1150 

1151 Returns 

1152 ------- 

1153 xbin : `numpy.array` 

1154 Binned data in x. 

1155 

1156 ybin : `numpy.array` 

1157 Binned data in y. 

1158 

1159 wybin : `numpy.array` 

1160 Binned weights in y, computed from wy's in each bin. 

1161 

1162 sybin : `numpy.array` 

1163 Uncertainty on the bin average, considering actual 

1164 scatter, and ignoring weights. 

1165 """ 

1166 if wy is None: 

1167 wy = np.ones_like(x) 

1168 binIndexSet = set(binIndex) 

1169 w2 = wy*wy 

1170 xw2 = x*(w2) 

1171 xbin = np.array([xw2[binIndex == i].sum()/w2[binIndex == i].sum() for i in binIndexSet]) 

1172 

1173 yw2 = y*w2 

1174 ybin = np.array([yw2[binIndex == i].sum()/w2[binIndex == i].sum() for i in binIndexSet]) 

1175 

1176 wybin = np.sqrt(np.array([w2[binIndex == i].sum() for i in binIndexSet])) 

1177 sybin = np.array([y[binIndex == i].std()/np.sqrt(np.array([binIndex == i]).sum()) 

1178 for i in binIndexSet]) 

1179 

1180 return xbin, ybin, wybin, sybin
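# Illustrative use of the two binning helpers together (added comment, not
# part of the original module); the synthetic data below are hypothetical.
#
#     import numpy as np
#     x = np.linspace(0.0, 100.0, 50)
#     y = 2.0*x + np.random.normal(0.0, 1.0, 50)
#     gind = PlotPhotonTransferCurveTask.indexForBins(x, 5)
#     xb, yb, wyb, syb = PlotPhotonTransferCurveTask.binData(x, y, gind)
#     # xb, yb are the weighted bin means; syb is the scatter-based
#     # uncertainty on each bin mean.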