Hide keyboard shortcuts

Hot-keys on this page

r m x p   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

1# This file is part of cp_pipe. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (https://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# This program is free software: you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation, either version 3 of the License, or 

12# (at your option) any later version. 

13# 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18# 

19# You should have received a copy of the GNU General Public License 

20# along with this program. If not, see <https://www.gnu.org/licenses/>. 

21# 

22 

23__all__ = ['PlotPhotonTransferCurveTask'] 

24 

25import numpy as np 

26import matplotlib.pyplot as plt 

27import matplotlib as mpl 

28from matplotlib import gridspec 

29import os 

30from matplotlib.backends.backend_pdf import PdfPages 

31 

32import lsst.ip.isr as isr 

33import lsst.pex.config as pexConfig 

34import lsst.pipe.base as pipeBase 

35 

36from .utils import (funcAstier, funcPolynomial, NonexistentDatasetTaskDataIdContainer, 

37 calculateWeightedReducedChi2) 

38from matplotlib.ticker import MaxNLocator 

39 

40from .astierCovPtcFit import computeApproximateAcoeffs 

41from .astierCovPtcUtils import getFitDataFromCovariances 

42 

43from lsst.ip.isr import PhotonTransferCurveDataset 

44 

45 

46class PlotPhotonTransferCurveTaskConfig(pexConfig.Config): 

47 """Config class for photon transfer curve measurement task""" 

48 datasetFileName = pexConfig.Field( 

49 dtype=str, 

50 doc="datasetPtc file name (pkl)", 

51 default="", 

52 ) 

53 linearizerFileName = pexConfig.Field( 

54 dtype=str, 

55 doc="linearizer file name (fits)", 

56 default="", 

57 ) 

58 ccdKey = pexConfig.Field( 

59 dtype=str, 

60 doc="The key by which to pull a detector from a dataId, e.g. 'ccd' or 'detector'.", 

61 default='detector', 

62 ) 

63 signalElectronsRelativeA = pexConfig.Field( 

64 dtype=float, 

65 doc="Signal value for relative systematic bias between different methods of estimating a_ij " 

66 "(Fig. 15 of Astier+19).", 

67 default=75000, 

68 ) 

69 plotNormalizedCovariancesNumberOfBins = pexConfig.Field( 

70 dtype=int, 

71 doc="Number of bins in `plotNormalizedCovariancesNumber` function " 

72 "(Fig. 8, 10., of Astier+19).", 

73 default=10, 

74 ) 

75 

76 

77class PlotPhotonTransferCurveTask(pipeBase.CmdLineTask): 

78 """A class to plot the dataset from MeasurePhotonTransferCurveTask. 

79 

80 Parameters 

81 ---------- 

82 

83 *args: `list` 

84 Positional arguments passed to the Task constructor. None used at this 

85 time. 

86 **kwargs: `dict` 

87 Keyword arguments passed on to the Task constructor. None used at this 

88 time. 

89 

90 """ 

91 

92 ConfigClass = PlotPhotonTransferCurveTaskConfig 

93 _DefaultName = "plotPhotonTransferCurve" 

94 

95 def __init__(self, *args, **kwargs): 

96 pipeBase.CmdLineTask.__init__(self, *args, **kwargs) 

97 plt.interactive(False) # stop windows popping up when plotting. When headless, use 'agg' backend too 

98 self.config.validate() 

99 self.config.freeze() 

100 

101 @classmethod 

102 def _makeArgumentParser(cls): 

103 """Augment argument parser for the MeasurePhotonTransferCurveTask.""" 

104 parser = pipeBase.ArgumentParser(name=cls._DefaultName) 

105 parser.add_id_argument("--id", datasetType="photonTransferCurveDataset", 

106 ContainerClass=NonexistentDatasetTaskDataIdContainer, 

107 help="The ccds to use, e.g. --id ccd=0..100") 

108 return parser 

109 

110 @pipeBase.timeMethod 

111 def runDataRef(self, dataRef): 

112 """Run the Photon Transfer Curve (PTC) plotting measurement task. 

113 

114 Parameters 

115 ---------- 

116 dataRef : list of lsst.daf.persistence.ButlerDataRef 

117 dataRef for the detector for the expIds to be fit. 

118 """ 

119 

120 datasetFile = self.config.datasetFileName 

121 datasetPtc = PhotonTransferCurveDataset.readFits(datasetFile) 

122 

123 dirname = dataRef.getUri(datasetType='cpPipePlotRoot', write=True) 

124 if not os.path.exists(dirname): 

125 os.makedirs(dirname) 

126 

127 detNum = dataRef.dataId[self.config.ccdKey] 

128 filename = f"PTC_det{detNum}.pdf" 

129 filenameFull = os.path.join(dirname, filename) 

130 

131 if self.config.linearizerFileName: 

132 linearizer = isr.linearize.Linearizer.readFits(self.config.linearizerFileName) 

133 else: 

134 linearizer = None 

135 self.run(filenameFull, datasetPtc, linearizer=linearizer, log=self.log) 

136 

137 return pipeBase.Struct(exitStatus=0) 

138 

139 def run(self, filenameFull, datasetPtc, linearizer=None, log=None): 

140 """Make the plots for the PTC task""" 

141 ptcFitType = datasetPtc.ptcFitType 

142 with PdfPages(filenameFull) as pdfPages: 

143 if ptcFitType in ["FULLCOVARIANCE", ]: 

144 self.covAstierMakeAllPlots(datasetPtc, pdfPages, log=log) 

145 elif ptcFitType in ["EXPAPPROXIMATION", "POLYNOMIAL"]: 

146 self._plotStandardPtc(datasetPtc, ptcFitType, pdfPages) 

147 else: 

148 raise RuntimeError(f"The input dataset had an invalid dataset.ptcFitType: {ptcFitType}. \n" + 

149 "Options: 'FULLCOVARIANCE', EXPAPPROXIMATION, or 'POLYNOMIAL'.") 

150 if linearizer: 

151 self._plotLinearizer(datasetPtc, linearizer, pdfPages) 

152 

153 return 

154 

155 def covAstierMakeAllPlots(self, dataset, pdfPages, 

156 log=None): 

157 """Make plots for MeasurePhotonTransferCurve task when doCovariancesAstier=True. 

158 

159 This function call other functions that mostly reproduce the plots in Astier+19. 

160 Most of the code is ported from Pierre Astier's repository https://github.com/PierreAstier/bfptc 

161 

162 Parameters 

163 ---------- 

164 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset` 

165 The dataset containing the necessary information to produce the plots. 

166 

167 pdfPages: `matplotlib.backends.backend_pdf.PdfPages` 

168 PDF file where the plots will be saved. 

169 

170 log : `lsst.log.Log`, optional 

171 Logger to handle messages 

172 """ 

173 mu = dataset.rawMeans 

174 # dictionaries with ampNames as keys 

175 fullCovs = dataset.covariances 

176 fullCovsModel = dataset.covariancesModel 

177 fullCovWeights = dataset.covariancesSqrtWeights 

178 aDict = dataset.aMatrix 

179 bDict = dataset.bMatrix 

180 fullCovsNoB = dataset.covariancesNoB 

181 fullCovsModelNoB = dataset.covariancesModelNoB 

182 fullCovWeightsNoB = dataset.covariancesSqrtWeightsNoB 

183 aDictNoB = dataset.aMatrixNoB 

184 gainDict = dataset.gain 

185 noiseDict = dataset.noise 

186 

187 self.plotCovariances(mu, fullCovs, fullCovsModel, fullCovWeights, fullCovsNoB, fullCovsModelNoB, 

188 fullCovWeightsNoB, gainDict, noiseDict, aDict, bDict, pdfPages) 

189 self.plotNormalizedCovariances(0, 0, mu, fullCovs, fullCovsModel, fullCovWeights, fullCovsNoB, 

190 fullCovsModelNoB, fullCovWeightsNoB, pdfPages, offset=0.01, 

191 topPlot=True, 

192 numberOfBins=self.config.plotNormalizedCovariancesNumberOfBins, 

193 log=log) 

194 self.plotNormalizedCovariances(0, 1, mu, fullCovs, fullCovsModel, fullCovWeights, fullCovsNoB, 

195 fullCovsModelNoB, fullCovWeightsNoB, pdfPages, 

196 numberOfBins=self.config.plotNormalizedCovariancesNumberOfBins, 

197 log=log) 

198 self.plotNormalizedCovariances(1, 0, mu, fullCovs, fullCovsModel, fullCovWeights, fullCovsNoB, 

199 fullCovsModelNoB, fullCovWeightsNoB, pdfPages, 

200 numberOfBins=self.config.plotNormalizedCovariancesNumberOfBins, 

201 log=log) 

202 self.plot_a_b(aDict, bDict, pdfPages) 

203 self.ab_vs_dist(aDict, bDict, pdfPages, bRange=4) 

204 self.plotAcoeffsSum(aDict, bDict, pdfPages) 

205 self.plotRelativeBiasACoeffs(aDict, aDictNoB, fullCovsModel, fullCovsModelNoB, 

206 self.config.signalElectronsRelativeA, gainDict, pdfPages, maxr=4) 

207 

208 return 

209 

210 @staticmethod 

211 def plotCovariances(mu, covs, covsModel, covsWeights, covsNoB, covsModelNoB, covsWeightsNoB, 

212 gainDict, noiseDict, aDict, bDict, pdfPages): 

213 """Plot covariances and models: Cov00, Cov10, Cov01. 

214 

215 Figs. 6 and 7 of Astier+19 

216 

217 Parameters 

218 ---------- 

219 mu : `dict`, [`str`, `list`] 

220 Dictionary keyed by amp name with mean signal values. 

221 

222 covs : `dict`, [`str`, `list`] 

223 Dictionary keyed by amp names containing a list of measued covariances per mean flux. 

224 

225 covsModel : `dict`, [`str`, `list`] 

226 Dictionary keyed by amp names containinging covariances model (Eq. 20 of Astier+19) per mean flux. 

227 

228 covsWeights : `dict`, [`str`, `list`] 

229 Dictionary keyed by amp names containinging sqrt. of covariances weights. 

230 

231 covsNoB : `dict`, [`str`, `list`] 

232 Dictionary keyed by amp names containing a list of measued covariances per mean flux ('b'=0 in 

233 Astier+19). 

234 

235 covsModelNoB : `dict`, [`str`, `list`] 

236 Dictionary keyed by amp names containing covariances model (with 'b'=0 in Eq. 20 of Astier+19) 

237 per mean flux. 

238 

239 covsWeightsNoB : `dict`, [`str`, `list`] 

240 Dictionary keyed by amp names containing sqrt. of covariances weights ('b' = 0 in Eq. 20 of 

241 Astier+19). 

242 

243 gainDict : `dict`, [`str`, `float`] 

244 Dictionary keyed by amp names containing the gains in e-/ADU. 

245 

246 noiseDict : `dict`, [`str`, `float`] 

247 Dictionary keyed by amp names containing the rms redout noise in e-. 

248 

249 aDict : `dict`, [`str`, `numpy.array`] 

250 Dictionary keyed by amp names containing 'a' coefficients (Eq. 20 of Astier+19). 

251 

252 bDict : `dict`, [`str`, `numpy.array`] 

253 Dictionary keyed by amp names containing 'b' coefficients (Eq. 20 of Astier+19). 

254 

255 pdfPages: `matplotlib.backends.backend_pdf.PdfPages` 

256 PDF file where the plots will be saved. 

257 """ 

258 

259 legendFontSize = 7 

260 labelFontSize = 7 

261 titleFontSize = 9 

262 supTitleFontSize = 18 

263 markerSize = 25 

264 

265 nAmps = len(covs) 

266 if nAmps == 2: 

267 nRows, nCols = 2, 1 

268 nRows = np.sqrt(nAmps) 

269 mantissa, _ = np.modf(nRows) 

270 if mantissa > 0: 

271 nRows = int(nRows) + 1 

272 nCols = nRows 

273 else: 

274 nRows = int(nRows) 

275 nCols = nRows 

276 

277 f, ax = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10)) 

278 f2, ax2 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10)) 

279 fResCov00, axResCov00 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', 

280 figsize=(13, 10)) 

281 fCov01, axCov01 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10)) 

282 fCov10, axCov10 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10)) 

283 

284 assert(len(covsModel) == nAmps) 

285 assert(len(covsWeights) == nAmps) 

286 

287 assert(len(covsNoB) == nAmps) 

288 assert(len(covsModelNoB) == nAmps) 

289 assert(len(covsWeightsNoB) == nAmps) 

290 

291 for i, (amp, a, a2, aResVar, a3, a4) in enumerate(zip(covs, ax.flatten(), 

292 ax2.flatten(), axResCov00.flatten(), 

293 axCov01.flatten(), axCov10.flatten())): 

294 

295 muAmp, cov, model, weight = mu[amp], covs[amp], covsModel[amp], covsWeights[amp] 

296 if not np.isnan(np.array(cov)).all(): # If all the entries ara np.nan, this is a bad amp. 

297 aCoeffs, bCoeffs = np.array(aDict[amp]), np.array(bDict[amp]) 

298 gain, noise = gainDict[amp], noiseDict[amp] 

299 (meanVecOriginal, varVecOriginal, varVecModelOriginal, 

300 weightsOriginal, varMask) = getFitDataFromCovariances(0, 0, muAmp, cov, model, weight) 

301 meanVecFinal, varVecFinal = meanVecOriginal[varMask], varVecOriginal[varMask] 

302 varVecModelFinal = varVecModelOriginal[varMask] 

303 meanVecOutliers = meanVecOriginal[np.invert(varMask)] 

304 varVecOutliers = varVecOriginal[np.invert(varMask)] 

305 varWeightsFinal = weightsOriginal[varMask] 

306 # Get weighted reduced chi2 

307 chi2FullModelVar = calculateWeightedReducedChi2(varVecFinal, varVecModelFinal, 

308 varWeightsFinal, len(meanVecFinal), 4) 

309 

310 (meanVecOrigCov01, varVecOrigCov01, varVecModelOrigCov01, 

311 _, maskCov01) = getFitDataFromCovariances(0, 0, muAmp, cov, model, weight) 

312 meanVecFinalCov01, varVecFinalCov01 = meanVecOrigCov01[maskCov01], varVecOrigCov01[maskCov01] 

313 varVecModelFinalCov01 = varVecModelOrigCov01[maskCov01] 

314 meanVecOutliersCov01 = meanVecOrigCov01[np.invert(maskCov01)] 

315 varVecOutliersCov01 = varVecOrigCov01[np.invert(maskCov01)] 

316 

317 (meanVecOrigCov10, varVecOrigCov10, varVecModelOrigCov10, 

318 _, maskCov10) = getFitDataFromCovariances(1, 0, muAmp, cov, model, weight) 

319 meanVecFinalCov10, varVecFinalCov10 = meanVecOrigCov10[maskCov10], varVecOrigCov10[maskCov10] 

320 varVecModelFinalCov10 = varVecModelOrigCov10[maskCov10] 

321 meanVecOutliersCov10 = meanVecOrigCov10[np.invert(maskCov10)] 

322 varVecOutliersCov10 = varVecOrigCov10[np.invert(maskCov10)] 

323 

324 # cuadratic fit for residuals below 

325 par2 = np.polyfit(meanVecFinal, varVecFinal, 2, w=varWeightsFinal) 

326 varModelFinalQuadratic = np.polyval(par2, meanVecFinal) 

327 chi2QuadModelVar = calculateWeightedReducedChi2(varVecFinal, varModelFinalQuadratic, 

328 varWeightsFinal, len(meanVecFinal), 3) 

329 

330 # fit with no 'b' coefficient (c = a*b in Eq. 20 of Astier+19) 

331 covNoB, modelNoB, weightNoB = covsNoB[amp], covsModelNoB[amp], covsWeightsNoB[amp] 

332 (meanVecFinalNoB, varVecFinalNoB, varVecModelFinalNoB, 

333 varWeightsFinalNoB, maskNoB) = getFitDataFromCovariances(0, 0, muAmp, covNoB, modelNoB, 

334 weightNoB, returnMasked=True) 

335 chi2FullModelNoBVar = calculateWeightedReducedChi2(varVecFinalNoB, varVecModelFinalNoB, 

336 varWeightsFinalNoB, len(meanVecFinalNoB), 

337 3) 

338 stringLegend = (f"Gain: {gain:.4} e/DN \n" + 

339 f"Noise: {noise:.4} e \n" + 

340 r"$a_{00}$: %.3e 1/e"%aCoeffs[0, 0] + 

341 "\n" + r"$b_{00}$: %.3e 1/e"%bCoeffs[0, 0]) 

342 minMeanVecFinal = np.min(meanVecFinal) 

343 maxMeanVecFinal = np.max(meanVecFinal) 

344 deltaXlim = maxMeanVecFinal - minMeanVecFinal 

345 

346 a.set_xlabel(r'Mean signal ($\mu$, DN)', fontsize=labelFontSize) 

347 a.set_ylabel(r'Variance (DN$^2$)', fontsize=labelFontSize) 

348 a.tick_params(labelsize=11) 

349 a.set_xscale('linear', fontsize=labelFontSize) 

350 a.set_yscale('linear', fontsize=labelFontSize) 

351 a.scatter(meanVecFinal, varVecFinal, c='blue', marker='o', s=markerSize) 

352 a.scatter(meanVecOutliers, varVecOutliers, c='magenta', marker='s', s=markerSize) 

353 a.plot(meanVecFinal, varVecModelFinal, color='red', lineStyle='-') 

354 a.text(0.03, 0.7, stringLegend, transform=a.transAxes, fontsize=legendFontSize) 

355 a.set_title(amp, fontsize=titleFontSize) 

356 a.set_xlim([minMeanVecFinal - 0.2*deltaXlim, maxMeanVecFinal + 0.2*deltaXlim]) 

357 

358 # Same as above, but in log-scale 

359 a2.set_xlabel(r'Mean Signal ($\mu$, DN)', fontsize=labelFontSize) 

360 a2.set_ylabel(r'Variance (DN$^2$)', fontsize=labelFontSize) 

361 a2.tick_params(labelsize=11) 

362 a2.set_xscale('log') 

363 a2.set_yscale('log') 

364 a2.plot(meanVecFinal, varVecModelFinal, color='red', lineStyle='-') 

365 a2.scatter(meanVecFinal, varVecFinal, c='blue', marker='o', s=markerSize) 

366 a2.scatter(meanVecOutliers, varVecOutliers, c='magenta', marker='s', s=markerSize) 

367 a2.text(0.03, 0.7, stringLegend, transform=a2.transAxes, fontsize=legendFontSize) 

368 a2.set_title(amp, fontsize=titleFontSize) 

369 a2.set_xlim([minMeanVecFinal, maxMeanVecFinal]) 

370 

371 # Residuals var - model 

372 aResVar.set_xlabel(r'Mean signal ($\mu$, DN)', fontsize=labelFontSize) 

373 aResVar.set_ylabel(r'Residuals (DN$^2$)', fontsize=labelFontSize) 

374 aResVar.tick_params(labelsize=11) 

375 aResVar.set_xscale('linear', fontsize=labelFontSize) 

376 aResVar.set_yscale('linear', fontsize=labelFontSize) 

377 aResVar.plot(meanVecFinal, varVecFinal - varVecModelFinal, color='blue', lineStyle='-', 

378 label=r'Full fit ($\chi_{\rm{red}}^2$: %g)'%chi2FullModelVar) 

379 aResVar.plot(meanVecFinal, varVecFinal - varModelFinalQuadratic, color='red', lineStyle='-', 

380 label=r'Quadratic fit ($\chi_{\rm{red}}^2$: %g)'%chi2QuadModelVar) 

381 aResVar.plot(meanVecFinalNoB, varVecFinalNoB - varVecModelFinalNoB, color='green', 

382 lineStyle='-', 

383 label=r'Full fit (b=0) ($\chi_{\rm{red}}^2$: %g)'%chi2FullModelNoBVar) 

384 aResVar.axhline(color='black') 

385 aResVar.set_title(amp, fontsize=titleFontSize) 

386 aResVar.set_xlim([minMeanVecFinal - 0.2*deltaXlim, maxMeanVecFinal + 0.2*deltaXlim]) 

387 aResVar.legend(fontsize=7) 

388 

389 a3.set_xlabel(r'Mean signal ($\mu$, DN)', fontsize=labelFontSize) 

390 a3.set_ylabel(r'Cov01 (DN$^2$)', fontsize=labelFontSize) 

391 a3.tick_params(labelsize=11) 

392 a3.set_xscale('linear', fontsize=labelFontSize) 

393 a3.set_yscale('linear', fontsize=labelFontSize) 

394 a3.scatter(meanVecFinalCov01, varVecFinalCov01, c='blue', marker='o', s=markerSize) 

395 a3.scatter(meanVecOutliersCov01, varVecOutliersCov01, c='magenta', marker='s', s=markerSize) 

396 a3.plot(meanVecFinalCov01, varVecModelFinalCov01, color='red', lineStyle='-') 

397 a3.set_title(amp, fontsize=titleFontSize) 

398 a3.set_xlim([minMeanVecFinal - 0.2*deltaXlim, maxMeanVecFinal + 0.2*deltaXlim]) 

399 

400 a4.set_xlabel(r'Mean signal ($\mu$, DN)', fontsize=labelFontSize) 

401 a4.set_ylabel(r'Cov10 (DN$^2$)', fontsize=labelFontSize) 

402 a4.tick_params(labelsize=11) 

403 a4.set_xscale('linear', fontsize=labelFontSize) 

404 a4.set_yscale('linear', fontsize=labelFontSize) 

405 a4.scatter(meanVecFinalCov10, varVecFinalCov10, c='blue', marker='o', s=markerSize) 

406 a4.scatter(meanVecOutliersCov10, varVecOutliersCov10, c='magenta', marker='s', s=markerSize) 

407 a4.plot(meanVecFinalCov10, varVecModelFinalCov10, color='red', lineStyle='-') 

408 a4.set_title(amp, fontsize=titleFontSize) 

409 a4.set_xlim([minMeanVecFinal - 0.2*deltaXlim, maxMeanVecFinal + 0.2*deltaXlim]) 

410 

411 else: 

412 a.set_title(f"{amp} (BAD)", fontsize=titleFontSize) 

413 a2.set_title(f"{amp} (BAD)", fontsize=titleFontSize) 

414 a3.set_title(f"{amp} (BAD)", fontsize=titleFontSize) 

415 a4.set_title(f"{amp} (BAD)", fontsize=titleFontSize) 

416 

417 f.suptitle("PTC from covariances as in Astier+19 \n Fit: Eq. 20, Astier+19", 

418 fontsize=supTitleFontSize) 

419 pdfPages.savefig(f) 

420 f2.suptitle("PTC from covariances as in Astier+19 (log-log) \n Fit: Eq. 20, Astier+19", 

421 fontsize=supTitleFontSize) 

422 pdfPages.savefig(f2) 

423 fResCov00.suptitle("Residuals (data-model) for Cov00 (Var)", fontsize=supTitleFontSize) 

424 pdfPages.savefig(fResCov00) 

425 fCov01.suptitle("Cov01 as in Astier+19 (nearest parallel neighbor covariance) \n" + 

426 " Fit: Eq. 20, Astier+19", fontsize=supTitleFontSize) 

427 pdfPages.savefig(fCov01) 

428 fCov10.suptitle("Cov10 as in Astier+19 (nearest serial neighbor covariance) \n" + 

429 "Fit: Eq. 20, Astier+19", fontsize=supTitleFontSize) 

430 pdfPages.savefig(fCov10) 

431 

432 return 

433 

434 def plotNormalizedCovariances(self, i, j, inputMu, covs, covsModel, covsWeights, covsNoB, covsModelNoB, 

435 covsWeightsNoB, pdfPages, offset=0.004, 

436 numberOfBins=10, plotData=True, topPlot=False, log=None): 

437 """Plot C_ij/mu vs mu. 

438 

439 Figs. 8, 10, and 11 of Astier+19 

440 

441 Parameters 

442 ---------- 

443 i : `int` 

444 Covariane lag 

445 

446 j : `int` 

447 Covariance lag 

448 

449 inputMu : `dict`, [`str`, `list`] 

450 Dictionary keyed by amp name with mean signal values. 

451 

452 covs : `dict`, [`str`, `list`] 

453 Dictionary keyed by amp names containing a list of measued covariances per mean flux. 

454 

455 covsModel : `dict`, [`str`, `list`] 

456 Dictionary keyed by amp names containinging covariances model (Eq. 20 of Astier+19) per mean flux. 

457 

458 covsWeights : `dict`, [`str`, `list`] 

459 Dictionary keyed by amp names containinging sqrt. of covariances weights. 

460 

461 covsNoB : `dict`, [`str`, `list`] 

462 Dictionary keyed by amp names containing a list of measued covariances per mean flux ('b'=0 in 

463 Astier+19). 

464 

465 covsModelNoB : `dict`, [`str`, `list`] 

466 Dictionary keyed by amp names containing covariances model (with 'b'=0 in Eq. 20 of Astier+19) 

467 per mean flux. 

468 

469 covsWeightsNoB : `dict`, [`str`, `list`] 

470 Dictionary keyed by amp names containing sqrt. of covariances weights ('b' = 0 in Eq. 20 of 

471 Astier+19). 

472 

473 pdfPages: `matplotlib.backends.backend_pdf.PdfPages` 

474 PDF file where the plots will be saved. 

475 

476 offset : `float`, optional 

477 Constant offset factor to plot covariances in same panel (so they don't overlap). 

478 

479 numberOfBins : `int`, optional 

480 Number of bins for top and bottom plot. 

481 

482 plotData : `bool`, optional 

483 Plot the data points? 

484 

485 topPlot : `bool`, optional 

486 Plot the top plot with the covariances, and the bottom plot with the model residuals? 

487 

488 log : `lsst.log.Log`, optional 

489 Logger to handle messages. 

490 """ 

491 if (not topPlot): 

492 fig = plt.figure(figsize=(8, 10)) 

493 gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1]) 

494 gs.update(hspace=0) 

495 ax0 = plt.subplot(gs[0]) 

496 plt.setp(ax0.get_xticklabels(), visible=False) 

497 else: 

498 fig = plt.figure(figsize=(8, 8)) 

499 ax0 = plt.subplot(111) 

500 ax0.ticklabel_format(style='sci', axis='x', scilimits=(0, 0)) 

501 ax0.tick_params(axis='both', labelsize='x-large') 

502 mue, rese, wce = [], [], [] 

503 mueNoB, reseNoB, wceNoB = [], [], [] 

504 for counter, amp in enumerate(covs): 

505 muAmp, fullCov, fullCovModel, fullCovWeight = (inputMu[amp], covs[amp], covsModel[amp], 

506 covsWeights[amp]) 

507 if len(fullCov) == 0: 

508 continue 

509 mu, cov, model, weightCov, _ = getFitDataFromCovariances(i, j, muAmp, fullCov, fullCovModel, 

510 fullCovWeight, divideByMu=True, 

511 returnMasked=True) 

512 mue += list(mu) 

513 rese += list(cov - model) 

514 wce += list(weightCov) 

515 

516 fullCovNoB, fullCovModelNoB, fullCovWeightNoB = (covsNoB[amp], covsModelNoB[amp], 

517 covsWeightsNoB[amp]) 

518 if len(fullCovNoB) == 0: 

519 continue 

520 (muNoB, covNoB, modelNoB, 

521 weightCovNoB, _) = getFitDataFromCovariances(i, j, muAmp, fullCovNoB, fullCovModelNoB, 

522 fullCovWeightNoB, divideByMu=True, 

523 returnMasked=True) 

524 mueNoB += list(muNoB) 

525 reseNoB += list(covNoB - modelNoB) 

526 wceNoB += list(weightCovNoB) 

527 

528 # the corresponding fit 

529 fit_curve, = plt.plot(mu, model + counter*offset, '-', linewidth=4.0) 

530 # bin plot. len(mu) = no binning 

531 gind = self.indexForBins(mu, numberOfBins) 

532 

533 xb, yb, wyb, sigyb = self.binData(mu, cov, gind, weightCov) 

534 plt.errorbar(xb, yb+counter*offset, yerr=sigyb, marker='o', linestyle='none', markersize=6.5, 

535 color=fit_curve.get_color(), label=f"{amp} (N: {len(mu)})") 

536 # plot the data 

537 if plotData: 

538 points, = plt.plot(mu, cov + counter*offset, '.', color=fit_curve.get_color()) 

539 plt.legend(loc='upper right', fontsize=8) 

540 # end loop on amps 

541 mue = np.array(mue) 

542 rese = np.array(rese) 

543 wce = np.array(wce) 

544 mueNoB = np.array(mueNoB) 

545 reseNoB = np.array(reseNoB) 

546 wceNoB = np.array(wceNoB) 

547 

548 plt.xlabel(r"$\mu (el)$", fontsize='x-large') 

549 plt.ylabel(r"$Cov{%d%d}/\mu + Cst (el)$"%(i, j), fontsize='x-large') 

550 if (not topPlot): 

551 gind = self.indexForBins(mue, numberOfBins) 

552 xb, yb, wyb, sigyb = self.binData(mue, rese, gind, wce) 

553 

554 ax1 = plt.subplot(gs[1], sharex=ax0) 

555 ax1.errorbar(xb, yb, yerr=sigyb, marker='o', linestyle='none', label='Full fit') 

556 gindNoB = self.indexForBins(mueNoB, numberOfBins) 

557 xb2, yb2, wyb2, sigyb2 = self.binData(mueNoB, reseNoB, gindNoB, wceNoB) 

558 

559 ax1.errorbar(xb2, yb2, yerr=sigyb2, marker='o', linestyle='none', label='b = 0') 

560 ax1.tick_params(axis='both', labelsize='x-large') 

561 plt.legend(loc='upper left', fontsize='large') 

562 # horizontal line at zero 

563 plt.plot(xb, [0]*len(xb), '--', color='k') 

564 plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0)) 

565 plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0)) 

566 plt.xlabel(r'$\mu (el)$', fontsize='x-large') 

567 plt.ylabel(r'$Cov{%d%d}/\mu$ -model (el)'%(i, j), fontsize='x-large') 

568 plt.tight_layout() 

569 plt.suptitle(f"Nbins: {numberOfBins}") 

570 # overlapping y labels: 

571 fig.canvas.draw() 

572 labels0 = [item.get_text() for item in ax0.get_yticklabels()] 

573 labels0[0] = u'' 

574 ax0.set_yticklabels(labels0) 

575 pdfPages.savefig(fig) 

576 

577 return 

578 

579 @staticmethod 

580 def plot_a_b(aDict, bDict, pdfPages, bRange=3): 

581 """Fig. 12 of Astier+19 

582 

583 Color display of a and b arrays fits, averaged over channels. 

584 

585 Parameters 

586 ---------- 

587 aDict : `dict`, [`numpy.array`] 

588 Dictionary keyed by amp names containing the fitted 'a' coefficients from the model 

589 in Eq. 20 of Astier+19 (if `ptcFitType` is `FULLCOVARIANCE`). 

590 

591 bDict : `dict`, [`numpy.array`] 

592 Dictionary keyed by amp names containing the fitted 'b' coefficients from the model 

593 in Eq. 20 of Astier+19 (if `ptcFitType` is `FULLCOVARIANCE`). 

594 

595 pdfPages: `matplotlib.backends.backend_pdf.PdfPages` 

596 PDF file where the plots will be saved. 

597 

598 bRange : `int` 

599 Maximum lag for b arrays. 

600 """ 

601 a, b = [], [] 

602 for amp in aDict: 

603 if np.isnan(aDict[amp]).all(): 

604 continue 

605 a.append(aDict[amp]) 

606 b.append(bDict[amp]) 

607 a = np.array(a).mean(axis=0) 

608 b = np.array(b).mean(axis=0) 

609 fig = plt.figure(figsize=(7, 11)) 

610 ax0 = fig.add_subplot(2, 1, 1) 

611 im0 = ax0.imshow(np.abs(a.transpose()), origin='lower', norm=mpl.colors.LogNorm()) 

612 ax0.tick_params(axis='both', labelsize='x-large') 

613 ax0.set_title(r'$|a|$', fontsize='x-large') 

614 ax0.xaxis.set_ticks_position('bottom') 

615 cb0 = plt.colorbar(im0) 

616 cb0.ax.tick_params(labelsize='x-large') 

617 

618 ax1 = fig.add_subplot(2, 1, 2) 

619 ax1.tick_params(axis='both', labelsize='x-large') 

620 ax1.yaxis.set_major_locator(MaxNLocator(integer=True)) 

621 ax1.xaxis.set_major_locator(MaxNLocator(integer=True)) 

622 im1 = ax1.imshow(1e6*b[:bRange, :bRange].transpose(), origin='lower') 

623 cb1 = plt.colorbar(im1) 

624 cb1.ax.tick_params(labelsize='x-large') 

625 ax1.set_title(r'$b \times 10^6$', fontsize='x-large') 

626 ax1.xaxis.set_ticks_position('bottom') 

627 plt.tight_layout() 

628 pdfPages.savefig(fig) 

629 

630 return 

631 

632 @staticmethod 

633 def ab_vs_dist(aDict, bDict, pdfPages, bRange=4): 

634 """Fig. 13 of Astier+19. 

635 

636 Values of a and b arrays fits, averaged over amplifiers, as a function of distance. 

637 

638 Parameters 

639 ---------- 

640 aDict : `dict`, [`numpy.array`] 

641 Dictionary keyed by amp names containing the fitted 'a' coefficients from the model 

642 in Eq. 20 of Astier+19 (if `ptcFitType` is `FULLCOVARIANCE`). 

643 

644 bDict : `dict`, [`numpy.array`] 

645 Dictionary keyed by amp names containing the fitted 'b' coefficients from the model 

646 in Eq. 20 of Astier+19 (if `ptcFitType` is `FULLCOVARIANCE`). 

647 

648 pdfPages: `matplotlib.backends.backend_pdf.PdfPages` 

649 PDF file where the plots will be saved. 

650 

651 bRange : `int` 

652 Maximum lag for b arrays. 

653 """ 

654 assert (len(aDict) == len(bDict)) 

655 a = [] 

656 for amp in aDict: 

657 if np.isnan(aDict[amp]).all(): 

658 continue 

659 a.append(aDict[amp]) 

660 a = np.array(a) 

661 y = a.mean(axis=0) 

662 sy = a.std(axis=0)/np.sqrt(len(aDict)) 

663 i, j = np.indices(y.shape) 

664 upper = (i >= j).ravel() 

665 r = np.sqrt(i**2 + j**2).ravel() 

666 y = y.ravel() 

667 sy = sy.ravel() 

668 fig = plt.figure(figsize=(6, 9)) 

669 ax = fig.add_subplot(211) 

670 ax.set_xlim([0.5, r.max()+1]) 

671 ax.errorbar(r[upper], y[upper], yerr=sy[upper], marker='o', linestyle='none', color='b', 

672 label='$i>=j$') 

673 ax.errorbar(r[~upper], y[~upper], yerr=sy[~upper], marker='o', linestyle='none', color='r', 

674 label='$i<j$') 

675 ax.legend(loc='upper center', fontsize='x-large') 

676 ax.set_xlabel(r'$\sqrt{i^2+j^2}$', fontsize='x-large') 

677 ax.set_ylabel(r'$a_{ij}$', fontsize='x-large') 

678 ax.set_yscale('log') 

679 ax.tick_params(axis='both', labelsize='x-large') 

680 

681 # 

682 axb = fig.add_subplot(212) 

683 b = [] 

684 for amp in bDict: 

685 if np.isnan(bDict[amp]).all(): 

686 continue 

687 b.append(bDict[amp]) 

688 b = np.array(b) 

689 yb = b.mean(axis=0) 

690 syb = b.std(axis=0)/np.sqrt(len(bDict)) 

691 ib, jb = np.indices(yb.shape) 

692 upper = (ib > jb).ravel() 

693 rb = np.sqrt(i**2 + j**2).ravel() 

694 yb = yb.ravel() 

695 syb = syb.ravel() 

696 xmin = -0.2 

697 xmax = bRange 

698 axb.set_xlim([xmin, xmax+0.2]) 

699 cutu = (r > xmin) & (r < xmax) & (upper) 

700 cutl = (r > xmin) & (r < xmax) & (~upper) 

701 axb.errorbar(rb[cutu], yb[cutu], yerr=syb[cutu], marker='o', linestyle='none', color='b', 

702 label='$i>=j$') 

703 axb.errorbar(rb[cutl], yb[cutl], yerr=syb[cutl], marker='o', linestyle='none', color='r', 

704 label='$i<j$') 

705 plt.legend(loc='upper center', fontsize='x-large') 

706 axb.set_xlabel(r'$\sqrt{i^2+j^2}$', fontsize='x-large') 

707 axb.set_ylabel(r'$b_{ij}$', fontsize='x-large') 

708 axb.ticklabel_format(style='sci', axis='y', scilimits=(0, 0)) 

709 axb.tick_params(axis='both', labelsize='x-large') 

710 plt.tight_layout() 

711 pdfPages.savefig(fig) 

712 

713 return 

714 

715 @staticmethod 

716 def plotAcoeffsSum(aDict, bDict, pdfPages): 

717 """Fig. 14. of Astier+19 

718 

719 Cumulative sum of a_ij as a function of maximum separation. This plot displays the average over 

720 channels. 

721 

722 Parameters 

723 ---------- 

724 aDict : `dict`, [`numpy.array`] 

725 Dictionary keyed by amp names containing the fitted 'a' coefficients from the model 

726 in Eq. 20 of Astier+19 (if `ptcFitType` is `FULLCOVARIANCE`). 

727 

728 bDict : `dict`, [`numpy.array`] 

729 Dictionary keyed by amp names containing the fitted 'b' coefficients from the model 

730 in Eq. 20 of Astier+19 (if `ptcFitType` is `FULLCOVARIANCE`). 

731 

732 pdfPages: `matplotlib.backends.backend_pdf.PdfPages` 

733 PDF file where the plots will be saved. 

734 """ 

735 assert (len(aDict) == len(bDict)) 

736 a, b = [], [] 

737 for amp in aDict: 

738 if np.isnan(aDict[amp]).all() or np.isnan(bDict[amp]).all(): 

739 continue 

740 a.append(aDict[amp]) 

741 b.append(bDict[amp]) 

742 a = np.array(a).mean(axis=0) 

743 b = np.array(b).mean(axis=0) 

744 fig = plt.figure(figsize=(7, 6)) 

745 w = 4*np.ones_like(a) 

746 w[0, 1:] = 2 

747 w[1:, 0] = 2 

748 w[0, 0] = 1 

749 wa = w*a 

750 indices = range(1, a.shape[0]+1) 

751 sums = [wa[0:n, 0:n].sum() for n in indices] 

752 ax = plt.subplot(111) 

753 ax.plot(indices, sums/sums[0], 'o', color='b') 

754 ax.set_yscale('log') 

755 ax.set_xlim(indices[0]-0.5, indices[-1]+0.5) 

756 ax.set_ylim(None, 1.2) 

757 ax.set_ylabel(r'$[\sum_{|i|<n\ &\ |j|<n} a_{ij}] / |a_{00}|$', fontsize='x-large') 

758 ax.set_xlabel('n', fontsize='x-large') 

759 ax.tick_params(axis='both', labelsize='x-large') 

760 plt.tight_layout() 

761 pdfPages.savefig(fig) 

762 

763 return 

764 

765 @staticmethod 

766 def plotRelativeBiasACoeffs(aDict, aDictNoB, fullCovsModel, fullCovsModelNoB, signalElectrons, 

767 gainDict, pdfPages, maxr=None): 

768 """Fig. 15 in Astier+19. 

769 

770 Illustrates systematic bias from estimating 'a' 

771 coefficients from the slope of correlations as opposed to the 

772 full model in Astier+19. 

773 

774 Parameters 

775 ---------- 

776 aDict: `dict` 

777 Dictionary of 'a' matrices (Eq. 20, Astier+19), with amp names as keys. 

778 

779 aDictNoB: `dict` 

780 Dictionary of 'a' matrices ('b'= 0 in Eq. 20, Astier+19), with amp names as keys. 

781 

782 fullCovsModel : `dict`, [`str`, `list`] 

783 Dictionary keyed by amp names containing covariances model per mean flux. 

784 

785 fullCovsModelNoB : `dict`, [`str`, `list`] 

786 Dictionary keyed by amp names containing covariances model (with 'b'=0 in Eq. 20 of 

787 Astier+19) per mean flux. 

788 

789 signalElectrons : `float` 

790 Signal at which to evaluate the a_ij coefficients. 

791 

792 pdfPages: `matplotlib.backends.backend_pdf.PdfPages` 

793 PDF file where the plots will be saved. 

794 

795 gainDict : `dict`, [`str`, `float`] 

796 Dicgionary keyed by amp names with the gains in e-/ADU. 

797 

798 maxr : `int`, optional 

799 Maximum lag. 

800 """ 

801 

802 fig = plt.figure(figsize=(7, 11)) 

803 title = [f"'a' relative bias at {signalElectrons} e", "'a' relative bias (b=0)"] 

804 data = [(aDict, fullCovsModel), (aDictNoB, fullCovsModelNoB)] 

805 

806 for k, pair in enumerate(data): 

807 diffs = [] 

808 amean = [] 

809 for amp in pair[0]: 

810 covModel = pair[1][amp] 

811 if np.isnan(covModel).all(): 

812 continue 

813 aOld = computeApproximateAcoeffs(covModel, signalElectrons, gainDict[amp]) 

814 a = pair[0][amp] 

815 amean.append(a) 

816 diffs.append((aOld-a)) 

817 amean = np.array(amean).mean(axis=0) 

818 diff = np.array(diffs).mean(axis=0) 

819 diff = diff/amean 

820 diff = diff[:] 

821 # The difference should be close to zero 

822 diff[0, 0] = 0 

823 if maxr is None: 

824 maxr = diff.shape[0] 

825 diff = diff[:maxr, :maxr] 

826 ax0 = fig.add_subplot(2, 1, k+1) 

827 im0 = ax0.imshow(diff.transpose(), origin='lower') 

828 ax0.yaxis.set_major_locator(MaxNLocator(integer=True)) 

829 ax0.xaxis.set_major_locator(MaxNLocator(integer=True)) 

830 ax0.tick_params(axis='both', labelsize='x-large') 

831 plt.colorbar(im0) 

832 ax0.set_title(title[k]) 

833 

834 plt.tight_layout() 

835 pdfPages.savefig(fig) 

836 

837 return 

838 

839 def _plotStandardPtc(self, dataset, ptcFitType, pdfPages): 

840 """Plot PTC, var/signal vs signal, linearity, and linearity residual per amplifier. 

841 

842 Parameters 

843 ---------- 

844 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset` 

845 The dataset containing the means, variances, exposure times, and mask. 

846 

847 ptcFitType : `str` 

848 Type of the model fit to the PTC. Options: 'FULLCOVARIANCE', EXPAPPROXIMATION, or 'POLYNOMIAL'. 

849 

850 pdfPages: `matplotlib.backends.backend_pdf.PdfPages` 

851 PDF file where the plots will be saved. 

852 """ 

853 

854 if ptcFitType == 'EXPAPPROXIMATION': 

855 ptcFunc = funcAstier 

856 stringTitle = (r"Var = $\frac{1}{2g^2a_{00}}(\exp (2a_{00} \mu g) - 1) + \frac{n_{00}}{g^2}$ ") 

857 elif ptcFitType == 'POLYNOMIAL': 

858 ptcFunc = funcPolynomial 

859 for key in dataset.ptcFitPars: 

860 deg = len(dataset.ptcFitPars[key]) - 1 

861 break 

862 stringTitle = r"Polynomial (degree: %g)" % (deg) 

863 else: 

864 raise RuntimeError(f"The input dataset had an invalid dataset.ptcFitType: {ptcFitType}. \n" + 

865 "Options: 'FULLCOVARIANCE', EXPAPPROXIMATION, or 'POLYNOMIAL'.") 

866 

867 legendFontSize = 8 

868 labelFontSize = 8 

869 titleFontSize = 9 

870 supTitleFontSize = 18 

871 markerSize = 25 

872 

873 # General determination of the size of the plot grid 

874 nAmps = len(dataset.ampNames) 

875 if nAmps == 2: 

876 nRows, nCols = 2, 1 

877 nRows = np.sqrt(nAmps) 

878 mantissa, _ = np.modf(nRows) 

879 if mantissa > 0: 

880 nRows = int(nRows) + 1 

881 nCols = nRows 

882 else: 

883 nRows = int(nRows) 

884 nCols = nRows 

885 

886 f, ax = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10)) 

887 f2, ax2 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10)) 

888 f3, ax3 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10)) 

889 

890 for i, (amp, a, a2, a3) in enumerate(zip(dataset.ampNames, ax.flatten(), ax2.flatten(), 

891 ax3.flatten())): 

892 meanVecOriginal = np.array(dataset.rawMeans[amp]) 

893 varVecOriginal = np.array(dataset.rawVars[amp]) 

894 mask = np.array(dataset.expIdMask[amp]) 

895 if np.isnan(mask[0]): # All NaNs the whole amp is bad 

896 a.set_title(f"{amp} (BAD)", fontsize=titleFontSize) 

897 a2.set_title(f"{amp} (BAD)", fontsize=titleFontSize) 

898 a3.set_title(f"{amp} (BAD)", fontsize=titleFontSize) 

899 continue 

900 else: 

901 mask = mask.astype(bool) 

902 meanVecFinal = meanVecOriginal[mask] 

903 varVecFinal = varVecOriginal[mask] 

904 meanVecOutliers = meanVecOriginal[np.invert(mask)] 

905 varVecOutliers = varVecOriginal[np.invert(mask)] 

906 pars, parsErr = np.array(dataset.ptcFitPars[amp]), np.array(dataset.ptcFitParsError[amp]) 

907 ptcRedChi2 = np.array(dataset.ptcFitChiSq[amp]) 

908 if ptcFitType == 'EXPAPPROXIMATION': 

909 if len(meanVecFinal): 

910 ptcA00, ptcA00error = pars[0], parsErr[0] 

911 ptcGain, ptcGainError = pars[1], parsErr[1] 

912 ptcNoise = np.sqrt((pars[2])) # pars[2] is in (e-)^2 

913 ptcNoiseAdu = ptcNoise*(1./ptcGain) 

914 ptcNoiseError = 0.5*(parsErr[2]/np.fabs(pars[2]))*np.sqrt(np.fabs(pars[2])) 

915 stringLegend = (f"a00: {ptcA00:.2e}+/-{ptcA00error:.2e} 1/e" 

916 f"\nGain: {ptcGain:.4}+/-{ptcGainError:.2e} e/DN" 

917 f"\nNoise: {ptcNoise:.4}+/-{ptcNoiseError:.2e} e\n" 

918 r"$\chi^2_{\rm{red}}$: " + f"{ptcRedChi2:.4}") 

919 

920 if ptcFitType == 'POLYNOMIAL': 

921 if len(meanVecFinal): 

922 ptcGain, ptcGainError = 1./pars[1], np.fabs(1./pars[1])*(parsErr[1]/pars[1]) 

923 ptcNoiseAdu = np.sqrt((pars[0])) # pars[0] is in ADU^2 

924 ptcNoise = ptcNoiseAdu*ptcGain 

925 ptcNoiseError = (0.5*(parsErr[0]/np.fabs(pars[0]))*(np.sqrt(np.fabs(pars[0]))))*ptcGain 

926 stringLegend = (f"Gain: {ptcGain:.4}+/-{ptcGainError:.2e} e/DN\n" 

927 f"Noise: {ptcNoise:.4}+/-{ptcNoiseError:.2e} e\n" 

928 r"$\chi^2_{\rm{red}}$: " + f"{ptcRedChi2:.4}") 

929 

930 a.set_xlabel(r'Mean signal ($\mu$, DN)', fontsize=labelFontSize) 

931 a.set_ylabel(r'Variance (DN$^2$)', fontsize=labelFontSize) 

932 a.tick_params(labelsize=11) 

933 a.set_xscale('linear', fontsize=labelFontSize) 

934 a.set_yscale('linear', fontsize=labelFontSize) 

935 

936 a2.set_xlabel(r'Mean Signal ($\mu$, DN)', fontsize=labelFontSize) 

937 a2.set_ylabel(r'Variance (DN$^2$)', fontsize=labelFontSize) 

938 a2.tick_params(labelsize=11) 

939 a2.set_xscale('log') 

940 a2.set_yscale('log') 

941 

942 a3.set_xlabel(r'Mean signal ($\mu$, DN)', fontsize=labelFontSize) 

943 a3.set_ylabel(r'Variance/$\mu$ (DN)', fontsize=labelFontSize) 

944 a3.tick_params(labelsize=11) 

945 a3.set_xscale('linear', fontsize=labelFontSize) 

946 a3.set_yscale('linear', fontsize=labelFontSize) 

947 

948 minMeanVecFinal = np.min(meanVecFinal) 

949 maxMeanVecFinal = np.max(meanVecFinal) 

950 meanVecFit = np.linspace(minMeanVecFinal, maxMeanVecFinal, 100*len(meanVecFinal)) 

951 minMeanVecOriginal = np.min(meanVecOriginal) 

952 maxMeanVecOriginal = np.max(meanVecOriginal) 

953 deltaXlim = maxMeanVecOriginal - minMeanVecOriginal 

954 a.plot(meanVecFit, ptcFunc(pars, meanVecFit), color='red') 

955 a.plot(meanVecFinal, ptcNoiseAdu**2 + (1./ptcGain)*meanVecFinal, color='green', 

956 linestyle='--') 

957 a.scatter(meanVecFinal, varVecFinal, c='blue', marker='o', s=markerSize) 

958 a.scatter(meanVecOutliers, varVecOutliers, c='magenta', marker='s', s=markerSize) 

959 a.text(0.03, 0.7, stringLegend, transform=a.transAxes, fontsize=legendFontSize) 

960 a.set_title(amp, fontsize=titleFontSize) 

961 a.set_xlim([minMeanVecOriginal - 0.2*deltaXlim, maxMeanVecOriginal + 0.2*deltaXlim]) 

962 

963 # Same, but in log-scale 

964 a2.plot(meanVecFit, ptcFunc(pars, meanVecFit), color='red') 

965 a2.scatter(meanVecFinal, varVecFinal, c='blue', marker='o', s=markerSize) 

966 a2.scatter(meanVecOutliers, varVecOutliers, c='magenta', marker='s', s=markerSize) 

967 a2.text(0.03, 0.7, stringLegend, transform=a2.transAxes, fontsize=legendFontSize) 

968 a2.set_title(amp, fontsize=titleFontSize) 

969 a2.set_xlim([minMeanVecOriginal, maxMeanVecOriginal]) 

970 

971 # Var/mu vs mu 

972 a3.plot(meanVecFit, ptcFunc(pars, meanVecFit)/meanVecFit, color='red') 

973 a3.scatter(meanVecFinal, varVecFinal/meanVecFinal, c='blue', marker='o', s=markerSize) 

974 a3.scatter(meanVecOutliers, varVecOutliers/meanVecOutliers, c='magenta', marker='s', 

975 s=markerSize) 

976 a3.text(0.2, 0.65, stringLegend, transform=a3.transAxes, fontsize=legendFontSize) 

977 a3.set_title(amp, fontsize=titleFontSize) 

978 a3.set_xlim([minMeanVecOriginal - 0.2*deltaXlim, maxMeanVecOriginal + 0.2*deltaXlim]) 

979 

980 f.suptitle("PTC \n Fit: " + stringTitle, fontsize=supTitleFontSize) 

981 pdfPages.savefig(f) 

982 f2.suptitle("PTC (log-log)", fontsize=supTitleFontSize) 

983 pdfPages.savefig(f2) 

984 f3.suptitle(r"Var/$\mu$", fontsize=supTitleFontSize) 

985 pdfPages.savefig(f3) 

986 

987 return 

988 

989 def _plotLinearizer(self, dataset, linearizer, pdfPages): 

990 """Plot linearity and linearity residual per amplifier 

991 

992 Parameters 

993 ---------- 

994 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset` 

995 The dataset containing the means, variances, exposure times, and mask. 

996 

997 linearizer : `lsst.ip.isr.Linearizer` 

998 Linearizer object 

999 """ 

1000 legendFontSize = 7 

1001 labelFontSize = 7 

1002 titleFontSize = 9 

1003 supTitleFontSize = 18 

1004 

1005 # General determination of the size of the plot grid 

1006 nAmps = len(dataset.ampNames) 

1007 if nAmps == 2: 

1008 nRows, nCols = 2, 1 

1009 nRows = np.sqrt(nAmps) 

1010 mantissa, _ = np.modf(nRows) 

1011 if mantissa > 0: 

1012 nRows = int(nRows) + 1 

1013 nCols = nRows 

1014 else: 

1015 nRows = int(nRows) 

1016 nCols = nRows 

1017 

1018 # Plot mean vs time (f1), and fractional residuals (f2) 

1019 f, ax = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10)) 

1020 f2, ax2 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10)) 

1021 for i, (amp, a, a2) in enumerate(zip(dataset.ampNames, ax.flatten(), ax2.flatten())): 

1022 mask = dataset.expIdMask[amp] 

1023 if np.isnan(mask[0]): 

1024 a.set_title(f"{amp} (BAD)", fontsize=titleFontSize) 

1025 a2.set_title(f"{amp} (BAD)", fontsize=titleFontSize) 

1026 continue 

1027 else: 

1028 mask = mask.astype(bool) 

1029 meanVecFinal = np.array(dataset.rawMeans[amp])[mask] 

1030 timeVecFinal = np.array(dataset.rawExpTimes[amp])[mask] 

1031 

1032 a.set_xlabel('Time (sec)', fontsize=labelFontSize) 

1033 a.set_ylabel(r'Mean signal ($\mu$, DN)', fontsize=labelFontSize) 

1034 a.tick_params(labelsize=labelFontSize) 

1035 a.set_xscale('linear', fontsize=labelFontSize) 

1036 a.set_yscale('linear', fontsize=labelFontSize) 

1037 

1038 a2.axhline(y=0, color='k') 

1039 a2.axvline(x=0, color='k', linestyle='-') 

1040 a2.set_xlabel(r'Mean signal ($\mu$, DN)', fontsize=labelFontSize) 

1041 a2.set_ylabel('Fractional nonlinearity (%)', fontsize=labelFontSize) 

1042 a2.tick_params(labelsize=labelFontSize) 

1043 a2.set_xscale('linear', fontsize=labelFontSize) 

1044 a2.set_yscale('linear', fontsize=labelFontSize) 

1045 

1046 pars, parsErr = linearizer.fitParams[amp], linearizer.fitParamsErr[amp] 

1047 k0, k0Error = pars[0], parsErr[0] 

1048 k1, k1Error = pars[1], parsErr[1] 

1049 k2, k2Error = pars[2], parsErr[2] 

1050 linRedChi2 = linearizer.fitChiSq[amp] 

1051 stringLegend = (f"k0: {k0:.4}+/-{k0Error:.2e} DN\nk1: {k1:.4}+/-{k1Error:.2e} DN/t" 

1052 f"\nk2: {k2:.2e}+/-{k2Error:.2e} DN/t^2\n" 

1053 r"$\chi^2_{\rm{red}}$: " + f"{linRedChi2:.4}") 

1054 a.scatter(timeVecFinal, meanVecFinal) 

1055 a.plot(timeVecFinal, funcPolynomial(pars, timeVecFinal), color='red') 

1056 a.text(0.03, 0.75, stringLegend, transform=a.transAxes, fontsize=legendFontSize) 

1057 a.set_title(f"{amp}", fontsize=titleFontSize) 

1058 

1059 linearPart = k0 + k1*timeVecFinal 

1060 fracLinRes = 100*(linearPart - meanVecFinal)/linearPart 

1061 a2.plot(meanVecFinal, fracLinRes, c='g') 

1062 a2.set_title(f"{amp}", fontsize=titleFontSize) 

1063 

1064 f.suptitle("Linearity \n Fit: Polynomial (degree: %g)" 

1065 % (len(pars)-1), 

1066 fontsize=supTitleFontSize) 

1067 f2.suptitle(r"Fractional NL residual" + "\n" + 

1068 r"$100\times \frac{(k_0 + k_1*Time-\mu)}{k_0+k_1*Time}$", 

1069 fontsize=supTitleFontSize) 

1070 pdfPages.savefig(f) 

1071 pdfPages.savefig(f2) 

1072 

1073 @staticmethod 

1074 def findGroups(x, maxDiff): 

1075 """Group data into bins, with at most maxDiff distance between bins. 

1076 

1077 Parameters 

1078 ---------- 

1079 x: `list` 

1080 Data to bin. 

1081 

1082 maxDiff: `int` 

1083 Maximum distance between bins. 

1084 

1085 Returns 

1086 ------- 

1087 index: `list` 

1088 Bin indices. 

1089 """ 

1090 ix = np.argsort(x) 

1091 xsort = np.sort(x) 

1092 index = np.zeros_like(x, dtype=np.int32) 

1093 xc = xsort[0] 

1094 group = 0 

1095 ng = 1 

1096 

1097 for i in range(1, len(ix)): 

1098 xval = xsort[i] 

1099 if (xval - xc < maxDiff): 

1100 xc = (ng*xc + xval)/(ng+1) 

1101 ng += 1 

1102 index[ix[i]] = group 

1103 else: 

1104 group += 1 

1105 ng = 1 

1106 index[ix[i]] = group 

1107 xc = xval 

1108 

1109 return index 

1110 

1111 @staticmethod 

1112 def indexForBins(x, nBins): 

1113 """Builds an index with regular binning. The result can be fed into binData. 

1114 

1115 Parameters 

1116 ---------- 

1117 x: `numpy.array` 

1118 Data to bin. 

1119 nBins: `int` 

1120 Number of bin. 

1121 

1122 Returns 

1123 ------- 

1124 np.digitize(x, bins): `numpy.array` 

1125 Bin indices. 

1126 """ 

1127 

1128 bins = np.linspace(x.min(), x.max() + abs(x.max() * 1e-7), nBins + 1) 

1129 return np.digitize(x, bins) 

1130 

1131 @staticmethod 

1132 def binData(x, y, binIndex, wy=None): 

1133 """Bin data (usually for display purposes). 

1134 

1135 Patrameters 

1136 ----------- 

1137 x: `numpy.array` 

1138 Data to bin. 

1139 

1140 y: `numpy.array` 

1141 Data to bin. 

1142 

1143 binIdex: `list` 

1144 Bin number of each datum. 

1145 

1146 wy: `numpy.array` 

1147 Inverse rms of each datum to use when averaging (the actual weight is wy**2). 

1148 

1149 Returns: 

1150 ------- 

1151 

1152 xbin: `numpy.array` 

1153 Binned data in x. 

1154 

1155 ybin: `numpy.array` 

1156 Binned data in y. 

1157 

1158 wybin: `numpy.array` 

1159 Binned weights in y, computed from wy's in each bin. 

1160 

1161 sybin: `numpy.array` 

1162 Uncertainty on the bin average, considering actual scatter, and ignoring weights. 

1163 """ 

1164 

1165 if wy is None: 

1166 wy = np.ones_like(x) 

1167 binIndexSet = set(binIndex) 

1168 w2 = wy*wy 

1169 xw2 = x*(w2) 

1170 xbin = np.array([xw2[binIndex == i].sum()/w2[binIndex == i].sum() for i in binIndexSet]) 

1171 

1172 yw2 = y*w2 

1173 ybin = np.array([yw2[binIndex == i].sum()/w2[binIndex == i].sum() for i in binIndexSet]) 

1174 

1175 wybin = np.sqrt(np.array([w2[binIndex == i].sum() for i in binIndexSet])) 

1176 sybin = np.array([y[binIndex == i].std()/np.sqrt(np.array([binIndex == i]).sum()) 

1177 for i in binIndexSet]) 

1178 

1179 return xbin, ybin, wybin, sybin