Coverage for python/lsst/cp/pipe/ptc/plotPtc.py: 5%
575 statements
« prev ^ index » next coverage.py v6.5.0, created at 2022-11-23 03:07 -0800
« prev ^ index » next coverage.py v6.5.0, created at 2022-11-23 03:07 -0800
1# This file is part of cp_pipe.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <https://www.gnu.org/licenses/>.
21#
23__all__ = ['PlotPhotonTransferCurveTask']
25import numpy as np
26import matplotlib.pyplot as plt
27import matplotlib as mpl
28from matplotlib import gridspec
29from matplotlib.backends.backend_pdf import PdfPages
31from lsst.cp.pipe.utils import (funcAstier, funcPolynomial,
32 calculateWeightedReducedChi2,
33 getFitDataFromCovariances)
34from matplotlib.ticker import MaxNLocator
37class PlotPhotonTransferCurveTask():
38 """A class to plot the dataset from MeasurePhotonTransferCurveTask.
40 Parameters
41 ----------
43 datasetFilename : `str`
44 datasetPtc (lsst.ip.isr.PhotonTransferCurveDataset) file
45 name (fits).
47 linearizerFileName : `str`, optional
48 linearizer (isr.linearize.Linearizer) file
49 name (fits).
51 outDir : `str`, optional
52 Path to the output directory where the final PDF will
53 be placed.
55 detNum : `int`, optional
56 Detector number.
58 signalElectronsRelativeA : `float`, optional
59 Signal value for relative systematic bias between different
60 methods of estimating a_ij (Fig. 15 of Astier+19).
62 plotNormalizedCovariancesNumberOfBins : `float`, optional
63 Number of bins in `plotNormalizedCovariancesNumber` function
64 (Fig. 8, 10., of Astier+19).
65 """
67 def __init__(self, datasetFilename, linearizerFileName=None,
68 outDir='.', detNum=999, signalElectronsRelativeA=75000,
69 plotNormalizedCovariancesNumberOfBins=10):
70 self.datasetFilename = datasetFilename
71 self.linearizerFileName = linearizerFileName
72 self.detNum = detNum
73 self.signalElectronsRelativeA = signalElectronsRelativeA
74 self.plotNormalizedCovariancesNumberOfBins = plotNormalizedCovariancesNumberOfBins
75 self.outDir = outDir
77 def run(self, filenameFull, datasetPtc, linearizer=None, log=None):
78 """Make the plots for the PTC task"""
79 ptcFitType = datasetPtc.ptcFitType
80 with PdfPages(filenameFull) as pdfPages:
81 if ptcFitType in ["FULLCOVARIANCE", ]:
82 self.covAstierMakeAllPlots(datasetPtc, pdfPages, log=log)
83 elif ptcFitType in ["EXPAPPROXIMATION", "POLYNOMIAL"]:
84 self._plotStandardPtc(datasetPtc, ptcFitType, pdfPages)
85 else:
86 raise RuntimeError(f"The input dataset had an invalid dataset.ptcFitType: {ptcFitType}. \n"
87 "Options: 'FULLCOVARIANCE', EXPAPPROXIMATION, or 'POLYNOMIAL'.")
88 if linearizer:
89 self._plotLinearizer(datasetPtc, linearizer, pdfPages)
91 return
93 def covAstierMakeAllPlots(self, dataset, pdfPages,
94 log=None):
95 """Make plots for MeasurePhotonTransferCurve task when
96 doCovariancesAstier=True.
98 This function call other functions that mostly reproduce the
99 plots in Astier+19. Most of the code is ported from Pierre
100 Astier's repository https://github.com/PierreAstier/bfptc
102 Parameters
103 ----------
104 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
105 The dataset containing the necessary information to
106 produce the plots.
108 pdfPages : `matplotlib.backends.backend_pdf.PdfPages`
109 PDF file where the plots will be saved.
111 log : `logging.Logger`, optional
112 Logger to handle messages
113 """
114 mu = dataset.finalMeans
115 # dictionaries with ampNames as keys
116 fullCovs = dataset.covariances
117 fullCovsModel = dataset.covariancesModel
118 fullCovWeights = dataset.covariancesSqrtWeights
119 aDict = dataset.aMatrix
120 bDict = dataset.bMatrix
121 fullCovsNoB = dataset.covariances
122 fullCovsModelNoB = dataset.covariancesModelNoB
123 fullCovWeightsNoB = dataset.covariancesSqrtWeights
124 aDictNoB = dataset.aMatrixNoB
125 gainDict = dataset.gain
126 noiseDict = dataset.noise
128 self.plotCovariances(mu, fullCovs, fullCovsModel, fullCovWeights, fullCovsNoB, fullCovsModelNoB,
129 fullCovWeightsNoB, gainDict, noiseDict, aDict, bDict, pdfPages)
130 self.plotNormalizedCovariances(0, 0, mu, fullCovs, fullCovsModel, fullCovWeights, fullCovsNoB,
131 fullCovsModelNoB, fullCovWeightsNoB, pdfPages,
132 offset=0.01, topPlot=True,
133 numberOfBins=self.plotNormalizedCovariancesNumberOfBins,
134 log=log)
135 self.plotNormalizedCovariances(0, 1, mu, fullCovs, fullCovsModel, fullCovWeights, fullCovsNoB,
136 fullCovsModelNoB, fullCovWeightsNoB, pdfPages,
137 numberOfBins=self.plotNormalizedCovariancesNumberOfBins,
138 log=log)
139 self.plotNormalizedCovariances(1, 0, mu, fullCovs, fullCovsModel, fullCovWeights, fullCovsNoB,
140 fullCovsModelNoB, fullCovWeightsNoB, pdfPages,
141 numberOfBins=self.plotNormalizedCovariancesNumberOfBins,
142 log=log)
143 self.plot_a_b(aDict, bDict, pdfPages)
144 self.ab_vs_dist(aDict, bDict, pdfPages, bRange=4)
145 self.plotAcoeffsSum(aDict, bDict, pdfPages)
146 self.plotRelativeBiasACoeffs(aDict, aDictNoB, fullCovsModel, fullCovsModelNoB,
147 self.signalElectronsRelativeA, gainDict, pdfPages, maxr=4)
149 return
151 @staticmethod
152 def plotCovariances(mu, covs, covsModel, covsWeights, covsNoB, covsModelNoB, covsWeightsNoB,
153 gainDict, noiseDict, aDict, bDict, pdfPages):
154 """Plot covariances and models: Cov00, Cov10, Cov01.
156 Figs. 6 and 7 of Astier+19
158 Parameters
159 ----------
160 mu : `dict` [`str`, `list`]
161 Dictionary keyed by amp name with mean signal values.
163 covs : `dict` [`str`, `list`]
164 Dictionary keyed by amp names containing a list of measued
165 covariances per mean flux.
167 covsModel : `dict` [`str`, `list`]
168 Dictionary keyed by amp names containinging covariances
169 model (Eq. 20 of Astier+19) per mean flux.
171 covsWeights : `dict` [`str`, `list`]
172 Dictionary keyed by amp names containinging sqrt. of
173 covariances weights.
175 covsNoB : `dict` [`str`, `list`]
176 Dictionary keyed by amp names containing a list of measued
177 covariances per mean flux ('b'=0 in Astier+19).
179 covsModelNoB : `dict` [`str`, `list`]
180 Dictionary keyed by amp names containing covariances model
181 (with 'b'=0 in Eq. 20 of Astier+19) per mean flux.
183 covsWeightsNoB : `dict` [`str`, `list`]
184 Dictionary keyed by amp names containing sqrt. of
185 covariances weights ('b' = 0 in Eq. 20 of Astier+19).
187 gainDict : `dict` [`str`, `float`]
188 Dictionary keyed by amp names containing the gains in e-/ADU.
190 noiseDict : `dict` [`str`, `float`]
191 Dictionary keyed by amp names containing the rms redout
192 noise in e-.
194 aDict : `dict` [`str`, `numpy.array`]
195 Dictionary keyed by amp names containing 'a' coefficients
196 (Eq. 20 of Astier+19).
198 bDict : `dict` [`str`, `numpy.array`]
199 Dictionary keyed by amp names containing 'b' coefficients
200 (Eq. 20 of Astier+19).
202 pdfPages : `matplotlib.backends.backend_pdf.PdfPages`
203 PDF file where the plots will be saved.
204 """
205 legendFontSize = 6.5
206 labelFontSize = 7
207 titleFontSize = 9
208 supTitleFontSize = 18
209 markerSize = 25
211 nAmps = len(covs)
212 if nAmps == 2:
213 nRows, nCols = 2, 1
214 nRows = np.sqrt(nAmps)
215 mantissa, _ = np.modf(nRows)
216 if mantissa > 0:
217 nRows = int(nRows) + 1
218 nCols = nRows
219 else:
220 nRows = int(nRows)
221 nCols = nRows
223 f, ax = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
224 f2, ax2 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
225 fResCov00, axResCov00 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row',
226 figsize=(13, 10))
227 fCov01, axCov01 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
228 fCov10, axCov10 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
230 assert(len(covsModel) == nAmps)
231 assert(len(covsWeights) == nAmps)
233 assert(len(covsNoB) == nAmps)
234 assert(len(covsModelNoB) == nAmps)
235 assert(len(covsWeightsNoB) == nAmps)
237 for i, (amp, a, a2, aResVar, a3, a4) in enumerate(zip(covs, ax.flatten(),
238 ax2.flatten(), axResCov00.flatten(),
239 axCov01.flatten(), axCov10.flatten())):
241 muAmp, cov, model, weight = mu[amp], covs[amp], covsModel[amp], covsWeights[amp]
242 if not np.isnan(np.array(cov)).all(): # If all the entries are np.nan, this is a bad amp.
243 aCoeffs, bCoeffs = np.array(aDict[amp]), np.array(bDict[amp])
244 gain, noise = gainDict[amp], noiseDict[amp]
245 (meanVecFinal, varVecFinal, varVecModelFinal,
246 varWeightsFinal, _) = getFitDataFromCovariances(0, 0, muAmp, cov, model, weight,
247 returnMasked=True)
249 # Get weighted reduced chi2
250 chi2FullModelVar = calculateWeightedReducedChi2(varVecFinal, varVecModelFinal,
251 varWeightsFinal, len(meanVecFinal), 4)
253 (meanVecFinalCov01, varVecFinalCov01, varVecModelFinalCov01,
254 _, _) = getFitDataFromCovariances(0, 0, muAmp, cov, model, weight, returnMasked=True)
256 (meanVecFinalCov10, varVecFinalCov10, varVecModelFinalCov10,
257 _, _) = getFitDataFromCovariances(1, 0, muAmp, cov, model, weight, returnMasked=True)
259 # cuadratic fit for residuals below
260 par2 = np.polyfit(meanVecFinal, varVecFinal, 2, w=varWeightsFinal)
261 varModelFinalQuadratic = np.polyval(par2, meanVecFinal)
262 chi2QuadModelVar = calculateWeightedReducedChi2(varVecFinal, varModelFinalQuadratic,
263 varWeightsFinal, len(meanVecFinal), 3)
265 # fit with no 'b' coefficient (c = a*b in Eq. 20 of Astier+19)
266 covNoB, modelNoB, weightNoB = covsNoB[amp], covsModelNoB[amp], covsWeightsNoB[amp]
267 (meanVecFinalNoB, varVecFinalNoB, varVecModelFinalNoB,
268 varWeightsFinalNoB, _) = getFitDataFromCovariances(0, 0, muAmp, covNoB, modelNoB,
269 weightNoB, returnMasked=True)
271 chi2FullModelNoBVar = calculateWeightedReducedChi2(varVecFinalNoB, varVecModelFinalNoB,
272 varWeightsFinalNoB, len(meanVecFinalNoB),
273 3)
274 stringLegend = (f"Gain: {gain:.4} e/ADU \n"
275 f"Noise: {noise:.4} e \n"
276 + r"$a_{00}$: %.3e 1/e"%aCoeffs[0, 0] + "\n"
277 + r"$b_{00}$: %.3e 1/e"%bCoeffs[0, 0]
278 + f"\nLast in fit: {meanVecFinal[-1]:.7} ADU ")
279 minMeanVecFinal = np.nanmin(meanVecFinal)
280 maxMeanVecFinal = np.nanmax(meanVecFinal)
281 deltaXlim = maxMeanVecFinal - minMeanVecFinal
283 a.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
284 a.set_ylabel(r'Variance (ADU$^2$)', fontsize=labelFontSize)
285 a.tick_params(labelsize=11)
286 a.set_xscale('linear')
287 a.set_yscale('linear')
288 a.scatter(meanVecFinal, varVecFinal, c='blue', marker='o', s=markerSize)
289 a.plot(meanVecFinal, varVecModelFinal, color='red', lineStyle='-')
290 a.text(0.03, 0.7, stringLegend, transform=a.transAxes, fontsize=legendFontSize)
291 a.set_title(amp, fontsize=titleFontSize)
292 a.set_xlim([minMeanVecFinal - 0.2*deltaXlim, maxMeanVecFinal + 0.2*deltaXlim])
294 # Same as above, but in log-scale
295 a2.set_xlabel(r'Mean Signal ($\mu$, ADU)', fontsize=labelFontSize)
296 a2.set_ylabel(r'Variance (ADU$^2$)', fontsize=labelFontSize)
297 a2.tick_params(labelsize=11)
298 a2.set_xscale('log')
299 a2.set_yscale('log')
300 a2.plot(meanVecFinal, varVecModelFinal, color='red', lineStyle='-')
301 a2.scatter(meanVecFinal, varVecFinal, c='blue', marker='o', s=markerSize)
302 a2.text(0.03, 0.7, stringLegend, transform=a2.transAxes, fontsize=legendFontSize)
303 a2.set_title(amp, fontsize=titleFontSize)
304 a2.set_xlim([minMeanVecFinal, maxMeanVecFinal])
306 # Residuals var - model
307 aResVar.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
308 aResVar.set_ylabel(r'Residuals (ADU$^2$)', fontsize=labelFontSize)
309 aResVar.tick_params(labelsize=11)
310 aResVar.set_xscale('linear')
311 aResVar.set_yscale('linear')
312 aResVar.plot(meanVecFinal, varVecFinal - varVecModelFinal, color='blue', lineStyle='-',
313 label=r'Full fit ($\chi_{\rm{red}}^2$: %g)'%chi2FullModelVar)
314 aResVar.plot(meanVecFinal, varVecFinal - varModelFinalQuadratic, color='red', lineStyle='-',
315 label=r'Quadratic fit ($\chi_{\rm{red}}^2$: %g)'%chi2QuadModelVar)
316 aResVar.plot(meanVecFinalNoB, varVecFinalNoB - varVecModelFinalNoB, color='green',
317 lineStyle='-',
318 label=r'Full fit (b=0) ($\chi_{\rm{red}}^2$: %g)'%chi2FullModelNoBVar)
319 aResVar.axhline(color='black')
320 aResVar.set_title(amp, fontsize=titleFontSize)
321 aResVar.set_xlim([minMeanVecFinal - 0.2*deltaXlim, maxMeanVecFinal + 0.2*deltaXlim])
322 aResVar.legend(fontsize=7)
324 a3.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
325 a3.set_ylabel(r'Cov01 (ADU$^2$)', fontsize=labelFontSize)
326 a3.tick_params(labelsize=11)
327 a3.set_xscale('linear')
328 a3.set_yscale('linear')
329 a3.scatter(meanVecFinalCov01, varVecFinalCov01, c='blue', marker='o', s=markerSize)
330 a3.plot(meanVecFinalCov01, varVecModelFinalCov01, color='red', lineStyle='-')
331 a3.set_title(amp, fontsize=titleFontSize)
332 a3.set_xlim([minMeanVecFinal - 0.2*deltaXlim, maxMeanVecFinal + 0.2*deltaXlim])
334 a4.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
335 a4.set_ylabel(r'Cov10 (ADU$^2$)', fontsize=labelFontSize)
336 a4.tick_params(labelsize=11)
337 a4.set_xscale('linear')
338 a4.set_yscale('linear')
339 a4.scatter(meanVecFinalCov10, varVecFinalCov10, c='blue', marker='o', s=markerSize)
340 a4.plot(meanVecFinalCov10, varVecModelFinalCov10, color='red', lineStyle='-')
341 a4.set_title(amp, fontsize=titleFontSize)
342 a4.set_xlim([minMeanVecFinal - 0.2*deltaXlim, maxMeanVecFinal + 0.2*deltaXlim])
344 else:
345 a.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
346 a2.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
347 a3.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
348 a4.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
350 f.suptitle("PTC from covariances as in Astier+19 \n Fit: Eq. 20, Astier+19",
351 fontsize=supTitleFontSize)
352 pdfPages.savefig(f)
353 f2.suptitle("PTC from covariances as in Astier+19 (log-log) \n Fit: Eq. 20, Astier+19",
354 fontsize=supTitleFontSize)
355 pdfPages.savefig(f2)
356 fResCov00.suptitle("Residuals (data-model) for Cov00 (Var)", fontsize=supTitleFontSize)
357 pdfPages.savefig(fResCov00)
358 fCov01.suptitle("Cov01 as in Astier+19 (nearest parallel neighbor covariance) \n"
359 " Fit: Eq. 20, Astier+19", fontsize=supTitleFontSize)
360 pdfPages.savefig(fCov01)
361 fCov10.suptitle("Cov10 as in Astier+19 (nearest serial neighbor covariance) \n"
362 "Fit: Eq. 20, Astier+19", fontsize=supTitleFontSize)
363 pdfPages.savefig(fCov10)
365 return
    def plotNormalizedCovariances(self, i, j, inputMu, covs, covsModel, covsWeights, covsNoB, covsModelNoB,
                                  covsWeightsNoB, pdfPages, offset=0.004,
                                  numberOfBins=10, plotData=True, topPlot=False, log=None):
        """Plot C_ij/mu vs mu.

        Figs. 8, 10, and 11 of Astier+19.

        Parameters
        ----------
        i : `int`
            Covariance lag.
        j : `int`
            Covariance lag.
        inputMu : `dict` [`str`, `list`]
            Dictionary keyed by amp name with mean signal values.
        covs : `dict` [`str`, `list`]
            Dictionary keyed by amp names containing a list of measured
            covariances per mean flux.
        covsModel : `dict` [`str`, `list`]
            Dictionary keyed by amp names containing covariances
            model (Eq. 20 of Astier+19) per mean flux.
        covsWeights : `dict` [`str`, `list`]
            Dictionary keyed by amp names containing sqrt. of
            covariances weights.
        covsNoB : `dict` [`str`, `list`]
            Dictionary keyed by amp names containing a list of measured
            covariances per mean flux ('b'=0 in Astier+19).
        covsModelNoB : `dict` [`str`, `list`]
            Dictionary keyed by amp names containing covariances model
            (with 'b'=0 in Eq. 20 of Astier+19) per mean flux.
        covsWeightsNoB : `dict` [`str`, `list`]
            Dictionary keyed by amp names containing sqrt. of
            covariances weights ('b' = 0 in Eq. 20 of Astier+19).
        pdfPages : `matplotlib.backends.backend_pdf.PdfPages`
            PDF file where the plots will be saved.
        offset : `float`, optional
            Constant offset factor to plot covariances in same panel
            (so they don't overlap).
        numberOfBins : `int`, optional
            Number of bins for top and bottom plot.
        plotData : `bool`, optional
            Plot the data points?
        topPlot : `bool`, optional
            Plot the top plot with the covariances, and the bottom
            plot with the model residuals?
        log : `logging.Logger`, optional
            Logger to handle messages.
        """
        # Two-panel layout (curves on top, binned residuals below) unless
        # topPlot is set, in which case only the curves panel is drawn.
        if not topPlot:
            fig = plt.figure(figsize=(8, 10))
            gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
            gs.update(hspace=0)
            ax0 = plt.subplot(gs[0])
            plt.setp(ax0.get_xticklabels(), visible=False)
        else:
            fig = plt.figure(figsize=(8, 8))
            ax0 = plt.subplot(111)
            ax0.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
        ax0.tick_params(axis='both', labelsize='x-large')
        # Accumulators over all amps for the residual (bottom) panel:
        # means, (data - model) residuals, and weights.
        mue, rese, wce = [], [], []
        mueNoB, reseNoB, wceNoB = [], [], []
        for counter, amp in enumerate(covs):
            muAmp, fullCov, fullCovModel, fullCovWeight = (inputMu[amp], covs[amp], covsModel[amp],
                                                           covsWeights[amp])
            if len(fullCov) == 0:
                continue
            mu, cov, model, weightCov, _ = getFitDataFromCovariances(i, j, muAmp, fullCov, fullCovModel,
                                                                     fullCovWeight, divideByMu=True,
                                                                     returnMasked=True)

            mue += list(mu)
            rese += list(cov - model)
            wce += list(weightCov)

            fullCovNoB, fullCovModelNoB, fullCovWeightNoB = (covsNoB[amp], covsModelNoB[amp],
                                                             covsWeightsNoB[amp])
            if len(fullCovNoB) == 0:
                continue
            (muNoB, covNoB, modelNoB,
             weightCovNoB, _) = getFitDataFromCovariances(i, j, muAmp, fullCovNoB, fullCovModelNoB,
                                                          fullCovWeightNoB, divideByMu=True,
                                                          returnMasked=True)

            mueNoB += list(muNoB)
            reseNoB += list(covNoB - modelNoB)
            wceNoB += list(weightCovNoB)

            # The corresponding fit; counter*offset separates the amps
            # vertically so the curves don't overlap.
            fit_curve, = plt.plot(mu, model + counter*offset, '-', linewidth=4.0)
            # Bin plot. len(mu) = no binning.
            gind = self.indexForBins(mu, numberOfBins)

            xb, yb, wyb, sigyb = self.binData(mu, cov, gind, weightCov)
            plt.errorbar(xb, yb+counter*offset, yerr=sigyb, marker='o', linestyle='none', markersize=6.5,
                         color=fit_curve.get_color(), label=f"{amp} (N: {len(mu)})")
            # Plot the data.
            if plotData:
                points, = plt.plot(mu, cov + counter*offset, '.', color=fit_curve.get_color())
            plt.legend(loc='upper right', fontsize=8)
        # end loop on amps
        mue = np.array(mue)
        rese = np.array(rese)
        wce = np.array(wce)
        mueNoB = np.array(mueNoB)
        reseNoB = np.array(reseNoB)
        wceNoB = np.array(wceNoB)

        plt.xlabel(r"$\mu (el)$", fontsize='x-large')
        plt.ylabel(r"$Cov{%d%d}/\mu + Cst (el)$"%(i, j), fontsize='x-large')
        if (not topPlot):
            # Bottom panel: binned residuals for the full and b=0 fits.
            gind = self.indexForBins(mue, numberOfBins)
            xb, yb, wyb, sigyb = self.binData(mue, rese, gind, wce)

            ax1 = plt.subplot(gs[1], sharex=ax0)
            ax1.errorbar(xb, yb, yerr=sigyb, marker='o', linestyle='none', label='Full fit')
            gindNoB = self.indexForBins(mueNoB, numberOfBins)
            xb2, yb2, wyb2, sigyb2 = self.binData(mueNoB, reseNoB, gindNoB, wceNoB)

            ax1.errorbar(xb2, yb2, yerr=sigyb2, marker='o', linestyle='none', label='b = 0')
            ax1.tick_params(axis='both', labelsize='x-large')
            plt.legend(loc='upper left', fontsize='large')
            # Horizontal line at zero.
            plt.plot(xb, [0]*len(xb), '--', color='k')
            plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
            plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
            plt.xlabel(r'$\mu (el)$', fontsize='x-large')
            plt.ylabel(r'$Cov{%d%d}/\mu$ -model (el)'%(i, j), fontsize='x-large')
        plt.tight_layout()
        plt.suptitle(f"Nbins: {numberOfBins}")
        # Blank out the lowest y tick label to avoid overlapping labels
        # between the stacked panels.
        fig.canvas.draw()
        labels0 = [item.get_text() for item in ax0.get_yticklabels()]
        labels0[0] = u''
        ax0.set_yticklabels(labels0)
        pdfPages.savefig(fig)

        return
523 @staticmethod
524 def plot_a_b(aDict, bDict, pdfPages, bRange=3):
525 """Fig. 12 of Astier+19
527 Color display of a and b arrays fits, averaged over channels.
529 Parameters
530 ----------
531 aDict : `dict` [`numpy.array`]
532 Dictionary keyed by amp names containing the fitted 'a'
533 coefficients from the model in Eq. 20 of Astier+19 (if
534 `ptcFitType` is `FULLCOVARIANCE`).
536 bDict : `dict` [`numpy.array`]
537 Dictionary keyed by amp names containing the fitted 'b'
538 coefficients from the model in Eq. 20 of Astier+19 (if
539 `ptcFitType` is `FULLCOVARIANCE`).
541 pdfPages : `matplotlib.backends.backend_pdf.PdfPages`
542 PDF file where the plots will be saved.
544 bRange : `int`
545 Maximum lag for b arrays.
546 """
547 a, b = [], []
548 for amp in aDict:
549 if np.isnan(aDict[amp]).all():
550 continue
551 a.append(aDict[amp])
552 b.append(bDict[amp])
553 a = np.array(a).mean(axis=0)
554 b = np.array(b).mean(axis=0)
555 fig = plt.figure(figsize=(7, 11))
556 ax0 = fig.add_subplot(2, 1, 1)
557 im0 = ax0.imshow(np.abs(a.transpose()), origin='lower', norm=mpl.colors.LogNorm())
558 ax0.tick_params(axis='both', labelsize='x-large')
559 ax0.set_title(r'$|a|$', fontsize='x-large')
560 ax0.xaxis.set_ticks_position('bottom')
561 cb0 = plt.colorbar(im0)
562 cb0.ax.tick_params(labelsize='x-large')
564 ax1 = fig.add_subplot(2, 1, 2)
565 ax1.tick_params(axis='both', labelsize='x-large')
566 ax1.yaxis.set_major_locator(MaxNLocator(integer=True))
567 ax1.xaxis.set_major_locator(MaxNLocator(integer=True))
568 im1 = ax1.imshow(1e6*b[:bRange, :bRange].transpose(), origin='lower')
569 cb1 = plt.colorbar(im1)
570 cb1.ax.tick_params(labelsize='x-large')
571 ax1.set_title(r'$b \times 10^6$', fontsize='x-large')
572 ax1.xaxis.set_ticks_position('bottom')
573 plt.tight_layout()
574 pdfPages.savefig(fig)
576 return
578 @staticmethod
579 def ab_vs_dist(aDict, bDict, pdfPages, bRange=4):
580 """Fig. 13 of Astier+19.
582 Values of a and b arrays fits, averaged over amplifiers, as a
583 function of distance.
585 Parameters
586 ----------
587 aDict : `dict` [`numpy.array`]
588 Dictionary keyed by amp names containing the fitted 'a'
589 coefficients from the model in Eq. 20 of Astier+19 (if
590 `ptcFitType` is `FULLCOVARIANCE`).
592 bDict : `dict` [`numpy.array`]
593 Dictionary keyed by amp names containing the fitted 'b'
594 coefficients from the model in Eq. 20 of Astier+19 (if
595 `ptcFitType` is `FULLCOVARIANCE`).
597 pdfPages : `matplotlib.backends.backend_pdf.PdfPages`
598 PDF file where the plots will be saved.
600 bRange : `int`
601 Maximum lag for b arrays.
602 """
603 assert (len(aDict) == len(bDict))
604 a = []
605 for amp in aDict:
606 if np.isnan(aDict[amp]).all():
607 continue
608 a.append(aDict[amp])
609 a = np.array(a)
610 y = a.mean(axis=0)
611 sy = a.std(axis=0)/np.sqrt(len(aDict))
612 i, j = np.indices(y.shape)
613 upper = (i >= j).ravel()
614 r = np.sqrt(i**2 + j**2).ravel()
615 y = y.ravel()
616 sy = sy.ravel()
617 fig = plt.figure(figsize=(6, 9))
618 ax = fig.add_subplot(211)
619 ax.set_xlim([0.5, r.max()+1])
620 ax.errorbar(r[upper], y[upper], yerr=sy[upper], marker='o', linestyle='none', color='b',
621 label='$i>=j$')
622 ax.errorbar(r[~upper], y[~upper], yerr=sy[~upper], marker='o', linestyle='none', color='r',
623 label='$i<j$')
624 ax.legend(loc='upper center', fontsize='x-large')
625 ax.set_xlabel(r'$\sqrt{i^2+j^2}$', fontsize='x-large')
626 ax.set_ylabel(r'$a_{ij}$', fontsize='x-large')
627 ax.set_yscale('log')
628 ax.tick_params(axis='both', labelsize='x-large')
630 #
631 axb = fig.add_subplot(212)
632 b = []
633 for amp in bDict:
634 if np.isnan(bDict[amp]).all():
635 continue
636 b.append(bDict[amp])
637 b = np.array(b)
638 yb = b.mean(axis=0)
639 syb = b.std(axis=0)/np.sqrt(len(bDict))
640 ib, jb = np.indices(yb.shape)
641 upper = (ib > jb).ravel()
642 rb = np.sqrt(i**2 + j**2).ravel()
643 yb = yb.ravel()
644 syb = syb.ravel()
645 xmin = -0.2
646 xmax = bRange
647 axb.set_xlim([xmin, xmax+0.2])
648 cutu = (r > xmin) & (r < xmax) & (upper)
649 cutl = (r > xmin) & (r < xmax) & (~upper)
650 axb.errorbar(rb[cutu], yb[cutu], yerr=syb[cutu], marker='o', linestyle='none', color='b',
651 label='$i>=j$')
652 axb.errorbar(rb[cutl], yb[cutl], yerr=syb[cutl], marker='o', linestyle='none', color='r',
653 label='$i<j$')
654 plt.legend(loc='upper center', fontsize='x-large')
655 axb.set_xlabel(r'$\sqrt{i^2+j^2}$', fontsize='x-large')
656 axb.set_ylabel(r'$b_{ij}$', fontsize='x-large')
657 axb.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
658 axb.tick_params(axis='both', labelsize='x-large')
659 plt.tight_layout()
660 pdfPages.savefig(fig)
662 return
664 @staticmethod
665 def plotAcoeffsSum(aDict, bDict, pdfPages):
666 """Fig. 14. of Astier+19
668 Cumulative sum of a_ij as a function of maximum
669 separation. This plot displays the average over channels.
671 Parameters
672 ----------
673 aDict : `dict` [`numpy.array`]
674 Dictionary keyed by amp names containing the fitted 'a'
675 coefficients from the model in Eq. 20 of Astier+19 (if
676 `ptcFitType` is `FULLCOVARIANCE`).
678 bDict : `dict` [`numpy.array`]
679 Dictionary keyed by amp names containing the fitted 'b'
680 coefficients from the model in Eq. 20 of Astier+19 (if
681 `ptcFitType` is `FULLCOVARIANCE`).
683 pdfPages : `matplotlib.backends.backend_pdf.PdfPages`
684 PDF file where the plots will be saved.
685 """
686 assert (len(aDict) == len(bDict))
687 a, b = [], []
688 for amp in aDict:
689 if np.isnan(aDict[amp]).all() or np.isnan(bDict[amp]).all():
690 continue
691 a.append(aDict[amp])
692 b.append(bDict[amp])
693 a = np.array(a).mean(axis=0)
694 b = np.array(b).mean(axis=0)
695 fig = plt.figure(figsize=(7, 6))
696 w = 4*np.ones_like(a)
697 w[0, 1:] = 2
698 w[1:, 0] = 2
699 w[0, 0] = 1
700 wa = w*a
701 indices = range(1, a.shape[0]+1)
702 sums = [wa[0:n, 0:n].sum() for n in indices]
703 ax = plt.subplot(111)
704 ax.plot(indices, sums/sums[0], 'o', color='b')
705 ax.set_yscale('log')
706 ax.set_xlim(indices[0]-0.5, indices[-1]+0.5)
707 ax.set_ylim(None, 1.2)
708 ax.set_ylabel(r'$[\sum_{|i|<n\ &\ |j|<n} a_{ij}] / |a_{00}|$', fontsize='x-large')
709 ax.set_xlabel('n', fontsize='x-large')
710 ax.tick_params(axis='both', labelsize='x-large')
711 plt.tight_layout()
712 pdfPages.savefig(fig)
714 return
    @staticmethod
    def plotRelativeBiasACoeffs(aDict, aDictNoB, fullCovsModel, fullCovsModelNoB, signalElectrons,
                                gainDict, pdfPages, maxr=None):
        """Fig. 15 in Astier+19.

        Illustrates systematic bias from estimating 'a'
        coefficients from the slope of correlations as opposed to the
        full model in Astier+19.

        Parameters
        ----------
        aDict : `dict`
            Dictionary of 'a' matrices (Eq. 20, Astier+19), with amp
            names as keys.
        aDictNoB : `dict`
            Dictionary of 'a' matrices ('b'= 0 in Eq. 20, Astier+19),
            with amp names as keys.
        fullCovsModel : `dict` [`str`, `list`]
            Dictionary keyed by amp names containing covariances model
            per mean flux.
        fullCovsModelNoB : `dict` [`str`, `list`]
            Dictionary keyed by amp names containing covariances model
            (with 'b'=0 in Eq. 20 of Astier+19) per mean flux.
        signalElectrons : `float`
            Signal at which to evaluate the a_ij coefficients.
        gainDict : `dict` [`str`, `float`]
            Dictionary keyed by amp names with the gains in e-/ADU.
        pdfPages : `matplotlib.backends.backend_pdf.PdfPages`
            PDF file where the plots will be saved.
        maxr : `int`, optional
            Maximum lag.
        """
        fig = plt.figure(figsize=(7, 11))
        # Two stacked panels: full-model vs slope-based 'a', with and
        # without the 'b' term.
        title = [f"'a' relative bias at {signalElectrons} e", "'a' relative bias (b=0)"]
        data = [(aDict, fullCovsModel), (aDictNoB, fullCovsModelNoB)]

        for k, pair in enumerate(data):
            diffs = []
            amean = []
            for amp in pair[0]:
                covModel = np.array(pair[1][amp])
                if np.isnan(covModel).all():  # all-NaN model means a bad amp
                    continue
                # Compute the "a" coefficients of the Antilogus+14
                # (1402.0725) model as in Guyonnet+15 (1501.01577,
                # eq. 16, the slope of cov/var at a given flux mu in
                # electrons). Eq. 16 of 1501.01577 is an approximation
                # to the more complete model in Astier+19
                # (1905.08677).
                var = covModel[0, 0, 0]  # ADU^2
                # For a result in electrons^-1, we have to use mu in electrons
                aOld = covModel[0, :, :]/(var*signalElectrons)
                a = pair[0][amp]
                amean.append(a)
                diffs.append((aOld-a))
            amean = np.array(amean).mean(axis=0)
            diff = np.array(diffs).mean(axis=0)
            # Relative difference between the two estimators.
            diff = diff/amean
            diff = diff[:]  # no-op slice copy, kept as-is
            # The difference should be close to zero
            diff[0, 0] = 0
            if maxr is None:
                maxr = diff.shape[0]
            diff = diff[:maxr, :maxr]
            ax0 = fig.add_subplot(2, 1, k+1)
            im0 = ax0.imshow(diff.transpose(), origin='lower')
            ax0.yaxis.set_major_locator(MaxNLocator(integer=True))
            ax0.xaxis.set_major_locator(MaxNLocator(integer=True))
            ax0.tick_params(axis='both', labelsize='x-large')
            plt.colorbar(im0)
            ax0.set_title(title[k])

        plt.tight_layout()
        pdfPages.savefig(fig)

        return
800 def _plotStandardPtc(self, dataset, ptcFitType, pdfPages):
801 """Plot PTC, var/signal vs signal, linearity, and linearity residual
802 per amplifier.
804 Parameters
805 ----------
806 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
807 The dataset containing the means, variances, exposure
808 times, and mask.
810 ptcFitType : `str`
811 Type of the model fit to the PTC. Options:
812 'FULLCOVARIANCE', EXPAPPROXIMATION, or 'POLYNOMIAL'.
814 pdfPages : `matplotlib.backends.backend_pdf.PdfPages`
815 PDF file where the plots will be saved.
816 """
817 if ptcFitType == 'EXPAPPROXIMATION':
818 ptcFunc = funcAstier
819 stringTitle = (r"Var = $\frac{1}{2g^2a_{00}}(\exp (2a_{00} \mu g) - 1) + \frac{n_{00}}{g^2}$ ")
820 elif ptcFitType == 'POLYNOMIAL':
821 ptcFunc = funcPolynomial
822 for key in dataset.ptcFitPars:
823 deg = len(dataset.ptcFitPars[key]) - 1
824 break
825 stringTitle = r"Polynomial (degree: %g)" % (deg)
826 else:
827 raise RuntimeError(f"The input dataset had an invalid dataset.ptcFitType: {ptcFitType}. \n"
828 "Options: 'FULLCOVARIANCE', EXPAPPROXIMATION, or 'POLYNOMIAL'.")
830 legendFontSize = 6.5
831 labelFontSize = 8
832 titleFontSize = 9
833 supTitleFontSize = 18
834 markerSize = 25
836 # General determination of the size of the plot grid
837 nAmps = len(dataset.ampNames)
838 if nAmps == 2:
839 nRows, nCols = 2, 1
840 nRows = np.sqrt(nAmps)
841 mantissa, _ = np.modf(nRows)
842 if mantissa > 0:
843 nRows = int(nRows) + 1
844 nCols = nRows
845 else:
846 nRows = int(nRows)
847 nCols = nRows
849 f, ax = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
850 f2, ax2 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
851 f3, ax3 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
853 for i, (amp, a, a2, a3) in enumerate(zip(dataset.ampNames, ax.flatten(), ax2.flatten(),
854 ax3.flatten())):
855 meanVecOriginal = np.ravel(np.array(dataset.rawMeans[amp]))
856 varVecOriginal = np.ravel(np.array(dataset.rawVars[amp]))
857 mask = np.ravel(np.array(dataset.expIdMask[amp]))
858 if np.sum(mask) == 0: # The whole amp is bad
859 a.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
860 a2.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
861 a3.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
862 continue
863 else:
864 mask = mask.astype(bool)
865 meanVecFinal = meanVecOriginal[mask]
866 varVecFinal = varVecOriginal[mask]
867 meanVecOutliers = meanVecOriginal[np.invert(mask)]
868 varVecOutliers = varVecOriginal[np.invert(mask)]
869 pars, parsErr = np.array(dataset.ptcFitPars[amp]), np.array(dataset.ptcFitParsError[amp])
870 ptcRedChi2 = dataset.ptcFitChiSq[amp]
871 if ptcFitType == 'EXPAPPROXIMATION':
872 if len(meanVecFinal):
873 ptcA00, ptcA00error = pars[0], parsErr[0]
874 ptcGain, ptcGainError = pars[1], parsErr[1]
875 ptcNoise = np.sqrt((pars[2])) # pars[2] is in (e-)^2
876 ptcNoiseAdu = ptcNoise*(1./ptcGain)
877 ptcNoiseError = 0.5*(parsErr[2]/np.fabs(pars[2]))*np.sqrt(np.fabs(pars[2]))
878 stringLegend = (f"a00: {ptcA00:.2e}+/-{ptcA00error:.2e} 1/e"
879 f"\nGain: {ptcGain:.4}+/-{ptcGainError:.2e} e/ADU"
880 f"\nNoise: {ptcNoise:.4}+/-{ptcNoiseError:.2e} e\n"
881 r"$\chi^2_{\rm{red}}$: " + f"{ptcRedChi2:.4}"
882 f"\nLast in fit: {meanVecFinal[-1]:.7} ADU ")
884 if ptcFitType == 'POLYNOMIAL':
885 if len(meanVecFinal):
886 ptcGain, ptcGainError = 1./pars[1], np.fabs(1./pars[1])*(parsErr[1]/pars[1])
887 ptcNoiseAdu = np.sqrt((pars[0])) # pars[0] is in ADU^2
888 ptcNoise = ptcNoiseAdu*ptcGain
889 ptcNoiseError = (0.5*(parsErr[0]/np.fabs(pars[0]))*(np.sqrt(np.fabs(pars[0]))))*ptcGain
890 stringLegend = (f"Gain: {ptcGain:.4}+/-{ptcGainError:.2e} e/ADU\n"
891 f"Noise: {ptcNoise:.4}+/-{ptcNoiseError:.2e} e\n"
892 r"$\chi^2_{\rm{red}}$: " + f"{ptcRedChi2:.4}"
893 f"\nLast in fit: {meanVecFinal[-1]:.7} ADU ")
894 a.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
895 a.set_ylabel(r'Variance (ADU$^2$)', fontsize=labelFontSize)
896 a.tick_params(labelsize=11)
897 a.set_xscale('linear')
898 a.set_yscale('linear')
900 a2.set_xlabel(r'Mean Signal ($\mu$, ADU)', fontsize=labelFontSize)
901 a2.set_ylabel(r'Variance (ADU$^2$)', fontsize=labelFontSize)
902 a2.tick_params(labelsize=11)
903 a2.set_xscale('log')
904 a2.set_yscale('log')
906 a3.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
907 a3.set_ylabel(r'Variance/$\mu$ (ADU)', fontsize=labelFontSize)
908 a3.tick_params(labelsize=11)
909 a3.set_xscale('log')
910 a3.set_yscale('linear')
911 minMeanVecFinal = np.nanmin(meanVecFinal)
912 maxMeanVecFinal = np.nanmax(meanVecFinal)
913 meanVecFit = np.linspace(minMeanVecFinal, maxMeanVecFinal, 100*len(meanVecFinal))
914 minMeanVecOriginal = np.nanmin(meanVecOriginal)
915 maxMeanVecOriginal = np.nanmax(meanVecOriginal)
916 deltaXlim = maxMeanVecOriginal - minMeanVecOriginal
917 a.plot(meanVecFit, ptcFunc(pars, meanVecFit), color='red')
918 a.plot(meanVecFinal, ptcNoiseAdu**2 + (1./ptcGain)*meanVecFinal, color='green',
919 linestyle='--')
920 a.scatter(meanVecFinal, varVecFinal, c='blue', marker='o', s=markerSize)
921 a.scatter(meanVecOutliers, varVecOutliers, c='magenta', marker='s', s=markerSize)
922 a.text(0.03, 0.66, stringLegend, transform=a.transAxes, fontsize=legendFontSize)
923 a.set_title(amp, fontsize=titleFontSize)
924 a.set_xlim([minMeanVecOriginal - 0.2*deltaXlim, maxMeanVecOriginal + 0.2*deltaXlim])
926 # Same, but in log-scale
927 a2.plot(meanVecFit, ptcFunc(pars, meanVecFit), color='red')
928 a2.scatter(meanVecFinal, varVecFinal, c='blue', marker='o', s=markerSize)
929 a2.scatter(meanVecOutliers, varVecOutliers, c='magenta', marker='s', s=markerSize)
930 a2.text(0.03, 0.66, stringLegend, transform=a2.transAxes, fontsize=legendFontSize)
931 a2.set_title(amp, fontsize=titleFontSize)
932 a2.set_xlim([minMeanVecOriginal, maxMeanVecOriginal])
934 # Var/mu vs mu
935 a3.plot(meanVecFit, ptcFunc(pars, meanVecFit)/meanVecFit, color='red')
936 a3.scatter(meanVecFinal, varVecFinal/meanVecFinal, c='blue', marker='o', s=markerSize)
937 a3.scatter(meanVecOutliers, varVecOutliers/meanVecOutliers, c='magenta', marker='s',
938 s=markerSize)
939 a3.text(0.05, 0.1, stringLegend, transform=a3.transAxes, fontsize=legendFontSize)
940 a3.set_title(amp, fontsize=titleFontSize)
941 a3.set_xlim([minMeanVecOriginal - 0.2*deltaXlim, maxMeanVecOriginal + 0.2*deltaXlim])
942 f.suptitle("PTC \n Fit: " + stringTitle, fontsize=supTitleFontSize)
943 pdfPages.savefig(f)
944 f2.suptitle("PTC (log-log)", fontsize=supTitleFontSize)
945 pdfPages.savefig(f2)
946 f3.suptitle(r"Var/$\mu$", fontsize=supTitleFontSize)
947 pdfPages.savefig(f3)
949 return
    def _plotLinearizer(self, dataset, linearizer, pdfPages):
        """Plot linearity and linearity residual per amplifier

        Produces two multi-panel figures (one subplot per amplifier) and
        appends both to the output PDF: (1) mean signal vs. exposure time
        with the polynomial linearity fit overlaid, and (2) the fractional
        nonlinearity residual (in percent) vs. mean signal.

        Parameters
        ----------
        dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
            The dataset containing the means, variances, exposure
            times, and mask.

        linearizer : `lsst.ip.isr.Linearizer`
            Linearizer object

        pdfPages : `matplotlib.backends.backend_pdf.PdfPages`
            PDF file where the two figures will be saved.
        """
        legendFontSize = 7
        labelFontSize = 7
        titleFontSize = 9
        supTitleFontSize = 18

        # General determination of the size of the plot grid
        nAmps = len(dataset.ampNames)
        if nAmps == 2:
            nRows, nCols = 2, 1
        # NOTE(review): the 2-amp special case above appears to be
        # immediately overwritten here — confirm whether a 2x1 grid was
        # intended for nAmps == 2.
        nRows = np.sqrt(nAmps)
        mantissa, _ = np.modf(nRows)
        if mantissa > 0:
            # Non-square amp count: round up to the next square grid.
            nRows = int(nRows) + 1
            nCols = nRows
        else:
            nRows = int(nRows)
            nCols = nRows

        # Plot mean vs time (f1), and fractional residuals (f2)
        f, ax = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
        f2, ax2 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
        for i, (amp, a, a2) in enumerate(zip(dataset.ampNames, ax.flatten(), ax2.flatten())):
            mask = dataset.expIdMask[amp]
            if np.sum(mask) == 0:  # Bad amp
                # Mark the panels but leave them empty.
                a.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
                a2.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
                continue
            else:
                mask = mask.astype(bool)
            # Keep only the exposures that survived the PTC outlier mask.
            meanVecFinal = np.array(dataset.rawMeans[amp])[mask]
            timeVecFinal = np.array(dataset.rawExpTimes[amp])[mask]

            a.set_xlabel('Time (sec)', fontsize=labelFontSize)
            a.set_ylabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
            a.tick_params(labelsize=labelFontSize)
            a.set_xscale('linear')
            a.set_yscale('linear')

            a2.axhline(y=0, color='k')
            a2.axvline(x=0, color='k', linestyle='-')
            a2.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
            a2.set_ylabel('Fractional nonlinearity (%)', fontsize=labelFontSize)
            a2.tick_params(labelsize=labelFontSize)
            a2.set_xscale('linear')
            a2.set_yscale('linear')

            # Polynomial fit coefficients and their errors; the legend
            # reports the first three terms (k0, k1, k2) only.
            pars, parsErr = linearizer.fitParams[amp], linearizer.fitParamsErr[amp]
            k0, k0Error = pars[0], parsErr[0]
            k1, k1Error = pars[1], parsErr[1]
            k2, k2Error = pars[2], parsErr[2]
            linRedChi2 = linearizer.fitChiSq[amp]
            stringLegend = (f"k0: {k0:.4}+/-{k0Error:.2e} ADU\nk1: {k1:.4}+/-{k1Error:.2e} ADU/t"
                            f"\nk2: {k2:.2e}+/-{k2Error:.2e} ADU/t^2\n"
                            r"$\chi^2_{\rm{red}}$: " + f"{linRedChi2:.4}")
            a.scatter(timeVecFinal, meanVecFinal)
            a.plot(timeVecFinal, funcPolynomial(pars, timeVecFinal), color='red')
            a.text(0.03, 0.75, stringLegend, transform=a.transAxes, fontsize=legendFontSize)
            a.set_title(f"{amp}", fontsize=titleFontSize)

            # Fractional residual relative to the linear (k0 + k1*t) part.
            linearPart = k0 + k1*timeVecFinal
            fracLinRes = 100*(linearPart - meanVecFinal)/linearPart
            a2.plot(meanVecFinal, fracLinRes, c='g')
            a2.set_title(f"{amp}", fontsize=titleFontSize)

        # NOTE(review): `pars` here is whatever the LAST good amp set in the
        # loop; if every amp is bad this raises NameError — confirm intent.
        f.suptitle("Linearity \n Fit: Polynomial (degree: %g)"
                   % (len(pars)-1),
                   fontsize=supTitleFontSize)
        f2.suptitle(r"Fractional NL residual" "\n"
                    r"$100\times \frac{(k_0 + k_1*Time-\mu)}{k_0+k_1*Time}$",
                    fontsize=supTitleFontSize)
        pdfPages.savefig(f)
        pdfPages.savefig(f2)
1036 @staticmethod
1037 def findGroups(x, maxDiff):
1038 """Group data into bins, with at most maxDiff distance between bins.
1040 Parameters
1041 ----------
1042 x : `list`
1043 Data to bin.
1045 maxDiff : `int`
1046 Maximum distance between bins.
1048 Returns
1049 -------
1050 index : `list`
1051 Bin indices.
1052 """
1053 ix = np.argsort(x)
1054 xsort = np.sort(x)
1055 index = np.zeros_like(x, dtype=np.int32)
1056 xc = xsort[0]
1057 group = 0
1058 ng = 1
1060 for i in range(1, len(ix)):
1061 xval = xsort[i]
1062 if (xval - xc < maxDiff):
1063 xc = (ng*xc + xval)/(ng+1)
1064 ng += 1
1065 index[ix[i]] = group
1066 else:
1067 group += 1
1068 ng = 1
1069 index[ix[i]] = group
1070 xc = xval
1072 return index
1074 @staticmethod
1075 def indexForBins(x, nBins):
1076 """Builds an index with regular binning. The result can be fed into
1077 binData.
1079 Parameters
1080 ----------
1081 x : `numpy.array`
1082 Data to bin.
1083 nBins : `int`
1084 Number of bin.
1086 Returns
1087 -------
1088 np.digitize(x, bins): `numpy.array`
1089 Bin indices.
1090 """
1091 bins = np.linspace(x.min(), x.max() + abs(x.max() * 1e-7), nBins + 1)
1092 return np.digitize(x, bins)
1094 @staticmethod
1095 def binData(x, y, binIndex, wy=None):
1096 """Bin data (usually for display purposes).
1098 Parameters
1099 ----------
1100 x : `numpy.array`
1101 Data to bin.
1103 y : `numpy.array`
1104 Data to bin.
1106 binIdex : `list`
1107 Bin number of each datum.
1109 wy : `numpy.array`
1110 Inverse rms of each datum to use when averaging (the
1111 actual weight is wy**2).
1113 Returns
1114 -------
1115 xbin : `numpy.array`
1116 Binned data in x.
1118 ybin : `numpy.array`
1119 Binned data in y.
1121 wybin : `numpy.array`
1122 Binned weights in y, computed from wy's in each bin.
1124 sybin : `numpy.array`
1125 Uncertainty on the bin average, considering actual
1126 scatter, and ignoring weights.
1127 """
1128 if wy is None:
1129 wy = np.ones_like(x)
1130 binIndexSet = set(binIndex)
1131 w2 = wy*wy
1132 xw2 = x*(w2)
1133 xbin = np.array([xw2[binIndex == i].sum()/w2[binIndex == i].sum() for i in binIndexSet])
1135 yw2 = y*w2
1136 ybin = np.array([yw2[binIndex == i].sum()/w2[binIndex == i].sum() for i in binIndexSet])
1138 wybin = np.sqrt(np.array([w2[binIndex == i].sum() for i in binIndexSet]))
1139 sybin = np.array([y[binIndex == i].std()/np.sqrt(np.array([binIndex == i]).sum())
1140 for i in binIndexSet])
1142 return xbin, ybin, wybin, sybin