Coverage for python/lsst/cp/pipe/ptc/plotPtc.py: 5%
1# This file is part of cp_pipe.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <https://www.gnu.org/licenses/>.
21#
23__all__ = ['PlotPhotonTransferCurveTask']
25import logging
26import numpy as np
27import matplotlib.pyplot as plt
28import matplotlib as mpl
29from matplotlib import gridspec
30import os
31from matplotlib.backends.backend_pdf import PdfPages
33import lsst.ip.isr as isr
35from lsst.cp.pipe.utils import (funcAstier, funcPolynomial,
36 calculateWeightedReducedChi2)
37from matplotlib.ticker import MaxNLocator
38from lsst.cp.pipe.ptc.astierCovPtcUtils import getFitDataFromCovariances
39from lsst.ip.isr import PhotonTransferCurveDataset
42class PlotPhotonTransferCurveTask():
43 """A class to plot the dataset from MeasurePhotonTransferCurveTask.
45 Parameters
46 ----------
48 datasetFilename : `str`
49 Name of the FITS file containing the PTC dataset
50 (`lsst.ip.isr.PhotonTransferCurveDataset`).
52 linearizerFileName : `str`, optional
53 Name of the FITS file containing the linearizer
54 (`lsst.ip.isr.linearize.Linearizer`).
56 outDir : `str`, optional
57 Path to the output directory where the final PDF will
58 be placed.
60 detNum : `int`, optional
61 Detector number.
63 signalElectronsRelativeA : `float`, optional
64 Signal value (in electrons) at which to evaluate the relative
65 systematic bias between methods of estimating a_ij (Fig. 15 of Astier+19).
67 plotNormalizedCovariancesNumberOfBins : `int`, optional
68 Number of bins in the `plotNormalizedCovariances` function
69 (Figs. 8 and 10 of Astier+19).
71 Notes
72 -----
73 The plotting code in this file is almost identical to the code in
74 `plotPtcGen2.py`. If further changes are implemented in this file,
75 `plotPtcGen2.py` needs to be updated accordingly, and vice versa.
76 The file `plotPtcGen2.py` helps with maintaining backwards
77 compatibility with gen2 as we transition to gen3; the code
78 duplication is meant to last for only a few months from now
79 (January 2021). At that point only this file, `plotPtc.py`, will
80 remain.
81 """
83 def __init__(self, datasetFilename, linearizerFileName=None,
84 outDir='.', detNum=999, signalElectronsRelativeA=75000,
85 plotNormalizedCovariancesNumberOfBins=10):
86 self.datasetFilename = datasetFilename
87 self.linearizerFileName = linearizerFileName
88 self.detNum = detNum
89 self.signalElectronsRelativeA = signalElectronsRelativeA
90 self.plotNormalizedCovariancesNumberOfBins = plotNormalizedCovariancesNumberOfBins
91 self.outDir = outDir
93 def runDataRef(self):
94 """Run the Photon Transfer Curve (PTC) plotting measurement task.
95 """
96 datasetFile = self.datasetFilename
97 datasetPtc = PhotonTransferCurveDataset.readFits(datasetFile)
99 dirname = self.outDir
100 if not os.path.exists(dirname):
101 os.makedirs(dirname)
103 detNum = self.detNum
104 filename = f"PTC_det{detNum}.pdf"
105 filenameFull = os.path.join(dirname, filename)
107 if self.linearizerFileName:
108 linearizer = isr.linearize.Linearizer.readFits(self.linearizerFileName)
109 else:
110 linearizer = None
111 self.run(filenameFull, datasetPtc, linearizer=linearizer, log=logging.getLogger(__name__))
113 return
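    # Example (a minimal usage sketch; the file names, detector number, and
    # output directory below are placeholders, not outputs of this package):
    #
    #     task = PlotPhotonTransferCurveTask("ptcDataset.fits",
    #                                        linearizerFileName="linearizer.fits",
    #                                        outDir="ptcPlots", detNum=22)
    #     task.runDataRef()  # writes ptcPlots/PTC_det22.pdf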
115 def run(self, filenameFull, datasetPtc, linearizer=None, log=None):
116 """Make the plots for the PTC task"""
117 ptcFitType = datasetPtc.ptcFitType
118 with PdfPages(filenameFull) as pdfPages:
119 if ptcFitType in ["FULLCOVARIANCE", ]:
120 self.covAstierMakeAllPlots(datasetPtc, pdfPages, log=log)
121 elif ptcFitType in ["EXPAPPROXIMATION", "POLYNOMIAL"]:
122 self._plotStandardPtc(datasetPtc, ptcFitType, pdfPages)
123 else:
124 raise RuntimeError(f"The input dataset had an invalid dataset.ptcFitType: {ptcFitType}. \n"
125 "Options: 'FULLCOVARIANCE', 'EXPAPPROXIMATION', or 'POLYNOMIAL'.")
126 if linearizer:
127 self._plotLinearizer(datasetPtc, linearizer, pdfPages)
129 return
131 def covAstierMakeAllPlots(self, dataset, pdfPages,
132 log=None):
133 """Make plots for MeasurePhotonTransferCurve task when
134 doCovariancesAstier=True.
136 This function calls other functions that mostly reproduce the
137 plots in Astier+19. Most of the code is ported from Pierre
138 Astier's repository, https://github.com/PierreAstier/bfptc.
140 Parameters
141 ----------
142 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
143 The dataset containing the necessary information to
144 produce the plots.
146 pdfPages : `matplotlib.backends.backend_pdf.PdfPages`
147 PDF file where the plots will be saved.
149 log : `logging.Logger`, optional
150 Logger to handle messages.
151 """
152 mu = dataset.finalMeans
153 # dictionaries with ampNames as keys
154 fullCovs = dataset.covariances
155 fullCovsModel = dataset.covariancesModel
156 fullCovWeights = dataset.covariancesSqrtWeights
157 aDict = dataset.aMatrix
158 bDict = dataset.bMatrix
159 fullCovsNoB = dataset.covariances
160 fullCovsModelNoB = dataset.covariancesModelNoB
161 fullCovWeightsNoB = dataset.covariancesSqrtWeights
162 aDictNoB = dataset.aMatrixNoB
163 gainDict = dataset.gain
164 noiseDict = dataset.noise
166 self.plotCovariances(mu, fullCovs, fullCovsModel, fullCovWeights, fullCovsNoB, fullCovsModelNoB,
167 fullCovWeightsNoB, gainDict, noiseDict, aDict, bDict, pdfPages)
168 self.plotNormalizedCovariances(0, 0, mu, fullCovs, fullCovsModel, fullCovWeights, fullCovsNoB,
169 fullCovsModelNoB, fullCovWeightsNoB, pdfPages,
170 offset=0.01, topPlot=True,
171 numberOfBins=self.plotNormalizedCovariancesNumberOfBins,
172 log=log)
173 self.plotNormalizedCovariances(0, 1, mu, fullCovs, fullCovsModel, fullCovWeights, fullCovsNoB,
174 fullCovsModelNoB, fullCovWeightsNoB, pdfPages,
175 numberOfBins=self.plotNormalizedCovariancesNumberOfBins,
176 log=log)
177 self.plotNormalizedCovariances(1, 0, mu, fullCovs, fullCovsModel, fullCovWeights, fullCovsNoB,
178 fullCovsModelNoB, fullCovWeightsNoB, pdfPages,
179 numberOfBins=self.plotNormalizedCovariancesNumberOfBins,
180 log=log)
181 self.plot_a_b(aDict, bDict, pdfPages)
182 self.ab_vs_dist(aDict, bDict, pdfPages, bRange=4)
183 self.plotAcoeffsSum(aDict, bDict, pdfPages)
184 self.plotRelativeBiasACoeffs(aDict, aDictNoB, fullCovsModel, fullCovsModelNoB,
185 self.signalElectronsRelativeA, gainDict, pdfPages, maxr=4)
187 return
189 @staticmethod
190 def plotCovariances(mu, covs, covsModel, covsWeights, covsNoB, covsModelNoB, covsWeightsNoB,
191 gainDict, noiseDict, aDict, bDict, pdfPages):
192 """Plot covariances and models: Cov00, Cov10, Cov01.
194 Figs. 6 and 7 of Astier+19
196 Parameters
197 ----------
198 mu : `dict` [`str`, `list`]
199 Dictionary keyed by amp name with mean signal values.
201 covs : `dict` [`str`, `list`]
202 Dictionary keyed by amp names containing a list of measured
203 covariances per mean flux.
205 covsModel : `dict` [`str`, `list`]
206 Dictionary keyed by amp names containing covariances
207 model (Eq. 20 of Astier+19) per mean flux.
209 covsWeights : `dict` [`str`, `list`]
210 Dictionary keyed by amp names containing the square root
211 of the covariance weights.
213 covsNoB : `dict` [`str`, `list`]
214 Dictionary keyed by amp names containing a list of measured
215 covariances per mean flux ('b'=0 in Astier+19).
217 covsModelNoB : `dict` [`str`, `list`]
218 Dictionary keyed by amp names containing covariances model
219 (with 'b'=0 in Eq. 20 of Astier+19) per mean flux.
221 covsWeightsNoB : `dict` [`str`, `list`]
222 Dictionary keyed by amp names containing sqrt. of
223 covariances weights ('b' = 0 in Eq. 20 of Astier+19).
225 gainDict : `dict` [`str`, `float`]
226 Dictionary keyed by amp names containing the gains in e-/ADU.
228 noiseDict : `dict` [`str`, `float`]
229 Dictionary keyed by amp names containing the rms readout
230 noise in e-.
232 aDict : `dict` [`str`, `numpy.array`]
233 Dictionary keyed by amp names containing 'a' coefficients
234 (Eq. 20 of Astier+19).
236 bDict : `dict` [`str`, `numpy.array`]
237 Dictionary keyed by amp names containing 'b' coefficients
238 (Eq. 20 of Astier+19).
240 pdfPages : `matplotlib.backends.backend_pdf.PdfPages`
241 PDF file where the plots will be saved.
242 """
243 legendFontSize = 6.5
244 labelFontSize = 7
245 titleFontSize = 9
246 supTitleFontSize = 18
247 markerSize = 25
249 nAmps = len(covs)
250 if nAmps == 2:
251     nRows, nCols = 2, 1
252 else:
253     nRows = int(np.ceil(np.sqrt(nAmps)))
254     nCols = nRows
261 f, ax = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
262 f2, ax2 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
263 fResCov00, axResCov00 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row',
264 figsize=(13, 10))
265 fCov01, axCov01 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
266 fCov10, axCov10 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
268 assert len(covsModel) == nAmps
269 assert len(covsWeights) == nAmps
271 assert len(covsNoB) == nAmps
272 assert len(covsModelNoB) == nAmps
273 assert len(covsWeightsNoB) == nAmps
275 for i, (amp, a, a2, aResVar, a3, a4) in enumerate(zip(covs, ax.flatten(),
276 ax2.flatten(), axResCov00.flatten(),
277 axCov01.flatten(), axCov10.flatten())):
279 muAmp, cov, model, weight = mu[amp], covs[amp], covsModel[amp], covsWeights[amp]
280 if not np.isnan(np.array(cov)).all(): # If all the entries are np.nan, this is a bad amp.
281 aCoeffs, bCoeffs = np.array(aDict[amp]), np.array(bDict[amp])
282 gain, noise = gainDict[amp], noiseDict[amp]
283 (meanVecFinal, varVecFinal, varVecModelFinal,
284 varWeightsFinal, _) = getFitDataFromCovariances(0, 0, muAmp, cov, model, weight,
285 returnMasked=True)
287 # Get weighted reduced chi2
288 chi2FullModelVar = calculateWeightedReducedChi2(varVecFinal, varVecModelFinal,
289 varWeightsFinal, len(meanVecFinal), 4)
291 (meanVecFinalCov01, varVecFinalCov01, varVecModelFinalCov01,
292 _, _) = getFitDataFromCovariances(0, 1, muAmp, cov, model, weight, returnMasked=True)
294 (meanVecFinalCov10, varVecFinalCov10, varVecModelFinalCov10,
295 _, _) = getFitDataFromCovariances(1, 0, muAmp, cov, model, weight, returnMasked=True)
297 # Quadratic fit for the residuals below
298 par2 = np.polyfit(meanVecFinal, varVecFinal, 2, w=varWeightsFinal)
299 varModelFinalQuadratic = np.polyval(par2, meanVecFinal)
300 chi2QuadModelVar = calculateWeightedReducedChi2(varVecFinal, varModelFinalQuadratic,
301 varWeightsFinal, len(meanVecFinal), 3)
303 # fit with no 'b' coefficient (c = a*b in Eq. 20 of Astier+19)
304 covNoB, modelNoB, weightNoB = covsNoB[amp], covsModelNoB[amp], covsWeightsNoB[amp]
305 (meanVecFinalNoB, varVecFinalNoB, varVecModelFinalNoB,
306 varWeightsFinalNoB, _) = getFitDataFromCovariances(0, 0, muAmp, covNoB, modelNoB,
307 weightNoB, returnMasked=True)
309 chi2FullModelNoBVar = calculateWeightedReducedChi2(varVecFinalNoB, varVecModelFinalNoB,
310 varWeightsFinalNoB, len(meanVecFinalNoB),
311 3)
312 stringLegend = (f"Gain: {gain:.4} e/ADU \n"
313 f"Noise: {noise:.4} e \n"
314 + r"$a_{00}$: %.3e 1/e"%aCoeffs[0, 0] + "\n"
315 + r"$b_{00}$: %.3e 1/e"%bCoeffs[0, 0]
316 + f"\nLast in fit: {meanVecFinal[-1]:.7} ADU ")
317 minMeanVecFinal = np.nanmin(meanVecFinal)
318 maxMeanVecFinal = np.nanmax(meanVecFinal)
319 deltaXlim = maxMeanVecFinal - minMeanVecFinal
321 a.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
322 a.set_ylabel(r'Variance (ADU$^2$)', fontsize=labelFontSize)
323 a.tick_params(labelsize=11)
324 a.set_xscale('linear')
325 a.set_yscale('linear')
326 a.scatter(meanVecFinal, varVecFinal, c='blue', marker='o', s=markerSize)
327 a.plot(meanVecFinal, varVecModelFinal, color='red', linestyle='-')
328 a.text(0.03, 0.7, stringLegend, transform=a.transAxes, fontsize=legendFontSize)
329 a.set_title(amp, fontsize=titleFontSize)
330 a.set_xlim([minMeanVecFinal - 0.2*deltaXlim, maxMeanVecFinal + 0.2*deltaXlim])
332 # Same as above, but in log-scale
333 a2.set_xlabel(r'Mean Signal ($\mu$, ADU)', fontsize=labelFontSize)
334 a2.set_ylabel(r'Variance (ADU$^2$)', fontsize=labelFontSize)
335 a2.tick_params(labelsize=11)
336 a2.set_xscale('log')
337 a2.set_yscale('log')
338 a2.plot(meanVecFinal, varVecModelFinal, color='red', linestyle='-')
339 a2.scatter(meanVecFinal, varVecFinal, c='blue', marker='o', s=markerSize)
340 a2.text(0.03, 0.7, stringLegend, transform=a2.transAxes, fontsize=legendFontSize)
341 a2.set_title(amp, fontsize=titleFontSize)
342 a2.set_xlim([minMeanVecFinal, maxMeanVecFinal])
344 # Residuals var - model
345 aResVar.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
346 aResVar.set_ylabel(r'Residuals (ADU$^2$)', fontsize=labelFontSize)
347 aResVar.tick_params(labelsize=11)
348 aResVar.set_xscale('linear')
349 aResVar.set_yscale('linear')
350 aResVar.plot(meanVecFinal, varVecFinal - varVecModelFinal, color='blue', linestyle='-',
351 label=r'Full fit ($\chi_{\rm{red}}^2$: %g)'%chi2FullModelVar)
352 aResVar.plot(meanVecFinal, varVecFinal - varModelFinalQuadratic, color='red', linestyle='-',
353 label=r'Quadratic fit ($\chi_{\rm{red}}^2$: %g)'%chi2QuadModelVar)
354 aResVar.plot(meanVecFinalNoB, varVecFinalNoB - varVecModelFinalNoB, color='green',
355 linestyle='-',
356 label=r'Full fit (b=0) ($\chi_{\rm{red}}^2$: %g)'%chi2FullModelNoBVar)
357 aResVar.axhline(color='black')
358 aResVar.set_title(amp, fontsize=titleFontSize)
359 aResVar.set_xlim([minMeanVecFinal - 0.2*deltaXlim, maxMeanVecFinal + 0.2*deltaXlim])
360 aResVar.legend(fontsize=7)
362 a3.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
363 a3.set_ylabel(r'Cov01 (ADU$^2$)', fontsize=labelFontSize)
364 a3.tick_params(labelsize=11)
365 a3.set_xscale('linear')
366 a3.set_yscale('linear')
367 a3.scatter(meanVecFinalCov01, varVecFinalCov01, c='blue', marker='o', s=markerSize)
368 a3.plot(meanVecFinalCov01, varVecModelFinalCov01, color='red', linestyle='-')
369 a3.set_title(amp, fontsize=titleFontSize)
370 a3.set_xlim([minMeanVecFinal - 0.2*deltaXlim, maxMeanVecFinal + 0.2*deltaXlim])
372 a4.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
373 a4.set_ylabel(r'Cov10 (ADU$^2$)', fontsize=labelFontSize)
374 a4.tick_params(labelsize=11)
375 a4.set_xscale('linear')
376 a4.set_yscale('linear')
377 a4.scatter(meanVecFinalCov10, varVecFinalCov10, c='blue', marker='o', s=markerSize)
378 a4.plot(meanVecFinalCov10, varVecModelFinalCov10, color='red', linestyle='-')
379 a4.set_title(amp, fontsize=titleFontSize)
380 a4.set_xlim([minMeanVecFinal - 0.2*deltaXlim, maxMeanVecFinal + 0.2*deltaXlim])
382 else:
383 a.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
384 a2.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
385 a3.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
386 a4.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
388 f.suptitle("PTC from covariances as in Astier+19 \n Fit: Eq. 20, Astier+19",
389 fontsize=supTitleFontSize)
390 pdfPages.savefig(f)
391 f2.suptitle("PTC from covariances as in Astier+19 (log-log) \n Fit: Eq. 20, Astier+19",
392 fontsize=supTitleFontSize)
393 pdfPages.savefig(f2)
394 fResCov00.suptitle("Residuals (data-model) for Cov00 (Var)", fontsize=supTitleFontSize)
395 pdfPages.savefig(fResCov00)
396 fCov01.suptitle("Cov01 as in Astier+19 (nearest parallel neighbor covariance) \n"
397 " Fit: Eq. 20, Astier+19", fontsize=supTitleFontSize)
398 pdfPages.savefig(fCov01)
399 fCov10.suptitle("Cov10 as in Astier+19 (nearest serial neighbor covariance) \n"
400 "Fit: Eq. 20, Astier+19", fontsize=supTitleFontSize)
401 pdfPages.savefig(fCov10)
403 return
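    # For reference, the weighted reduced chi-square used above can be written
    # as in the sketch below. This only illustrates the usual definition with
    # the argument order seen in the calls above (data, model, weights, number
    # of points, number of model parameters); the real implementation is
    # lsst.cp.pipe.utils.calculateWeightedReducedChi2 and may differ in detail.
    #
    #     def weightedReducedChi2(data, model, weights, nData, nParsModel):
    #         residuals = (np.asarray(data) - np.asarray(model))*np.asarray(weights)
    #         return np.sum(residuals**2)/(nData - nParsModel)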
405 def plotNormalizedCovariances(self, i, j, inputMu, covs, covsModel, covsWeights, covsNoB, covsModelNoB,
406 covsWeightsNoB, pdfPages, offset=0.004,
407 numberOfBins=10, plotData=True, topPlot=False, log=None):
408 """Plot C_ij/mu vs mu.
410 Figs. 8, 10, and 11 of Astier+19
412 Parameters
413 ----------
414 i : `int`
415 Covariance lag.
417 j : `int`
418 Covariance lag.
420 inputMu : `dict` [`str`, `list`]
421 Dictionary keyed by amp name with mean signal values.
423 covs : `dict` [`str`, `list`]
424 Dictionary keyed by amp names containing a list of measured
425 covariances per mean flux.
427 covsModel : `dict` [`str`, `list`]
428 Dictionary keyed by amp names containing covariances
429 model (Eq. 20 of Astier+19) per mean flux.
431 covsWeights : `dict` [`str`, `list`]
432 Dictionary keyed by amp names containing the square root
433 of the covariance weights.
435 covsNoB : `dict` [`str`, `list`]
436 Dictionary keyed by amp names containing a list of measured
437 covariances per mean flux ('b'=0 in Astier+19).
439 covsModelNoB : `dict` [`str`, `list`]
440 Dictionary keyed by amp names containing covariances model
441 (with 'b'=0 in Eq. 20 of Astier+19) per mean flux.
443 covsWeightsNoB : `dict` [`str`, `list`]
444 Dictionary keyed by amp names containing sqrt. of
445 covariances weights ('b' = 0 in Eq. 20 of Astier+19).
451 pdfPages : `matplotlib.backends.backend_pdf.PdfPages`
452 PDF file where the plots will be saved.
454 offset : `float`, optional
455 Constant additive offset used to stack the per-amp covariances
456 in the same panel (so they don't overlap).
458 numberOfBins : `int`, optional
459 Number of bins for top and bottom plot.
461 plotData : `bool`, optional
462 Plot the data points?
464 topPlot : `bool`, optional
465 If `True`, plot only the top panel with the covariances;
466 otherwise also plot a bottom panel with the binned fit residuals.
468 log : `logging.Logger`, optional
469 Logger to handle messages.
470 """
471 if not topPlot:
472 fig = plt.figure(figsize=(8, 10))
473 gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
474 gs.update(hspace=0)
475 ax0 = plt.subplot(gs[0])
476 plt.setp(ax0.get_xticklabels(), visible=False)
477 else:
478 fig = plt.figure(figsize=(8, 8))
479 ax0 = plt.subplot(111)
480 ax0.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
481 ax0.tick_params(axis='both', labelsize='x-large')
482 mue, rese, wce = [], [], []
483 mueNoB, reseNoB, wceNoB = [], [], []
484 for counter, amp in enumerate(covs):
485 muAmp, fullCov, fullCovModel, fullCovWeight = (inputMu[amp], covs[amp], covsModel[amp],
486 covsWeights[amp])
487 if len(fullCov) == 0:
488 continue
489 mu, cov, model, weightCov, _ = getFitDataFromCovariances(i, j, muAmp, fullCov, fullCovModel,
490 fullCovWeight, divideByMu=True,
491 returnMasked=True)
493 mue += list(mu)
494 rese += list(cov - model)
495 wce += list(weightCov)
497 fullCovNoB, fullCovModelNoB, fullCovWeightNoB = (covsNoB[amp], covsModelNoB[amp],
498 covsWeightsNoB[amp])
499 if len(fullCovNoB) == 0:
500 continue
501 (muNoB, covNoB, modelNoB,
502 weightCovNoB, _) = getFitDataFromCovariances(i, j, muAmp, fullCovNoB, fullCovModelNoB,
503 fullCovWeightNoB, divideByMu=True,
504 returnMasked=True)
506 mueNoB += list(muNoB)
507 reseNoB += list(covNoB - modelNoB)
508 wceNoB += list(weightCovNoB)
510 # the corresponding fit
511 fit_curve, = plt.plot(mu, model + counter*offset, '-', linewidth=4.0)
512 # Binned plot (passing numberOfBins = len(mu) would amount to no binning)
513 gind = self.indexForBins(mu, numberOfBins)
515 xb, yb, wyb, sigyb = self.binData(mu, cov, gind, weightCov)
516 plt.errorbar(xb, yb+counter*offset, yerr=sigyb, marker='o', linestyle='none', markersize=6.5,
517 color=fit_curve.get_color(), label=f"{amp} (N: {len(mu)})")
518 # plot the data
519 if plotData:
520 points, = plt.plot(mu, cov + counter*offset, '.', color=fit_curve.get_color())
521 plt.legend(loc='upper right', fontsize=8)
522 # end loop on amps
523 mue = np.array(mue)
524 rese = np.array(rese)
525 wce = np.array(wce)
526 mueNoB = np.array(mueNoB)
527 reseNoB = np.array(reseNoB)
528 wceNoB = np.array(wceNoB)
530 plt.xlabel(r"$\mu (el)$", fontsize='x-large')
531 plt.ylabel(r"$Cov{%d%d}/\mu + Cst (el)$"%(i, j), fontsize='x-large')
532 if (not topPlot):
533 gind = self.indexForBins(mue, numberOfBins)
534 xb, yb, wyb, sigyb = self.binData(mue, rese, gind, wce)
536 ax1 = plt.subplot(gs[1], sharex=ax0)
537 ax1.errorbar(xb, yb, yerr=sigyb, marker='o', linestyle='none', label='Full fit')
538 gindNoB = self.indexForBins(mueNoB, numberOfBins)
539 xb2, yb2, wyb2, sigyb2 = self.binData(mueNoB, reseNoB, gindNoB, wceNoB)
541 ax1.errorbar(xb2, yb2, yerr=sigyb2, marker='o', linestyle='none', label='b = 0')
542 ax1.tick_params(axis='both', labelsize='x-large')
543 plt.legend(loc='upper left', fontsize='large')
544 # horizontal line at zero
545 plt.plot(xb, [0]*len(xb), '--', color='k')
546 plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
547 plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
548 plt.xlabel(r'$\mu (el)$', fontsize='x-large')
549 plt.ylabel(r'$Cov{%d%d}/\mu$ -model (el)'%(i, j), fontsize='x-large')
550 plt.tight_layout()
551 plt.suptitle(f"Nbins: {numberOfBins}")
552 # overlapping y labels:
553 fig.canvas.draw()
554 labels0 = [item.get_text() for item in ax0.get_yticklabels()]
555 labels0[0] = u''
556 ax0.set_yticklabels(labels0)
557 pdfPages.savefig(fig)
559 return
561 @staticmethod
562 def plot_a_b(aDict, bDict, pdfPages, bRange=3):
563 """Fig. 12 of Astier+19
565 Color display of the fitted 'a' and 'b' arrays, averaged over channels.
567 Parameters
568 ----------
569 aDict : `dict` [`numpy.array`]
570 Dictionary keyed by amp names containing the fitted 'a'
571 coefficients from the model in Eq. 20 of Astier+19 (if
572 `ptcFitType` is `FULLCOVARIANCE`).
574 bDict : `dict` [`numpy.array`]
575 Dictionary keyed by amp names containing the fitted 'b'
576 coefficients from the model in Eq. 20 of Astier+19 (if
577 `ptcFitType` is `FULLCOVARIANCE`).
579 pdfPages : `matplotlib.backends.backend_pdf.PdfPages`
580 PDF file where the plots will be saved.
582 bRange : `int`, optional
583 Maximum lag for b arrays.
584 """
585 a, b = [], []
586 for amp in aDict:
587 if np.isnan(aDict[amp]).all():
588 continue
589 a.append(aDict[amp])
590 b.append(bDict[amp])
591 a = np.array(a).mean(axis=0)
592 b = np.array(b).mean(axis=0)
593 fig = plt.figure(figsize=(7, 11))
594 ax0 = fig.add_subplot(2, 1, 1)
595 im0 = ax0.imshow(np.abs(a.transpose()), origin='lower', norm=mpl.colors.LogNorm())
596 ax0.tick_params(axis='both', labelsize='x-large')
597 ax0.set_title(r'$|a|$', fontsize='x-large')
598 ax0.xaxis.set_ticks_position('bottom')
599 cb0 = plt.colorbar(im0)
600 cb0.ax.tick_params(labelsize='x-large')
602 ax1 = fig.add_subplot(2, 1, 2)
603 ax1.tick_params(axis='both', labelsize='x-large')
604 ax1.yaxis.set_major_locator(MaxNLocator(integer=True))
605 ax1.xaxis.set_major_locator(MaxNLocator(integer=True))
606 im1 = ax1.imshow(1e6*b[:bRange, :bRange].transpose(), origin='lower')
607 cb1 = plt.colorbar(im1)
608 cb1.ax.tick_params(labelsize='x-large')
609 ax1.set_title(r'$b \times 10^6$', fontsize='x-large')
610 ax1.xaxis.set_ticks_position('bottom')
611 plt.tight_layout()
612 pdfPages.savefig(fig)
614 return
616 @staticmethod
617 def ab_vs_dist(aDict, bDict, pdfPages, bRange=4):
618 """Fig. 13 of Astier+19.
620 Values of the fitted 'a' and 'b' arrays, averaged over amplifiers,
621 as a function of lag distance.
623 Parameters
624 ----------
625 aDict : `dict` [`numpy.array`]
626 Dictionary keyed by amp names containing the fitted 'a'
627 coefficients from the model in Eq. 20 of Astier+19 (if
628 `ptcFitType` is `FULLCOVARIANCE`).
630 bDict : `dict` [`numpy.array`]
631 Dictionary keyed by amp names containing the fitted 'b'
632 coefficients from the model in Eq. 20 of Astier+19 (if
633 `ptcFitType` is `FULLCOVARIANCE`).
635 pdfPages : `matplotlib.backends.backend_pdf.PdfPages`
636 PDF file where the plots will be saved.
638 bRange : `int`, optional
639 Maximum lag for b arrays.
640 """
641 assert len(aDict) == len(bDict)
642 a = []
643 for amp in aDict:
644 if np.isnan(aDict[amp]).all():
645 continue
646 a.append(aDict[amp])
647 a = np.array(a)
648 y = a.mean(axis=0)
649 sy = a.std(axis=0)/np.sqrt(len(aDict))
650 i, j = np.indices(y.shape)
651 upper = (i >= j).ravel()
652 r = np.sqrt(i**2 + j**2).ravel()
653 y = y.ravel()
654 sy = sy.ravel()
655 fig = plt.figure(figsize=(6, 9))
656 ax = fig.add_subplot(211)
657 ax.set_xlim([0.5, r.max()+1])
658 ax.errorbar(r[upper], y[upper], yerr=sy[upper], marker='o', linestyle='none', color='b',
659 label='$i>=j$')
660 ax.errorbar(r[~upper], y[~upper], yerr=sy[~upper], marker='o', linestyle='none', color='r',
661 label='$i<j$')
662 ax.legend(loc='upper center', fontsize='x-large')
663 ax.set_xlabel(r'$\sqrt{i^2+j^2}$', fontsize='x-large')
664 ax.set_ylabel(r'$a_{ij}$', fontsize='x-large')
665 ax.set_yscale('log')
666 ax.tick_params(axis='both', labelsize='x-large')
668 #
669 axb = fig.add_subplot(212)
670 b = []
671 for amp in bDict:
672 if np.isnan(bDict[amp]).all():
673 continue
674 b.append(bDict[amp])
675 b = np.array(b)
676 yb = b.mean(axis=0)
677 syb = b.std(axis=0)/np.sqrt(len(bDict))
678 ib, jb = np.indices(yb.shape)
679 upper = (ib > jb).ravel()
680 rb = np.sqrt(ib**2 + jb**2).ravel()
681 yb = yb.ravel()
682 syb = syb.ravel()
683 xmin = -0.2
684 xmax = bRange
685 axb.set_xlim([xmin, xmax+0.2])
686 cutu = (rb > xmin) & (rb < xmax) & (upper)
687 cutl = (rb > xmin) & (rb < xmax) & (~upper)
688 axb.errorbar(rb[cutu], yb[cutu], yerr=syb[cutu], marker='o', linestyle='none', color='b',
689 label='$i>=j$')
690 axb.errorbar(rb[cutl], yb[cutl], yerr=syb[cutl], marker='o', linestyle='none', color='r',
691 label='$i<j$')
692 plt.legend(loc='upper center', fontsize='x-large')
693 axb.set_xlabel(r'$\sqrt{i^2+j^2}$', fontsize='x-large')
694 axb.set_ylabel(r'$b_{ij}$', fontsize='x-large')
695 axb.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
696 axb.tick_params(axis='both', labelsize='x-large')
697 plt.tight_layout()
698 pdfPages.savefig(fig)
700 return
702 @staticmethod
703 def plotAcoeffsSum(aDict, bDict, pdfPages):
704 """Fig. 14. of Astier+19
706 Cumulative sum of a_ij as a function of maximum
707 separation. This plot displays the average over channels.
709 Parameters
710 ----------
711 aDict : `dict` [`numpy.array`]
712 Dictionary keyed by amp names containing the fitted 'a'
713 coefficients from the model in Eq. 20 of Astier+19 (if
714 `ptcFitType` is `FULLCOVARIANCE`).
716 bDict : `dict` [`numpy.array`]
717 Dictionary keyed by amp names containing the fitted 'b'
718 coefficients from the model in Eq. 20 of Astier+19 (if
719 `ptcFitType` is `FULLCOVARIANCE`).
721 pdfPages : `matplotlib.backends.backend_pdf.PdfPages`
722 PDF file where the plots will be saved.
723 """
724 assert len(aDict) == len(bDict)
725 a, b = [], []
726 for amp in aDict:
727 if np.isnan(aDict[amp]).all() or np.isnan(bDict[amp]).all():
728 continue
729 a.append(aDict[amp])
730 b.append(bDict[amp])
731 a = np.array(a).mean(axis=0)
732 b = np.array(b).mean(axis=0)
733 fig = plt.figure(figsize=(7, 6))
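        # The weights below presumably account for the four-fold symmetry of
        # the lags: a generic (i, j) element stands for the four lags
        # (+-i, +-j), elements along the axes stand for two, and (0, 0) for
        # itself only, so that wa[0:n, 0:n].sum() approximates the sum of a_ij
        # over the full neighborhood |i| < n, |j| < n.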
734 w = 4*np.ones_like(a)
735 w[0, 1:] = 2
736 w[1:, 0] = 2
737 w[0, 0] = 1
738 wa = w*a
739 indices = range(1, a.shape[0]+1)
740 sums = [wa[0:n, 0:n].sum() for n in indices]
741 ax = plt.subplot(111)
742 ax.plot(indices, sums/sums[0], 'o', color='b')
743 ax.set_yscale('log')
744 ax.set_xlim(indices[0]-0.5, indices[-1]+0.5)
745 ax.set_ylim(None, 1.2)
746 ax.set_ylabel(r'$[\sum_{|i|<n\ &\ |j|<n} a_{ij}] / |a_{00}|$', fontsize='x-large')
747 ax.set_xlabel('n', fontsize='x-large')
748 ax.tick_params(axis='both', labelsize='x-large')
749 plt.tight_layout()
750 pdfPages.savefig(fig)
752 return
754 @staticmethod
755 def plotRelativeBiasACoeffs(aDict, aDictNoB, fullCovsModel, fullCovsModelNoB, signalElectrons,
756 gainDict, pdfPages, maxr=None):
757 """Fig. 15 in Astier+19.
759 Illustrates systematic bias from estimating 'a'
760 coefficients from the slope of correlations as opposed to the
761 full model in Astier+19.
763 Parameters
764 ----------
765 aDict : `dict`
766 Dictionary of 'a' matrices (Eq. 20, Astier+19), with amp
767 names as keys.
769 aDictNoB : `dict`
770 Dictionary of 'a' matrices ('b'= 0 in Eq. 20, Astier+19),
771 with amp names as keys.
773 fullCovsModel : `dict` [`str`, `list`]
774 Dictionary keyed by amp names containing covariances model
775 per mean flux.
777 fullCovsModelNoB : `dict` [`str`, `list`]
778 Dictionary keyed by amp names containing covariances model
779 (with 'b'=0 in Eq. 20 of Astier+19) per mean flux.
781 signalElectrons : `float`
782 Signal at which to evaluate the a_ij coefficients.
784 pdfPages : `matplotlib.backends.backend_pdf.PdfPages`
785 PDF file where the plots will be saved.
787 gainDict : `dict` [`str`, `float`]
788 Dictionary keyed by amp names with the gains in e-/ADU.
790 maxr : `int`, optional
791 Maximum lag.
792 """
793 fig = plt.figure(figsize=(7, 11))
794 title = [f"'a' relative bias at {signalElectrons} e", "'a' relative bias (b=0)"]
795 data = [(aDict, fullCovsModel), (aDictNoB, fullCovsModelNoB)]
797 for k, pair in enumerate(data):
798 diffs = []
799 amean = []
800 for amp in pair[0]:
801 covModel = np.array(pair[1][amp])
802 if np.isnan(covModel).all():
803 continue
804 # Compute the "a" coefficients of the Antilogus+14
805 # (1402.0725) model as in Guyonnet+15 (1501.01577,
806 # eq. 16, the slope of cov/var at a given flux mu in
807 # electrons). Eq. 16 of 1501.01577 is an approximation
808 # to the more complete model in Astier+19
809 # (1905.08677).
810 var = covModel[0, 0, 0] # ADU^2
811 # For a result in electrons^-1, we have to use mu in electrons
812 aOld = covModel[0, :, :]/(var*signalElectrons)
813 a = pair[0][amp]
814 amean.append(a)
815 diffs.append((aOld-a))
816 amean = np.array(amean).mean(axis=0)
817 diff = np.array(diffs).mean(axis=0)
818 diff = diff/amean
819 diff = diff[:]
820 # The difference should be close to zero
821 diff[0, 0] = 0
822 if maxr is None:
823 maxr = diff.shape[0]
824 diff = diff[:maxr, :maxr]
825 ax0 = fig.add_subplot(2, 1, k+1)
826 im0 = ax0.imshow(diff.transpose(), origin='lower')
827 ax0.yaxis.set_major_locator(MaxNLocator(integer=True))
828 ax0.xaxis.set_major_locator(MaxNLocator(integer=True))
829 ax0.tick_params(axis='both', labelsize='x-large')
830 plt.colorbar(im0)
831 ax0.set_title(title[k])
833 plt.tight_layout()
834 pdfPages.savefig(fig)
836 return
838 def _plotStandardPtc(self, dataset, ptcFitType, pdfPages):
839 """Plot the PTC (variance vs mean signal, also in log-log) and
840 variance/mean vs mean signal, per amplifier.
842 Parameters
843 ----------
844 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
845 The dataset containing the means, variances, exposure
846 times, and mask.
848 ptcFitType : `str`
849 Type of the model fit to the PTC. Options:
850 'FULLCOVARIANCE', 'EXPAPPROXIMATION', or 'POLYNOMIAL'.
852 pdfPages : `matplotlib.backends.backend_pdf.PdfPages`
853 PDF file where the plots will be saved.
854 """
855 if ptcFitType == 'EXPAPPROXIMATION':
856 ptcFunc = funcAstier
857 stringTitle = (r"Var = $\frac{1}{2g^2a_{00}}(\exp (2a_{00} \mu g) - 1) + \frac{n_{00}}{g^2}$ ")
858 elif ptcFitType == 'POLYNOMIAL':
859 ptcFunc = funcPolynomial
860 for key in dataset.ptcFitPars:
861 deg = len(dataset.ptcFitPars[key]) - 1
862 break
863 stringTitle = r"Polynomial (degree: %g)" % (deg)
864 else:
865 raise RuntimeError(f"The input dataset had an invalid dataset.ptcFitType: {ptcFitType}. \n"
866 "Options: 'FULLCOVARIANCE', 'EXPAPPROXIMATION', or 'POLYNOMIAL'.")
868 legendFontSize = 6.5
869 labelFontSize = 8
870 titleFontSize = 9
871 supTitleFontSize = 18
872 markerSize = 25
874 # General determination of the size of the plot grid
875 nAmps = len(dataset.ampNames)
876 if nAmps == 2:
877     nRows, nCols = 2, 1
878 else:
879     nRows = int(np.ceil(np.sqrt(nAmps)))
880     nCols = nRows
887 f, ax = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
888 f2, ax2 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
889 f3, ax3 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
891 for i, (amp, a, a2, a3) in enumerate(zip(dataset.ampNames, ax.flatten(), ax2.flatten(),
892 ax3.flatten())):
893 meanVecOriginal = np.ravel(np.array(dataset.rawMeans[amp]))
894 varVecOriginal = np.ravel(np.array(dataset.rawVars[amp]))
895 mask = np.ravel(np.array(dataset.expIdMask[amp]))
896 if np.isnan(mask[0]): # If the mask is all NaNs, the whole amp is bad
897 a.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
898 a2.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
899 a3.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
900 continue
901 else:
902 mask = mask.astype(bool)
903 meanVecFinal = meanVecOriginal[mask]
904 varVecFinal = varVecOriginal[mask]
905 meanVecOutliers = meanVecOriginal[np.invert(mask)]
906 varVecOutliers = varVecOriginal[np.invert(mask)]
907 pars, parsErr = np.array(dataset.ptcFitPars[amp]), np.array(dataset.ptcFitParsError[amp])
908 ptcRedChi2 = dataset.ptcFitChiSq[amp]
909 if ptcFitType == 'EXPAPPROXIMATION':
910 if len(meanVecFinal):
911 ptcA00, ptcA00error = pars[0], parsErr[0]
912 ptcGain, ptcGainError = pars[1], parsErr[1]
913 ptcNoise = np.sqrt((pars[2])) # pars[2] is in (e-)^2
914 ptcNoiseAdu = ptcNoise*(1./ptcGain)
915 ptcNoiseError = 0.5*(parsErr[2]/np.fabs(pars[2]))*np.sqrt(np.fabs(pars[2]))
916 stringLegend = (f"a00: {ptcA00:.2e}+/-{ptcA00error:.2e} 1/e"
917 f"\nGain: {ptcGain:.4}+/-{ptcGainError:.2e} e/ADU"
918 f"\nNoise: {ptcNoise:.4}+/-{ptcNoiseError:.2e} e\n"
919 r"$\chi^2_{\rm{red}}$: " + f"{ptcRedChi2:.4}"
920 f"\nLast in fit: {meanVecFinal[-1]:.7} ADU ")
922 if ptcFitType == 'POLYNOMIAL':
923 if len(meanVecFinal):
924 ptcGain, ptcGainError = 1./pars[1], np.fabs(1./pars[1])*(parsErr[1]/pars[1])
925 ptcNoiseAdu = np.sqrt((pars[0])) # pars[0] is in ADU^2
926 ptcNoise = ptcNoiseAdu*ptcGain
927 ptcNoiseError = (0.5*(parsErr[0]/np.fabs(pars[0]))*(np.sqrt(np.fabs(pars[0]))))*ptcGain
928 stringLegend = (f"Gain: {ptcGain:.4}+/-{ptcGainError:.2e} e/ADU\n"
929 f"Noise: {ptcNoise:.4}+/-{ptcNoiseError:.2e} e\n"
930 r"$\chi^2_{\rm{red}}$: " + f"{ptcRedChi2:.4}"
931 f"\nLast in fit: {meanVecFinal[-1]:.7} ADU ")
933 a.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
934 a.set_ylabel(r'Variance (ADU$^2$)', fontsize=labelFontSize)
935 a.tick_params(labelsize=11)
936 a.set_xscale('linear')
937 a.set_yscale('linear')
939 a2.set_xlabel(r'Mean Signal ($\mu$, ADU)', fontsize=labelFontSize)
940 a2.set_ylabel(r'Variance (ADU$^2$)', fontsize=labelFontSize)
941 a2.tick_params(labelsize=11)
942 a2.set_xscale('log')
943 a2.set_yscale('log')
945 a3.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
946 a3.set_ylabel(r'Variance/$\mu$ (ADU)', fontsize=labelFontSize)
947 a3.tick_params(labelsize=11)
948 a3.set_xscale('log')
949 a3.set_yscale('linear')
951 minMeanVecFinal = np.nanmin(meanVecFinal)
952 maxMeanVecFinal = np.nanmax(meanVecFinal)
953 meanVecFit = np.linspace(minMeanVecFinal, maxMeanVecFinal, 100*len(meanVecFinal))
954 minMeanVecOriginal = np.nanmin(meanVecOriginal)
955 maxMeanVecOriginal = np.nanmax(meanVecOriginal)
956 deltaXlim = maxMeanVecOriginal - minMeanVecOriginal
957 a.plot(meanVecFit, ptcFunc(pars, meanVecFit), color='red')
958 a.plot(meanVecFinal, ptcNoiseAdu**2 + (1./ptcGain)*meanVecFinal, color='green',
959 linestyle='--')
960 a.scatter(meanVecFinal, varVecFinal, c='blue', marker='o', s=markerSize)
961 a.scatter(meanVecOutliers, varVecOutliers, c='magenta', marker='s', s=markerSize)
962 a.text(0.03, 0.66, stringLegend, transform=a.transAxes, fontsize=legendFontSize)
963 a.set_title(amp, fontsize=titleFontSize)
964 a.set_xlim([minMeanVecOriginal - 0.2*deltaXlim, maxMeanVecOriginal + 0.2*deltaXlim])
966 # Same, but in log-scale
967 a2.plot(meanVecFit, ptcFunc(pars, meanVecFit), color='red')
968 a2.scatter(meanVecFinal, varVecFinal, c='blue', marker='o', s=markerSize)
969 a2.scatter(meanVecOutliers, varVecOutliers, c='magenta', marker='s', s=markerSize)
970 a2.text(0.03, 0.66, stringLegend, transform=a2.transAxes, fontsize=legendFontSize)
971 a2.set_title(amp, fontsize=titleFontSize)
972 a2.set_xlim([minMeanVecOriginal, maxMeanVecOriginal])
974 # Var/mu vs mu
975 a3.plot(meanVecFit, ptcFunc(pars, meanVecFit)/meanVecFit, color='red')
976 a3.scatter(meanVecFinal, varVecFinal/meanVecFinal, c='blue', marker='o', s=markerSize)
977 a3.scatter(meanVecOutliers, varVecOutliers/meanVecOutliers, c='magenta', marker='s',
978 s=markerSize)
979 a3.text(0.05, 0.1, stringLegend, transform=a3.transAxes, fontsize=legendFontSize)
980 a3.set_title(amp, fontsize=titleFontSize)
981 a3.set_xlim([minMeanVecOriginal - 0.2*deltaXlim, maxMeanVecOriginal + 0.2*deltaXlim])
983 f.suptitle("PTC \n Fit: " + stringTitle, fontsize=supTitleFontSize)
984 pdfPages.savefig(f)
985 f2.suptitle("PTC (log-log)", fontsize=supTitleFontSize)
986 pdfPages.savefig(f2)
987 f3.suptitle(r"Var/$\mu$", fontsize=supTitleFontSize)
988 pdfPages.savefig(f3)
990 return
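    # The EXPAPPROXIMATION curve plotted above is evaluated with funcAstier,
    # imported from lsst.cp.pipe.utils. As a sketch only, the functional form
    # quoted in the figure title could be coded as below; the parameter order
    # (a00, gain, noise^2) is inferred from how `pars` is unpacked above and
    # is not guaranteed to match the imported helper.
    #
    #     def expApproximationVariance(pars, mu):
    #         # Var(mu) = 1/(2 g^2 a00) * (exp(2 a00 mu g) - 1) + n00/g^2
    #         a00, gain, noiseSq = pars[0], pars[1], pars[2]
    #         return (np.exp(2.0*a00*mu*gain) - 1.0)/(2.0*a00*gain**2) + noiseSq/gain**2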
992 def _plotLinearizer(self, dataset, linearizer, pdfPages):
993 """Plot linearity and linearity residual per amplifier
995 Parameters
996 ----------
997 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
998 The dataset containing the means, variances, exposure
999 times, and mask.
1001 linearizer : `lsst.ip.isr.Linearizer`
1002 Linearizer object
1003 """
1004 legendFontSize = 7
1005 labelFontSize = 7
1006 titleFontSize = 9
1007 supTitleFontSize = 18
1009 # General determination of the size of the plot grid
1010 nAmps = len(dataset.ampNames)
1011 if nAmps == 2:
1012     nRows, nCols = 2, 1
1013 else:
1014     nRows = int(np.ceil(np.sqrt(nAmps)))
1015     nCols = nRows
1022 # Plot mean vs time (f1), and fractional residuals (f2)
1023 f, ax = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
1024 f2, ax2 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
1025 for i, (amp, a, a2) in enumerate(zip(dataset.ampNames, ax.flatten(), ax2.flatten())):
1026 mask = dataset.expIdMask[amp]
1027 if np.isnan(mask[0]):
1028 a.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
1029 a2.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
1030 continue
1031 else:
1032 mask = mask.astype(bool)
1033 meanVecFinal = np.array(dataset.rawMeans[amp])[mask]
1034 timeVecFinal = np.array(dataset.rawExpTimes[amp])[mask]
1036 a.set_xlabel('Time (sec)', fontsize=labelFontSize)
1037 a.set_ylabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
1038 a.tick_params(labelsize=labelFontSize)
1039 a.set_xscale('linear')
1040 a.set_yscale('linear')
1042 a2.axhline(y=0, color='k')
1043 a2.axvline(x=0, color='k', linestyle='-')
1044 a2.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
1045 a2.set_ylabel('Fractional nonlinearity (%)', fontsize=labelFontSize)
1046 a2.tick_params(labelsize=labelFontSize)
1047 a2.set_xscale('linear')
1048 a2.set_yscale('linear')
1050 pars, parsErr = linearizer.fitParams[amp], linearizer.fitParamsErr[amp]
1051 k0, k0Error = pars[0], parsErr[0]
1052 k1, k1Error = pars[1], parsErr[1]
1053 k2, k2Error = pars[2], parsErr[2]
1054 linRedChi2 = linearizer.fitChiSq[amp]
1055 stringLegend = (f"k0: {k0:.4}+/-{k0Error:.2e} ADU\nk1: {k1:.4}+/-{k1Error:.2e} ADU/t"
1056 f"\nk2: {k2:.2e}+/-{k2Error:.2e} ADU/t^2\n"
1057 r"$\chi^2_{\rm{red}}$: " + f"{linRedChi2:.4}")
1058 a.scatter(timeVecFinal, meanVecFinal)
1059 a.plot(timeVecFinal, funcPolynomial(pars, timeVecFinal), color='red')
1060 a.text(0.03, 0.75, stringLegend, transform=a.transAxes, fontsize=legendFontSize)
1061 a.set_title(f"{amp}", fontsize=titleFontSize)
1063 linearPart = k0 + k1*timeVecFinal
1064 fracLinRes = 100*(linearPart - meanVecFinal)/linearPart
1065 a2.plot(meanVecFinal, fracLinRes, c='g')
1066 a2.set_title(f"{amp}", fontsize=titleFontSize)
1068 f.suptitle("Linearity \n Fit: Polynomial (degree: %g)"
1069 % (len(pars)-1),
1070 fontsize=supTitleFontSize)
1071 f2.suptitle(r"Fractional NL residual" "\n"
1072 r"$100\times \frac{(k_0 + k_1*Time-\mu)}{k_0+k_1*Time}$",
1073 fontsize=supTitleFontSize)
1074 pdfPages.savefig(f)
1075 pdfPages.savefig(f2)
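    # The fractional nonlinearity plotted above follows directly from the
    # polynomial fit: with the linear reference k0 + k1*t, the residual in
    # percent is 100*(k0 + k1*t - mu)/(k0 + k1*t), i.e. the departure of the
    # measured mean signal from the linear part of the fit, as stated in the
    # figure suptitle.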
1077 @staticmethod
1078 def findGroups(x, maxDiff):
1079 """Group data into bins; values within maxDiff of a bin's running mean share that bin.
1081 Parameters
1082 ----------
1083 x : `list`
1084 Data to bin.
1086 maxDiff : `int`
1087 Maximum distance between a value and the running mean of its bin.
1089 Returns
1090 -------
1091 index : `list`
1092 Bin indices.
1093 """
1094 ix = np.argsort(x)
1095 xsort = np.sort(x)
1096 index = np.zeros_like(x, dtype=np.int32)
1097 xc = xsort[0]
1098 group = 0
1099 ng = 1
1101 for i in range(1, len(ix)):
1102 xval = xsort[i]
1103 if (xval - xc < maxDiff):
1104 xc = (ng*xc + xval)/(ng+1)
1105 ng += 1
1106 index[ix[i]] = group
1107 else:
1108 group += 1
1109 ng = 1
1110 index[ix[i]] = group
1111 xc = xval
1113 return index
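    # Example (hypothetical values, for illustration only):
    #
    #     x = np.array([1.0, 1.2, 5.0, 5.1, 9.7])
    #     PlotPhotonTransferCurveTask.findGroups(x, maxDiff=2.0)
    #     # -> array([0, 0, 1, 1, 2]): two close pairs and one isolated point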
1115 @staticmethod
1116 def indexForBins(x, nBins):
1117 """Builds an index with regular binning. The result can be fed into
1118 binData.
1120 Parameters
1121 ----------
1122 x : `numpy.array`
1123 Data to bin.
1124 nBins : `int`
1125 Number of bins.
1127 Returns
1128 -------
1129 index : `numpy.array`
1130 Bin indices.
1131 """
1132 bins = np.linspace(x.min(), x.max() + abs(x.max() * 1e-7), nBins + 1)
1133 return np.digitize(x, bins)
1135 @staticmethod
1136 def binData(x, y, binIndex, wy=None):
1137 """Bin data (usually for display purposes).
1139 Parameters
1140 ----------
1141 x : `numpy.array`
1142 Data to bin.
1144 y : `numpy.array`
1145 Data to bin.
1147 binIndex : `numpy.array`
1148 Bin number of each datum.
1150 wy : `numpy.array`, optional
1151 Inverse rms of each datum to use when averaging (the
1152 actual weight is wy**2).
1154 Returns
1155 -------
1156 xbin : `numpy.array`
1157 Binned data in x.
1159 ybin : `numpy.array`
1160 Binned data in y.
1162 wybin : `numpy.array`
1163 Binned weights in y, computed from wy's in each bin.
1165 sybin : `numpy.array`
1166 Uncertainty on the bin average, considering actual
1167 scatter, and ignoring weights.
1168 """
1169 if wy is None:
1170 wy = np.ones_like(x)
1171 binIndexSet = set(binIndex)
1172 w2 = wy*wy
1173 xw2 = x*(w2)
1174 xbin = np.array([xw2[binIndex == i].sum()/w2[binIndex == i].sum() for i in binIndexSet])
1176 yw2 = y*w2
1177 ybin = np.array([yw2[binIndex == i].sum()/w2[binIndex == i].sum() for i in binIndexSet])
1179 wybin = np.sqrt(np.array([w2[binIndex == i].sum() for i in binIndexSet]))
1180 sybin = np.array([y[binIndex == i].std()/np.sqrt((binIndex == i).sum())
1181 for i in binIndexSet])
1183 return xbin, ybin, wybin, sybin
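    # Example of the two binning helpers together (synthetic data, for
    # illustration only; the variable names below are not part of the module):
    #
    #     rng = np.random.default_rng(seed=42)
    #     x = np.linspace(1.0, 2.0, 200)
    #     y = x**2 + 0.01*rng.standard_normal(200)
    #     binIndex = PlotPhotonTransferCurveTask.indexForBins(x, nBins=10)
    #     xbin, ybin, wybin, sybin = PlotPhotonTransferCurveTask.binData(x, y, binIndex)
    #     # xbin, ybin hold the per-bin weighted means; sybin is the scatter-based
    #     # uncertainty on each bin average, as used in plotNormalizedCovariances.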