1# This file is part of cp_pipe.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <https://www.gnu.org/licenses/>.
21#
23__all__ = ["PlotPhotonTransferCurveConfig", "PlotPhotonTransferCurveTask"]
25import numpy as np
26import matplotlib.pyplot as plt
27import matplotlib as mpl
28from matplotlib import gridspec
30import lsst.pex.config as pexConfig
31import lsst.pipe.base as pipeBase
32import lsst.pipe.base.connectionTypes as cT
33from lsst.cp.pipe._lookupStaticCalibration import lookupStaticCalibration
35from lsst.cp.pipe.utils import (
36 funcAstier,
37 funcPolynomial,
38 calculateWeightedReducedChi2,
39 getFitDataFromCovariances,
40)
41from matplotlib.ticker import MaxNLocator
44class PlotPhotonTransferCurveConnections(
45 pipeBase.PipelineTaskConnections, dimensions=("instrument", "exposure", "detector")
46):
48 dummyExposure = cT.Input(
49 name="raw",
        doc="Dummy exposure to retrieve PTC dataset.",
51 storageClass="Exposure",
52 dimensions=("instrument", "detector", "exposure"),
53 multiple=True,
54 deferLoad=True,
55 )
56 inputPtcDataset = cT.Input(
57 name="calib",
58 doc="Input PTC dataset.",
59 storageClass="PhotonTransferCurveDataset",
60 dimensions=("instrument", "detector"),
61 isCalibration=True,
62 )
63 camera = cT.PrerequisiteInput(
64 name="camera",
65 doc="Camera associated with this data.",
66 storageClass="Camera",
67 dimensions=("instrument",),
68 isCalibration=True,
69 lookupFunction=lookupStaticCalibration,
70 )
71 # ptcFitType = "FULLCOVARIANCE" produces 12 plots
72 ptcPlot1 = cT.Output(
73 name="ptcVarMean",
74 doc="Variance vs mean.",
75 storageClass="Plot",
76 dimensions=("instrument", "detector"),
77 )
78 ptcPlot2 = cT.Output(
79 name="ptcVarMeanLog",
80 doc="Variance vs Mean, log scale.",
81 storageClass="Plot",
82 dimensions=("instrument", "detector"),
83 )
84 ptcPlot3 = cT.Output(
85 name="ptcVarResiduals",
86 doc="Variance residuals compared to model.",
87 storageClass="Plot",
88 dimensions=("instrument", "detector"),
89 )
90 ptcPlot4 = cT.Output(
91 name="ptcCov01Mean",
92 doc="Cov01 vs mean.",
93 storageClass="Plot",
94 dimensions=("instrument", "detector"),
95 )
96 ptcPlot5 = cT.Output(
97 name="ptcCov10Mean",
98 doc="Cov10 vs mean.",
99 storageClass="Plot",
100 dimensions=("instrument", "detector"),
101 )
102 ptcPlot6 = cT.Output(
103 name="ptcNormalizedVar",
104 doc="Variance over mean vs mean.",
105 storageClass="Plot",
106 dimensions=("instrument", "detector"),
107 )
108 ptcPlot7 = cT.Output(
109 name="ptcNormalizedCov01",
110 doc="Cov01 over mean vs mean.",
111 storageClass="Plot",
112 dimensions=("instrument", "detector"),
113 )
114 ptcPlot8 = cT.Output(
115 name="ptcNormalizedCov10",
116 doc="Cov10 over mean vs mean.",
117 storageClass="Plot",
118 dimensions=("instrument", "detector"),
119 )
120 ptcPlot9 = cT.Output(
121 name="ptcAandBMatrices",
122 doc="Fig. 12 of Astier+19.",
123 storageClass="Plot",
124 dimensions=("instrument", "detector"),
125 )
126 ptcPlot10 = cT.Output(
127 name="ptcAandBDistance",
128 doc="Fig. 13 of Astier+19.",
129 storageClass="Plot",
130 dimensions=("instrument", "detector"),
131 )
132 ptcPlot11 = cT.Output(
133 name="ptcACumulativeSum",
134 doc="Fig. 14 of Astier+19.",
135 storageClass="Plot",
136 dimensions=("instrument", "detector"),
137 )
138 ptcPlot12 = cT.Output(
139 name="ptcARelativeBias",
140 doc="Fig. 15 of Astier+19.",
141 storageClass="Plot",
142 dimensions=("instrument", "detector"),
143 )
146class PlotPhotonTransferCurveConfig(
147 pipeBase.PipelineTaskConfig, pipelineConnections=PlotPhotonTransferCurveConnections
148):
    """Configuration for the photon transfer curve plotting task."""
151 signalElectronsRelativeA = pexConfig.Field(
152 dtype=float,
153 doc="Signal value (in e-) for relative systematic bias between different "
154 "methods of estimating a_ij (Fig. 15 of Astier+19).",
155 default=75000.0,
156 )
157 plotNormalizedCovariancesNumberOfBins = pexConfig.Field(
158 dtype=int,
        doc="Number of bins in the `plotNormalizedCovariances` function "
        "(Figs. 8 and 10 of Astier+19).",
161 default=10,
162 )
165class PlotPhotonTransferCurveTask(pipeBase.PipelineTask):
166 """A class to plot the dataset from MeasurePhotonTransferCurveTask.
168 Parameters
169 ----------
170 outDir : `str`, optional
171 Path to the output directory where the final PDF will
172 be placed.
174 signalElectronsRelativeA : `float`, optional
175 Signal value for relative systematic bias between different
176 methods of estimating a_ij (Fig. 15 of Astier+19).
    plotNormalizedCovariancesNumberOfBins : `int`, optional
        Number of bins in the `plotNormalizedCovariances` function
        (Figs. 8 and 10 of Astier+19).
181 """
183 ConfigClass = PlotPhotonTransferCurveConfig
184 _DefaultName = "cpPlotPtc"
186 def runQuantum(self, butlerQC, inputRefs, outputRefs):
187 inputs = butlerQC.get(inputRefs)
188 outputs = self.run(**inputs)
189 butlerQC.put(outputs, outputRefs)
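    # Minimal standalone-usage sketch (illustrative only; the repository path,
    # collection, and "ptc" dataset-type name below are assumptions, not part of
    # this module):
    #
    #     from lsst.daf.butler import Butler
    #     butler = Butler("/path/to/repo", collections=["u/someone/ptcRun"])
    #     ptc = butler.get("ptc", instrument="LATISS", detector=0)
    #     task = PlotPhotonTransferCurveTask()
    #     results = task.run(inputPtcDataset=ptc, dummyExposure=[])
    #     results.ptcPlot1.savefig("ptcVarMean.png")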
191 def run(self, inputPtcDataset, dummyExposure=None, camera=None):
192 """Make the plots for the PTC task.
194 Parameters
195 ----------
196 inputPtcDataset : `lsst.ip.isr.PhotonTransferCurveDataset`
197 Output dataset from Photon Transfer Curve task.
        dummyExposure : `list`, optional
            List of exposure handles used only to select the appropriate
            PTC dataset; the pixel data are not used.
200 camera : `lsst.afw.cameraGeom.Camera`
201 Camera to use for camera geometry information.
202 """
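        # Only an emptiness check is needed here: the dummy exposures constrain
        # the quantum to the correct detector, but their pixels are never read.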
        if not dummyExposure:
            self.log.warning("No dummy exposure found.")
207 ptcFitType = inputPtcDataset.ptcFitType
208 self.detId = inputPtcDataset.getMetadata()["DETECTOR"]
210 if ptcFitType in [
211 "FULLCOVARIANCE",
212 ]:
213 figDict = self.covAstierMakeAllPlots(inputPtcDataset)
214 elif ptcFitType in ["EXPAPPROXIMATION", "POLYNOMIAL"]:
215 figDict = self._plotStandardPtc(inputPtcDataset)
        else:
            raise RuntimeError(
                f"The input dataset had an invalid dataset.ptcFitType: {ptcFitType}. \n"
                "Options: 'FULLCOVARIANCE', 'EXPAPPROXIMATION', or 'POLYNOMIAL'."
            )
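        # The connections declare twelve plot outputs (ptcPlot1..ptcPlot12); if the
        # chosen fit type produced fewer figures, pad the remainder with empty ones.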
        maxNumberPlots = 12
        if len(figDict) < maxNumberPlots:
            for i in range(len(figDict), maxNumberPlots):
                figDict.setdefault(i, plt.figure())
227 return pipeBase.Struct(
228 ptcPlot1=figDict[0],
229 ptcPlot2=figDict[1],
230 ptcPlot3=figDict[2],
231 ptcPlot4=figDict[3],
232 ptcPlot5=figDict[4],
233 ptcPlot6=figDict[5],
234 ptcPlot7=figDict[6],
235 ptcPlot8=figDict[7],
236 ptcPlot9=figDict[8],
237 ptcPlot10=figDict[9],
238 ptcPlot11=figDict[10],
239 ptcPlot12=figDict[11],
240 )
242 def covAstierMakeAllPlots(self, dataset):
243 """Make plots for MeasurePhotonTransferCurve task when
244 doCovariancesAstier=True.
        This function calls other functions that mostly reproduce the
        plots in Astier+19. Most of the code is ported from Pierre
        Astier's repository https://github.com/PierreAstier/bfptc.
250 Parameters
251 ----------
252 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
253 The dataset containing the necessary information to
254 produce the plots.
255 """
256 mu = dataset.finalMeans
257 # dictionaries with ampNames as keys
258 fullCovs = dataset.covariances
259 fullCovsModel = dataset.covariancesModel
260 fullCovWeights = dataset.covariancesSqrtWeights
261 aDict = dataset.aMatrix
262 bDict = dataset.bMatrix
272 fullCovsNoB = dataset.covariances
273 fullCovsModelNoB = dataset.covariancesModelNoB
274 fullCovWeightsNoB = dataset.covariancesSqrtWeights
275 aDictNoB = dataset.aMatrixNoB
276 gainDict = dataset.gain
277 noiseDict = dataset.noise
279 figList1 = self.plotCovariances(
280 mu,
281 fullCovs,
282 fullCovsModel,
283 fullCovWeights,
284 fullCovsNoB,
285 fullCovsModelNoB,
286 fullCovWeightsNoB,
287 gainDict,
288 noiseDict,
289 aDict,
290 bDict,
291 )
292 figList2 = self.plotNormalizedCovariances(
293 0,
294 0,
295 mu,
296 fullCovs,
297 fullCovsModel,
298 fullCovWeights,
299 fullCovsNoB,
300 fullCovsModelNoB,
301 fullCovWeightsNoB,
302 offset=0.01,
303 topPlot=True,
304 numberOfBins=self.config.plotNormalizedCovariancesNumberOfBins,
305 )
306 figList3 = self.plotNormalizedCovariances(
307 0,
308 1,
309 mu,
310 fullCovs,
311 fullCovsModel,
312 fullCovWeights,
313 fullCovsNoB,
314 fullCovsModelNoB,
315 fullCovWeightsNoB,
316 numberOfBins=self.config.plotNormalizedCovariancesNumberOfBins,
317 )
318 figList4 = self.plotNormalizedCovariances(
319 1,
320 0,
321 mu,
322 fullCovs,
323 fullCovsModel,
324 fullCovWeights,
325 fullCovsNoB,
326 fullCovsModelNoB,
327 fullCovWeightsNoB,
328 numberOfBins=self.config.plotNormalizedCovariancesNumberOfBins,
329 )
330 figList5 = self.plot_a_b(aDict, bDict)
331 figList6 = self.ab_vs_dist(aDict, bDict, bRange=4)
332 figList7 = self.plotAcoeffsSum(aDict, bDict)
333 figList8 = self.plotRelativeBiasACoeffs(
334 aDict,
335 aDictNoB,
336 fullCovsModel,
337 fullCovsModelNoB,
338 self.config.signalElectronsRelativeA,
339 gainDict,
340 maxr=4,
341 )
343 figList = (
344 figList1
345 + figList2
346 + figList3
347 + figList4
348 + figList5
349 + figList6
350 + figList7
351 + figList8
352 )
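        # Concatenated in output order: five covariance/model figures, three
        # normalized-covariance figures, then the a/b matrices, a/b vs distance,
        # cumulative a sum, and relative-bias figures (twelve in total, matching
        # ptcPlot1 through ptcPlot12).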
354 figDict = {}
355 for i, fig in enumerate(figList):
356 figDict[i] = fig
358 return figDict
360 @staticmethod
361 def plotCovariances(
362 mu,
363 covs,
364 covsModel,
365 covsWeights,
366 covsNoB,
367 covsModelNoB,
368 covsWeightsNoB,
369 gainDict,
370 noiseDict,
371 aDict,
372 bDict,
373 ):
374 """Plot covariances and models: Cov00, Cov10, Cov01.
376 Figs. 6 and 7 of Astier+19
378 Parameters
379 ----------
380 mu : `dict` [`str`, `list`]
381 Dictionary keyed by amp name with mean signal values.
383 covs : `dict` [`str`, `list`]
            Dictionary keyed by amp names containing a list of measured
385 covariances per mean flux.
387 covsModel : `dict` [`str`, `list`]
            Dictionary keyed by amp names containing the covariances
389 model (Eq. 20 of Astier+19) per mean flux.
391 covsWeights : `dict` [`str`, `list`]
            Dictionary keyed by amp names containing the sqrt. of
393 covariances weights.
395 covsNoB : `dict` [`str`, `list`]
            Dictionary keyed by amp names containing a list of measured
397 covariances per mean flux ('b'=0 in Astier+19).
399 covsModelNoB : `dict` [`str`, `list`]
400 Dictionary keyed by amp names containing covariances model
401 (with 'b'=0 in Eq. 20 of Astier+19) per mean flux.
403 covsWeightsNoB : `dict` [`str`, `list`]
404 Dictionary keyed by amp names containing sqrt. of
405 covariances weights ('b' = 0 in Eq. 20 of Astier+19).
407 gainDict : `dict` [`str`, `float`]
408 Dictionary keyed by amp names containing the gains in e-/ADU.
410 noiseDict : `dict` [`str`, `float`]
            Dictionary keyed by amp names containing the rms readout
412 noise in e-.
414 aDict : `dict` [`str`, `numpy.array`]
415 Dictionary keyed by amp names containing 'a' coefficients
416 (Eq. 20 of Astier+19).
418 bDict : `dict` [`str`, `numpy.array`]
419 Dictionary keyed by amp names containing 'b' coefficients
420 (Eq. 20 of Astier+19).
421 """
422 legendFontSize = 6.5
423 labelFontSize = 7
424 titleFontSize = 9
425 supTitleFontSize = 18
426 markerSize = 25
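        # Lay the per-amplifier panels out on a roughly square grid,
        # ceil(sqrt(nAmps)) panels on a side (2x1 for two amplifiers).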
428 nAmps = len(covs)
        if nAmps == 2:
            nRows, nCols = 2, 1
        else:
            nRows = np.sqrt(nAmps)
            mantissa, _ = np.modf(nRows)
            if mantissa > 0:
                nRows = int(nRows) + 1
                nCols = nRows
            else:
                nRows = int(nRows)
                nCols = nRows
440 f, ax = plt.subplots(
441 nrows=nRows, ncols=nCols, sharex="col", sharey="row", figsize=(13, 10)
442 )
443 f2, ax2 = plt.subplots(
444 nrows=nRows, ncols=nCols, sharex="col", sharey="row", figsize=(13, 10)
445 )
446 fResCov00, axResCov00 = plt.subplots(
447 nrows=nRows, ncols=nCols, sharex="col", sharey="row", figsize=(13, 10)
448 )
449 fCov01, axCov01 = plt.subplots(
450 nrows=nRows, ncols=nCols, sharex="col", sharey="row", figsize=(13, 10)
451 )
452 fCov10, axCov10 = plt.subplots(
453 nrows=nRows, ncols=nCols, sharex="col", sharey="row", figsize=(13, 10)
454 )
456 assert len(covsModel) == nAmps
457 assert len(covsWeights) == nAmps
459 assert len(covsNoB) == nAmps
460 assert len(covsModelNoB) == nAmps
461 assert len(covsWeightsNoB) == nAmps
463 for i, (amp, a, a2, aResVar, a3, a4) in enumerate(
464 zip(
465 covs,
466 ax.flatten(),
467 ax2.flatten(),
468 axResCov00.flatten(),
469 axCov01.flatten(),
470 axCov10.flatten(),
471 )
472 ):
474 muAmp, cov, model, weight = (
475 mu[amp],
476 covs[amp],
477 covsModel[amp],
478 covsWeights[amp],
479 )
480 if not np.isnan(
481 np.array(cov)
482 ).all(): # If all the entries are np.nan, this is a bad amp.
483 aCoeffs, bCoeffs = np.array(aDict[amp]), np.array(bDict[amp])
484 gain, noise = gainDict[amp], noiseDict[amp]
485 (
486 meanVecFinal,
487 varVecFinal,
488 varVecModelFinal,
489 varWeightsFinal,
490 _,
491 ) = getFitDataFromCovariances(
492 0, 0, muAmp, cov, model, weight, returnMasked=True
493 )
495 # Get weighted reduced chi2
496 chi2FullModelVar = calculateWeightedReducedChi2(
497 varVecFinal, varVecModelFinal, varWeightsFinal, len(meanVecFinal), 4
498 )
                (
                    meanVecFinalCov01,
                    varVecFinalCov01,
                    varVecModelFinalCov01,
                    _,
                    _,
                ) = getFitDataFromCovariances(
                    0, 1, muAmp, cov, model, weight, returnMasked=True
                )
510 (
511 meanVecFinalCov10,
512 varVecFinalCov10,
513 varVecModelFinalCov10,
514 _,
515 _,
516 ) = getFitDataFromCovariances(
517 1, 0, muAmp, cov, model, weight, returnMasked=True
518 )
                # Quadratic fit for residuals below
521 par2 = np.polyfit(meanVecFinal, varVecFinal, 2, w=varWeightsFinal)
522 varModelFinalQuadratic = np.polyval(par2, meanVecFinal)
523 chi2QuadModelVar = calculateWeightedReducedChi2(
524 varVecFinal,
525 varModelFinalQuadratic,
526 varWeightsFinal,
527 len(meanVecFinal),
528 3,
529 )
531 # fit with no 'b' coefficient (c = a*b in Eq. 20 of Astier+19)
532 covNoB, modelNoB, weightNoB = (
533 covsNoB[amp],
534 covsModelNoB[amp],
535 covsWeightsNoB[amp],
536 )
537 (
538 meanVecFinalNoB,
539 varVecFinalNoB,
540 varVecModelFinalNoB,
541 varWeightsFinalNoB,
542 _,
543 ) = getFitDataFromCovariances(
544 0, 0, muAmp, covNoB, modelNoB, weightNoB, returnMasked=True
545 )
547 chi2FullModelNoBVar = calculateWeightedReducedChi2(
548 varVecFinalNoB,
549 varVecModelFinalNoB,
550 varWeightsFinalNoB,
551 len(meanVecFinalNoB),
552 3,
553 )
554 stringLegend = (
555 f"Gain: {gain:.4} e/ADU \n"
556 f"Noise: {noise:.4} e \n"
557 + r"$a_{00}$: %.3e 1/e" % aCoeffs[0, 0]
558 + "\n"
559 + r"$b_{00}$: %.3e 1/e" % bCoeffs[0, 0]
560 + f"\nLast in fit: {meanVecFinal[-1]:.7} ADU "
561 )
562 minMeanVecFinal = np.nanmin(meanVecFinal)
563 maxMeanVecFinal = np.nanmax(meanVecFinal)
564 deltaXlim = maxMeanVecFinal - minMeanVecFinal
566 a.set_xlabel(r"Mean signal ($\mu$, ADU)", fontsize=labelFontSize)
567 a.set_ylabel(r"Variance (ADU$^2$)", fontsize=labelFontSize)
568 a.tick_params(labelsize=11)
569 a.set_xscale("linear")
570 a.set_yscale("linear")
571 a.scatter(meanVecFinal, varVecFinal, c="blue", marker="o", s=markerSize)
572 a.plot(meanVecFinal, varVecModelFinal, color="red", linestyle="-")
573 a.text(
574 0.03,
575 0.7,
576 stringLegend,
577 transform=a.transAxes,
578 fontsize=legendFontSize,
579 )
580 a.set_title(amp, fontsize=titleFontSize)
581 a.set_xlim(
582 [
583 minMeanVecFinal - 0.2 * deltaXlim,
584 maxMeanVecFinal + 0.2 * deltaXlim,
585 ]
586 )
588 # Same as above, but in log-scale
589 a2.set_xlabel(r"Mean Signal ($\mu$, ADU)", fontsize=labelFontSize)
590 a2.set_ylabel(r"Variance (ADU$^2$)", fontsize=labelFontSize)
591 a2.tick_params(labelsize=11)
592 a2.set_xscale("log")
593 a2.set_yscale("log")
594 a2.plot(meanVecFinal, varVecModelFinal, color="red", linestyle="-")
595 a2.scatter(
596 meanVecFinal, varVecFinal, c="blue", marker="o", s=markerSize
597 )
598 a2.text(
599 0.03,
600 0.7,
601 stringLegend,
602 transform=a2.transAxes,
603 fontsize=legendFontSize,
604 )
605 a2.set_title(amp, fontsize=titleFontSize)
606 a2.set_xlim([minMeanVecFinal, maxMeanVecFinal])
608 # Residuals var - model
609 aResVar.set_xlabel(r"Mean signal ($\mu$, ADU)", fontsize=labelFontSize)
610 aResVar.set_ylabel(r"Residuals (ADU$^2$)", fontsize=labelFontSize)
611 aResVar.tick_params(labelsize=11)
612 aResVar.set_xscale("linear")
613 aResVar.set_yscale("linear")
614 aResVar.plot(
615 meanVecFinal,
616 varVecFinal - varVecModelFinal,
617 color="blue",
618 linestyle="-",
619 label=r"Full fit ($\chi_{\rm{red}}^2$: %g)" % chi2FullModelVar,
620 )
621 aResVar.plot(
622 meanVecFinal,
623 varVecFinal - varModelFinalQuadratic,
624 color="red",
625 linestyle="-",
626 label=r"Quadratic fit ($\chi_{\rm{red}}^2$: %g)" % chi2QuadModelVar,
627 )
628 aResVar.plot(
629 meanVecFinalNoB,
630 varVecFinalNoB - varVecModelFinalNoB,
631 color="green",
632 linestyle="-",
633 label=r"Full fit (b=0) ($\chi_{\rm{red}}^2$: %g)"
634 % chi2FullModelNoBVar,
635 )
636 aResVar.axhline(color="black")
637 aResVar.set_title(amp, fontsize=titleFontSize)
638 aResVar.set_xlim(
639 [
640 minMeanVecFinal - 0.2 * deltaXlim,
641 maxMeanVecFinal + 0.2 * deltaXlim,
642 ]
643 )
644 aResVar.legend(fontsize=7)
646 a3.set_xlabel(r"Mean signal ($\mu$, ADU)", fontsize=labelFontSize)
647 a3.set_ylabel(r"Cov01 (ADU$^2$)", fontsize=labelFontSize)
648 a3.tick_params(labelsize=11)
649 a3.set_xscale("linear")
650 a3.set_yscale("linear")
651 a3.scatter(
652 meanVecFinalCov01,
653 varVecFinalCov01,
654 c="blue",
655 marker="o",
656 s=markerSize,
657 )
658 a3.plot(
659 meanVecFinalCov01, varVecModelFinalCov01, color="red", linestyle="-"
660 )
661 a3.set_title(amp, fontsize=titleFontSize)
662 a3.set_xlim(
663 [
664 minMeanVecFinal - 0.2 * deltaXlim,
665 maxMeanVecFinal + 0.2 * deltaXlim,
666 ]
667 )
669 a4.set_xlabel(r"Mean signal ($\mu$, ADU)", fontsize=labelFontSize)
670 a4.set_ylabel(r"Cov10 (ADU$^2$)", fontsize=labelFontSize)
671 a4.tick_params(labelsize=11)
672 a4.set_xscale("linear")
673 a4.set_yscale("linear")
674 a4.scatter(
675 meanVecFinalCov10,
676 varVecFinalCov10,
677 c="blue",
678 marker="o",
679 s=markerSize,
680 )
681 a4.plot(
682 meanVecFinalCov10, varVecModelFinalCov10, color="red", linestyle="-"
683 )
684 a4.set_title(amp, fontsize=titleFontSize)
685 a4.set_xlim(
686 [
687 minMeanVecFinal - 0.2 * deltaXlim,
688 maxMeanVecFinal + 0.2 * deltaXlim,
689 ]
690 )
692 else:
693 a.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
694 a2.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
695 a3.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
696 a4.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
698 f.suptitle(
699 "PTC from covariances as in Astier+19 \n Fit: Eq. 20, Astier+19",
700 fontsize=supTitleFontSize,
701 )
702 f2.suptitle(
703 "PTC from covariances as in Astier+19 (log-log) \n Fit: Eq. 20, Astier+19",
704 fontsize=supTitleFontSize,
705 )
706 fResCov00.suptitle(
707 "Residuals (data-model) for Cov00 (Var)", fontsize=supTitleFontSize
708 )
709 fCov01.suptitle(
710 "Cov01 as in Astier+19 (nearest parallel neighbor covariance) \n"
711 " Fit: Eq. 20, Astier+19",
712 fontsize=supTitleFontSize,
713 )
714 fCov10.suptitle(
715 "Cov10 as in Astier+19 (nearest serial neighbor covariance) \n"
716 "Fit: Eq. 20, Astier+19",
717 fontsize=supTitleFontSize,
718 )
720 return [f, f2, fResCov00, fCov01, fCov10]
722 def plotNormalizedCovariances(
723 self,
724 i,
725 j,
726 inputMu,
727 covs,
728 covsModel,
729 covsWeights,
730 covsNoB,
731 covsModelNoB,
732 covsWeightsNoB,
733 offset=0.004,
734 numberOfBins=10,
735 plotData=True,
736 topPlot=False,
737 ):
738 """Plot C_ij/mu vs mu.
740 Figs. 8, 10, and 11 of Astier+19
742 Parameters
743 ----------
744 i : `int`
745 Covariance lag.
746 j : `int`
747 Covariance lag.
748 inputMu : `dict` [`str`, `list`]
749 Dictionary keyed by amp name with mean signal values.
750 covs : `dict` [`str`, `list`]
            Dictionary keyed by amp names containing a list of measured
752 covariances per mean flux.
753 covsModel : `dict` [`str`, `list`]
            Dictionary keyed by amp names containing the covariances
755 model (Eq. 20 of Astier+19) per mean flux.
756 covsWeights : `dict` [`str`, `list`]
            Dictionary keyed by amp names containing the sqrt. of
758 covariances weights.
759 covsNoB : `dict` [`str`, `list`]
            Dictionary keyed by amp names containing a list of measured
761 covariances per mean flux ('b'=0 in Astier+19).
762 covsModelNoB : `dict` [`str`, `list`]
763 Dictionary keyed by amp names containing covariances model
764 (with 'b'=0 in Eq. 20 of Astier+19) per mean flux.
765 covsWeightsNoB : `dict` [`str`, `list`]
766 Dictionary keyed by amp names containing sqrt. of
767 covariances weights ('b' = 0 in Eq. 20 of Astier+19).
771 offset : `float`, optional
772 Constant offset factor to plot covariances in same panel
773 (so they don't overlap).
774 numberOfBins : `int`, optional
775 Number of bins for top and bottom plot.
776 plotData : `bool`, optional
777 Plot the data points?
        topPlot : `bool`, optional
            If `True`, plot only the top panel with the covariances; otherwise
            also plot the bottom panel with the model residuals.
781 """
782 if not topPlot:
783 fig = plt.figure(figsize=(8, 10))
784 gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
785 gs.update(hspace=0)
786 ax0 = plt.subplot(gs[0])
787 plt.setp(ax0.get_xticklabels(), visible=False)
788 else:
789 fig = plt.figure(figsize=(8, 8))
790 ax0 = plt.subplot(111)
791 ax0.ticklabel_format(style="sci", axis="x", scilimits=(0, 0))
792 ax0.tick_params(axis="both", labelsize="x-large")
793 mue, rese, wce = [], [], []
794 mueNoB, reseNoB, wceNoB = [], [], []
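        # Each amplifier's curve is shifted vertically by counter*offset so the
        # per-amp curves remain distinguishable in the shared panel.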
795 for counter, amp in enumerate(covs):
796 muAmp, fullCov, fullCovModel, fullCovWeight = (
797 inputMu[amp],
798 covs[amp],
799 covsModel[amp],
800 covsWeights[amp],
801 )
802 if len(fullCov) == 0:
803 continue
804 mu, cov, model, weightCov, _ = getFitDataFromCovariances(
805 i,
806 j,
807 muAmp,
808 fullCov,
809 fullCovModel,
810 fullCovWeight,
811 divideByMu=True,
812 returnMasked=True,
813 )
815 mue += list(mu)
816 rese += list(cov - model)
817 wce += list(weightCov)
819 fullCovNoB, fullCovModelNoB, fullCovWeightNoB = (
820 covsNoB[amp],
821 covsModelNoB[amp],
822 covsWeightsNoB[amp],
823 )
824 if len(fullCovNoB) == 0:
825 continue
826 (muNoB, covNoB, modelNoB, weightCovNoB, _) = getFitDataFromCovariances(
827 i,
828 j,
829 muAmp,
830 fullCovNoB,
831 fullCovModelNoB,
832 fullCovWeightNoB,
833 divideByMu=True,
834 returnMasked=True,
835 )
837 mueNoB += list(muNoB)
838 reseNoB += list(covNoB - modelNoB)
839 wceNoB += list(weightCovNoB)
841 # the corresponding fit
842 (fit_curve,) = plt.plot(mu, model + counter * offset, "-", linewidth=4.0)
            # Binned plot (passing numberOfBins = len(mu) amounts to no binning).
844 gind = self.indexForBins(mu, numberOfBins)
846 xb, yb, wyb, sigyb = self.binData(mu, cov, gind, weightCov)
847 plt.errorbar(
848 xb,
849 yb + counter * offset,
850 yerr=sigyb,
851 marker="o",
852 linestyle="none",
853 markersize=6.5,
854 color=fit_curve.get_color(),
855 label=f"{amp} (N: {len(mu)})",
856 )
857 # plot the data
858 if plotData:
859 (points,) = plt.plot(
860 mu, cov + counter * offset, ".", color=fit_curve.get_color()
861 )
862 plt.legend(loc="upper right", fontsize=8)
863 # end loop on amps
864 mue = np.array(mue)
865 rese = np.array(rese)
866 wce = np.array(wce)
867 mueNoB = np.array(mueNoB)
868 reseNoB = np.array(reseNoB)
869 wceNoB = np.array(wceNoB)
871 plt.xlabel(r"$\mu (el)$", fontsize="x-large")
872 plt.ylabel(r"$Cov{%d%d}/\mu + Cst (el)$" % (i, j), fontsize="x-large")
873 if not topPlot:
874 gind = self.indexForBins(mue, numberOfBins)
875 xb, yb, wyb, sigyb = self.binData(mue, rese, gind, wce)
877 ax1 = plt.subplot(gs[1], sharex=ax0)
878 ax1.errorbar(
879 xb, yb, yerr=sigyb, marker="o", linestyle="none", label="Full fit"
880 )
881 gindNoB = self.indexForBins(mueNoB, numberOfBins)
882 xb2, yb2, wyb2, sigyb2 = self.binData(mueNoB, reseNoB, gindNoB, wceNoB)
884 ax1.errorbar(
885 xb2, yb2, yerr=sigyb2, marker="o", linestyle="none", label="b = 0"
886 )
887 ax1.tick_params(axis="both", labelsize="x-large")
888 plt.legend(loc="upper left", fontsize="large")
889 # horizontal line at zero
890 plt.plot(xb, [0] * len(xb), "--", color="k")
891 plt.ticklabel_format(style="sci", axis="x", scilimits=(0, 0))
892 plt.ticklabel_format(style="sci", axis="y", scilimits=(0, 0))
893 plt.xlabel(r"$\mu (el)$", fontsize="x-large")
894 plt.ylabel(r"$Cov{%d%d}/\mu$ -model (el)" % (i, j), fontsize="x-large")
895 plt.tight_layout()
896 plt.suptitle(f"Nbins: {numberOfBins}")
897 # overlapping y labels:
898 fig.canvas.draw()
899 labels0 = [item.get_text() for item in ax0.get_yticklabels()]
900 labels0[0] = ""
901 ax0.set_yticklabels(labels0)
903 return [fig]
905 @staticmethod
906 def plot_a_b(aDict, bDict, bRange=3):
907 """Fig. 12 of Astier+19
909 Color display of a and b arrays fits, averaged over channels.
911 Parameters
912 ----------
913 aDict : `dict` [`numpy.array`]
914 Dictionary keyed by amp names containing the fitted 'a'
915 coefficients from the model in Eq. 20 of Astier+19 (if
916 `ptcFitType` is `FULLCOVARIANCE`).
917 bDict : `dict` [`numpy.array`]
918 Dictionary keyed by amp names containing the fitted 'b'
919 coefficients from the model in Eq. 20 of Astier+19 (if
920 `ptcFitType` is `FULLCOVARIANCE`).
921 bRange : `int`
922 Maximum lag for b arrays.
923 """
924 a, b = [], []
925 for amp in aDict:
926 if np.isnan(aDict[amp]).all():
927 continue
928 a.append(aDict[amp])
929 b.append(bDict[amp])
930 a = np.array(a).mean(axis=0)
931 b = np.array(b).mean(axis=0)
932 fig = plt.figure(figsize=(7, 11))
933 ax0 = fig.add_subplot(2, 1, 1)
934 im0 = ax0.imshow(
935 np.abs(a.transpose()), origin="lower", norm=mpl.colors.LogNorm()
936 )
937 ax0.tick_params(axis="both", labelsize="x-large")
938 ax0.set_title(r"$|a|$", fontsize="x-large")
939 ax0.xaxis.set_ticks_position("bottom")
940 cb0 = plt.colorbar(im0)
941 cb0.ax.tick_params(labelsize="x-large")
943 ax1 = fig.add_subplot(2, 1, 2)
944 ax1.tick_params(axis="both", labelsize="x-large")
945 ax1.yaxis.set_major_locator(MaxNLocator(integer=True))
946 ax1.xaxis.set_major_locator(MaxNLocator(integer=True))
947 im1 = ax1.imshow(1e6 * b[:bRange, :bRange].transpose(), origin="lower")
948 cb1 = plt.colorbar(im1)
949 cb1.ax.tick_params(labelsize="x-large")
950 ax1.set_title(r"$b \times 10^6$", fontsize="x-large")
951 ax1.xaxis.set_ticks_position("bottom")
952 plt.tight_layout()
954 return [fig]
956 @staticmethod
957 def ab_vs_dist(aDict, bDict, bRange=4):
958 """Fig. 13 of Astier+19.
960 Values of a and b arrays fits, averaged over amplifiers, as a
961 function of distance.
963 Parameters
964 ----------
965 aDict : `dict` [`numpy.array`]
966 Dictionary keyed by amp names containing the fitted 'a'
967 coefficients from the model in Eq. 20 of Astier+19 (if
968 `ptcFitType` is `FULLCOVARIANCE`).
970 bDict : `dict` [`numpy.array`]
971 Dictionary keyed by amp names containing the fitted 'b'
972 coefficients from the model in Eq. 20 of Astier+19 (if
973 `ptcFitType` is `FULLCOVARIANCE`).
974 bRange : `int`
975 Maximum lag for b arrays.
976 """
977 assert len(aDict) == len(bDict)
978 a = []
979 for amp in aDict:
980 if np.isnan(aDict[amp]).all():
981 continue
982 a.append(aDict[amp])
983 a = np.array(a)
984 y = a.mean(axis=0)
985 sy = a.std(axis=0) / np.sqrt(len(aDict))
986 i, j = np.indices(y.shape)
987 upper = (i >= j).ravel()
988 r = np.sqrt(i**2 + j**2).ravel()
989 y = y.ravel()
990 sy = sy.ravel()
991 fig = plt.figure(figsize=(6, 9))
992 ax = fig.add_subplot(211)
993 ax.set_xlim([0.5, r.max() + 1])
994 ax.errorbar(
995 r[upper],
996 y[upper],
997 yerr=sy[upper],
998 marker="o",
999 linestyle="none",
1000 color="b",
1001 label="$i>=j$",
1002 )
1003 ax.errorbar(
1004 r[~upper],
1005 y[~upper],
1006 yerr=sy[~upper],
1007 marker="o",
1008 linestyle="none",
1009 color="r",
1010 label="$i<j$",
1011 )
1012 ax.legend(loc="upper center", fontsize="x-large")
1013 ax.set_xlabel(r"$\sqrt{i^2+j^2}$", fontsize="x-large")
1014 ax.set_ylabel(r"$a_{ij}$", fontsize="x-large")
1015 ax.set_yscale("log")
1016 ax.tick_params(axis="both", labelsize="x-large")
1018 #
1019 axb = fig.add_subplot(212)
1020 b = []
1021 for amp in bDict:
1022 if np.isnan(bDict[amp]).all():
1023 continue
1024 b.append(bDict[amp])
1025 b = np.array(b)
1026 yb = b.mean(axis=0)
1027 syb = b.std(axis=0) / np.sqrt(len(bDict))
        ib, jb = np.indices(yb.shape)
        upper = (ib > jb).ravel()
        rb = np.sqrt(ib**2 + jb**2).ravel()
        yb = yb.ravel()
        syb = syb.ravel()
        xmin = -0.2
        xmax = bRange
        axb.set_xlim([xmin, xmax + 0.2])
        cutu = (rb > xmin) & (rb < xmax) & (upper)
        cutl = (rb > xmin) & (rb < xmax) & (~upper)
1038 axb.errorbar(
1039 rb[cutu],
1040 yb[cutu],
1041 yerr=syb[cutu],
1042 marker="o",
1043 linestyle="none",
1044 color="b",
1045 label="$i>=j$",
1046 )
1047 axb.errorbar(
1048 rb[cutl],
1049 yb[cutl],
1050 yerr=syb[cutl],
1051 marker="o",
1052 linestyle="none",
1053 color="r",
1054 label="$i<j$",
1055 )
1056 plt.legend(loc="upper center", fontsize="x-large")
1057 axb.set_xlabel(r"$\sqrt{i^2+j^2}$", fontsize="x-large")
1058 axb.set_ylabel(r"$b_{ij}$", fontsize="x-large")
1059 axb.ticklabel_format(style="sci", axis="y", scilimits=(0, 0))
1060 axb.tick_params(axis="both", labelsize="x-large")
1061 plt.tight_layout()
1063 return [fig]
1065 @staticmethod
1066 def plotAcoeffsSum(aDict, bDict):
1067 """Fig. 14. of Astier+19
1069 Cumulative sum of a_ij as a function of maximum
1070 separation. This plot displays the average over channels.
1072 Parameters
1073 ----------
1074 aDict : `dict` [`numpy.array`]
1075 Dictionary keyed by amp names containing the fitted 'a'
1076 coefficients from the model in Eq. 20 of Astier+19 (if
1077 `ptcFitType` is `FULLCOVARIANCE`).
1078 bDict : `dict` [`numpy.array`]
1079 Dictionary keyed by amp names containing the fitted 'b'
1080 coefficients from the model in Eq. 20 of Astier+19 (if
1081 `ptcFitType` is `FULLCOVARIANCE`).
1082 """
1083 assert len(aDict) == len(bDict)
1084 a, b = [], []
1085 for amp in aDict:
1086 if np.isnan(aDict[amp]).all() or np.isnan(bDict[amp]).all():
1087 continue
1088 a.append(aDict[amp])
1089 b.append(bDict[amp])
1090 a = np.array(a).mean(axis=0)
1091 b = np.array(b).mean(axis=0)
1092 fig = plt.figure(figsize=(7, 6))
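        # Weight each a_ij by its multiplicity under the fourfold symmetry of the
        # covariance pattern: 4 for i, j > 0, 2 along the axes, and 1 for a_00.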
1093 w = 4 * np.ones_like(a)
1094 w[0, 1:] = 2
1095 w[1:, 0] = 2
1096 w[0, 0] = 1
1097 wa = w * a
1098 indices = range(1, a.shape[0] + 1)
1099 sums = [wa[0:n, 0:n].sum() for n in indices]
1100 ax = plt.subplot(111)
1101 ax.plot(indices, sums / sums[0], "o", color="b")
1102 ax.set_yscale("log")
1103 ax.set_xlim(indices[0] - 0.5, indices[-1] + 0.5)
1104 ax.set_ylim(None, 1.2)
1105 ax.set_ylabel(
1106 r"$[\sum_{|i|<n\ &\ |j|<n} a_{ij}] / |a_{00}|$", fontsize="x-large"
1107 )
1108 ax.set_xlabel("n", fontsize="x-large")
1109 ax.tick_params(axis="both", labelsize="x-large")
1110 plt.tight_layout()
1112 return [fig]
1114 @staticmethod
1115 def plotRelativeBiasACoeffs(
1116 aDict,
1117 aDictNoB,
1118 fullCovsModel,
1119 fullCovsModelNoB,
1120 signalElectrons,
1121 gainDict,
1122 maxr=None,
1123 ):
1124 """Fig. 15 in Astier+19.
1126 Illustrates systematic bias from estimating 'a'
1127 coefficients from the slope of correlations as opposed to the
1128 full model in Astier+19.
1130 Parameters
1131 ----------
1132 aDict : `dict`
1133 Dictionary of 'a' matrices (Eq. 20, Astier+19), with amp
1134 names as keys.
1135 aDictNoB : `dict`
1136 Dictionary of 'a' matrices ('b'= 0 in Eq. 20, Astier+19),
1137 with amp names as keys.
1138 fullCovsModel : `dict` [`str`, `list`]
1139 Dictionary keyed by amp names containing covariances model
1140 per mean flux.
1141 fullCovsModelNoB : `dict` [`str`, `list`]
1142 Dictionary keyed by amp names containing covariances model
1143 (with 'b'=0 in Eq. 20 of Astier+19) per mean flux.
1144 signalElectrons : `float`
1145 Signal at which to evaluate the a_ij coefficients.
1146 gainDict : `dict` [`str`, `float`]
            Dictionary keyed by amp names with the gains in e-/ADU.
1148 maxr : `int`, optional
1149 Maximum lag.
1150 """
1151 fig = plt.figure(figsize=(7, 11))
1152 title = [f"'a' relative bias at {signalElectrons} e", "'a' relative bias (b=0)"]
1153 data = [(aDict, fullCovsModel), (aDictNoB, fullCovsModelNoB)]
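        # Top panel: bias of the slope-based 'a' estimate relative to the full
        # model fit; bottom panel: the same comparison for the b = 0 fit.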
1155 for k, pair in enumerate(data):
1156 diffs = []
1157 amean = []
1158 for amp in pair[0]:
1159 covModel = np.array(pair[1][amp])
1160 if np.isnan(covModel).all():
1161 continue
1162 # Compute the "a" coefficients of the Antilogus+14
1163 # (1402.0725) model as in Guyonnet+15 (1501.01577,
1164 # eq. 16, the slope of cov/var at a given flux mu in
1165 # electrons). Eq. 16 of 1501.01577 is an approximation
1166 # to the more complete model in Astier+19
1167 # (1905.08677).
1168 var = covModel[0, 0, 0] # ADU^2
1169 # For a result in electrons^-1, we have to use mu in electrons
1170 aOld = covModel[0, :, :] / (var * signalElectrons)
1171 a = pair[0][amp]
1172 amean.append(a)
1173 diffs.append((aOld - a))
1174 amean = np.array(amean).mean(axis=0)
1175 diff = np.array(diffs).mean(axis=0)
1176 diff = diff / amean
1177 diff = diff[:]
1178 # The difference should be close to zero
1179 diff[0, 0] = 0
1180 if maxr is None:
1181 maxr = diff.shape[0]
1182 diff = diff[:maxr, :maxr]
1183 ax0 = fig.add_subplot(2, 1, k + 1)
1184 im0 = ax0.imshow(diff.transpose(), origin="lower")
1185 ax0.yaxis.set_major_locator(MaxNLocator(integer=True))
1186 ax0.xaxis.set_major_locator(MaxNLocator(integer=True))
1187 ax0.tick_params(axis="both", labelsize="x-large")
1188 plt.colorbar(im0)
1189 ax0.set_title(title[k])
1191 plt.tight_layout()
1193 return [fig]
1195 def _plotStandardPtc(self, dataset):
        """Plot the PTC (linear and log-log scales) and variance/signal
        vs signal, per amplifier.
1199 Parameters
1200 ----------
1201 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
1202 The dataset containing the means, variances, exposure
1203 times, and mask.
1204 """
1205 ptcFitType = dataset.ptcFitType
1206 if ptcFitType == "EXPAPPROXIMATION":
1207 ptcFunc = funcAstier
1208 stringTitle = r"Var = $\frac{1}{2g^2a_{00}}(\exp (2a_{00} \mu g) - 1) + \frac{n_{00}}{g^2}$ "
1209 elif ptcFitType == "POLYNOMIAL":
1210 ptcFunc = funcPolynomial
1211 for key in dataset.ptcFitPars:
1212 deg = len(dataset.ptcFitPars[key]) - 1
1213 break
1214 stringTitle = r"Polynomial (degree: %g)" % (deg)
1215 else:
1216 raise RuntimeError(
1217 f"The input dataset had an invalid dataset.ptcFitType: {ptcFitType}. \n"
1218 "Options: 'EXPAPPROXIMATION' or 'POLYNOMIAL'."
1219 )
1221 legendFontSize = 6.5
1222 labelFontSize = 8
1223 titleFontSize = 9
1224 supTitleFontSize = 18
1225 markerSize = 25
1227 # General determination of the size of the plot grid
1228 nAmps = len(dataset.ampNames)
        if nAmps == 2:
            nRows, nCols = 2, 1
        else:
            nRows = np.sqrt(nAmps)
            mantissa, _ = np.modf(nRows)
            if mantissa > 0:
                nRows = int(nRows) + 1
                nCols = nRows
            else:
                nRows = int(nRows)
                nCols = nRows
1240 f, ax = plt.subplots(
1241 nrows=nRows, ncols=nCols, sharex="col", sharey="row", figsize=(13, 10)
1242 )
1243 f2, ax2 = plt.subplots(
1244 nrows=nRows, ncols=nCols, sharex="col", sharey="row", figsize=(13, 10)
1245 )
1246 f3, ax3 = plt.subplots(
1247 nrows=nRows, ncols=nCols, sharex="col", sharey="row", figsize=(13, 10)
1248 )
1250 for i, (amp, a, a2, a3) in enumerate(
1251 zip(dataset.ampNames, ax.flatten(), ax2.flatten(), ax3.flatten())
1252 ):
1253 meanVecOriginal = np.ravel(np.array(dataset.rawMeans[amp]))
1254 varVecOriginal = np.ravel(np.array(dataset.rawVars[amp]))
1255 mask = np.ravel(np.array(dataset.expIdMask[amp]))
1256 if np.sum(mask) == 0: # The whole amp is bad
1257 a.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
1258 a2.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
1259 a3.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
1260 continue
1261 else:
1262 mask = mask.astype(bool)
1263 meanVecFinal = meanVecOriginal[mask]
1264 varVecFinal = varVecOriginal[mask]
1265 meanVecOutliers = meanVecOriginal[np.invert(mask)]
1266 varVecOutliers = varVecOriginal[np.invert(mask)]
1267 pars, parsErr = np.array(dataset.ptcFitPars[amp]), np.array(
1268 dataset.ptcFitParsError[amp]
1269 )
1270 ptcRedChi2 = dataset.ptcFitChiSq[amp]
1271 if ptcFitType == "EXPAPPROXIMATION":
1272 if len(meanVecFinal):
1273 ptcA00, ptcA00error = pars[0], parsErr[0]
1274 ptcGain, ptcGainError = pars[1], parsErr[1]
1275 ptcNoise = np.sqrt((pars[2])) # pars[2] is in (e-)^2
1276 ptcNoiseAdu = ptcNoise * (1.0 / ptcGain)
1277 ptcNoiseError = (
1278 0.5
1279 * (parsErr[2] / np.fabs(pars[2]))
1280 * np.sqrt(np.fabs(pars[2]))
1281 )
1282 stringLegend = (
1283 f"a00: {ptcA00:.2e}+/-{ptcA00error:.2e} 1/e"
1284 f"\nGain: {ptcGain:.4}+/-{ptcGainError:.2e} e/ADU"
1285 f"\nNoise: {ptcNoise:.4}+/-{ptcNoiseError:.2e} e\n"
1286 r"$\chi^2_{\rm{red}}$: " + f"{ptcRedChi2:.4}"
1287 f"\nLast in fit: {meanVecFinal[-1]:.7} ADU "
1288 )
1290 if ptcFitType == "POLYNOMIAL":
1291 if len(meanVecFinal):
1292 ptcGain, ptcGainError = 1.0 / pars[1], np.fabs(1.0 / pars[1]) * (
1293 parsErr[1] / pars[1]
1294 )
1295 ptcNoiseAdu = np.sqrt((pars[0])) # pars[0] is in ADU^2
1296 ptcNoise = ptcNoiseAdu * ptcGain
1297 ptcNoiseError = (
1298 0.5
1299 * (parsErr[0] / np.fabs(pars[0]))
1300 * (np.sqrt(np.fabs(pars[0])))
1301 ) * ptcGain
1302 stringLegend = (
1303 f"Gain: {ptcGain:.4}+/-{ptcGainError:.2e} e/ADU\n"
1304 f"Noise: {ptcNoise:.4}+/-{ptcNoiseError:.2e} e\n"
1305 r"$\chi^2_{\rm{red}}$: " + f"{ptcRedChi2:.4}"
1306 f"\nLast in fit: {meanVecFinal[-1]:.7} ADU "
1307 )
1308 a.set_xlabel(r"Mean signal ($\mu$, ADU)", fontsize=labelFontSize)
1309 a.set_ylabel(r"Variance (ADU$^2$)", fontsize=labelFontSize)
1310 a.tick_params(labelsize=11)
1311 a.set_xscale("linear")
1312 a.set_yscale("linear")
1314 a2.set_xlabel(r"Mean Signal ($\mu$, ADU)", fontsize=labelFontSize)
1315 a2.set_ylabel(r"Variance (ADU$^2$)", fontsize=labelFontSize)
1316 a2.tick_params(labelsize=11)
1317 a2.set_xscale("log")
1318 a2.set_yscale("log")
1320 a3.set_xlabel(r"Mean signal ($\mu$, ADU)", fontsize=labelFontSize)
1321 a3.set_ylabel(r"Variance/$\mu$ (ADU)", fontsize=labelFontSize)
1322 a3.tick_params(labelsize=11)
1323 a3.set_xscale("log")
1324 a3.set_yscale("linear")
1325 minMeanVecFinal = np.nanmin(meanVecFinal)
1326 maxMeanVecFinal = np.nanmax(meanVecFinal)
1327 meanVecFit = np.linspace(
1328 minMeanVecFinal, maxMeanVecFinal, 100 * len(meanVecFinal)
1329 )
1330 minMeanVecOriginal = np.nanmin(meanVecOriginal)
1331 maxMeanVecOriginal = np.nanmax(meanVecOriginal)
1332 deltaXlim = maxMeanVecOriginal - minMeanVecOriginal
            a.plot(meanVecFit, ptcFunc(pars, meanVecFit), color="red")
            a.scatter(meanVecFinal, varVecFinal, c="blue", marker="o", s=markerSize)
            a.text(0.03, 0.66, stringLegend, transform=a.transAxes, fontsize=legendFontSize)
            a.set_title(amp, fontsize=titleFontSize)
            a2.plot(meanVecFit, ptcFunc(pars, meanVecFit), color="red")
            a2.scatter(meanVecFinal, varVecFinal, c="blue", marker="o", s=markerSize)
1336 a2.scatter(
1337 meanVecOutliers, varVecOutliers, c="magenta", marker="s", s=markerSize
1338 )
1339 a2.text(
1340 0.03,
1341 0.66,
1342 stringLegend,
1343 transform=a2.transAxes,
1344 fontsize=legendFontSize,
1345 )
1346 a2.set_title(amp, fontsize=titleFontSize)
1347 a2.set_xlim([minMeanVecOriginal, maxMeanVecOriginal])
1349 # Var/mu vs mu
1350 a3.plot(meanVecFit, ptcFunc(pars, meanVecFit) / meanVecFit, color="red")
1351 a3.scatter(
1352 meanVecFinal,
1353 varVecFinal / meanVecFinal,
1354 c="blue",
1355 marker="o",
1356 s=markerSize,
1357 )
1358 a3.scatter(
1359 meanVecOutliers,
1360 varVecOutliers / meanVecOutliers,
1361 c="magenta",
1362 marker="s",
1363 s=markerSize,
1364 )
1365 a3.text(
1366 0.05, 0.1, stringLegend, transform=a3.transAxes, fontsize=legendFontSize
1367 )
1368 a3.set_title(amp, fontsize=titleFontSize)
1369 a3.set_xlim(
1370 [
1371 minMeanVecOriginal - 0.2 * deltaXlim,
1372 maxMeanVecOriginal + 0.2 * deltaXlim,
1373 ]
1374 )
1375 f.suptitle("PTC \n Fit: " + stringTitle, fontsize=supTitleFontSize)
1376 f2.suptitle("PTC (log-log)", fontsize=supTitleFontSize)
1377 f3.suptitle(r"Var/$\mu$", fontsize=supTitleFontSize)
1379 figDict = {0: f, 1: f2, 2: f3}
1381 return figDict
1383 @staticmethod
1384 def indexForBins(x, nBins):
1385 """Builds an index with regular binning. The result can be fed into
1386 binData.
1388 Parameters
1389 ----------
1390 x : `numpy.array`
1391 Data to bin.
1392 nBins : `int`
            Number of bins.
1395 Returns
1396 -------
1397 np.digitize(x, bins): `numpy.array`
1398 Bin indices.
1399 """
1400 bins = np.linspace(x.min(), x.max() + abs(x.max() * 1e-7), nBins + 1)
1401 return np.digitize(x, bins)
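    # Worked example for indexForBins (illustrative values): with
    # x = np.array([1.0, 2.0, 3.0, 10.0]) and nBins=2 the bin edges span
    # roughly [1, 5.5, 10], so np.digitize returns bin indices [1, 1, 1, 2].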
1403 @staticmethod
1404 def binData(x, y, binIndex, wy=None):
1405 """Bin data (usually for display purposes).
1407 Parameters
1408 ----------
1409 x : `numpy.array`
1410 Data to bin.
1411 y : `numpy.array`
1412 Data to bin.
        binIndex : `list`
1414 Bin number of each datum.
1415 wy : `numpy.array`
1416 Inverse rms of each datum to use when averaging (the
1417 actual weight is wy**2).
1419 Returns
1420 -------
1421 xbin : `numpy.array`
1422 Binned data in x.
1423 ybin : `numpy.array`
1424 Binned data in y.
1425 wybin : `numpy.array`
1426 Binned weights in y, computed from wy's in each bin.
1427 sybin : `numpy.array`
1428 Uncertainty on the bin average, considering actual
1429 scatter, and ignoring weights.
1430 """
1431 if wy is None:
1432 wy = np.ones_like(x)
1433 binIndexSet = set(binIndex)
1434 w2 = wy * wy
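        # Per-bin weighted means: xbin_k = sum_i(w_i^2 x_i) / sum_i(w_i^2), and
        # likewise for ybin_k, with each sum restricted to the points in bin k.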
1435 xw2 = x * (w2)
1436 xbin = np.array(
1437 [xw2[binIndex == i].sum() / w2[binIndex == i].sum() for i in binIndexSet]
1438 )
1440 yw2 = y * w2
1441 ybin = np.array(
1442 [yw2[binIndex == i].sum() / w2[binIndex == i].sum() for i in binIndexSet]
1443 )
1445 wybin = np.sqrt(np.array([w2[binIndex == i].sum() for i in binIndexSet]))
1446 sybin = np.array(
1447 [
1448 y[binIndex == i].std() / np.sqrt(np.array([binIndex == i]).sum())
1449 for i in binIndexSet
1450 ]
1451 )
1453 return xbin, ybin, wybin, sybin