lsst.cp.pipe  19.0.0-11-g57ef05f+2
ptc.py
Go to the documentation of this file.
1 # This file is part of cp_pipe.
2 #
3 # Developed for the LSST Data Management System.
4 # This product includes software developed by the LSST Project
5 # (https://www.lsst.org).
6 # See the COPYRIGHT file at the top-level directory of this distribution
7 # for details of code ownership.
8 #
9 # This program is free software: you can redistribute it and/or modify
10 # it under the terms of the GNU General Public License as published by
11 # the Free Software Foundation, either version 3 of the License, or
12 # (at your option) any later version.
13 #
14 # This program is distributed in the hope that it will be useful,
15 # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 # GNU General Public License for more details.
18 #
19 # You should have received a copy of the GNU General Public License
20 # along with this program. If not, see <https://www.gnu.org/licenses/>.
21 #
22 
23 __all__ = ['MeasurePhotonTransferCurveTask',
24  'MeasurePhotonTransferCurveTaskConfig',
25  'PhotonTransferCurveDataset']
26 
27 import numpy as np
28 import matplotlib.pyplot as plt
29 import os
30 from matplotlib.backends.backend_pdf import PdfPages
31 from sqlite3 import OperationalError
32 from collections import Counter
33 
34 import lsst.afw.math as afwMath
35 import lsst.pex.config as pexConfig
36 import lsst.pipe.base as pipeBase
37 from lsst.ip.isr import IsrTask
38 from .utils import (NonexistentDatasetTaskDataIdContainer, PairedVisitListTaskRunner,
39  checkExpLengthEqual, validateIsrConfig)
40 from scipy.optimize import leastsq, least_squares
41 import numpy.polynomial.polynomial as poly
42 
43 from lsst.ip.isr.linearize import Linearizer
44 import datetime
45 
46 
class MeasurePhotonTransferCurveTaskConfig(pexConfig.Config):
    """Config class for photon transfer curve measurement task"""
    isr = pexConfig.ConfigurableField(
        target=IsrTask,
        doc="""Task to perform instrumental signature removal.""",
    )
    # The three isr*Steps lists below are enforced by validateIsrConfig()
    # in the task constructor (raise / raise / warn respectively).
    isrMandatorySteps = pexConfig.ListField(
        dtype=str,
        doc="isr operations that must be performed for valid results. Raises if any of these are False.",
        default=['doAssembleCcd']
    )
    isrForbiddenSteps = pexConfig.ListField(
        dtype=str,
        doc="isr operations that must NOT be performed for valid results. Raises if any of these are True",
        default=['doFlat', 'doFringe', 'doAddDistortionModel', 'doBrighterFatter', 'doUseOpticsTransmission',
                 'doUseFilterTransmission', 'doUseSensorTransmission', 'doUseAtmosphereTransmission']
    )
    isrDesirableSteps = pexConfig.ListField(
        dtype=str,
        doc="isr operations that it is advisable to perform, but are not mission-critical." +
            " WARNs are logged for any of these found to be False.",
        default=['doBias', 'doDark', 'doCrosstalk', 'doDefect']
    )
    isrUndesirableSteps = pexConfig.ListField(
        dtype=str,
        doc="isr operations that it is *not* advisable to perform in the general case, but are not" +
            " forbidden as some use-cases might warrant them." +
            " WARNs are logged for any of these found to be True.",
        default=['doLinearize']
    )
    ccdKey = pexConfig.Field(
        dtype=str,
        doc="The key by which to pull a detector from a dataId, e.g. 'ccd' or 'detector'.",
        default='ccd',
    )
    makePlots = pexConfig.Field(
        dtype=bool,
        doc="Plot the PTC curves?",
        default=False,
    )
    ptcFitType = pexConfig.ChoiceField(
        dtype=str,
        doc="Fit PTC to approximation in Astier+19 (Equation 16) or to a polynomial.",
        default="POLYNOMIAL",
        allowed={
            "POLYNOMIAL": "n-degree polynomial (use 'polynomialFitDegree' to set 'n').",
            "ASTIERAPPROXIMATION": "Approximation in Astier+19 (Eq. 16)."
        }
    )
    polynomialFitDegree = pexConfig.Field(
        dtype=int,
        doc="Degree of polynomial to fit the PTC, when 'ptcFitType'=POLYNOMIAL.",
        default=2,
    )
    polynomialFitDegreeNonLinearity = pexConfig.Field(
        dtype=int,
        doc="Degree of polynomial to fit the meanSignal vs exposureTime curve to produce" +
            " the table for LinearizeLookupTable.",
        default=3,
    )
    binSize = pexConfig.Field(
        dtype=int,
        doc="Bin the image by this factor in both dimensions.",
        default=1,
    )
    minMeanSignal = pexConfig.Field(
        dtype=float,
        doc="Minimum value (inclusive) of mean signal (in DN) above which to consider.",
        default=0,
    )
    maxMeanSignal = pexConfig.Field(
        dtype=float,
        doc="Maximum value (inclusive) of mean signal (in DN) below which to consider.",
        default=9e6,
    )
    # Asymmetric initial cuts: the PTC normally turns DOWN at high flux, but can
    # anomalously turn up just before rollover, so the positive cut is stricter.
    initialNonLinearityExclusionThresholdPositive = pexConfig.RangeField(
        dtype=float,
        doc="Initially exclude data points with a variance that are more than a factor of this from being"
            " linear in the positive direction, from the PTC fit. Note that these points will also be"
            " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
            " to allow an accurate determination of the sigmas for said iterative fit.",
        default=0.12,
        min=0.0,
        max=1.0,
    )
    initialNonLinearityExclusionThresholdNegative = pexConfig.RangeField(
        dtype=float,
        doc="Initially exclude data points with a variance that are more than a factor of this from being"
            " linear in the negative direction, from the PTC fit. Note that these points will also be"
            " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
            " to allow an accurate determination of the sigmas for said iterative fit.",
        default=0.25,
        min=0.0,
        max=1.0,
    )
    sigmaCutPtcOutliers = pexConfig.Field(
        dtype=float,
        doc="Sigma cut for outlier rejection in PTC.",
        default=5.0,
    )
    maxIterationsPtcOutliers = pexConfig.Field(
        dtype=int,
        doc="Maximum number of iterations for outlier rejection in PTC.",
        default=2,
    )
    doFitBootstrap = pexConfig.Field(
        dtype=bool,
        doc="Use bootstrap for the PTC fit parameters and errors?.",
        default=False,
    )
    linResidualTimeIndex = pexConfig.Field(
        dtype=int,
        doc="Index position in time array for reference time in linearity residual calculation.",
        default=2,
    )
    maxAduForLookupTableLinearizer = pexConfig.Field(
        dtype=int,
        doc="Maximum DN value for the LookupTable linearizer.",
        default=2**18,
    )
    instrumentName = pexConfig.Field(
        dtype=str,
        doc="Instrument name.",
        default='',
    )
172 
173 
175  """A simple class to hold the output data from the PTC task.
176 
177  The dataset is made up of a dictionary for each item, keyed by the
178  amplifiers' names, which much be supplied at construction time.
179 
180  New items cannot be added to the class to save accidentally saving to the
181  wrong property, and the class can be frozen if desired.
182 
183  inputVisitPairs records the visits used to produce the data.
184  When fitPtcAndNonLinearity() is run, a mask is built up, which is by definition
185  always the same length as inputVisitPairs, rawExpTimes, rawMeans
186  and rawVars, and is a list of bools, which are incrementally set to False
187  as points are discarded from the fits.
188 
189  PTC fit parameters for polynomials are stored in a list in ascending order
190  of polynomial term, i.e. par[0]*x^0 + par[1]*x + par[2]*x^2 etc
191  with the length of the list corresponding to the order of the polynomial
192  plus one.
193  """
194  def __init__(self, ampNames):
195  # add items to __dict__ directly because __setattr__ is overridden
196 
197  # instance variables
198  self.__dict__["ampNames"] = ampNames
199  self.__dict__["badAmps"] = []
200 
201  # raw data variables
202  self.__dict__["inputVisitPairs"] = {ampName: [] for ampName in ampNames}
203  self.__dict__["visitMask"] = {ampName: [] for ampName in ampNames}
204  self.__dict__["rawExpTimes"] = {ampName: [] for ampName in ampNames}
205  self.__dict__["rawMeans"] = {ampName: [] for ampName in ampNames}
206  self.__dict__["rawVars"] = {ampName: [] for ampName in ampNames}
207 
208  # fit information
209  self.__dict__["ptcFitType"] = {ampName: "" for ampName in ampNames}
210  self.__dict__["ptcFitPars"] = {ampName: [] for ampName in ampNames}
211  self.__dict__["ptcFitParsError"] = {ampName: [] for ampName in ampNames}
212  self.__dict__["ptcFitReducedChiSquared"] = {ampName: [] for ampName in ampNames}
213  self.__dict__["nonLinearity"] = {ampName: [] for ampName in ampNames}
214  self.__dict__["nonLinearityError"] = {ampName: [] for ampName in ampNames}
215  self.__dict__["nonLinearityResiduals"] = {ampName: [] for ampName in ampNames}
216  self.__dict__["nonLinearityReducedChiSquared"] = {ampName: [] for ampName in ampNames}
217 
218  # final results
219  self.__dict__["gain"] = {ampName: -1. for ampName in ampNames}
220  self.__dict__["gainErr"] = {ampName: -1. for ampName in ampNames}
221  self.__dict__["noise"] = {ampName: -1. for ampName in ampNames}
222  self.__dict__["noiseErr"] = {ampName: -1. for ampName in ampNames}
223  self.__dict__["coefficientsLinearizePolynomial"] = {ampName: [] for ampName in ampNames}
224  self.__dict__["coefficientLinearizeSquared"] = {ampName: [] for ampName in ampNames}
225 
226  def __setattr__(self, attribute, value):
227  """Protect class attributes"""
228  if attribute not in self.__dict__:
229  raise AttributeError(f"{attribute} is not already a member of PhotonTransferCurveDataset, which"
230  " does not support setting of new attributes.")
231  else:
232  self.__dict__[attribute] = value
233 
234  def getVisitsUsed(self, ampName):
235  """Get the visits used, i.e. not discarded, for a given amp.
236 
237  If no mask has been created yet, all visits are returned.
238  """
239  if self.visitMask[ampName] == []:
240  return self.inputVisitPairs[ampName]
241 
242  # if the mask exists it had better be the same length as the visitPairs
243  assert len(self.visitMask[ampName]) == len(self.inputVisitPairs[ampName])
244 
245  pairs = self.inputVisitPairs[ampName]
246  mask = self.visitMask[ampName]
247  # cast to bool required because numpy
248  return [(v1, v2) for ((v1, v2), m) in zip(pairs, mask) if bool(m) is True]
249 
250  def getGoodAmps(self):
251  return [amp for amp in self.ampNames if amp not in self.badAmps]
252 
253 
class MeasurePhotonTransferCurveTask(pipeBase.CmdLineTask):
    """A class to calculate, fit, and plot a PTC from a set of flat pairs.

    The Photon Transfer Curve (var(signal) vs mean(signal)) is a standard tool
    used in astronomical detectors characterization (e.g., Janesick 2001,
    Janesick 2007). This task calculates the PTC from a series of pairs of
    flat-field images; each pair taken at identical exposure times. The
    difference image of each pair is formed to eliminate fixed pattern noise,
    and then the variance of the difference image and the mean of the average image
    are used to produce the PTC. An n-degree polynomial or the approximation in Equation
    16 of Astier+19 ("The Shape of the Photon Transfer Curve of CCD sensors",
    arXiv:1905.08677) can be fitted to the PTC curve. These models include
    parameters such as the gain (e/DN) and readout noise.

    Linearizers to correct for signal-chain non-linearity are also calculated.
    The `Linearizer` class, in general, can support per-amp linearizers, but in this
    task this is not supported.

    Parameters
    ----------
    *args: `list`
        Positional arguments passed to the Task constructor. None used at this
        time.
    **kwargs: `dict`
        Keyword arguments passed on to the Task constructor. None used at this
        time.
    """

    # Runner that expands the --visit-pairs argument into per-detector calls.
    RunnerClass = PairedVisitListTaskRunner
    ConfigClass = MeasurePhotonTransferCurveTaskConfig
    _DefaultName = "measurePhotonTransferCurve"
286 
    def __init__(self, *args, **kwargs):
        """Construct the task, create the ISR subtask, and validate/freeze the config."""
        pipeBase.CmdLineTask.__init__(self, *args, **kwargs)
        self.makeSubtask("isr")
        plt.interactive(False)  # stop windows popping up when plotting. When headless, use 'agg' backend too
        # Fail early if the ISR configuration would invalidate the PTC measurement;
        # the rules enforced here come from the isr*Steps config fields.
        validateIsrConfig(self.isr, self.config.isrMandatorySteps,
                          self.config.isrForbiddenSteps, self.config.isrDesirableSteps, checkTrim=False)
        self.config.validate()
        self.config.freeze()
295 
296  @classmethod
297  def _makeArgumentParser(cls):
298  """Augment argument parser for the MeasurePhotonTransferCurveTask."""
299  parser = pipeBase.ArgumentParser(name=cls._DefaultName)
300  parser.add_argument("--visit-pairs", dest="visitPairs", nargs="*",
301  help="Visit pairs to use. Each pair must be of the form INT,INT e.g. 123,456")
302  parser.add_id_argument("--id", datasetType="photonTransferCurveDataset",
303  ContainerClass=NonexistentDatasetTaskDataIdContainer,
304  help="The ccds to use, e.g. --id ccd=0..100")
305  return parser
306 
307  @pipeBase.timeMethod
308  def runDataRef(self, dataRef, visitPairs):
309  """Run the Photon Transfer Curve (PTC) measurement task.
310 
311  For a dataRef (which is each detector here),
312  and given a list of visit pairs at different exposure times,
313  measure the PTC.
314 
315  Parameters
316  ----------
317  dataRef : list of lsst.daf.persistence.ButlerDataRef
318  dataRef for the detector for the visits to be fit.
319  visitPairs : `iterable` of `tuple` of `int`
320  Pairs of visit numbers to be processed together
321  """
322 
323  # setup necessary objects
324  detNum = dataRef.dataId[self.config.ccdKey]
325  detector = dataRef.get('camera')[dataRef.dataId[self.config.ccdKey]]
326  # expand some missing fields that we need for lsstCam. This is a work-around
327  # for Gen2 problems that I (RHL) don't feel like solving. The calibs pipelines
328  # (which inherit from CalibTask) use addMissingKeys() to do basically the same thing
329  #
330  # Basically, the butler's trying to look up the fields in `raw_visit` which won't work
331  for name in dataRef.getButler().getKeys('bias'):
332  if name not in dataRef.dataId:
333  try:
334  dataRef.dataId[name] = \
335  dataRef.getButler().queryMetadata('raw', [name], detector=detNum)[0]
336  except OperationalError:
337  pass
338 
339  amps = detector.getAmplifiers()
340  ampNames = [amp.getName() for amp in amps]
341  dataset = PhotonTransferCurveDataset(ampNames)
342 
343  self.log.info('Measuring PTC using %s visits for detector %s' % (visitPairs, detNum))
344 
345  for (v1, v2) in visitPairs:
346  # Perform ISR on each exposure
347  dataRef.dataId['expId'] = v1
348  exp1 = self.isr.runDataRef(dataRef).exposure
349  dataRef.dataId['expId'] = v2
350  exp2 = self.isr.runDataRef(dataRef).exposure
351  del dataRef.dataId['expId']
352 
353  checkExpLengthEqual(exp1, exp2, v1, v2, raiseWithMessage=True)
354  expTime = exp1.getInfo().getVisitInfo().getExposureTime()
355 
356  for amp in detector:
357  mu, varDiff = self.measureMeanVarPair(exp1, exp2, region=amp.getBBox())
358  ampName = amp.getName()
359 
360  dataset.rawExpTimes[ampName].append(expTime)
361  dataset.rawMeans[ampName].append(mu)
362  dataset.rawVars[ampName].append(varDiff)
363  dataset.inputVisitPairs[ampName].append((v1, v2))
364 
365  numberAmps = len(detector.getAmplifiers())
366  numberAduValues = self.config.maxAduForLookupTableLinearizer
367  lookupTableArray = np.zeros((numberAmps, numberAduValues), dtype=np.int)
368 
369  # Fit PTC and (non)linearity of signal vs time curve.
370  # Fill up PhotonTransferCurveDataset object.
371  # Fill up array for LUT linearizer.
372  # Produce coefficients for Polynomial ans Squared linearizers.
373  dataset = self.fitPtcAndNonLinearity(dataset, self.config.ptcFitType,
374  tableArray=lookupTableArray)
375 
376  if self.config.makePlots:
377  self.plot(dataRef, dataset, ptcFitType=self.config.ptcFitType)
378 
379  # Save data, PTC fit, and NL fit dictionaries
380  self.log.info(f"Writing PTC and NL data to {dataRef.getUri(write=True)}")
381  dataRef.put(dataset, datasetType="photonTransferCurveDataset")
382 
383  butler = dataRef.getButler()
384  self.log.info(f"Writing linearizers: \n "
385  "lookup table (linear component of polynomial fit), \n "
386  "polynomial (coefficients for a polynomial correction), \n "
387  "and squared linearizer (quadratic coefficient from polynomial)")
388 
389  detName = detector.getName()
390  now = datetime.datetime.utcnow()
391  calibDate = now.strftime("%Y-%m-%d")
392 
393  for linType, dataType in [("LOOKUPTABLE", 'linearizeLut'),
394  ("LINEARIZEPOLYNOMIAL", 'linearizePolynomial'),
395  ("LINEARIZESQUARED", 'linearizeSquared')]:
396 
397  if linType == "LOOKUPTABLE":
398  tableArray = lookupTableArray
399  else:
400  tableArray = None
401 
402  linearizer = self.buildLinearizerObject(dataset, detector, calibDate, linType,
403  instruName=self.config.instrumentName,
404  tableArray=tableArray,
405  log=self.log)
406  butler.put(linearizer, datasetType=dataType, dataId={'detector': detNum,
407  'detectorName': detName, 'calibDate': calibDate})
408 
409  self.log.info('Finished measuring PTC for in detector %s' % detNum)
410 
411  return pipeBase.Struct(exitStatus=0)
412 
413  def buildLinearizerObject(self, dataset, detector, calibDate, linearizerType, instruName='',
414  tableArray=None, log=None):
415  """Build linearizer object to persist.
416 
417  Parameters
418  ----------
419  dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
420  The dataset containing the means, variances, and exposure times
421  detector : `lsst.afw.cameraGeom.Detector`
422  Detector object
423  calibDate : `datetime.datetime`
424  Calibration date
425  linearizerType : `str`
426  'LOOKUPTABLE', 'LINEARIZESQUARED', or 'LINEARIZEPOLYNOMIAL'
427  instruName : `str`, optional
428  Instrument name
429  tableArray : `np.array`, optional
430  Look-up table array with size rows=nAmps and columns=DN values
431  log : `lsst.log.Log`, optional
432  Logger to handle messages
433 
434  Returns
435  -------
436  linearizer : `lsst.ip.isr.Linearizer`
437  Linearizer object
438  """
439  detName = detector.getName()
440  detNum = detector.getId()
441  if linearizerType == "LOOKUPTABLE":
442  if tableArray is not None:
443  linearizer = Linearizer(detector=detector, table=tableArray, log=log)
444  else:
445  raise RuntimeError("tableArray must be provided when creating a LookupTable linearizer")
446  elif linearizerType in ("LINEARIZESQUARED", "LINEARIZEPOLYNOMIAL"):
447  linearizer = Linearizer(log=log)
448  else:
449  raise RuntimeError("Invalid linearizerType {linearizerType} to build a Linearizer object. "
450  "Supported: 'LOOKUPTABLE', 'LINEARIZESQUARED', or 'LINEARIZEPOLYNOMIAL'")
451  for i, amp in enumerate(detector.getAmplifiers()):
452  ampName = amp.getName()
453  if linearizerType == "LOOKUPTABLE":
454  linearizer.linearityCoeffs[ampName] = [i, 0]
455  linearizer.linearityType[ampName] = "LookupTable"
456  elif linearizerType == "LINEARIZESQUARED":
457  linearizer.linearityCoeffs[ampName] = [dataset.coefficientLinearizeSquared[ampName]]
458  linearizer.linearityType[ampName] = "Squared"
459  elif linearizerType == "LINEARIZEPOLYNOMIAL":
460  linearizer.linearityCoeffs[ampName] = dataset.coefficientsLinearizePolynomial[ampName]
461  linearizer.linearityType[ampName] = "Polynomial"
462  linearizer.linearityBBox[ampName] = amp.getBBox()
463 
464  linearizer.validate()
465  calibId = f"detectorName={detName} detector={detNum} calibDate={calibDate} ccd={detNum} filter=NONE"
466 
467  try:
468  raftName = detName.split("_")[0]
469  calibId += f" raftName={raftName}"
470  except Exception:
471  raftname = "NONE"
472  calibId += f" raftName={raftname}"
473 
474  serial = detector.getSerial()
475  linearizer.updateMetadata(instrumentName=instruName, detectorId=f"{detNum}",
476  calibId=calibId, serial=serial, detectorName=f"{detName}")
477 
478  return linearizer
479 
    def measureMeanVarPair(self, exposure1, exposure2, region=None):
        """Calculate the mean signal of two exposures and the variance of their difference.

        Parameters
        ----------
        exposure1 : `lsst.afw.image.exposure.exposure.ExposureF`
            First exposure of flat field pair.
        exposure2 : `lsst.afw.image.exposure.exposure.ExposureF`
            Second exposure of flat field pair.
        region : `lsst.geom.Box2I`, optional
            Region of each exposure where to perform the calculations (e.g, an amplifier).

        Returns
        -------
        mu : `float`
            0.5*(mu1 + mu2), where mu1, and mu2 are the clipped means of the regions in
            both exposures.
        varDiff : `float`
            Half of the clipped variance of the difference of the regions in the two input
            exposures.
        """

        # Restrict to the requested sub-region (e.g. one amplifier) if given.
        if region is not None:
            im1Area = exposure1.maskedImage[region]
            im2Area = exposure2.maskedImage[region]
        else:
            im1Area = exposure1.maskedImage
            im2Area = exposure2.maskedImage

        # Optional binning (binSize=1 is a no-op by configuration default).
        im1Area = afwMath.binImage(im1Area, self.config.binSize)
        im2Area = afwMath.binImage(im2Area, self.config.binSize)

        # Clipped mean of images; then average of mean.
        mu1 = afwMath.makeStatistics(im1Area, afwMath.MEANCLIP).getValue()
        mu2 = afwMath.makeStatistics(im2Area, afwMath.MEANCLIP).getValue()
        mu = 0.5*(mu1 + mu2)

        # Take difference of pairs
        # symmetric formula: diff = (mu2*im1-mu1*im2)/(0.5*(mu1+mu2))
        # (scaling each image by the other's mean removes first-order
        # illumination differences between the two flats)
        temp = im2Area.clone()
        temp *= mu1
        diffIm = im1Area.clone()
        diffIm *= mu2
        diffIm -= temp
        diffIm /= mu

        # Half the clipped variance of the difference: var of a difference of two
        # independent images is the sum of their variances.
        varDiff = 0.5*(afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP).getValue())

        return mu, varDiff
533 
534  def _fitLeastSq(self, initialParams, dataX, dataY, function):
535  """Do a fit and estimate the parameter errors using using scipy.optimize.leastq.
536 
537  optimize.leastsq returns the fractional covariance matrix. To estimate the
538  standard deviation of the fit parameters, multiply the entries of this matrix
539  by the unweighted reduced chi squared and take the square root of the diagonal elements.
540 
541  Parameters
542  ----------
543  initialParams : `list` of `float`
544  initial values for fit parameters. For ptcFitType=POLYNOMIAL, its length
545  determines the degree of the polynomial.
546 
547  dataX : `numpy.array` of `float`
548  Data in the abscissa axis.
549 
550  dataY : `numpy.array` of `float`
551  Data in the ordinate axis.
552 
553  function : callable object (function)
554  Function to fit the data with.
555 
556  Return
557  ------
558  pFitSingleLeastSquares : `list` of `float`
559  List with fitted parameters.
560 
561  pErrSingleLeastSquares : `list` of `float`
562  List with errors for fitted parameters.
563 
564  reducedChiSqSingleLeastSquares : `float`
565  Unweighted reduced chi squared
566  """
567 
568  def errFunc(p, x, y):
569  return function(p, x) - y
570 
571  pFit, pCov, infoDict, errMessage, success = leastsq(errFunc, initialParams,
572  args=(dataX, dataY), full_output=1, epsfcn=0.0001)
573 
574  if (len(dataY) > len(initialParams)) and pCov is not None:
575  reducedChiSq = (errFunc(pFit, dataX, dataY)**2).sum()/(len(dataY)-len(initialParams))
576  pCov *= reducedChiSq
577  else:
578  pCov[:, :] = np.inf
579 
580  errorVec = []
581  for i in range(len(pFit)):
582  errorVec.append(np.fabs(pCov[i][i])**0.5)
583 
584  pFitSingleLeastSquares = pFit
585  pErrSingleLeastSquares = np.array(errorVec)
586 
587  return pFitSingleLeastSquares, pErrSingleLeastSquares, reducedChiSq
588 
589  def _fitBootstrap(self, initialParams, dataX, dataY, function, confidenceSigma=1.):
590  """Do a fit using least squares and bootstrap to estimate parameter errors.
591 
592  The bootstrap error bars are calculated by fitting 100 random data sets.
593 
594  Parameters
595  ----------
596  initialParams : `list` of `float`
597  initial values for fit parameters. For ptcFitType=POLYNOMIAL, its length
598  determines the degree of the polynomial.
599 
600  dataX : `numpy.array` of `float`
601  Data in the abscissa axis.
602 
603  dataY : `numpy.array` of `float`
604  Data in the ordinate axis.
605 
606  function : callable object (function)
607  Function to fit the data with.
608 
609  confidenceSigma : `float`
610  Number of sigmas that determine confidence interval for the bootstrap errors.
611 
612  Return
613  ------
614  pFitBootstrap : `list` of `float`
615  List with fitted parameters.
616 
617  pErrBootstrap : `list` of `float`
618  List with errors for fitted parameters.
619 
620  reducedChiSqBootstrap : `float`
621  Reduced chi squared.
622  """
623 
624  def errFunc(p, x, y):
625  return function(p, x) - y
626 
627  # Fit first time
628  pFit, _ = leastsq(errFunc, initialParams, args=(dataX, dataY), full_output=0)
629 
630  # Get the stdev of the residuals
631  residuals = errFunc(pFit, dataX, dataY)
632  sigmaErrTotal = np.std(residuals)
633 
634  # 100 random data sets are generated and fitted
635  pars = []
636  for i in range(100):
637  randomDelta = np.random.normal(0., sigmaErrTotal, len(dataY))
638  randomDataY = dataY + randomDelta
639  randomFit, _ = leastsq(errFunc, initialParams,
640  args=(dataX, randomDataY), full_output=0)
641  pars.append(randomFit)
642  pars = np.array(pars)
643  meanPfit = np.mean(pars, 0)
644 
645  # confidence interval for parameter estimates
646  nSigma = confidenceSigma
647  errPfit = nSigma*np.std(pars, 0)
648  pFitBootstrap = meanPfit
649  pErrBootstrap = errPfit
650 
651  reducedChiSq = (errFunc(pFitBootstrap, dataX, dataY)**2).sum()/(len(dataY)-len(initialParams))
652  return pFitBootstrap, pErrBootstrap, reducedChiSq
653 
654  def funcPolynomial(self, pars, x):
655  """Polynomial function definition"""
656  return poly.polyval(x, [*pars])
657 
658  def funcAstier(self, pars, x):
659  """Single brighter-fatter parameter model for PTC; Equation 16 of Astier+19"""
660  a00, gain, noise = pars
661  return 0.5/(a00*gain*gain)*(np.exp(2*a00*x*gain)-1) + noise/(gain*gain)
662 
663  @staticmethod
664  def _initialParsForPolynomial(order):
665  assert(order >= 2)
666  pars = np.zeros(order, dtype=np.float)
667  pars[0] = 10
668  pars[1] = 1
669  pars[2:] = 0.0001
670  return pars
671 
672  @staticmethod
673  def _boundsForPolynomial(initialPars):
674  lowers = [np.NINF for p in initialPars]
675  uppers = [np.inf for p in initialPars]
676  lowers[1] = 0 # no negative gains
677  return (lowers, uppers)
678 
679  @staticmethod
680  def _boundsForAstier(initialPars):
681  lowers = [np.NINF for p in initialPars]
682  uppers = [np.inf for p in initialPars]
683  return (lowers, uppers)
684 
685  @staticmethod
686  def _getInitialGoodPoints(means, variances, maxDeviationPositive, maxDeviationNegative):
687  """Return a boolean array to mask bad points.
688 
689  A linear function has a constant ratio, so find the median
690  value of the ratios, and exclude the points that deviate
691  from that by more than a factor of maxDeviationPositive/negative.
692  Asymmetric deviations are supported as we expect the PTC to turn
693  down as the flux increases, but sometimes it anomalously turns
694  upwards just before turning over, which ruins the fits, so it
695  is wise to be stricter about restricting positive outliers than
696  negative ones.
697 
698  Too high and points that are so bad that fit will fail will be included
699  Too low and the non-linear points will be excluded, biasing the NL fit."""
700  ratios = [b/a for (a, b) in zip(means, variances)]
701  medianRatio = np.median(ratios)
702  ratioDeviations = [(r/medianRatio)-1 for r in ratios]
703 
704  # so that it doesn't matter if the deviation is expressed as positive or negative
705  maxDeviationPositive = abs(maxDeviationPositive)
706  maxDeviationNegative = -1. * abs(maxDeviationNegative)
707 
708  goodPoints = np.array([True if (r < maxDeviationPositive and r > maxDeviationNegative)
709  else False for r in ratioDeviations])
710  return goodPoints
711 
712  def _makeZeroSafe(self, array, warn=True, substituteValue=1e-9):
713  """"""
714  nBad = Counter(array)[0]
715  if nBad == 0:
716  return array
717 
718  if warn:
719  msg = f"Found {nBad} zeros in array at elements {[x for x in np.where(array==0)[0]]}"
720  self.log.warn(msg)
721 
722  array[array == 0] = substituteValue
723  return array
724 
    def calculateLinearityResidualAndLinearizers(self, exposureTimeVector, meanSignalVector):
        """Calculate linearity residual and fit an n-order polynomial to the mean vs time curve
        to produce corrections (deviation from linear part of polynomial) for a particular amplifier
        to populate LinearizeLookupTable.

        Use the coefficients of this fit to calculate the correction coefficients for
        LinearizePolynomial and LinearizeSquared.

        Parameters
        ----------
        exposureTimeVector : `list` of `float`
            List of exposure times for each flat pair
        meanSignalVector : `list` of `float`
            List of mean signal from difference image of flat pairs

        Returns
        -------
        polynomialLinearizerCoefficients : `list` of `float`
            Coefficients for LinearizePolynomial, where corrImage = uncorrImage + sum_i c_i uncorrImage^(2 +
            i).
            c_(j-2) = -k_j/(k_1^j) with units (DN^(1-j)). The units of k_j are DN/t^j, and they are fit from
            meanSignalVector = k0 + k1*exposureTimeVector + k2*exposureTimeVector^2 +...
            + kn*exposureTimeVector^n, with n = "polynomialFitDegreeNonLinearity".
            k_0 and k_1 are degenerate with bias level and gain, and are not used by the non-linearity
            correction. Therefore, j = 2...n in the above expression (see `LinearizePolynomial` class in
            `linearize.py`.)

        c0 : `float`
            Coefficient for LinearizeSquared, where corrImage = uncorrImage + c0*uncorrImage^2.
            c0 = -k2/(k1^2), where k1 and k2 are fit from
            meanSignalVector = k0 + k1*exposureTimeVector + k2*exposureTimeVector^2 +...
            + kn*exposureTimeVector^n, with n = "polynomialFitDegreeNonLinearity".

        linearizerTableRow : `list` of `float`
            One dimensional array with deviation from linear part of n-order polynomial fit
            to mean vs time curve. This array will be one row (for the particular amplifier at hand)
            of the table array for LinearizeLookupTable.

        linResidual : `list` of `float`
            Linearity residual from the mean vs time curve, defined as
            100*(1 - meanSignalReference/expTimeReference/(meanSignal/expTime).

        parsFit : `list` of `float`
            Parameters from n-order polynomial fit to meanSignalVector vs exposureTimeVector.

        parsFitErr : `list` of `float`
            Errors of the parameters from the n-order polynomial fit to meanSignalVector vs
            exposureTimeVector.

        reducedChiSquaredNonLinearityFit : `float`
            Reduced chi squared from polynomial fit to meanSignalVector vs exposureTimeVector.

        Raises
        ------
        RuntimeError
            If the reference exposure time (config.linResidualTimeIndex) is 0.0.
        """

        # Lookup table linearizer: fit signal vs time with an n-degree polynomial.
        parsIniNonLinearity = self._initialParsForPolynomial(self.config.polynomialFitDegreeNonLinearity + 1)
        if self.config.doFitBootstrap:
            parsFit, parsFitErr, reducedChiSquaredNonLinearityFit = self._fitBootstrap(parsIniNonLinearity,
                                                                                       exposureTimeVector,
                                                                                       meanSignalVector,
                                                                                       self.funcPolynomial)
        else:
            parsFit, parsFitErr, reducedChiSquaredNonLinearityFit = self._fitLeastSq(parsIniNonLinearity,
                                                                                     exposureTimeVector,
                                                                                     meanSignalVector,
                                                                                     self.funcPolynomial)

        # LinearizeLookupTable:
        # Use linear part to get time at which signal is maxAduForLookupTableLinearizer DN
        tMax = (self.config.maxAduForLookupTableLinearizer - parsFit[0])/parsFit[1]
        timeRange = np.linspace(0, tMax, self.config.maxAduForLookupTableLinearizer)
        signalIdeal = (parsFit[0] + parsFit[1]*timeRange).astype(int)
        signalUncorrected = (self.funcPolynomial(parsFit, timeRange)).astype(int)
        linearizerTableRow = signalIdeal - signalUncorrected  # LinearizerLookupTable has corrections

        # LinearizePolynomial and LinearizeSquared:
        # Check that magnitude of higher order (>= 3) coefficents of the polyFit are small,
        # i.e., less than threshold = 1e-10 (typical quadratic and cubic coefficents are ~1e-6
        # and ~1e-12).
        k1 = parsFit[1]
        polynomialLinearizerCoefficients = []
        for i, coefficient in enumerate(parsFit):
            c = -coefficient/(k1**i)
            polynomialLinearizerCoefficients.append(c)
            # NOTE(review): this warning also fires for c_0 (= -k0) and c_1 (= -1),
            # which are O(1) by construction -- presumably the check was meant
            # only for i >= 2; TODO confirm.
            if np.fabs(c) > 1e-10:
                msg = f"Coefficient {c} in polynomial fit larger than threshold 1e-10."
                self.log.warn(msg)
        # Coefficient for LinearizedSquared. Called "c0" in linearize.py
        c0 = polynomialLinearizerCoefficients[2]

        # Linearity residual relative to the configured reference exposure time.
        linResidualTimeIndex = self.config.linResidualTimeIndex
        if exposureTimeVector[linResidualTimeIndex] == 0.0:
            raise RuntimeError("Reference time for linearity residual can't be 0.0")
        linResidual = 100*(1 - ((meanSignalVector[linResidualTimeIndex] /
                                 exposureTimeVector[linResidualTimeIndex]) /
                                (meanSignalVector/exposureTimeVector)))

        return (polynomialLinearizerCoefficients, c0, linearizerTableRow, linResidual, parsFit, parsFitErr,
                reducedChiSquaredNonLinearityFit)
824 
825  def fitPtcAndNonLinearity(self, dataset, ptcFitType, tableArray=None):
826  """Fit the photon transfer curve and calculate linearity and residuals.
827 
828  Fit the photon transfer curve with either a polynomial of the order
829  specified in the task config, or using the Astier approximation.
830 
831  Sigma clipping is performed iteratively for the fit, as well as an
832  initial clipping of data points that are more than
833  config.initialNonLinearityExclusionThreshold away from lying on a
834  straight line. This other step is necessary because the photon transfer
835  curve turns over catastrophically at very high flux (because saturation
836  drops the variance to ~0) and these far outliers cause the initial fit
837  to fail, meaning the sigma cannot be calculated to perform the
838  sigma-clipping.
839 
840  Parameters
841  ----------
842  dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
843  The dataset containing the means, variances and exposure times
844  ptcFitType : `str`
845  Fit a 'POLYNOMIAL' (degree: 'polynomialFitDegree') or
846  'ASTIERAPPROXIMATION' to the PTC
847  tableArray : `np.array`
848  Optional. Look-up table array with size rows=nAmps and columns=DN values.
849  It will be modified in-place if supplied.
850 
851  Returns
852  -------
853  dataset: `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
854  This is the same dataset as the input paramter, however, it has been modified
855  to include information such as the fit vectors and the fit parameters. See
856  the class `PhotonTransferCurveDatase`.
857  """
858 
859  def errFunc(p, x, y):
860  return ptcFunc(p, x) - y
861 
862  sigmaCutPtcOutliers = self.config.sigmaCutPtcOutliers
863  maxIterationsPtcOutliers = self.config.maxIterationsPtcOutliers
864 
865  for i, ampName in enumerate(dataset.ampNames):
866  timeVecOriginal = np.array(dataset.rawExpTimes[ampName])
867  meanVecOriginal = np.array(dataset.rawMeans[ampName])
868  varVecOriginal = np.array(dataset.rawVars[ampName])
869  varVecOriginal = self._makeZeroSafe(varVecOriginal)
870 
871  mask = ((meanVecOriginal >= self.config.minMeanSignal) &
872  (meanVecOriginal <= self.config.maxMeanSignal))
873 
874  goodPoints = self._getInitialGoodPoints(meanVecOriginal, varVecOriginal,
875  self.config.initialNonLinearityExclusionThresholdPositive,
876  self.config.initialNonLinearityExclusionThresholdNegative)
877  mask = mask & goodPoints
878 
879  if ptcFitType == 'ASTIERAPPROXIMATION':
880  ptcFunc = self.funcAstier
881  parsIniPtc = [-1e-9, 1.0, 10.] # a00, gain, noise
882  bounds = self._boundsForAstier(parsIniPtc)
883  if ptcFitType == 'POLYNOMIAL':
884  ptcFunc = self.funcPolynomial
885  parsIniPtc = self._initialParsForPolynomial(self.config.polynomialFitDegree + 1)
886  bounds = self._boundsForPolynomial(parsIniPtc)
887 
888  # Before bootstrap fit, do an iterative fit to get rid of outliers
889  count = 1
890  while count <= maxIterationsPtcOutliers:
891  # Note that application of the mask actually shrinks the array
892  # to size rather than setting elements to zero (as we want) so
893  # always update mask itself and re-apply to the original data
894  meanTempVec = meanVecOriginal[mask]
895  varTempVec = varVecOriginal[mask]
896  res = least_squares(errFunc, parsIniPtc, bounds=bounds, args=(meanTempVec, varTempVec))
897  pars = res.x
898 
899  # change this to the original from the temp because the masks are ANDed
900  # meaning once a point is masked it's always masked, and the masks must
901  # always be the same length for broadcasting
902  sigResids = (varVecOriginal - ptcFunc(pars, meanVecOriginal))/np.sqrt(varVecOriginal)
903  newMask = np.array([True if np.abs(r) < sigmaCutPtcOutliers else False for r in sigResids])
904  mask = mask & newMask
905 
906  nDroppedTotal = Counter(mask)[False]
907  self.log.debug(f"Iteration {count}: discarded {nDroppedTotal} points in total for {ampName}")
908  count += 1
909  # objects should never shrink
910  assert (len(mask) == len(timeVecOriginal) == len(meanVecOriginal) == len(varVecOriginal))
911 
912  dataset.visitMask[ampName] = mask # store the final mask
913 
914  parsIniPtc = pars
915  timeVecFinal = timeVecOriginal[mask]
916  meanVecFinal = meanVecOriginal[mask]
917  varVecFinal = varVecOriginal[mask]
918 
919  if Counter(mask)[False] > 0:
920  self.log.info((f"Number of points discarded in PTC of amplifier {ampName}:" +
921  f" {Counter(mask)[False]} out of {len(meanVecOriginal)}"))
922 
923  if (len(meanVecFinal) < len(parsIniPtc)):
924  msg = (f"\nSERIOUS: Not enough data points ({len(meanVecFinal)}) compared to the number of"
925  f"parameters of the PTC model({len(parsIniPtc)}). Setting {ampName} to BAD.")
926  self.log.warn(msg)
927  dataset.badAmps.append(ampName)
928  dataset.gain[ampName] = np.nan
929  dataset.gainErr[ampName] = np.nan
930  dataset.noise[ampName] = np.nan
931  dataset.noiseErr[ampName] = np.nan
932  dataset.nonLinearity[ampName] = np.nan
933  dataset.nonLinearityError[ampName] = np.nan
934  dataset.nonLinearityResiduals[ampName] = np.nan
935  dataset.coefficientLinearizeSquared[ampName] = np.nan
936  continue
937 
938  # Fit the PTC
939  if self.config.doFitBootstrap:
940  parsFit, parsFitErr, reducedChiSqPtc = self._fitBootstrap(parsIniPtc, meanVecFinal,
941  varVecFinal, ptcFunc)
942  else:
943  parsFit, parsFitErr, reducedChiSqPtc = self._fitLeastSq(parsIniPtc, meanVecFinal,
944  varVecFinal, ptcFunc)
945 
946  dataset.ptcFitPars[ampName] = parsFit
947  dataset.ptcFitParsError[ampName] = parsFitErr
948  dataset.ptcFitReducedChiSquared[ampName] = reducedChiSqPtc
949 
950  if ptcFitType == 'ASTIERAPPROXIMATION':
951  ptcGain = parsFit[1]
952  ptcGainErr = parsFitErr[1]
953  ptcNoise = np.sqrt(np.fabs(parsFit[2]))
954  ptcNoiseErr = 0.5*(parsFitErr[2]/np.fabs(parsFit[2]))*np.sqrt(np.fabs(parsFit[2]))
955  if ptcFitType == 'POLYNOMIAL':
956  ptcGain = 1./parsFit[1]
957  ptcGainErr = np.fabs(1./parsFit[1])*(parsFitErr[1]/parsFit[1])
958  ptcNoise = np.sqrt(np.fabs(parsFit[0]))*ptcGain
959  ptcNoiseErr = (0.5*(parsFitErr[0]/np.fabs(parsFit[0]))*(np.sqrt(np.fabs(parsFit[0]))))*ptcGain
960 
961  dataset.gain[ampName] = ptcGain
962  dataset.gainErr[ampName] = ptcGainErr
963  dataset.noise[ampName] = ptcNoise
964  dataset.noiseErr[ampName] = ptcNoiseErr
965  dataset.ptcFitType[ampName] = ptcFitType
966 
967  # Non-linearity residuals (NL of mean vs time curve): percentage, and fit to a quadratic function
968  # In this case, len(parsIniNonLinearity) = 3 indicates that we want a quadratic fit
969 
970  (coeffsLinPoly, c0, linearizerTableRow, linResidualNonLinearity,
971  parsFitNonLinearity, parsFitErrNonLinearity,
972  reducedChiSqNonLinearity) = self.calculateLinearityResidualAndLinearizers(timeVecFinal,
973  meanVecFinal)
974 
975  # LinearizerLookupTable
976  if tableArray is not None:
977  tableArray[i, :] = linearizerTableRow
978 
979  dataset.nonLinearity[ampName] = parsFitNonLinearity
980  dataset.nonLinearityError[ampName] = parsFitErrNonLinearity
981  dataset.nonLinearityResiduals[ampName] = linResidualNonLinearity
982  dataset.nonLinearityReducedChiSquared[ampName] = reducedChiSqNonLinearity
983  # Slice correction coefficients (starting at 2) for polynomial linearizer. The first
984  # and second are reduntant with the bias and gain, respectively,
985  # and are not used by LinearizerPolynomial.
986  dataset.coefficientsLinearizePolynomial[ampName] = np.array(coeffsLinPoly[2:])
987  dataset.coefficientLinearizeSquared[ampName] = c0
988 
989  return dataset
990 
991  def plot(self, dataRef, dataset, ptcFitType):
992  dirname = dataRef.getUri(datasetType='cpPipePlotRoot', write=True)
993  if not os.path.exists(dirname):
994  os.makedirs(dirname)
995 
996  detNum = dataRef.dataId[self.config.ccdKey]
997  filename = f"PTC_det{detNum}.pdf"
998  filenameFull = os.path.join(dirname, filename)
999  with PdfPages(filenameFull) as pdfPages:
1000  self._plotPtc(dataset, ptcFitType, pdfPages)
1001 
1002  def _plotPtc(self, dataset, ptcFitType, pdfPages):
1003  """Plot PTC, linearity, and linearity residual per amplifier"""
1004 
1005  reducedChiSqPtc = dataset.ptcFitReducedChiSquared
1006  if ptcFitType == 'ASTIERAPPROXIMATION':
1007  ptcFunc = self.funcAstier
1008  stringTitle = (r"Var = $\frac{1}{2g^2a_{00}}(\exp (2a_{00} \mu g) - 1) + \frac{n_{00}}{g^2}$ "
1009  r" ($chi^2$/dof = %g)" % (reducedChiSqPtc))
1010  if ptcFitType == 'POLYNOMIAL':
1011  ptcFunc = self.funcPolynomial
1012  stringTitle = r"Polynomial (degree: %g)" % (self.config.polynomialFitDegree)
1013 
1014  legendFontSize = 7
1015  labelFontSize = 7
1016  titleFontSize = 9
1017  supTitleFontSize = 18
1018  markerSize = 25
1019 
1020  # General determination of the size of the plot grid
1021  nAmps = len(dataset.ampNames)
1022  if nAmps == 2:
1023  nRows, nCols = 2, 1
1024  nRows = np.sqrt(nAmps)
1025  mantissa, _ = np.modf(nRows)
1026  if mantissa > 0:
1027  nRows = int(nRows) + 1
1028  nCols = nRows
1029  else:
1030  nRows = int(nRows)
1031  nCols = nRows
1032 
1033  f, ax = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
1034  f2, ax2 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
1035 
1036  for i, (amp, a, a2) in enumerate(zip(dataset.ampNames, ax.flatten(), ax2.flatten())):
1037  meanVecOriginal = np.array(dataset.rawMeans[amp])
1038  varVecOriginal = np.array(dataset.rawVars[amp])
1039  mask = dataset.visitMask[amp]
1040  meanVecFinal = meanVecOriginal[mask]
1041  varVecFinal = varVecOriginal[mask]
1042  meanVecOutliers = meanVecOriginal[np.invert(mask)]
1043  varVecOutliers = varVecOriginal[np.invert(mask)]
1044  pars, parsErr = dataset.ptcFitPars[amp], dataset.ptcFitParsError[amp]
1045 
1046  if ptcFitType == 'ASTIERAPPROXIMATION':
1047  ptcA00, ptcA00error = pars[0], parsErr[0]
1048  ptcGain, ptcGainError = pars[1], parsErr[1]
1049  ptcNoise = np.sqrt(np.fabs(pars[2]))
1050  ptcNoiseError = 0.5*(parsErr[2]/np.fabs(pars[2]))*np.sqrt(np.fabs(pars[2]))
1051  stringLegend = (f"a00: {ptcA00:.2e}+/-{ptcA00error:.2e} 1/e"
1052  f"\n Gain: {ptcGain:.4}+/-{ptcGainError:.2e} e/DN"
1053  f"\n Noise: {ptcNoise:.4}+/-{ptcNoiseError:.2e} e \n")
1054 
1055  if ptcFitType == 'POLYNOMIAL':
1056  ptcGain, ptcGainError = 1./pars[1], np.fabs(1./pars[1])*(parsErr[1]/pars[1])
1057  ptcNoise = np.sqrt(np.fabs(pars[0]))*ptcGain
1058  ptcNoiseError = (0.5*(parsErr[0]/np.fabs(pars[0]))*(np.sqrt(np.fabs(pars[0]))))*ptcGain
1059  stringLegend = (f"Noise: {ptcNoise:.4}+/-{ptcNoiseError:.2e} e \n"
1060  f"Gain: {ptcGain:.4}+/-{ptcGainError:.2e} e/DN \n")
1061 
1062  minMeanVecFinal = np.min(meanVecFinal)
1063  maxMeanVecFinal = np.max(meanVecFinal)
1064  meanVecFit = np.linspace(minMeanVecFinal, maxMeanVecFinal, 100*len(meanVecFinal))
1065  minMeanVecOriginal = np.min(meanVecOriginal)
1066  maxMeanVecOriginal = np.max(meanVecOriginal)
1067  deltaXlim = maxMeanVecOriginal - minMeanVecOriginal
1068 
1069  a.plot(meanVecFit, ptcFunc(pars, meanVecFit), color='red')
1070  a.plot(meanVecFinal, pars[0] + pars[1]*meanVecFinal, color='green', linestyle='--')
1071  a.scatter(meanVecFinal, varVecFinal, c='blue', marker='o', s=markerSize)
1072  a.scatter(meanVecOutliers, varVecOutliers, c='magenta', marker='s', s=markerSize)
1073  a.set_xlabel(r'Mean signal ($\mu$, DN)', fontsize=labelFontSize)
1074  a.set_xticks(meanVecOriginal)
1075  a.set_ylabel(r'Variance (DN$^2$)', fontsize=labelFontSize)
1076  a.tick_params(labelsize=11)
1077  a.text(0.03, 0.8, stringLegend, transform=a.transAxes, fontsize=legendFontSize)
1078  a.set_xscale('linear', fontsize=labelFontSize)
1079  a.set_yscale('linear', fontsize=labelFontSize)
1080  a.set_title(amp, fontsize=titleFontSize)
1081  a.set_xlim([minMeanVecOriginal - 0.2*deltaXlim, maxMeanVecOriginal + 0.2*deltaXlim])
1082 
1083  # Same, but in log-scale
1084  a2.plot(meanVecFit, ptcFunc(pars, meanVecFit), color='red')
1085  a2.scatter(meanVecFinal, varVecFinal, c='blue', marker='o', s=markerSize)
1086  a2.scatter(meanVecOutliers, varVecOutliers, c='magenta', marker='s', s=markerSize)
1087  a2.set_xlabel(r'Mean Signal ($\mu$, DN)', fontsize=labelFontSize)
1088  a2.set_ylabel(r'Variance (DN$^2$)', fontsize=labelFontSize)
1089  a2.tick_params(labelsize=11)
1090  a2.text(0.03, 0.8, stringLegend, transform=a2.transAxes, fontsize=legendFontSize)
1091  a2.set_xscale('log')
1092  a2.set_yscale('log')
1093  a2.set_title(amp, fontsize=titleFontSize)
1094  a2.set_xlim([minMeanVecOriginal, maxMeanVecOriginal])
1095 
1096  f.suptitle(f"PTC \n Fit: " + stringTitle, fontsize=20)
1097  pdfPages.savefig(f)
1098  f2.suptitle(f"PTC (log-log)", fontsize=20)
1099  pdfPages.savefig(f2)
1100 
1101  # Plot mean vs time
1102  f, ax = plt.subplots(nrows=4, ncols=4, sharex='col', sharey='row', figsize=(13, 10))
1103  for i, (amp, a) in enumerate(zip(dataset.ampNames, ax.flatten())):
1104  meanVecFinal = np.array(dataset.rawMeans[amp])[dataset.visitMask[amp]]
1105  timeVecFinal = np.array(dataset.rawExpTimes[amp])[dataset.visitMask[amp]]
1106 
1107  pars, parsErr = dataset.nonLinearity[amp], dataset.nonLinearityError[amp]
1108  k0, k0Error = pars[0], parsErr[0]
1109  k1, k1Error = pars[1], parsErr[1]
1110  k2, k2Error = pars[2], parsErr[2]
1111  stringLegend = (f"k0: {k0:.4}+/-{k0Error:.2e} DN\n k1: {k1:.4}+/-{k1Error:.2e} DN/t"
1112  f"\n k2: {k2:.2e}+/-{k2Error:.2e} DN/t^2 \n")
1113  a.scatter(timeVecFinal, meanVecFinal)
1114  a.plot(timeVecFinal, self.funcPolynomial(pars, timeVecFinal), color='red')
1115  a.set_xlabel('Time (sec)', fontsize=labelFontSize)
1116  a.set_xticks(timeVecFinal)
1117  a.set_ylabel(r'Mean signal ($\mu$, DN)', fontsize=labelFontSize)
1118  a.tick_params(labelsize=labelFontSize)
1119  a.text(0.03, 0.75, stringLegend, transform=a.transAxes, fontsize=legendFontSize)
1120  a.set_xscale('linear', fontsize=labelFontSize)
1121  a.set_yscale('linear', fontsize=labelFontSize)
1122  a.set_title(amp, fontsize=titleFontSize)
1123  f.suptitle("Linearity \n Fit: Polynomial (degree: %g)"
1124  % (self.config.polynomialFitDegreeNonLinearity),
1125  fontsize=supTitleFontSize)
1126  pdfPages.savefig()
1127 
1128  # Plot linearity residual
1129  f, ax = plt.subplots(nrows=4, ncols=4, sharex='col', sharey='row', figsize=(13, 10))
1130  for i, (amp, a) in enumerate(zip(dataset.ampNames, ax.flatten())):
1131  meanVecFinal = np.array(dataset.rawMeans[amp])[dataset.visitMask[amp]]
1132  linRes = np.array(dataset.nonLinearityResiduals[amp])
1133 
1134  a.scatter(meanVecFinal, linRes)
1135  a.axhline(y=0, color='k')
1136  a.axvline(x=timeVecFinal[self.config.linResidualTimeIndex], color='g', linestyle='--')
1137  a.set_xlabel(r'Mean signal ($\mu$, DN)', fontsize=labelFontSize)
1138  a.set_xticks(meanVecFinal)
1139  a.set_ylabel('LR (%)', fontsize=labelFontSize)
1140  a.tick_params(labelsize=labelFontSize)
1141  a.set_xscale('linear', fontsize=labelFontSize)
1142  a.set_yscale('linear', fontsize=labelFontSize)
1143  a.set_title(amp, fontsize=titleFontSize)
1144 
1145  f.suptitle(r"Linearity Residual: $100(1 - \mu_{\rm{ref}}/t_{\rm{ref}})/(\mu / t))$" + "\n" +
1146  r"$t_{\rm{ref}}$: " + f"{timeVecFinal[2]} s", fontsize=supTitleFontSize)
1147  pdfPages.savefig()
1148 
1149  return
def _plotPtc(self, dataset, ptcFitType, pdfPages)
Definition: ptc.py:1002
def runDataRef(self, dataRef, visitPairs)
Definition: ptc.py:308
def calculateLinearityResidualAndLinearizers(self, exposureTimeVector, meanSignalVector)
Definition: ptc.py:725
def fitPtcAndNonLinearity(self, dataset, ptcFitType, tableArray=None)
Definition: ptc.py:825
def __setattr__(self, attribute, value)
Definition: ptc.py:226
def checkExpLengthEqual(exp1, exp2, v1=None, v2=None, raiseWithMessage=False)
Definition: utils.py:162
def buildLinearizerObject(self, dataset, detector, calibDate, linearizerType, instruName='', tableArray=None, log=None)
Definition: ptc.py:414
def validateIsrConfig(isrTask, mandatory=None, forbidden=None, desirable=None, undesirable=None, checkTrim=True, logName=None)
Definition: utils.py:200
def _getInitialGoodPoints(means, variances, maxDeviationPositive, maxDeviationNegative)
Definition: ptc.py:686
def _fitBootstrap(self, initialParams, dataX, dataY, function, confidenceSigma=1.)
Definition: ptc.py:589
def __init__(self, args, kwargs)
Definition: ptc.py:287
def measureMeanVarPair(self, exposure1, exposure2, region=None)
Definition: ptc.py:480
def _makeZeroSafe(self, array, warn=True, substituteValue=1e-9)
Definition: ptc.py:712
def _fitLeastSq(self, initialParams, dataX, dataY, function)
Definition: ptc.py:534
def plot(self, dataRef, dataset, ptcFitType)
Definition: ptc.py:991