lsst.cp.pipe  20.0.0-18-g541646f+c4459f5943
ptc.py
Go to the documentation of this file.
1 # This file is part of cp_pipe.
2 #
3 # Developed for the LSST Data Management System.
4 # This product includes software developed by the LSST Project
5 # (https://www.lsst.org).
6 # See the COPYRIGHT file at the top-level directory of this distribution
7 # for details of code ownership.
8 #
9 # This program is free software: you can redistribute it and/or modify
10 # it under the terms of the GNU General Public License as published by
11 # the Free Software Foundation, either version 3 of the License, or
12 # (at your option) any later version.
13 #
14 # This program is distributed in the hope that it will be useful,
15 # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 # GNU General Public License for more details.
18 #
19 # You should have received a copy of the GNU General Public License
20 # along with this program. If not, see <https://www.gnu.org/licenses/>.
21 #
22 import numpy as np
23 import matplotlib.pyplot as plt
24 from collections import Counter
25 
26 import lsst.afw.math as afwMath
27 import lsst.pex.config as pexConfig
28 import lsst.pipe.base as pipeBase
29 from .utils import (fitLeastSq, fitBootstrap, funcPolynomial, funcAstier)
30 from scipy.optimize import least_squares
31 
32 import datetime
33 
34 from .astierCovPtcUtils import (fftSize, CovFft, computeCovDirect, fitData)
35 from .linearity import LinearitySolveTask
36 from .photodiode import getBOTphotodiodeData
37 
38 from lsst.pipe.tasks.getRepositoryData import DataRefListRunner
39 from lsst.ip.isr import PhotonTransferCurveDataset
40 
41 __all__ = ['MeasurePhotonTransferCurveTask',
42  'MeasurePhotonTransferCurveTaskConfig']
43 
44 
45 class MeasurePhotonTransferCurveTaskConfig(pexConfig.Config):
46  """Config class for photon transfer curve measurement task"""
47  ccdKey = pexConfig.Field(
48  dtype=str,
49  doc="The key by which to pull a detector from a dataId, e.g. 'ccd' or 'detector'.",
50  default='ccd',
51  )
52  ptcFitType = pexConfig.ChoiceField(
53  dtype=str,
54  doc="Fit PTC to Eq. 16, Eq. 20 in Astier+19, or to a polynomial.",
55  default="POLYNOMIAL",
56  allowed={
57  "POLYNOMIAL": "n-degree polynomial (use 'polynomialFitDegree' to set 'n').",
58  "EXPAPPROXIMATION": "Approximation in Astier+19 (Eq. 16).",
59  "FULLCOVARIANCE": "Full covariances model in Astier+19 (Eq. 20)"
60  }
61  )
62  sigmaClipFullFitCovariancesAstier = pexConfig.Field(
63  dtype=float,
 64  doc="Sigma clip for the full model fit when ptcFitType is FULLCOVARIANCE.",
65  default=5.0,
66  )
67  maxIterFullFitCovariancesAstier = pexConfig.Field(
68  dtype=int,
69  doc="Maximum number of iterations in full model fit for FULLCOVARIANCE ptcFitType",
70  default=3,
71  )
72  maximumRangeCovariancesAstier = pexConfig.Field(
73  dtype=int,
74  doc="Maximum range of covariances as in Astier+19",
75  default=8,
76  )
77  covAstierRealSpace = pexConfig.Field(
78  dtype=bool,
79  doc="Calculate covariances in real space or via FFT? (see appendix A of Astier+19).",
80  default=False,
81  )
82  polynomialFitDegree = pexConfig.Field(
83  dtype=int,
84  doc="Degree of polynomial to fit the PTC, when 'ptcFitType'=POLYNOMIAL.",
85  default=3,
86  )
87  linearity = pexConfig.ConfigurableField(
88  target=LinearitySolveTask,
89  doc="Task to solve the linearity."
90  )
91 
92  doCreateLinearizer = pexConfig.Field(
93  dtype=bool,
94  doc="Calculate non-linearity and persist linearizer?",
95  default=False,
96  )
97 
98  binSize = pexConfig.Field(
99  dtype=int,
100  doc="Bin the image by this factor in both dimensions.",
101  default=1,
102  )
103  minMeanSignal = pexConfig.Field(
104  dtype=float,
 105  doc="Minimum value (inclusive) of mean signal (in DN) to consider.",
106  default=0,
107  )
108  maxMeanSignal = pexConfig.Field(
109  dtype=float,
 110  doc="Maximum value (inclusive) of mean signal (in DN) to consider.",
111  default=9e6,
112  )
113  initialNonLinearityExclusionThresholdPositive = pexConfig.RangeField(
114  dtype=float,
 115  doc="Initially exclude data points with a variance that is more than a factor of this from being"
116  " linear in the positive direction, from the PTC fit. Note that these points will also be"
117  " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
118  " to allow an accurate determination of the sigmas for said iterative fit.",
119  default=0.12,
120  min=0.0,
121  max=1.0,
122  )
123  initialNonLinearityExclusionThresholdNegative = pexConfig.RangeField(
124  dtype=float,
 125  doc="Initially exclude data points with a variance that is more than a factor of this from being"
126  " linear in the negative direction, from the PTC fit. Note that these points will also be"
127  " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
128  " to allow an accurate determination of the sigmas for said iterative fit.",
129  default=0.25,
130  min=0.0,
131  max=1.0,
132  )
133  sigmaCutPtcOutliers = pexConfig.Field(
134  dtype=float,
135  doc="Sigma cut for outlier rejection in PTC.",
136  default=5.0,
137  )
138  maskNameList = pexConfig.ListField(
139  dtype=str,
140  doc="Mask list to exclude from statistics calculations.",
141  default=['SUSPECT', 'BAD', 'NO_DATA'],
142  )
143  nSigmaClipPtc = pexConfig.Field(
144  dtype=float,
145  doc="Sigma cut for afwMath.StatisticsControl()",
146  default=5.5,
147  )
148  nIterSigmaClipPtc = pexConfig.Field(
149  dtype=int,
150  doc="Number of sigma-clipping iterations for afwMath.StatisticsControl()",
151  default=1,
152  )
153  maxIterationsPtcOutliers = pexConfig.Field(
154  dtype=int,
155  doc="Maximum number of iterations for outlier rejection in PTC.",
156  default=2,
157  )
158  doFitBootstrap = pexConfig.Field(
159  dtype=bool,
 160  doc="Use bootstrap for the PTC fit parameters and errors?",
161  default=False,
162  )
163  doPhotodiode = pexConfig.Field(
164  dtype=bool,
165  doc="Apply a correction based on the photodiode readings if available?",
166  default=True,
167  )
168  photodiodeDataPath = pexConfig.Field(
169  dtype=str,
 170  doc="Gen2 only: path to locate the photodiode data files.",
171  default=""
172  )
173  instrumentName = pexConfig.Field(
174  dtype=str,
175  doc="Instrument name.",
176  default='',
177  )
178 
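# Illustrative only (not part of this file): these options are typically overridden in a
# config override file or interactively before running the task; the values below are
# example choices, not recommendations.
config = MeasurePhotonTransferCurveTaskConfig()
config.ptcFitType = "EXPAPPROXIMATION"   # fit Eq. 16 of Astier+19 instead of a polynomial
config.sigmaCutPtcOutliers = 4.0
config.maskNameList = ["SUSPECT", "BAD", "NO_DATA", "SAT"]
config.validate()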
179 
180 class MeasurePhotonTransferCurveTask(pipeBase.CmdLineTask):
181  """A class to calculate, fit, and plot a PTC from a set of flat pairs.
182 
183  The Photon Transfer Curve (var(signal) vs mean(signal)) is a standard tool
 184  used in the characterization of astronomical detectors (e.g., Janesick 2001,
 185  Janesick 2007). If ptcFitType is "EXPAPPROXIMATION" or "POLYNOMIAL", this task calculates the
 186  PTC from a series of pairs of flat-field images, each pair taken at the same exposure
 187  time. The difference image of each pair is formed to eliminate fixed pattern noise,
188  and then the variance of the difference image and the mean of the average image
189  are used to produce the PTC. An n-degree polynomial or the approximation in Equation
190  16 of Astier+19 ("The Shape of the Photon Transfer Curve of CCD sensors",
191  arXiv:1905.08677) can be fitted to the PTC curve. These models include
192  parameters such as the gain (e/DN) and readout noise.
193 
194  Linearizers to correct for signal-chain non-linearity are also calculated.
 195  The `Linearizer` class, in general, can support per-amp linearizers, but
 196  per-amp linearizers are not supported by this task.
197 
198  If ptcFitType is "FULLCOVARIANCE", the covariances of the difference images are calculated via the
199  DFT methods described in Astier+19 and the variances for the PTC are given by the cov[0,0] elements
200  at each signal level. The full model in Equation 20 of Astier+19 is fit to the PTC to get the gain
201  and the noise.
202 
203  Parameters
204  ----------
205 
206  *args: `list`
207  Positional arguments passed to the Task constructor. None used at this
208  time.
209  **kwargs: `dict`
210  Keyword arguments passed on to the Task constructor. None used at this
211  time.
212 
213  """
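# A standalone numerical sketch (not part of this task; the gain, signal levels, and image
# size are made up) of the quantity being measured: for Poisson-dominated flats the
# variance grows linearly with the mean, and the ratio mean/variance estimates the gain.
import numpy as np

rng = np.random.default_rng(12345)
gainTrue = 1.5  # e-/DN, assumed for the simulation
for nElectrons in (1000, 10000, 50000):
    flat1 = rng.poisson(nElectrons, size=(200, 200))/gainTrue  # DN
    flat2 = rng.poisson(nElectrons, size=(200, 200))/gainTrue  # DN
    mu = 0.5*(flat1.mean() + flat2.mean())
    varDiff = 0.5*np.var(flat1 - flat2)  # fixed-pattern structure would cancel in the difference
    print(f"mean = {mu:8.1f} DN  var = {varDiff:8.1f} DN^2  mean/var = {mu/varDiff:.3f} ~ gain")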
214 
215  RunnerClass = DataRefListRunner
216  ConfigClass = MeasurePhotonTransferCurveTaskConfig
217  _DefaultName = "measurePhotonTransferCurve"
218 
219  def __init__(self, *args, **kwargs):
220  pipeBase.CmdLineTask.__init__(self, *args, **kwargs)
221  self.makeSubtask("linearity")
222  plt.interactive(False) # stop windows popping up when plotting. When headless, use 'agg' backend too
223  self.config.validate()
224  self.config.freeze()
225 
226  @pipeBase.timeMethod
227  def runDataRef(self, dataRefList):
228  """Run the Photon Transfer Curve (PTC) measurement task.
229 
 230  For a dataRef (which here corresponds to a single detector),
 231  and given a list of exposure pairs (postISR) at different exposure times,
232  measure the PTC.
233 
234  Parameters
235  ----------
 236  dataRefList : `list` [`lsst.daf.persistence.ButlerDataRef`]
237  Data references for exposures for detectors to process.
238  """
239  if len(dataRefList) < 2:
240  raise RuntimeError("Insufficient inputs to combine.")
241 
242  # setup necessary objects
243  dataRef = dataRefList[0]
244 
245  detNum = dataRef.dataId[self.config.ccdKey]
246  camera = dataRef.get('camera')
247  detector = camera[dataRef.dataId[self.config.ccdKey]]
248 
249  amps = detector.getAmplifiers()
250  ampNames = [amp.getName() for amp in amps]
251  datasetPtc = PhotonTransferCurveDataset(ampNames, self.config.ptcFitType)
252 
253  # Get the pairs of flat indexed by expTime
254  expPairs = self.makePairs(dataRefList)
255  expIds = []
256  for (exp1, exp2) in expPairs.values():
257  id1 = exp1.getInfo().getVisitInfo().getExposureId()
258  id2 = exp2.getInfo().getVisitInfo().getExposureId()
259  expIds.append((id1, id2))
260  self.log.info(f"Measuring PTC using {expIds} exposures for detector {detector.getId()}")
261 
 262  # Get the photodiode data early so that we only use it if all files are found;
 263  # partial corrections are not possible, or would at least require significant
 264  # extra logic to handle.
265  if self.config.doPhotodiode:
266  for (expId1, expId2) in expIds:
267  charges = [-1, -1] # necessary to have a not-found value to keep lists in step
268  for i, expId in enumerate([expId1, expId2]):
 269  # //1000 is a Gen2-only hack, working around the fact that an
270  # exposure's ID is not the same as the expId in the
271  # registry. Currently expId is concatenated with the
272  # zero-padded detector ID. This will all go away in Gen3.
273  dataRef.dataId['expId'] = expId//1000
274  if self.config.photodiodeDataPath:
275  photodiodeData = getBOTphotodiodeData(dataRef, self.config.photodiodeDataPath)
276  else:
277  photodiodeData = getBOTphotodiodeData(dataRef)
278  if photodiodeData: # default path stored in function def to keep task clean
279  charges[i] = photodiodeData.getCharge()
280  else:
 281  # full expId (not //1000) here, as that encodes
 282  # the detector number and so is fully qualified
283  self.log.warn(f"No photodiode data found for {expId}")
284 
285  for ampName in ampNames:
286  datasetPtc.photoCharge[ampName].append((charges[0], charges[1]))
287  else:
288  # Can't be an empty list, as initialized, because astropy.Table won't allow it
289  # when saving as fits
290  for ampName in ampNames:
291  datasetPtc.photoCharge[ampName] = np.repeat(np.nan, len(expIds))
292 
293  for ampName in ampNames:
294  datasetPtc.inputExpIdPairs[ampName] = expIds
295 
296  tupleRecords = []
297  allTags = []
298  for expTime, (exp1, exp2) in expPairs.items():
299  expId1 = exp1.getInfo().getVisitInfo().getExposureId()
300  expId2 = exp2.getInfo().getVisitInfo().getExposureId()
301  tupleRows = []
302  nAmpsNan = 0
303  for ampNumber, amp in enumerate(detector):
304  ampName = amp.getName()
305  # covAstier: (i, j, var (cov[0,0]), cov, npix)
306  doRealSpace = self.config.covAstierRealSpace
307  muDiff, varDiff, covAstier = self.measureMeanVarCov(exp1, exp2, region=amp.getBBox(),
308  covAstierRealSpace=doRealSpace)
309  datasetPtc.rawExpTimes[ampName].append(expTime)
310  datasetPtc.rawMeans[ampName].append(muDiff)
311  datasetPtc.rawVars[ampName].append(varDiff)
312 
313  if np.isnan(muDiff) or np.isnan(varDiff) or (covAstier is None):
314  msg = (f"NaN mean or var, or None cov in amp {ampName} in exposure pair {expId1},"
315  f" {expId2} of detector {detNum}.")
316  self.log.warn(msg)
317  nAmpsNan += 1
318  continue
319  tags = ['mu', 'i', 'j', 'var', 'cov', 'npix', 'ext', 'expTime', 'ampName']
320  if (muDiff <= self.config.minMeanSignal) or (muDiff >= self.config.maxMeanSignal):
321  continue
322 
323  tupleRows += [(muDiff, ) + covRow + (ampNumber, expTime, ampName) for covRow in covAstier]
324  if nAmpsNan == len(ampNames):
325  msg = f"NaN mean in all amps of exposure pair {expId1}, {expId2} of detector {detNum}."
326  self.log.warn(msg)
327  continue
328  allTags += tags
329  tupleRecords += tupleRows
330  covariancesWithTags = np.core.records.fromrecords(tupleRecords, names=allTags)
331 
332  if self.config.ptcFitType in ["FULLCOVARIANCE", ]:
333  # Calculate covariances and fit them, including the PTC, to Astier+19 full model (Eq. 20)
334  datasetPtc = self.fitCovariancesAstier(datasetPtc, covariancesWithTags)
335  elif self.config.ptcFitType in ["EXPAPPROXIMATION", "POLYNOMIAL"]:
336  # Fit the PTC to a polynomial or to Astier+19 exponential approximation (Eq. 16)
337  # Fill up PhotonTransferCurveDataset object.
338  datasetPtc = self.fitPtc(datasetPtc, self.config.ptcFitType)
339 
340  detName = detector.getName()
341  now = datetime.datetime.utcnow()
342  calibDate = now.strftime("%Y-%m-%d")
343  butler = dataRef.getButler()
344 
345  datasetPtc.updateMetadata(setDate=True, camera=camera, detector=detector)
346 
 347  # Fit a polynomial to calculate non-linearity and persist the linearizer.
348  if self.config.doCreateLinearizer:
349  # Fit (non)linearity of signal vs time curve.
350  # Fill up PhotonTransferCurveDataset object.
351  # Fill up array for LUT linearizer (tableArray).
352  # Produce coefficients for Polynomial and Squared linearizers.
353  # Build linearizer objects.
354  dimensions = {'camera': camera.getName(), 'detector': detector.getId()}
355  linearityResults = self.linearity.run(datasetPtc, camera, dimensions)
356  linearizer = linearityResults.outputLinearizer
357 
358  self.log.info("Writing linearizer:")
359  butler.put(linearizer, datasetType='Linearizer', dataId={'detector': detNum,
360  'detectorName': detName, 'calibDate': calibDate})
361 
 362  self.log.info("Writing PTC data.")
363  butler.put(datasetPtc, datasetType='photonTransferCurveDataset', dataId={'detector': detNum,
364  'detectorName': detName, 'calibDate': calibDate})
365 
366  return pipeBase.Struct(exitStatus=0)
367 
368  def makePairs(self, dataRefList):
369  """Produce a list of flat pairs indexed by exposure time.
370 
371  Parameters
372  ----------
 373  dataRefList : `list` [`lsst.daf.persistence.ButlerDataRef`]
374  Data references for exposures for detectors to process.
375 
 376  Returns
 377  -------
 378  flatPairs : `dict` [`float`, `list` [`lsst.afw.image.exposure.exposure.ExposureF`]]
379  Dictionary that groups flat-field exposures that have the same exposure time (seconds).
380 
381  Notes
382  -----
383  We use the difference of one pair of flat-field images taken at the same exposure time when
384  calculating the PTC to reduce Fixed Pattern Noise. If there are > 2 flat-field images with the
385  same exposure time, the first two are kept and the rest discarded.
386  """
387 
388  # Organize exposures by observation date.
389  expDict = {}
390  for dataRef in dataRefList:
391  try:
392  tempFlat = dataRef.get("postISRCCD")
393  except RuntimeError:
394  self.log.warn("postISR exposure could not be retrieved. Ignoring flat.")
395  continue
396  expDate = tempFlat.getInfo().getVisitInfo().getDate().get()
397  expDict.setdefault(expDate, tempFlat)
398  sortedExps = {k: expDict[k] for k in sorted(expDict)}
399 
400  flatPairs = {}
401  for exp in sortedExps:
402  tempFlat = sortedExps[exp]
403  expTime = tempFlat.getInfo().getVisitInfo().getExposureTime()
404  listAtExpTime = flatPairs.setdefault(expTime, [])
405  if len(listAtExpTime) < 2:
406  listAtExpTime.append(tempFlat)
 407  else:
 408  self.log.warn(f"More than 2 exposures found at expTime {expTime}. Dropping exposure "
 409  f"{tempFlat}.")
410 
 411  for (key, value) in list(flatPairs.items()):  # copy so entries can be popped safely
412  if len(value) < 2:
413  flatPairs.pop(key)
414  self.log.warn(f"Only one exposure found at expTime {key}. Dropping exposure {value}.")
415  return flatPairs
416 
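# A toy illustration (fabricated exposure names and times, no butler involved) of the
# grouping rule documented above: keep at most two flats per exposure time and drop
# exposure times without a complete pair.
exposures = [("expA", 1.0), ("expB", 1.0), ("expC", 2.0),
             ("expD", 2.0), ("expE", 2.0), ("expF", 4.0)]
flatPairs = {}
for name, expTime in exposures:
    listAtExpTime = flatPairs.setdefault(expTime, [])
    if len(listAtExpTime) < 2:
        listAtExpTime.append(name)   # the third 2.0 s exposure ("expE") is ignored
flatPairs = {t: exps for t, exps in flatPairs.items() if len(exps) == 2}
print(flatPairs)  # {1.0: ['expA', 'expB'], 2.0: ['expC', 'expD']}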
417  def fitCovariancesAstier(self, dataset, covariancesWithTagsArray):
418  """Fit measured flat covariances to full model in Astier+19.
419 
420  Parameters
421  ----------
422  dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
423  The dataset containing information such as the means, variances and exposure times.
424 
425  covariancesWithTagsArray : `numpy.recarray`
426  Tuple with at least (mu, cov, var, i, j, npix), where:
 427  mu : 0.5*(mu1 + mu2), where:
428  mu1: mean value of flat1
429  mu2: mean value of flat2
430  cov: covariance value at lag(i, j)
431  var: variance(covariance value at lag(0, 0))
432  i: lag dimension
433  j: lag dimension
434  npix: number of pixels used for covariance calculation.
435 
436  Returns
437  -------
 438  dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
 439  This is the same dataset as the input parameter, but it has been modified
 440  to include information such as the fit vectors and the fit parameters. See
 441  the class `PhotonTransferCurveDataset`.
442  """
443 
444  covFits, covFitsNoB = fitData(covariancesWithTagsArray, maxMu=self.config.maxMeanSignal,
445  r=self.config.maximumRangeCovariancesAstier,
446  nSigmaFullFit=self.config.sigmaClipFullFitCovariancesAstier,
447  maxIterFullFit=self.config.maxIterFullFitCovariancesAstier)
448 
449  dataset = self.getOutputPtcDataCovAstier(dataset, covFits, covFitsNoB)
450 
451  return dataset
452 
453  def getOutputPtcDataCovAstier(self, dataset, covFits, covFitsNoB):
 454  """Get output data for the PhotonTransferCurveDataset from CovFit objects.
455 
456  Parameters
457  ----------
458  dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
459  The dataset containing information such as the means, variances and exposure times.
460 
461  covFits: `dict`
462  Dictionary of CovFit objects, with amp names as keys.
463 
464  covFitsNoB : `dict`
465  Dictionary of CovFit objects, with amp names as keys, and 'b=0' in Eq. 20 of Astier+19.
466 
467  Returns
468  -------
469  dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
 470  This is the same dataset as the input parameter, but it has been modified
 471  to include extra information such as the mask 1D array, gains, readout noise, measured signal,
 472  measured variance, modeled variance, and the a and b coefficient matrices (see Astier+19) per
 473  amplifier. See the class `PhotonTransferCurveDataset`.
474  """
475  assert(len(covFits) == len(covFitsNoB))
476 
477  for i, amp in enumerate(dataset.ampNames):
478  lenInputTimes = len(dataset.rawExpTimes[amp])
479  # Not used when ptcFitType is 'FULLCOVARIANCE'
480  dataset.ptcFitPars[amp] = np.nan
481  dataset.ptcFitParsError[amp] = np.nan
482  dataset.ptcFitChiSq[amp] = np.nan
483  if amp in covFits:
484  fit = covFits[amp]
485  fitNoB = covFitsNoB[amp]
486  # Save full covariances, covariances models, and their weights
487  dataset.covariances[amp] = fit.cov
488  dataset.covariancesModel[amp] = fit.evalCovModel()
489  dataset.covariancesSqrtWeights[amp] = fit.sqrtW
490  dataset.aMatrix[amp] = fit.getA()
491  dataset.bMatrix[amp] = fit.getB()
492  dataset.covariancesNoB[amp] = fitNoB.cov
493  dataset.covariancesModelNoB[amp] = fitNoB.evalCovModel()
494  dataset.covariancesSqrtWeightsNoB[amp] = fitNoB.sqrtW
495  dataset.aMatrixNoB[amp] = fitNoB.getA()
496 
497  (meanVecFinal, varVecFinal, varVecModel,
498  wc, varMask) = fit.getFitData(0, 0, divideByMu=False, returnMasked=True)
499  gain = fit.getGain()
500  dataset.expIdMask[amp] = varMask
501  dataset.gain[amp] = gain
502  dataset.gainErr[amp] = fit.getGainErr()
503  dataset.noise[amp] = np.sqrt(fit.getRon())
504  dataset.noiseErr[amp] = fit.getRonErr()
505 
506  padLength = lenInputTimes - len(varVecFinal)
507  dataset.finalVars[amp] = np.pad(varVecFinal/(gain**2), (0, padLength), 'constant',
508  constant_values=np.nan)
509  dataset.finalModelVars[amp] = np.pad(varVecModel/(gain**2), (0, padLength), 'constant',
510  constant_values=np.nan)
511  dataset.finalMeans[amp] = np.pad(meanVecFinal/gain, (0, padLength), 'constant',
512  constant_values=np.nan)
513  else:
514  # Bad amp
515  # Entries need to have proper dimensions so read/write with astropy.Table works.
516  matrixSide = self.config.maximumRangeCovariancesAstier
517  nanMatrix = np.full((matrixSide, matrixSide), np.nan)
518  listNanMatrix = np.full((lenInputTimes, matrixSide, matrixSide), np.nan)
519 
520  dataset.covariances[amp] = listNanMatrix
521  dataset.covariancesModel[amp] = listNanMatrix
522  dataset.covariancesSqrtWeights[amp] = listNanMatrix
523  dataset.aMatrix[amp] = nanMatrix
524  dataset.bMatrix[amp] = nanMatrix
525  dataset.covariancesNoB[amp] = listNanMatrix
526  dataset.covariancesModelNoB[amp] = listNanMatrix
527  dataset.covariancesSqrtWeightsNoB[amp] = listNanMatrix
528  dataset.aMatrixNoB[amp] = nanMatrix
529 
530  dataset.expIdMask[amp] = np.repeat(np.nan, lenInputTimes)
531  dataset.gain[amp] = np.nan
532  dataset.gainErr[amp] = np.nan
533  dataset.noise[amp] = np.nan
534  dataset.noiseErr[amp] = np.nan
535  dataset.finalVars[amp] = np.repeat(np.nan, lenInputTimes)
536  dataset.finalModelVars[amp] = np.repeat(np.nan, lenInputTimes)
537  dataset.finalMeans[amp] = np.repeat(np.nan, lenInputTimes)
538 
539  return dataset
540 
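# A small sketch (made-up numbers) of the NaN padding used above: per-amp vectors that were
# shortened by masking are padded back to the number of input pairs so that every column
# written to an astropy Table has the same length.
import numpy as np

varVecFinal = np.array([10.5, 20.1, 41.0])   # only the unmasked points survive the fit
lenInputTimes = 5                            # number of input flat pairs
padded = np.pad(varVecFinal, (0, lenInputTimes - len(varVecFinal)),
                'constant', constant_values=np.nan)
print(padded)  # [10.5 20.1 41.   nan  nan]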
541  def measureMeanVarCov(self, exposure1, exposure2, region=None, covAstierRealSpace=False):
542  """Calculate the mean of each of two exposures and the variance and covariance of their difference.
543 
544  The variance is calculated via afwMath, and the covariance via the methods in Astier+19 (appendix A).
545  In theory, var = covariance[0,0]. This should be validated, and in the future, we may decide to just
546  keep one (covariance).
547 
548  Parameters
549  ----------
550  exposure1 : `lsst.afw.image.exposure.exposure.ExposureF`
551  First exposure of flat field pair.
552 
553  exposure2 : `lsst.afw.image.exposure.exposure.ExposureF`
554  Second exposure of flat field pair.
555 
556  region : `lsst.geom.Box2I`, optional
 557  Region of each exposure in which to perform the calculations (e.g., an amplifier).
558 
559  covAstierRealSpace : `bool`, optional
 560  Should the covariances in Astier+19 be calculated in real space or via FFT?
561  See Appendix A of Astier+19.
562 
563  Returns
564  -------
565  mu : `float` or `NaN`
 566  0.5*(mu1 + mu2), where mu1 and mu2 are the clipped means of the regions in
 567  both exposures. If either mu1 or mu2 is NaN, the returned value is NaN.
568 
569  varDiff : `float` or `NaN`
 570  Half of the clipped variance of the difference of the regions in the two input
 571  exposures. If either mu1 or mu2 is NaN, the returned value is NaN.
572 
573  covDiffAstier : `list` or `NaN`
574  List with tuples of the form (dx, dy, var, cov, npix), where:
575  dx : `int`
576  Lag in x
577  dy : `int`
578  Lag in y
579  var : `float`
580  Variance at (dx, dy).
581  cov : `float`
582  Covariance at (dx, dy).
583  nPix : `int`
584  Number of pixel pairs used to evaluate var and cov.
 585  If either mu1 or mu2 is NaN, the returned value is NaN.
586  """
587 
588  if region is not None:
589  im1Area = exposure1.maskedImage[region]
590  im2Area = exposure2.maskedImage[region]
591  else:
592  im1Area = exposure1.maskedImage
593  im2Area = exposure2.maskedImage
594 
595  if self.config.binSize > 1:
596  im1Area = afwMath.binImage(im1Area, self.config.binSize)
597  im2Area = afwMath.binImage(im2Area, self.config.binSize)
598 
599  im1MaskVal = exposure1.getMask().getPlaneBitMask(self.config.maskNameList)
600  im1StatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
601  self.config.nIterSigmaClipPtc,
602  im1MaskVal)
603  im1StatsCtrl.setNanSafe(True)
604  im1StatsCtrl.setAndMask(im1MaskVal)
605 
606  im2MaskVal = exposure2.getMask().getPlaneBitMask(self.config.maskNameList)
607  im2StatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
608  self.config.nIterSigmaClipPtc,
609  im2MaskVal)
610  im2StatsCtrl.setNanSafe(True)
611  im2StatsCtrl.setAndMask(im2MaskVal)
612 
613  # Clipped mean of images; then average of mean.
614  mu1 = afwMath.makeStatistics(im1Area, afwMath.MEANCLIP, im1StatsCtrl).getValue()
615  mu2 = afwMath.makeStatistics(im2Area, afwMath.MEANCLIP, im2StatsCtrl).getValue()
616  if np.isnan(mu1) or np.isnan(mu2):
617  return np.nan, np.nan, None
618  mu = 0.5*(mu1 + mu2)
619 
620  # Take difference of pairs
621  # symmetric formula: diff = (mu2*im1-mu1*im2)/(0.5*(mu1+mu2))
622  temp = im2Area.clone()
623  temp *= mu1
624  diffIm = im1Area.clone()
625  diffIm *= mu2
626  diffIm -= temp
627  diffIm /= mu
628 
629  diffImMaskVal = diffIm.getMask().getPlaneBitMask(self.config.maskNameList)
630  diffImStatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
631  self.config.nIterSigmaClipPtc,
632  diffImMaskVal)
633  diffImStatsCtrl.setNanSafe(True)
634  diffImStatsCtrl.setAndMask(diffImMaskVal)
635 
636  varDiff = 0.5*(afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, diffImStatsCtrl).getValue())
637 
638  # Get the mask and identify good pixels as '1', and the rest as '0'.
639  w1 = np.where(im1Area.getMask().getArray() == 0, 1, 0)
640  w2 = np.where(im2Area.getMask().getArray() == 0, 1, 0)
641 
642  w12 = w1*w2
643  wDiff = np.where(diffIm.getMask().getArray() == 0, 1, 0)
644  w = w12*wDiff
645 
646  maxRangeCov = self.config.maximumRangeCovariancesAstier
647  if covAstierRealSpace:
648  covDiffAstier = computeCovDirect(diffIm.getImage().getArray(), w, maxRangeCov)
649  else:
650  shapeDiff = diffIm.getImage().getArray().shape
651  fftShape = (fftSize(shapeDiff[0] + maxRangeCov), fftSize(shapeDiff[1]+maxRangeCov))
652  c = CovFft(diffIm.getImage().getArray(), w, fftShape, maxRangeCov)
653  covDiffAstier = c.reportCovFft(maxRangeCov)
654 
655  return mu, varDiff, covDiffAstier
656 
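# A NumPy-only sketch (illustrative numbers) of the symmetric difference formed above,
# diff = (mu2*im1 - mu1*im2)/(0.5*(mu1 + mu2)): a shared fixed pattern and a small overall
# illumination mismatch between the two flats both cancel, leaving only the noise.
import numpy as np

rng = np.random.default_rng(7)
pattern = rng.poisson(10000, size=(100, 100)).astype(float)   # shared fixed pattern
im1 = pattern + rng.normal(0., 10., pattern.shape)
im2 = 1.02*(pattern + rng.normal(0., 10., pattern.shape))     # 2% brighter flat
mu1, mu2 = im1.mean(), im2.mean()
diff = (mu2*im1 - mu1*im2)/(0.5*(mu1 + mu2))
print(f"naive mean(im1 - im2) = {(im1 - im2).mean():.1f}")    # ~ -200 DN offset remains
print(f"symmetric mean(diff)  = {diff.mean():.3f}")           # ~ 0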
657  def computeCovDirect(self, diffImage, weightImage, maxRange):
658  """Compute covariances of diffImage in real space.
659 
660  For lags larger than ~25, it is slower than the FFT way.
661  Taken from https://github.com/PierreAstier/bfptc/
662 
663  Parameters
664  ----------
665  diffImage : `numpy.array`
666  Image to compute the covariance of.
667 
668  weightImage : `numpy.array`
669  Weight image of diffImage (1's and 0's for good and bad pixels, respectively).
670 
671  maxRange : `int`
672  Last index of the covariance to be computed.
673 
674  Returns
675  -------
676  outList : `list`
677  List with tuples of the form (dx, dy, var, cov, npix), where:
678  dx : `int`
679  Lag in x
680  dy : `int`
681  Lag in y
682  var : `float`
683  Variance at (dx, dy).
684  cov : `float`
685  Covariance at (dx, dy).
686  nPix : `int`
687  Number of pixel pairs used to evaluate var and cov.
688  """
689  outList = []
690  var = 0
691  # (dy,dx) = (0,0) has to be first
692  for dy in range(maxRange + 1):
693  for dx in range(0, maxRange + 1):
694  if (dx*dy > 0):
695  cov1, nPix1 = self.covDirectValue(diffImage, weightImage, dx, dy)
696  cov2, nPix2 = self.covDirectValue(diffImage, weightImage, dx, -dy)
697  cov = 0.5*(cov1 + cov2)
698  nPix = nPix1 + nPix2
699  else:
700  cov, nPix = self.covDirectValue(diffImage, weightImage, dx, dy)
701  if (dx == 0 and dy == 0):
702  var = cov
703  outList.append((dx, dy, var, cov, nPix))
704 
705  return outList
706 
707  def covDirectValue(self, diffImage, weightImage, dx, dy):
708  """Compute covariances of diffImage in real space at lag (dx, dy).
709 
710  Taken from https://github.com/PierreAstier/bfptc/ (c.f., appendix of Astier+19).
711 
712  Parameters
713  ----------
714  diffImage : `numpy.array`
715  Image to compute the covariance of.
716 
717  weightImage : `numpy.array`
718  Weight image of diffImage (1's and 0's for good and bad pixels, respectively).
719 
720  dx : `int`
721  Lag in x.
722 
723  dy : `int`
724  Lag in y.
725 
726  Returns
727  -------
728  cov : `float`
729  Covariance at (dx, dy)
730 
731  nPix : `int`
732  Number of pixel pairs used to evaluate var and cov.
733  """
734  (nCols, nRows) = diffImage.shape
735  # switching both signs does not change anything:
736  # it just swaps im1 and im2 below
737  if (dx < 0):
738  (dx, dy) = (-dx, -dy)
739  # now, we have dx >0. We have to distinguish two cases
740  # depending on the sign of dy
741  if dy >= 0:
742  im1 = diffImage[dy:, dx:]
743  w1 = weightImage[dy:, dx:]
744  im2 = diffImage[:nCols - dy, :nRows - dx]
745  w2 = weightImage[:nCols - dy, :nRows - dx]
746  else:
747  im1 = diffImage[:nCols + dy, dx:]
748  w1 = weightImage[:nCols + dy, dx:]
749  im2 = diffImage[-dy:, :nRows - dx]
750  w2 = weightImage[-dy:, :nRows - dx]
751  # use the same mask for all 3 calculations
752  wAll = w1*w2
753  # do not use mean() because weightImage=0 pixels would then count
754  nPix = wAll.sum()
755  im1TimesW = im1*wAll
756  s1 = im1TimesW.sum()/nPix
757  s2 = (im2*wAll).sum()/nPix
758  p = (im1TimesW*im2).sum()/nPix
759  cov = p - s1*s2
760 
761  return cov, nPix
762 
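# A quick numeric check (pure NumPy, uniform weights) of the weighted-covariance estimator
# used above, cov = <im1*im2> - <im1>*<im2>: at lag (0, 0) with every pixel unmasked it
# reduces to the plain image variance.
import numpy as np

rng = np.random.default_rng(3)
img = rng.normal(0., 5., size=(50, 60))
w = np.ones_like(img)

nPix = w.sum()
s1 = (img*w).sum()/nPix
s2 = (img*w).sum()/nPix
p = (img*img*w).sum()/nPix
print(np.isclose(p - s1*s2, img.var()))  # True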
763  @staticmethod
764  def _initialParsForPolynomial(order):
765  assert(order >= 2)
 766  pars = np.zeros(order, dtype=float)
767  pars[0] = 10
768  pars[1] = 1
769  pars[2:] = 0.0001
770  return pars
771 
772  @staticmethod
773  def _boundsForPolynomial(initialPars):
774  lowers = [np.NINF for p in initialPars]
775  uppers = [np.inf for p in initialPars]
776  lowers[1] = 0 # no negative gains
777  return (lowers, uppers)
778 
779  @staticmethod
780  def _boundsForAstier(initialPars):
781  lowers = [np.NINF for p in initialPars]
782  uppers = [np.inf for p in initialPars]
783  return (lowers, uppers)
784 
785  @staticmethod
786  def _getInitialGoodPoints(means, variances, maxDeviationPositive, maxDeviationNegative):
787  """Return a boolean array to mask bad points.
788 
789  A linear function has a constant ratio, so find the median
790  value of the ratios, and exclude the points that deviate
791  from that by more than a factor of maxDeviationPositive/negative.
792  Asymmetric deviations are supported as we expect the PTC to turn
793  down as the flux increases, but sometimes it anomalously turns
794  upwards just before turning over, which ruins the fits, so it
795  is wise to be stricter about restricting positive outliers than
796  negative ones.
797 
 798  If the thresholds are too high, points so bad that the fit will fail are included;
 799  if too low, the non-linear points will be excluded, biasing the NL fit."""
800  ratios = [b/a for (a, b) in zip(means, variances)]
801  medianRatio = np.median(ratios)
802  ratioDeviations = [(r/medianRatio)-1 for r in ratios]
803 
804  # so that it doesn't matter if the deviation is expressed as positive or negative
805  maxDeviationPositive = abs(maxDeviationPositive)
806  maxDeviationNegative = -1. * abs(maxDeviationNegative)
807 
808  goodPoints = np.array([True if (r < maxDeviationPositive and r > maxDeviationNegative)
809  else False for r in ratioDeviations])
810  return goodPoints
811 
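# A numeric illustration (made-up points) of the ratio test above, using the task's default
# thresholds of +0.12 and -0.25: the last point has turned over and is rejected.
import numpy as np

means = np.array([100., 200., 400., 800., 1600.])
variances = np.array([105., 210., 420., 830., 1200.])
ratios = variances/means
deviations = ratios/np.median(ratios) - 1.
goodPoints = (deviations < 0.12) & (deviations > -0.25)
print(goodPoints)  # [ True  True  True  True False]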
812  def _makeZeroSafe(self, array, warn=True, substituteValue=1e-9):
 813  """Substitute zeros in the input array with `substituteValue`, optionally warning."""
814  nBad = Counter(array)[0]
815  if nBad == 0:
816  return array
817 
818  if warn:
819  msg = f"Found {nBad} zeros in array at elements {[x for x in np.where(array==0)[0]]}"
820  self.log.warn(msg)
821 
822  array[array == 0] = substituteValue
823  return array
824 
825  def fitPtc(self, dataset, ptcFitType):
 826  """Fit the photon transfer curve to a polynomial or to the Astier+19 approximation.
827 
828  Fit the photon transfer curve with either a polynomial of the order
829  specified in the task config, or using the Astier approximation.
830 
831  Sigma clipping is performed iteratively for the fit, as well as an
832  initial clipping of data points that are more than
833  config.initialNonLinearityExclusionThreshold away from lying on a
834  straight line. This other step is necessary because the photon transfer
835  curve turns over catastrophically at very high flux (because saturation
836  drops the variance to ~0) and these far outliers cause the initial fit
837  to fail, meaning the sigma cannot be calculated to perform the
838  sigma-clipping.
839 
840  Parameters
841  ----------
842  dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
 843  The dataset containing the means, variances and exposure times.
844 
845  ptcFitType : `str`
846  Fit a 'POLYNOMIAL' (degree: 'polynomialFitDegree') or
847  'EXPAPPROXIMATION' (Eq. 16 of Astier+19) to the PTC
848 
849  Returns
850  -------
 851  dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
 852  This is the same dataset as the input parameter, but it has been modified
 853  to include information such as the fit vectors and the fit parameters. See
 854  the class `PhotonTransferCurveDataset`.
855  """
856 
857  matrixSide = self.config.maximumRangeCovariancesAstier
858  nanMatrix = np.empty((matrixSide, matrixSide))
859  nanMatrix[:] = np.nan
860 
861  for amp in dataset.ampNames:
862  lenInputTimes = len(dataset.rawExpTimes[amp])
863  listNanMatrix = np.empty((lenInputTimes, matrixSide, matrixSide))
864  listNanMatrix[:] = np.nan
865 
866  dataset.covariances[amp] = listNanMatrix
867  dataset.covariancesModel[amp] = listNanMatrix
868  dataset.covariancesSqrtWeights[amp] = listNanMatrix
869  dataset.aMatrix[amp] = nanMatrix
870  dataset.bMatrix[amp] = nanMatrix
871  dataset.covariancesNoB[amp] = listNanMatrix
872  dataset.covariancesModelNoB[amp] = listNanMatrix
873  dataset.covariancesSqrtWeightsNoB[amp] = listNanMatrix
874  dataset.aMatrixNoB[amp] = nanMatrix
875 
876  def errFunc(p, x, y):
877  return ptcFunc(p, x) - y
878 
879  sigmaCutPtcOutliers = self.config.sigmaCutPtcOutliers
880  maxIterationsPtcOutliers = self.config.maxIterationsPtcOutliers
881 
882  for i, ampName in enumerate(dataset.ampNames):
883  timeVecOriginal = np.array(dataset.rawExpTimes[ampName])
884  meanVecOriginal = np.array(dataset.rawMeans[ampName])
885  varVecOriginal = np.array(dataset.rawVars[ampName])
886  varVecOriginal = self._makeZeroSafe(varVecOriginal)
887 
888  mask = ((meanVecOriginal >= self.config.minMeanSignal) &
889  (meanVecOriginal <= self.config.maxMeanSignal))
890 
891  goodPoints = self._getInitialGoodPoints(meanVecOriginal, varVecOriginal,
892  self.config.initialNonLinearityExclusionThresholdPositive,
893  self.config.initialNonLinearityExclusionThresholdNegative)
894  if not (mask.any() and goodPoints.any()):
 895  msg = (f"\nSERIOUS: All points in either mask: {mask} or goodPoints: {goodPoints} are bad. "
896  f"Setting {ampName} to BAD.")
897  self.log.warn(msg)
898  # The first and second parameters of initial fit are discarded (bias and gain)
899  # for the final NL coefficients
900  dataset.badAmps.append(ampName)
901  dataset.expIdMask[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
902  dataset.gain[ampName] = np.nan
903  dataset.gainErr[ampName] = np.nan
904  dataset.noise[ampName] = np.nan
905  dataset.noiseErr[ampName] = np.nan
906  dataset.ptcFitPars[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1) if
907  ptcFitType in ["POLYNOMIAL", ] else np.repeat(np.nan, 3))
908  dataset.ptcFitParsError[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1) if
909  ptcFitType in ["POLYNOMIAL", ] else np.repeat(np.nan, 3))
910  dataset.ptcFitChiSq[ampName] = np.nan
911  dataset.finalVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
912  dataset.finalModelVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
913  dataset.finalMeans[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
914  continue
915 
916  mask = mask & goodPoints
917 
918  if ptcFitType == 'EXPAPPROXIMATION':
919  ptcFunc = funcAstier
920  parsIniPtc = [-1e-9, 1.0, 10.] # a00, gain, noise
921  bounds = self._boundsForAstier(parsIniPtc)
922  if ptcFitType == 'POLYNOMIAL':
923  ptcFunc = funcPolynomial
924  parsIniPtc = self._initialParsForPolynomial(self.config.polynomialFitDegree + 1)
925  bounds = self._boundsForPolynomial(parsIniPtc)
926 
927  # Before bootstrap fit, do an iterative fit to get rid of outliers
928  count = 1
929  while count <= maxIterationsPtcOutliers:
930  # Note that application of the mask actually shrinks the array
931  # to size rather than setting elements to zero (as we want) so
932  # always update mask itself and re-apply to the original data
933  meanTempVec = meanVecOriginal[mask]
934  varTempVec = varVecOriginal[mask]
935  res = least_squares(errFunc, parsIniPtc, bounds=bounds, args=(meanTempVec, varTempVec))
936  pars = res.x
937 
938  # change this to the original from the temp because the masks are ANDed
939  # meaning once a point is masked it's always masked, and the masks must
940  # always be the same length for broadcasting
941  sigResids = (varVecOriginal - ptcFunc(pars, meanVecOriginal))/np.sqrt(varVecOriginal)
942  newMask = np.array([True if np.abs(r) < sigmaCutPtcOutliers else False for r in sigResids])
943  mask = mask & newMask
944  if not (mask.any() and newMask.any()):
945  msg = (f"\nSERIOUS: All points in either mask: {mask} or newMask: {newMask} are bad. "
946  f"Setting {ampName} to BAD.")
947  self.log.warn(msg)
948  # The first and second parameters of initial fit are discarded (bias and gain)
949  # for the final NL coefficients
950  dataset.badAmps.append(ampName)
951  dataset.expIdMask[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
952  dataset.gain[ampName] = np.nan
953  dataset.gainErr[ampName] = np.nan
954  dataset.noise[ampName] = np.nan
955  dataset.noiseErr[ampName] = np.nan
956  dataset.ptcFitPars[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1)
957  if ptcFitType in ["POLYNOMIAL", ] else
958  np.repeat(np.nan, 3))
959  dataset.ptcFitParsError[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1)
960  if ptcFitType in ["POLYNOMIAL", ] else
961  np.repeat(np.nan, 3))
962  dataset.ptcFitChiSq[ampName] = np.nan
963  dataset.finalVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
964  dataset.finalModelVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
965  dataset.finalMeans[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
966  break
967  nDroppedTotal = Counter(mask)[False]
968  self.log.debug(f"Iteration {count}: discarded {nDroppedTotal} points in total for {ampName}")
969  count += 1
970  # objects should never shrink
971  assert (len(mask) == len(timeVecOriginal) == len(meanVecOriginal) == len(varVecOriginal))
972 
973  if not (mask.any() and newMask.any()):
974  continue
975  dataset.expIdMask[ampName] = mask # store the final mask
976  parsIniPtc = pars
977  meanVecFinal = meanVecOriginal[mask]
978  varVecFinal = varVecOriginal[mask]
979 
980  if Counter(mask)[False] > 0:
981  self.log.info((f"Number of points discarded in PTC of amplifier {ampName}:" +
982  f" {Counter(mask)[False]} out of {len(meanVecOriginal)}"))
983 
984  if (len(meanVecFinal) < len(parsIniPtc)):
985  msg = (f"\nSERIOUS: Not enough data points ({len(meanVecFinal)}) compared to the number of"
 986  f" parameters of the PTC model ({len(parsIniPtc)}). Setting {ampName} to BAD.")
987  self.log.warn(msg)
988  # The first and second parameters of initial fit are discarded (bias and gain)
989  # for the final NL coefficients
990  dataset.badAmps.append(ampName)
991  dataset.expIdMask[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
992  dataset.gain[ampName] = np.nan
993  dataset.gainErr[ampName] = np.nan
994  dataset.noise[ampName] = np.nan
995  dataset.noiseErr[ampName] = np.nan
996  dataset.ptcFitPars[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1) if
997  ptcFitType in ["POLYNOMIAL", ] else np.repeat(np.nan, 3))
998  dataset.ptcFitParsError[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1) if
999  ptcFitType in ["POLYNOMIAL", ] else np.repeat(np.nan, 3))
1000  dataset.ptcFitChiSq[ampName] = np.nan
1001  dataset.finalVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
1002  dataset.finalModelVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
1003  dataset.finalMeans[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
1004  continue
1005 
1006  # Fit the PTC
1007  if self.config.doFitBootstrap:
1008  parsFit, parsFitErr, reducedChiSqPtc = fitBootstrap(parsIniPtc, meanVecFinal,
1009  varVecFinal, ptcFunc,
1010  weightsY=1./np.sqrt(varVecFinal))
1011  else:
1012  parsFit, parsFitErr, reducedChiSqPtc = fitLeastSq(parsIniPtc, meanVecFinal,
1013  varVecFinal, ptcFunc,
1014  weightsY=1./np.sqrt(varVecFinal))
1015  dataset.ptcFitPars[ampName] = parsFit
1016  dataset.ptcFitParsError[ampName] = parsFitErr
1017  dataset.ptcFitChiSq[ampName] = reducedChiSqPtc
1018  # Masked variances (measured and modeled) and means. Need to pad the array so astropy.Table does
1019  # not crash (the mask may vary per amp).
1020  padLength = len(dataset.rawExpTimes[ampName]) - len(varVecFinal)
1021  dataset.finalVars[ampName] = np.pad(varVecFinal, (0, padLength), 'constant',
1022  constant_values=np.nan)
1023  dataset.finalModelVars[ampName] = np.pad(ptcFunc(parsFit, meanVecFinal), (0, padLength),
1024  'constant', constant_values=np.nan)
1025  dataset.finalMeans[ampName] = np.pad(meanVecFinal, (0, padLength), 'constant',
1026  constant_values=np.nan)
1027 
1028  if ptcFitType == 'EXPAPPROXIMATION':
1029  ptcGain = parsFit[1]
1030  ptcGainErr = parsFitErr[1]
1031  ptcNoise = np.sqrt(np.fabs(parsFit[2]))
1032  ptcNoiseErr = 0.5*(parsFitErr[2]/np.fabs(parsFit[2]))*np.sqrt(np.fabs(parsFit[2]))
1033  if ptcFitType == 'POLYNOMIAL':
1034  ptcGain = 1./parsFit[1]
1035  ptcGainErr = np.fabs(1./parsFit[1])*(parsFitErr[1]/parsFit[1])
1036  ptcNoise = np.sqrt(np.fabs(parsFit[0]))*ptcGain
1037  ptcNoiseErr = (0.5*(parsFitErr[0]/np.fabs(parsFit[0]))*(np.sqrt(np.fabs(parsFit[0]))))*ptcGain
1038  dataset.gain[ampName] = ptcGain
1039  dataset.gainErr[ampName] = ptcGainErr
1040  dataset.noise[ampName] = ptcNoise
1041  dataset.noiseErr[ampName] = ptcNoiseErr
1042  if not len(dataset.ptcFitType) == 0:
1043  dataset.ptcFitType = ptcFitType
1044  if len(dataset.badAmps) == 0:
1045  dataset.badAmps = np.repeat(np.nan, len(list(dataset.rawExpTimes.values())[0]))
1046 
1047  return dataset
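# A standalone sketch of the EXPAPPROXIMATION model (Eq. 16 of Astier+19) that fitPtc fits,
# written out directly here rather than via the imported funcAstier helper; the simulated
# a00, gain, and noise values are illustrative.
import numpy as np
from scipy.optimize import least_squares

def ptcAstierEq16(pars, mu):
    # var(mu) = (1/(2*a00*g**2))*(exp(2*a00*g*mu) - 1) + n/g**2
    a00, gain, noise = pars
    return 0.5/(a00*gain*gain)*(np.exp(2*a00*gain*mu) - 1.) + noise/(gain*gain)

a00True, gainTrue, noiseTrue = -1.5e-6, 1.4, 25.0
mu = np.linspace(100., 60000., 30)
var = ptcAstierEq16([a00True, gainTrue, noiseTrue], mu)

res = least_squares(lambda p: ptcAstierEq16(p, mu) - var,
                    x0=[-1e-6, 1.2, 20.])  # start near the truth for this toy example
print(res.x)  # approximately [a00True, gainTrue, noiseTrue]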