lsst.cp.pipe  20.0.0-20-g0a07b97+25538c912f
ptc.py
1 # This file is part of cp_pipe.
2 #
3 # Developed for the LSST Data Management System.
4 # This product includes software developed by the LSST Project
5 # (https://www.lsst.org).
6 # See the COPYRIGHT file at the top-level directory of this distribution
7 # for details of code ownership.
8 #
9 # This program is free software: you can redistribute it and/or modify
10 # it under the terms of the GNU General Public License as published by
11 # the Free Software Foundation, either version 3 of the License, or
12 # (at your option) any later version.
13 #
14 # This program is distributed in the hope that it will be useful,
15 # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 # GNU General Public License for more details.
18 #
19 # You should have received a copy of the GNU General Public License
20 # along with this program. If not, see <https://www.gnu.org/licenses/>.
21 #
22 import numpy as np
23 import matplotlib.pyplot as plt
24 from collections import Counter
25 
26 import lsst.afw.math as afwMath
27 import lsst.pex.config as pexConfig
28 import lsst.pipe.base as pipeBase
29 from .utils import (fitLeastSq, fitBootstrap, funcPolynomial, funcAstier)
30 from scipy.optimize import least_squares
31 
32 import datetime
33 
34 from .astierCovPtcUtils import (fftSize, CovFft, computeCovDirect, fitData)
35 from .linearity import LinearitySolveTask
36 from .photodiode import getBOTphotodiodeData
37 
38 from lsst.pipe.tasks.getRepositoryData import DataRefListRunner
39 from lsst.ip.isr import PhotonTransferCurveDataset
40 
41 __all__ = ['MeasurePhotonTransferCurveTask',
42  'MeasurePhotonTransferCurveTaskConfig']
43 
44 
45 class MeasurePhotonTransferCurveTaskConfig(pexConfig.Config):
46  """Config class for photon transfer curve measurement task"""
47  ccdKey = pexConfig.Field(
48  dtype=str,
49  doc="The key by which to pull a detector from a dataId, e.g. 'ccd' or 'detector'.",
50  default='ccd',
51  )
52  ptcFitType = pexConfig.ChoiceField(
53  dtype=str,
54  doc="Fit PTC to Eq. 16 or Eq. 20 of Astier+19, or to a polynomial.",
55  default="POLYNOMIAL",
56  allowed={
57  "POLYNOMIAL": "n-degree polynomial (use 'polynomialFitDegree' to set 'n').",
58  "EXPAPPROXIMATION": "Approximation in Astier+19 (Eq. 16).",
59  "FULLCOVARIANCE": "Full covariances model in Astier+19 (Eq. 20)"
60  }
61  )
62  sigmaClipFullFitCovariancesAstier = pexConfig.Field(
63  dtype=float,
64  doc="Sigma clip for the full model fit when ptcFitType is FULLCOVARIANCE.",
65  default=5.0,
66  )
67  maxIterFullFitCovariancesAstier = pexConfig.Field(
68  dtype=int,
69  doc="Maximum number of iterations in full model fit for FULLCOVARIANCE ptcFitType",
70  default=3,
71  )
72  maximumRangeCovariancesAstier = pexConfig.Field(
73  dtype=int,
74  doc="Maximum range of covariances as in Astier+19",
75  default=8,
76  )
77  covAstierRealSpace = pexConfig.Field(
78  dtype=bool,
79  doc="Calculate covariances in real space or via FFT? (see appendix A of Astier+19).",
80  default=False,
81  )
82  polynomialFitDegree = pexConfig.Field(
83  dtype=int,
84  doc="Degree of polynomial to fit the PTC, when 'ptcFitType'=POLYNOMIAL.",
85  default=3,
86  )
87  linearity = pexConfig.ConfigurableField(
88  target=LinearitySolveTask,
89  doc="Task to solve the linearity."
90  )
91 
92  doCreateLinearizer = pexConfig.Field(
93  dtype=bool,
94  doc="Calculate non-linearity and persist linearizer?",
95  default=False,
96  )
97 
98  binSize = pexConfig.Field(
99  dtype=int,
100  doc="Bin the image by this factor in both dimensions.",
101  default=1,
102  )
103  minMeanSignal = pexConfig.Field(
104  dtype=float,
105  doc="Minimum value (inclusive) of mean signal (in DN) to consider in the fits.",
106  default=0,
107  )
108  maxMeanSignal = pexConfig.Field(
109  dtype=float,
110  doc="Maximum value (inclusive) of mean signal (in DN) to consider in the fits.",
111  default=9e6,
112  )
113  initialNonLinearityExclusionThresholdPositive = pexConfig.RangeField(
114  dtype=float,
115  doc="Initially exclude data points with a variance that is more than a factor of this away from being"
116  " linear in the positive direction, from the PTC fit. Note that these points will also be"
117  " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
118  " to allow an accurate determination of the sigmas for said iterative fit.",
119  default=0.12,
120  min=0.0,
121  max=1.0,
122  )
123  initialNonLinearityExclusionThresholdNegative = pexConfig.RangeField(
124  dtype=float,
125  doc="Initially exclude data points with a variance that is more than a factor of this away from being"
126  " linear in the negative direction, from the PTC fit. Note that these points will also be"
127  " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
128  " to allow an accurate determination of the sigmas for said iterative fit.",
129  default=0.25,
130  min=0.0,
131  max=1.0,
132  )
133  sigmaCutPtcOutliers = pexConfig.Field(
134  dtype=float,
135  doc="Sigma cut for outlier rejection in PTC.",
136  default=5.0,
137  )
138  maskNameList = pexConfig.ListField(
139  dtype=str,
140  doc="Mask list to exclude from statistics calculations.",
141  default=['SUSPECT', 'BAD', 'NO_DATA'],
142  )
143  nSigmaClipPtc = pexConfig.Field(
144  dtype=float,
145  doc="Sigma cut for afwMath.StatisticsControl()",
146  default=5.5,
147  )
148  nIterSigmaClipPtc = pexConfig.Field(
149  dtype=int,
150  doc="Number of sigma-clipping iterations for afwMath.StatisticsControl()",
151  default=1,
152  )
153  maxIterationsPtcOutliers = pexConfig.Field(
154  dtype=int,
155  doc="Maximum number of iterations for outlier rejection in PTC.",
156  default=2,
157  )
158  doFitBootstrap = pexConfig.Field(
159  dtype=bool,
160  doc="Use bootstrap for the PTC fit parameters and errors?",
161  default=False,
162  )
163  doPhotodiode = pexConfig.Field(
164  dtype=bool,
165  doc="Apply a correction based on the photodiode readings if available?",
166  default=True,
167  )
168  photodiodeDataPath = pexConfig.Field(
169  dtype=str,
170  doc="Gen2 only: path to locate the photodiode data files.",
171  default=""
172  )
173  instrumentName = pexConfig.Field(
174  dtype=str,
175  doc="Instrument name.",
176  default='',
177  )
178 
179 
180 class MeasurePhotonTransferCurveTask(pipeBase.CmdLineTask):
181  """A class to calculate, fit, and plot a PTC from a set of flat pairs.
182 
183  The Photon Transfer Curve (var(signal) vs mean(signal)) is a standard tool
184  used in astronomical detector characterization (e.g., Janesick 2001,
185  Janesick 2007). If ptcFitType is "EXPAPPROXIMATION" or "POLYNOMIAL", this task calculates the
186  PTC from a series of pairs of flat-field images; each pair taken at identical exposure
187  times. The difference image of each pair is formed to eliminate fixed pattern noise,
188  and then the variance of the difference image and the mean of the average image
189  are used to produce the PTC. An n-degree polynomial or the approximation in Equation
190  16 of Astier+19 ("The Shape of the Photon Transfer Curve of CCD sensors",
191  arXiv:1905.08677) can be fitted to the PTC curve. These models include
192  parameters such as the gain (e/DN) and readout noise.
193 
194  Linearizers to correct for signal-chain non-linearity are also calculated.
195  The `Linearizer` class, in general, can support per-amp linearizers, but in this
196  task this is not supported.
197 
198  If ptcFitType is "FULLCOVARIANCE", the covariances of the difference images are calculated via the
199  DFT methods described in Astier+19 and the variances for the PTC are given by the cov[0,0] elements
200  at each signal level. The full model in Equation 20 of Astier+19 is fit to the PTC to get the gain
201  and the noise.
202 
203  Parameters
204  ----------
205 
206  *args: `list`
207  Positional arguments passed to the Task constructor. None used at this
208  time.
209  **kwargs: `dict`
210  Keyword arguments passed on to the Task constructor. None used at this
211  time.
212 
213  """
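    # Rough sketch of the variance models fitted below (mu and var in ADU; g = gain in e/ADU):
    #   POLYNOMIAL:       var(mu) ~ c0 + c1*mu + c2*mu^2 + ...  (gain ~ 1/c1, read noise ~ sqrt(c0)*g)
    #   EXPAPPROXIMATION: var(mu) ~ 0.5/(a00*g^2)*(exp(2*a00*mu*g) - 1) + n/g^2  (Astier+19, Eq. 16)
    # The exact functional forms used are funcPolynomial and funcAstier, imported from .utils.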
214 
215  RunnerClass = DataRefListRunner
216  ConfigClass = MeasurePhotonTransferCurveTaskConfig
217  _DefaultName = "measurePhotonTransferCurve"
218 
219  def __init__(self, *args, **kwargs):
220  pipeBase.CmdLineTask.__init__(self, *args, **kwargs)
221  self.makeSubtask("linearity")
222  plt.interactive(False) # stop windows popping up when plotting. When headless, use 'agg' backend too
223  self.config.validate()
224  self.config.freeze()
225 
226  @pipeBase.timeMethod
227  def runDataRef(self, dataRefList):
228  """Run the Photon Transfer Curve (PTC) measurement task.
229 
230  For a dataRef (which is each detector here),
231  and given a list of exposure pairs (postISR) at different exposure times,
232  measure the PTC.
233 
234  Parameters
235  ----------
236  dataRefList : `list` [`lsst.daf.persistence.ButlerDataRef`]
237  Data references for exposures for detectors to process.
238  """
239  if len(dataRefList) < 2:
240  raise RuntimeError("Insufficient inputs to combine.")
241 
242  # setup necessary objects
243  dataRef = dataRefList[0]
244 
245  detNum = dataRef.dataId[self.config.ccdKey]
246  camera = dataRef.get('camera')
247  detector = camera[dataRef.dataId[self.config.ccdKey]]
248 
249  amps = detector.getAmplifiers()
250  ampNames = [amp.getName() for amp in amps]
251  datasetPtc = PhotonTransferCurveDataset(ampNames, self.config.ptcFitType)
252 
253  # Get the pairs of flat indexed by expTime
254  expPairs = self.makePairs(dataRefList)
255  expIds = []
256  for (exp1, exp2) in expPairs.values():
257  id1 = exp1.getInfo().getVisitInfo().getExposureId()
258  id2 = exp2.getInfo().getVisitInfo().getExposureId()
259  expIds.append((id1, id2))
260  self.log.info(f"Measuring PTC using {expIds} exposures for detector {detector.getId()}")
261 
262  # get photodiode data early so that logic can be put in to only use the
263  # data if all files are found, as partial corrections are not possible
264  # or at least require significant logic to deal with
265  if self.config.doPhotodiode:
266  for (expId1, expId2) in expIds:
267  charges = [-1, -1] # necessary to have a not-found value to keep lists in step
268  for i, expId in enumerate([expId1, expId2]):
269  # //1000 is a Gen2 only hack, working around the fact an
270  # exposure's ID is not the same as the expId in the
271  # registry. Currently expId is concatenated with the
272  # zero-padded detector ID. This will all go away in Gen3.
273  dataRef.dataId['expId'] = expId//1000
274  if self.config.photodiodeDataPath:
275  photodiodeData = getBOTphotodiodeData(dataRef, self.config.photodiodeDataPath)
276  else:
277  photodiodeData = getBOTphotodiodeData(dataRef)
278  if photodiodeData: # default path stored in function def to keep task clean
279  charges[i] = photodiodeData.getCharge()
280  else:
281  # full expId (not //1000) here, as that encodes the
282  # detector number and so is fully qualifying
283  self.log.warn(f"No photodiode data found for {expId}")
284 
285  for ampName in ampNames:
286  datasetPtc.photoCharge[ampName].append((charges[0], charges[1]))
287  else:
288  # Can't be an empty list, as initialized, because astropy.Table won't allow it
289  # when saving as fits
290  for ampName in ampNames:
291  datasetPtc.photoCharge[ampName] = np.repeat(np.nan, len(expIds))
292 
293  for ampName in ampNames:
294  datasetPtc.inputExpIdPairs[ampName] = expIds
295 
296  tupleRecords = []
297  allTags = []
298  for expTime, (exp1, exp2) in expPairs.items():
299  expId1 = exp1.getInfo().getVisitInfo().getExposureId()
300  expId2 = exp2.getInfo().getVisitInfo().getExposureId()
301  tupleRows = []
302  nAmpsNan = 0
303  for ampNumber, amp in enumerate(detector):
304  ampName = amp.getName()
305  # covAstier: (i, j, var (cov[0,0]), cov, npix)
306  doRealSpace = self.config.covAstierRealSpace
307  muDiff, varDiff, covAstier = self.measureMeanVarCov(exp1, exp2, region=amp.getBBox(),
308  covAstierRealSpace=doRealSpace)
309  datasetPtc.rawExpTimes[ampName].append(expTime)
310  datasetPtc.rawMeans[ampName].append(muDiff)
311  datasetPtc.rawVars[ampName].append(varDiff)
312 
313  if np.isnan(muDiff) or np.isnan(varDiff) or (covAstier is None):
314  msg = (f"NaN mean or var, or None cov in amp {ampName} in exposure pair {expId1},"
315  f" {expId2} of detector {detNum}.")
316  self.log.warn(msg)
317  nAmpsNan += 1
318  continue
319  tags = ['mu', 'i', 'j', 'var', 'cov', 'npix', 'ext', 'expTime', 'ampName']
320  if (muDiff <= self.config.minMeanSignal) or (muDiff >= self.config.maxMeanSignal):
321  continue
322 
323  tupleRows += [(muDiff, ) + covRow + (ampNumber, expTime, ampName) for covRow in covAstier]
324  if nAmpsNan == len(ampNames):
325  msg = f"NaN mean in all amps of exposure pair {expId1}, {expId2} of detector {detNum}."
326  self.log.warn(msg)
327  continue
328  allTags += tags
329  tupleRecords += tupleRows
330  covariancesWithTags = np.core.records.fromrecords(tupleRecords, names=allTags)
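 # covariancesWithTags is a flat record array with one row per (amp, flat pair, lag),
 # carrying the fields listed in `tags` above (mu, i, j, var, cov, npix, ext, expTime,
 # ampName). This is the format consumed by fitData below for the FULLCOVARIANCE fit.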
331 
332  if self.config.ptcFitType in ["FULLCOVARIANCE", ]:
333  # Calculate covariances and fit them, including the PTC, to Astier+19 full model (Eq. 20)
334  datasetPtc = self.fitCovariancesAstier(datasetPtc, covariancesWithTags)
335  elif self.config.ptcFitType in ["EXPAPPROXIMATION", "POLYNOMIAL"]:
336  # Fit the PTC to a polynomial or to Astier+19 exponential approximation (Eq. 16)
337  # Fill up PhotonTransferCurveDataset object.
338  datasetPtc = self.fitPtc(datasetPtc, self.config.ptcFitType)
339 
340  detName = detector.getName()
341  now = datetime.datetime.utcnow()
342  calibDate = now.strftime("%Y-%m-%d")
343  butler = dataRef.getButler()
344 
345  datasetPtc.updateMetadata(setDate=True, camera=camera, detector=detector)
346 
347  # Fit a polynomial to calculate non-linearity and persist linearizer.
348  if self.config.doCreateLinearizer:
349  # Fit (non)linearity of signal vs time curve.
350  # Fill up PhotonTransferCurveDataset object.
351  # Fill up array for LUT linearizer (tableArray).
352  # Produce coefficients for Polynomial and Squared linearizers.
353  # Build linearizer objects.
354  dimensions = {'camera': camera.getName(), 'detector': detector.getId()}
355  linearityResults = self.linearity.run(datasetPtc, camera, dimensions)
356  linearizer = linearityResults.outputLinearizer
357 
358  self.log.info("Writing linearizer:")
359  butler.put(linearizer, datasetType='Linearizer', dataId={'detector': detNum,
360  'detectorName': detName, 'calibDate': calibDate})
361 
362  self.log.info("Writing PTC data.")
363  butler.put(datasetPtc, datasetType='photonTransferCurveDataset', dataId={'detector': detNum,
364  'detectorName': detName, 'calibDate': calibDate})
365 
366  return pipeBase.Struct(exitStatus=0)
367 
368  def makePairs(self, dataRefList):
369  """Produce a list of flat pairs indexed by exposure time.
370 
371  Parameters
372  ----------
373  dataRefList : `list` [`lsst.daf.persistence.ButlerDataRef`]
374  Data references for exposures for detectors to process.
375 
376  Returns
377  -------
378  flatPairs : `dict` [`float`, `list` [`lsst.afw.image.exposure.exposure.ExposureF`]]
379  Dictionary that groups flat-field exposures that have the same exposure time (seconds).
380 
381  Notes
382  -----
383  We use the difference of one pair of flat-field images taken at the same exposure time when
384  calculating the PTC to reduce Fixed Pattern Noise. If there are > 2 flat-field images with the
385  same exposure time, the first two are kept and the rest discarded.
386  """
387 
388  # Organize exposures by observation date.
389  expDict = {}
390  for dataRef in dataRefList:
391  try:
392  tempFlat = dataRef.get("postISRCCD")
393  except RuntimeError:
394  self.log.warn("postISR exposure could not be retrieved. Ignoring flat.")
395  continue
396  expDate = tempFlat.getInfo().getVisitInfo().getDate().get()
397  expDict.setdefault(expDate, tempFlat)
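 # Note that setdefault keeps only the first exposure seen for a given observation
 # date; a second flat with an identical timestamp would be silently ignored here.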
398  sortedExps = {k: expDict[k] for k in sorted(expDict)}
399 
400  flatPairs = {}
401  for exp in sortedExps:
402  tempFlat = sortedExps[exp]
403  expTime = tempFlat.getInfo().getVisitInfo().getExposureTime()
404  listAtExpTime = flatPairs.setdefault(expTime, [])
405  if len(listAtExpTime) >= 2:
406  self.log.warn(f"Already found 2 exposures at expTime {expTime}. "
407  f"Ignoring exposure {tempFlat.getInfo().getVisitInfo().getExposureId()}")
408  else:
409  listAtExpTime.append(tempFlat)
410 
411  keysToDrop = []
412  for (key, value) in flatPairs.items():
413  if len(value) < 2:
414  keysToDrop.append(key)
415 
416  if len(keysToDrop):
417  for key in keysToDrop:
418  self.log.warn(f"Only one exposure found at expTime {key}. Dropping exposure "
419  f"{flatPairs[key][0].getInfo().getVisitInfo().getExposureId()}.")
420  flatPairs.pop(key)
421  return flatPairs
422 
423  def fitCovariancesAstier(self, dataset, covariancesWithTagsArray):
424  """Fit measured flat covariances to full model in Astier+19.
425 
426  Parameters
427  ----------
428  dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
429  The dataset containing information such as the means, variances and exposure times.
430 
431  covariancesWithTagsArray : `numpy.recarray`
432  Tuple with at least (mu, cov, var, i, j, npix), where:
433  mu : 0.5*(mu1 + mu2), where:
434  mu1: mean value of flat1
435  mu2: mean value of flat2
436  cov: covariance value at lag(i, j)
437  var: variance(covariance value at lag(0, 0))
438  i: lag dimension
439  j: lag dimension
440  npix: number of pixels used for covariance calculation.
441 
442  Returns
443  -------
444  dataset: `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
445  This is the same dataset as the input parameter; however, it has been modified
446  to include information such as the fit vectors and the fit parameters. See
447  the class `PhotonTransferCurveDataset`.
448  """
449 
450  covFits, covFitsNoB = fitData(covariancesWithTagsArray, maxMu=self.config.maxMeanSignal,
451  r=self.config.maximumRangeCovariancesAstier,
452  nSigmaFullFit=self.config.sigmaClipFullFitCovariancesAstier,
453  maxIterFullFit=self.config.maxIterFullFitCovariancesAstier)
454 
455  dataset = self.getOutputPtcDataCovAstier(dataset, covFits, covFitsNoB)
456 
457  return dataset
458 
459  def getOutputPtcDataCovAstier(self, dataset, covFits, covFitsNoB):
460  """Get output data for the PhotonTransferCurveDataset from CovFit objects.
461 
462  Parameters
463  ----------
464  dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
465  The dataset containing information such as the means, variances and exposure times.
466 
467  covFits: `dict`
468  Dictionary of CovFit objects, with amp names as keys.
469 
470  covFitsNoB : `dict`
471  Dictionary of CovFit objects, with amp names as keys, and 'b=0' in Eq. 20 of Astier+19.
472 
473  Returns
474  -------
475  dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
476  This is the same dataset as the input parameter; however, it has been modified
477  to include extra information such as the mask 1D array, gains, readout noise, measured signal,
478  measured variance, modeled variance, and the a and b coefficient matrices (see Astier+19) per amplifier.
479  See the class `PhotonTransferCurveDataset`.
480  """
481  assert(len(covFits) == len(covFitsNoB))
482 
483  for i, amp in enumerate(dataset.ampNames):
484  lenInputTimes = len(dataset.rawExpTimes[amp])
485  # Not used when ptcFitType is 'FULLCOVARIANCE'
486  dataset.ptcFitPars[amp] = np.nan
487  dataset.ptcFitParsError[amp] = np.nan
488  dataset.ptcFitChiSq[amp] = np.nan
489  if amp in covFits:
490  fit = covFits[amp]
491  fitNoB = covFitsNoB[amp]
492  # Save full covariances, covariances models, and their weights
493  dataset.covariances[amp] = fit.cov
494  dataset.covariancesModel[amp] = fit.evalCovModel()
495  dataset.covariancesSqrtWeights[amp] = fit.sqrtW
496  dataset.aMatrix[amp] = fit.getA()
497  dataset.bMatrix[amp] = fit.getB()
498  dataset.covariancesNoB[amp] = fitNoB.cov
499  dataset.covariancesModelNoB[amp] = fitNoB.evalCovModel()
500  dataset.covariancesSqrtWeightsNoB[amp] = fitNoB.sqrtW
501  dataset.aMatrixNoB[amp] = fitNoB.getA()
502 
503  (meanVecFinal, varVecFinal, varVecModel,
504  wc, varMask) = fit.getFitData(0, 0, divideByMu=False, returnMasked=True)
505  gain = fit.getGain()
506  dataset.expIdMask[amp] = varMask
507  dataset.gain[amp] = gain
508  dataset.gainErr[amp] = fit.getGainErr()
509  dataset.noise[amp] = np.sqrt(fit.getRon())
510  dataset.noiseErr[amp] = fit.getRonErr()
511 
512  padLength = lenInputTimes - len(varVecFinal)
513  dataset.finalVars[amp] = np.pad(varVecFinal/(gain**2), (0, padLength), 'constant',
514  constant_values=np.nan)
515  dataset.finalModelVars[amp] = np.pad(varVecModel/(gain**2), (0, padLength), 'constant',
516  constant_values=np.nan)
517  dataset.finalMeans[amp] = np.pad(meanVecFinal/gain, (0, padLength), 'constant',
518  constant_values=np.nan)
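 # The masked mean/variance vectors are rescaled by the fitted gain (variances by
 # gain**2, means by gain) and padded with NaN to the original number of input pairs,
 # so that every amplifier ends up with equal-length columns when the dataset is
 # serialized with astropy.Table.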
519  else:
520  # Bad amp
521  # Entries need to have proper dimensions so read/write with astropy.Table works.
522  matrixSide = self.config.maximumRangeCovariancesAstier
523  nanMatrix = np.full((matrixSide, matrixSide), np.nan)
524  listNanMatrix = np.full((lenInputTimes, matrixSide, matrixSide), np.nan)
525 
526  dataset.covariances[amp] = listNanMatrix
527  dataset.covariancesModel[amp] = listNanMatrix
528  dataset.covariancesSqrtWeights[amp] = listNanMatrix
529  dataset.aMatrix[amp] = nanMatrix
530  dataset.bMatrix[amp] = nanMatrix
531  dataset.covariancesNoB[amp] = listNanMatrix
532  dataset.covariancesModelNoB[amp] = listNanMatrix
533  dataset.covariancesSqrtWeightsNoB[amp] = listNanMatrix
534  dataset.aMatrixNoB[amp] = nanMatrix
535 
536  dataset.expIdMask[amp] = np.repeat(np.nan, lenInputTimes)
537  dataset.gain[amp] = np.nan
538  dataset.gainErr[amp] = np.nan
539  dataset.noise[amp] = np.nan
540  dataset.noiseErr[amp] = np.nan
541  dataset.finalVars[amp] = np.repeat(np.nan, lenInputTimes)
542  dataset.finalModelVars[amp] = np.repeat(np.nan, lenInputTimes)
543  dataset.finalMeans[amp] = np.repeat(np.nan, lenInputTimes)
544 
545  return dataset
546 
547  def measureMeanVarCov(self, exposure1, exposure2, region=None, covAstierRealSpace=False):
548  """Calculate the mean of each of two exposures and the variance and covariance of their difference.
549 
550  The variance is calculated via afwMath, and the covariance via the methods in Astier+19 (appendix A).
551  In theory, var = covariance[0,0]. This should be validated, and in the future, we may decide to just
552  keep one (covariance).
553 
554  Parameters
555  ----------
556  exposure1 : `lsst.afw.image.exposure.exposure.ExposureF`
557  First exposure of flat field pair.
558 
559  exposure2 : `lsst.afw.image.exposure.exposure.ExposureF`
560  Second exposure of flat field pair.
561 
562  region : `lsst.geom.Box2I`, optional
563  Region of each exposure in which to perform the calculations (e.g., an amplifier).
564 
565  covAstierRealSpace : `bool`, optional
566  Should the covariances in Astier+19 be calculated in real space or via FFT?
567  See Appendix A of Astier+19.
568 
569  Returns
570  -------
571  mu : `float` or `NaN`
572  0.5*(mu1 + mu2), where mu1 and mu2 are the clipped means of the regions in
573  both exposures. If either mu1 or mu2 is NaN, the returned value is NaN.
574 
575  varDiff : `float` or `NaN`
576  Half of the clipped variance of the difference of the regions in the two input
577  exposures. If either mu1 or mu2 is NaN, the returned value is NaN.
578 
579  covDiffAstier : `list` or `None`
580  List with tuples of the form (dx, dy, var, cov, npix), where:
581  dx : `int`
582  Lag in x
583  dy : `int`
584  Lag in y
585  var : `float`
586  Variance at (dx, dy).
587  cov : `float`
588  Covariance at (dx, dy).
589  nPix : `int`
590  Number of pixel pairs used to evaluate var and cov.
591  If either mu1 or mu2 is NaN, the returned value is None.
592  """
593 
594  if region is not None:
595  im1Area = exposure1.maskedImage[region]
596  im2Area = exposure2.maskedImage[region]
597  else:
598  im1Area = exposure1.maskedImage
599  im2Area = exposure2.maskedImage
600 
601  if self.config.binSize > 1:
602  im1Area = afwMath.binImage(im1Area, self.config.binSize)
603  im2Area = afwMath.binImage(im2Area, self.config.binSize)
604 
605  im1MaskVal = exposure1.getMask().getPlaneBitMask(self.config.maskNameList)
606  im1StatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
607  self.config.nIterSigmaClipPtc,
608  im1MaskVal)
609  im1StatsCtrl.setNanSafe(True)
610  im1StatsCtrl.setAndMask(im1MaskVal)
611 
612  im2MaskVal = exposure2.getMask().getPlaneBitMask(self.config.maskNameList)
613  im2StatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
614  self.config.nIterSigmaClipPtc,
615  im2MaskVal)
616  im2StatsCtrl.setNanSafe(True)
617  im2StatsCtrl.setAndMask(im2MaskVal)
618 
619  # Clipped mean of images; then average of mean.
620  mu1 = afwMath.makeStatistics(im1Area, afwMath.MEANCLIP, im1StatsCtrl).getValue()
621  mu2 = afwMath.makeStatistics(im2Area, afwMath.MEANCLIP, im2StatsCtrl).getValue()
622  if np.isnan(mu1) or np.isnan(mu2):
623  return np.nan, np.nan, None
624  mu = 0.5*(mu1 + mu2)
625 
626  # Take difference of pairs
627  # symmetric formula: diff = (mu2*im1-mu1*im2)/(0.5*(mu1+mu2))
628  temp = im2Area.clone()
629  temp *= mu1
630  diffIm = im1Area.clone()
631  diffIm *= mu2
632  diffIm -= temp
633  diffIm /= mu
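 # Scaling each flat by the mean of the other (and normalizing by the average mean)
 # puts the two images on a common level even if their fluxes differ slightly, so the
 # subtraction removes the common fixed-pattern component.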
634 
635  diffImMaskVal = diffIm.getMask().getPlaneBitMask(self.config.maskNameList)
636  diffImStatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
637  self.config.nIterSigmaClipPtc,
638  diffImMaskVal)
639  diffImStatsCtrl.setNanSafe(True)
640  diffImStatsCtrl.setAndMask(diffImMaskVal)
641 
642  varDiff = 0.5*(afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, diffImStatsCtrl).getValue())
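 # The difference of two (nearly) independent flats carries twice the variance of a
 # single flat, hence the factor 0.5 to recover the per-image variance.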
643 
644  # Get the mask and identify good pixels as '1', and the rest as '0'.
645  w1 = np.where(im1Area.getMask().getArray() == 0, 1, 0)
646  w2 = np.where(im2Area.getMask().getArray() == 0, 1, 0)
647 
648  w12 = w1*w2
649  wDiff = np.where(diffIm.getMask().getArray() == 0, 1, 0)
650  w = w12*wDiff
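 # w is 1 only where both input flats and the difference image are unmasked.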
651 
652  maxRangeCov = self.config.maximumRangeCovariancesAstier
653  if covAstierRealSpace:
654  covDiffAstier = computeCovDirect(diffIm.getImage().getArray(), w, maxRangeCov)
655  else:
656  shapeDiff = diffIm.getImage().getArray().shape
657  fftShape = (fftSize(shapeDiff[0] + maxRangeCov), fftSize(shapeDiff[1]+maxRangeCov))
658  c = CovFft(diffIm.getImage().getArray(), w, fftShape, maxRangeCov)
659  covDiffAstier = c.reportCovFft(maxRangeCov)
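 # The FFT shape is padded by maxRangeCov in each dimension so that the circular
 # correlation does not wrap around within the lag range of interest; fftSize
 # rounds the padded dimensions up to an FFT-friendly size.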
660 
661  return mu, varDiff, covDiffAstier
662 
663  def computeCovDirect(self, diffImage, weightImage, maxRange):
664  """Compute covariances of diffImage in real space.
665 
666  For lags larger than ~25, it is slower than the FFT way.
667  Taken from https://github.com/PierreAstier/bfptc/
668 
669  Parameters
670  ----------
671  diffImage : `numpy.array`
672  Image to compute the covariance of.
673 
674  weightImage : `numpy.array`
675  Weight image of diffImage (1's and 0's for good and bad pixels, respectively).
676 
677  maxRange : `int`
678  Last index of the covariance to be computed.
679 
680  Returns
681  -------
682  outList : `list`
683  List with tuples of the form (dx, dy, var, cov, npix), where:
684  dx : `int`
685  Lag in x
686  dy : `int`
687  Lag in y
688  var : `float`
689  Variance at (dx, dy).
690  cov : `float`
691  Covariance at (dx, dy).
692  nPix : `int`
693  Number of pixel pairs used to evaluate var and cov.
694  """
695  outList = []
696  var = 0
697  # (dy,dx) = (0,0) has to be first
698  for dy in range(maxRange + 1):
699  for dx in range(0, maxRange + 1):
700  if (dx*dy > 0):
701  cov1, nPix1 = self.covDirectValue(diffImage, weightImage, dx, dy)
702  cov2, nPix2 = self.covDirectValue(diffImage, weightImage, dx, -dy)
703  cov = 0.5*(cov1 + cov2)
704  nPix = nPix1 + nPix2
705  else:
706  cov, nPix = self.covDirectValue(diffImage, weightImage, dx, dy)
707  if (dx == 0 and dy == 0):
708  var = cov
709  outList.append((dx, dy, var, cov, nPix))
710 
711  return outList
712 
713  def covDirectValue(self, diffImage, weightImage, dx, dy):
714  """Compute covariances of diffImage in real space at lag (dx, dy).
715 
716  Taken from https://github.com/PierreAstier/bfptc/ (c.f., appendix of Astier+19).
717 
718  Parameters
719  ----------
720  diffImage : `numpy.array`
721  Image to compute the covariance of.
722 
723  weightImage : `numpy.array`
724  Weight image of diffImage (1's and 0's for good and bad pixels, respectively).
725 
726  dx : `int`
727  Lag in x.
728 
729  dy : `int`
730  Lag in y.
731 
732  Returns
733  -------
734  cov : `float`
735  Covariance at (dx, dy)
736 
737  nPix : `int`
738  Number of pixel pairs used to evaluate var and cov.
739  """
740  (nCols, nRows) = diffImage.shape
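 # NB: numpy shapes are (rows, columns), so these names are swapped with respect to
 # the usual convention; they are, however, used consistently in the slicing below
 # (dy indexes rows/axis 0 and dx indexes columns/axis 1), so the lags are correct.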
741  # switching both signs does not change anything:
742  # it just swaps im1 and im2 below
743  if (dx < 0):
744  (dx, dy) = (-dx, -dy)
745  # now, we have dx >0. We have to distinguish two cases
746  # depending on the sign of dy
747  if dy >= 0:
748  im1 = diffImage[dy:, dx:]
749  w1 = weightImage[dy:, dx:]
750  im2 = diffImage[:nCols - dy, :nRows - dx]
751  w2 = weightImage[:nCols - dy, :nRows - dx]
752  else:
753  im1 = diffImage[:nCols + dy, dx:]
754  w1 = weightImage[:nCols + dy, dx:]
755  im2 = diffImage[-dy:, :nRows - dx]
756  w2 = weightImage[-dy:, :nRows - dx]
757  # use the same mask for all 3 calculations
758  wAll = w1*w2
759  # do not use mean() because weightImage=0 pixels would then count
760  nPix = wAll.sum()
761  im1TimesW = im1*wAll
762  s1 = im1TimesW.sum()/nPix
763  s2 = (im2*wAll).sum()/nPix
764  p = (im1TimesW*im2).sum()/nPix
765  cov = p - s1*s2
766 
767  return cov, nPix
768 
769  @staticmethod
770  def _initialParsForPolynomial(order):
771  assert(order >= 2)
772  pars = np.zeros(order, dtype=np.float)
773  pars[0] = 10
774  pars[1] = 1
775  pars[2:] = 0.0001
776  return pars
777 
778  @staticmethod
779  def _boundsForPolynomial(initialPars):
780  lowers = [np.NINF for p in initialPars]
781  uppers = [np.inf for p in initialPars]
782  lowers[1] = 0 # no negative gains
783  return (lowers, uppers)
784 
785  @staticmethod
786  def _boundsForAstier(initialPars):
787  lowers = [np.NINF for p in initialPars]
788  uppers = [np.inf for p in initialPars]
789  return (lowers, uppers)
790 
791  @staticmethod
792  def _getInitialGoodPoints(means, variances, maxDeviationPositive, maxDeviationNegative):
793  """Return a boolean array to mask bad points.
794 
795  A linear function has a constant ratio, so find the median
796  value of the ratios, and exclude the points that deviate
797  from that by more than a factor of maxDeviationPositive/negative.
798  Asymmetric deviations are supported as we expect the PTC to turn
799  down as the flux increases, but sometimes it anomalously turns
800  upwards just before turning over, which ruins the fits, so it
801  is wise to be stricter about restricting positive outliers than
802  negative ones.
803 
804  If the thresholds are too high, points so bad that the fit will fail are included;
805  if too low, the genuinely non-linear points are excluded, biasing the NL fit."""
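 # Worked example: means = [1., 2., 3., 4.] and variances = [1., 2., 3., 8.] give
 # ratios [1., 1., 1., 2.], a median ratio of 1. and deviations [0., 0., 0., 1.],
 # so with maxDeviationPositive = 0.12 only the last point is flagged as bad.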
806  ratios = [b/a for (a, b) in zip(means, variances)]
807  medianRatio = np.median(ratios)
808  ratioDeviations = [(r/medianRatio)-1 for r in ratios]
809 
810  # so that it doesn't matter if the deviation is expressed as positive or negative
811  maxDeviationPositive = abs(maxDeviationPositive)
812  maxDeviationNegative = -1. * abs(maxDeviationNegative)
813 
814  goodPoints = np.array([True if (r < maxDeviationPositive and r > maxDeviationNegative)
815  else False for r in ratioDeviations])
816  return goodPoints
817 
818  def _makeZeroSafe(self, array, warn=True, substituteValue=1e-9):
819  """Replace zeros in an array with a small substitute value to avoid divisions by zero."""
820  nBad = Counter(array)[0]
821  if nBad == 0:
822  return array
823 
824  if warn:
825  msg = f"Found {nBad} zeros in array at elements {[x for x in np.where(array==0)[0]]}"
826  self.log.warn(msg)
827 
828  array[array == 0] = substituteValue
829  return array
830 
831  def fitPtc(self, dataset, ptcFitType):
832  """Fit the photon transfer curve to a polynomial or to the Astier+19 approximation.
833 
834  Fit the photon transfer curve with either a polynomial of the order
835  specified in the task config, or using the Astier approximation.
836 
837  Sigma clipping is performed iteratively for the fit, as well as an
838  initial clipping of data points that are more than
839  config.initialNonLinearityExclusionThreshold[Positive|Negative] away from lying on a
840  straight line. This initial step is necessary because the photon transfer
841  curve turns over catastrophically at very high flux (because saturation
842  drops the variance to ~0) and these far outliers cause the initial fit
843  to fail, meaning the sigma cannot be calculated to perform the
844  sigma-clipping.
845 
846  Parameters
847  ----------
848  dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
849  The dataset containing the means, variances and exposure times
850 
851  ptcFitType : `str`
852  Fit a 'POLYNOMIAL' (degree: 'polynomialFitDegree') or
853  'EXPAPPROXIMATION' (Eq. 16 of Astier+19) to the PTC
854 
855  Returns
856  -------
857  dataset: `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
858  This is the same dataset as the input parameter; however, it has been modified
859  to include information such as the fit vectors and the fit parameters. See
860  the class `PhotonTransferCurveDataset`.
861  """
862 
863  matrixSide = self.config.maximumRangeCovariancesAstier
864  nanMatrix = np.empty((matrixSide, matrixSide))
865  nanMatrix[:] = np.nan
866 
867  for amp in dataset.ampNames:
868  lenInputTimes = len(dataset.rawExpTimes[amp])
869  listNanMatrix = np.empty((lenInputTimes, matrixSide, matrixSide))
870  listNanMatrix[:] = np.nan
871 
872  dataset.covariances[amp] = listNanMatrix
873  dataset.covariancesModel[amp] = listNanMatrix
874  dataset.covariancesSqrtWeights[amp] = listNanMatrix
875  dataset.aMatrix[amp] = nanMatrix
876  dataset.bMatrix[amp] = nanMatrix
877  dataset.covariancesNoB[amp] = listNanMatrix
878  dataset.covariancesModelNoB[amp] = listNanMatrix
879  dataset.covariancesSqrtWeightsNoB[amp] = listNanMatrix
880  dataset.aMatrixNoB[amp] = nanMatrix
881 
882  def errFunc(p, x, y):
883  return ptcFunc(p, x) - y
884 
885  sigmaCutPtcOutliers = self.config.sigmaCutPtcOutliers
886  maxIterationsPtcOutliers = self.config.maxIterationsPtcOutliers
887 
888  for i, ampName in enumerate(dataset.ampNames):
889  timeVecOriginal = np.array(dataset.rawExpTimes[ampName])
890  meanVecOriginal = np.array(dataset.rawMeans[ampName])
891  varVecOriginal = np.array(dataset.rawVars[ampName])
892  varVecOriginal = self._makeZeroSafe(varVecOriginal)
893 
894  mask = ((meanVecOriginal >= self.config.minMeanSignal) &
895  (meanVecOriginal <= self.config.maxMeanSignal))
896 
897  goodPoints = self._getInitialGoodPoints(meanVecOriginal, varVecOriginal,
898  self.config.initialNonLinearityExclusionThresholdPositive,
899  self.config.initialNonLinearityExclusionThresholdNegative)
900  if not (mask.any() and goodPoints.any()):
901  msg = (f"\nSERIOUS: All points in either mask: {mask} or goodPoints: {goodPoints} are bad."
902  f" Setting {ampName} to BAD.")
903  self.log.warn(msg)
904  # The first and second parameters of initial fit are discarded (bias and gain)
905  # for the final NL coefficients
906  dataset.badAmps.append(ampName)
907  dataset.expIdMask[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
908  dataset.gain[ampName] = np.nan
909  dataset.gainErr[ampName] = np.nan
910  dataset.noise[ampName] = np.nan
911  dataset.noiseErr[ampName] = np.nan
912  dataset.ptcFitPars[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1) if
913  ptcFitType in ["POLYNOMIAL", ] else np.repeat(np.nan, 3))
914  dataset.ptcFitParsError[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1) if
915  ptcFitType in ["POLYNOMIAL", ] else np.repeat(np.nan, 3))
916  dataset.ptcFitChiSq[ampName] = np.nan
917  dataset.finalVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
918  dataset.finalModelVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
919  dataset.finalMeans[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
920  continue
921 
922  mask = mask & goodPoints
923 
924  if ptcFitType == 'EXPAPPROXIMATION':
925  ptcFunc = funcAstier
926  parsIniPtc = [-1e-9, 1.0, 10.] # a00, gain, noise
927  bounds = self._boundsForAstier(parsIniPtc)
928  if ptcFitType == 'POLYNOMIAL':
929  ptcFunc = funcPolynomial
930  parsIniPtc = self._initialParsForPolynomial(self.config.polynomialFitDegree + 1)
931  bounds = self._boundsForPolynomial(parsIniPtc)
932 
933  # Before bootstrap fit, do an iterative fit to get rid of outliers
934  count = 1
935  while count <= maxIterationsPtcOutliers:
936  # Note that application of the mask actually shrinks the array
937  # to size rather than setting elements to zero (as we want) so
938  # always update mask itself and re-apply to the original data
939  meanTempVec = meanVecOriginal[mask]
940  varTempVec = varVecOriginal[mask]
941  res = least_squares(errFunc, parsIniPtc, bounds=bounds, args=(meanTempVec, varTempVec))
942  pars = res.x
943 
944  # change this to the original from the temp because the masks are ANDed
945  # meaning once a point is masked it's always masked, and the masks must
946  # always be the same length for broadcasting
947  sigResids = (varVecOriginal - ptcFunc(pars, meanVecOriginal))/np.sqrt(varVecOriginal)
948  newMask = np.array([True if np.abs(r) < sigmaCutPtcOutliers else False for r in sigResids])
949  mask = mask & newMask
950  if not (mask.any() and newMask.any()):
951  msg = (f"\nSERIOUS: All points in either mask: {mask} or newMask: {newMask} are bad. "
952  f"Setting {ampName} to BAD.")
953  self.log.warn(msg)
954  # The first and second parameters of initial fit are discarded (bias and gain)
955  # for the final NL coefficients
956  dataset.badAmps.append(ampName)
957  dataset.expIdMask[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
958  dataset.gain[ampName] = np.nan
959  dataset.gainErr[ampName] = np.nan
960  dataset.noise[ampName] = np.nan
961  dataset.noiseErr[ampName] = np.nan
962  dataset.ptcFitPars[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1)
963  if ptcFitType in ["POLYNOMIAL", ] else
964  np.repeat(np.nan, 3))
965  dataset.ptcFitParsError[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1)
966  if ptcFitType in ["POLYNOMIAL", ] else
967  np.repeat(np.nan, 3))
968  dataset.ptcFitChiSq[ampName] = np.nan
969  dataset.finalVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
970  dataset.finalModelVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
971  dataset.finalMeans[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
972  break
973  nDroppedTotal = Counter(mask)[False]
974  self.log.debug(f"Iteration {count}: discarded {nDroppedTotal} points in total for {ampName}")
975  count += 1
976  # objects should never shrink
977  assert (len(mask) == len(timeVecOriginal) == len(meanVecOriginal) == len(varVecOriginal))
978 
979  if not (mask.any() and newMask.any()):
980  continue
981  dataset.expIdMask[ampName] = mask # store the final mask
982  parsIniPtc = pars
983  meanVecFinal = meanVecOriginal[mask]
984  varVecFinal = varVecOriginal[mask]
985 
986  if Counter(mask)[False] > 0:
987  self.log.info((f"Number of points discarded in PTC of amplifier {ampName}:" +
988  f" {Counter(mask)[False]} out of {len(meanVecOriginal)}"))
989 
990  if (len(meanVecFinal) < len(parsIniPtc)):
991  msg = (f"\nSERIOUS: Not enough data points ({len(meanVecFinal)}) compared to the number of"
992  f" parameters of the PTC model ({len(parsIniPtc)}). Setting {ampName} to BAD.")
993  self.log.warn(msg)
994  # The first and second parameters of initial fit are discarded (bias and gain)
995  # for the final NL coefficients
996  dataset.badAmps.append(ampName)
997  dataset.expIdMask[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
998  dataset.gain[ampName] = np.nan
999  dataset.gainErr[ampName] = np.nan
1000  dataset.noise[ampName] = np.nan
1001  dataset.noiseErr[ampName] = np.nan
1002  dataset.ptcFitPars[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1) if
1003  ptcFitType in ["POLYNOMIAL", ] else np.repeat(np.nan, 3))
1004  dataset.ptcFitParsError[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1) if
1005  ptcFitType in ["POLYNOMIAL", ] else np.repeat(np.nan, 3))
1006  dataset.ptcFitChiSq[ampName] = np.nan
1007  dataset.finalVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
1008  dataset.finalModelVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
1009  dataset.finalMeans[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
1010  continue
1011 
1012  # Fit the PTC
1013  if self.config.doFitBootstrap:
1014  parsFit, parsFitErr, reducedChiSqPtc = fitBootstrap(parsIniPtc, meanVecFinal,
1015  varVecFinal, ptcFunc,
1016  weightsY=1./np.sqrt(varVecFinal))
1017  else:
1018  parsFit, parsFitErr, reducedChiSqPtc = fitLeastSq(parsIniPtc, meanVecFinal,
1019  varVecFinal, ptcFunc,
1020  weightsY=1./np.sqrt(varVecFinal))
1021  dataset.ptcFitPars[ampName] = parsFit
1022  dataset.ptcFitParsError[ampName] = parsFitErr
1023  dataset.ptcFitChiSq[ampName] = reducedChiSqPtc
1024  # Masked variances (measured and modeled) and means. Need to pad the array so astropy.Table does
1025  # not crash (the mask may vary per amp).
1026  padLength = len(dataset.rawExpTimes[ampName]) - len(varVecFinal)
1027  dataset.finalVars[ampName] = np.pad(varVecFinal, (0, padLength), 'constant',
1028  constant_values=np.nan)
1029  dataset.finalModelVars[ampName] = np.pad(ptcFunc(parsFit, meanVecFinal), (0, padLength),
1030  'constant', constant_values=np.nan)
1031  dataset.finalMeans[ampName] = np.pad(meanVecFinal, (0, padLength), 'constant',
1032  constant_values=np.nan)
1033 
1034  if ptcFitType == 'EXPAPPROXIMATION':
1035  ptcGain = parsFit[1]
1036  ptcGainErr = parsFitErr[1]
1037  ptcNoise = np.sqrt(np.fabs(parsFit[2]))
1038  ptcNoiseErr = 0.5*(parsFitErr[2]/np.fabs(parsFit[2]))*np.sqrt(np.fabs(parsFit[2]))
1039  if ptcFitType == 'POLYNOMIAL':
1040  ptcGain = 1./parsFit[1]
1041  ptcGainErr = np.fabs(1./parsFit[1])*(parsFitErr[1]/parsFit[1])
1042  ptcNoise = np.sqrt(np.fabs(parsFit[0]))*ptcGain
1043  ptcNoiseErr = (0.5*(parsFitErr[0]/np.fabs(parsFit[0]))*(np.sqrt(np.fabs(parsFit[0]))))*ptcGain
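 # For POLYNOMIAL, var ~ c0 + mu/g + ..., so the gain is 1/c1 and the read noise in
 # electrons is sqrt(c0)*g; the error expressions above are first-order propagation
 # of the fit-parameter uncertainties. For EXPAPPROXIMATION, the third fit parameter
 # is treated as the squared read noise (hence the square root above).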
1044  dataset.gain[ampName] = ptcGain
1045  dataset.gainErr[ampName] = ptcGainErr
1046  dataset.noise[ampName] = ptcNoise
1047  dataset.noiseErr[ampName] = ptcNoiseErr
1048  if not len(dataset.ptcFitType) == 0:
1049  dataset.ptcFitType = ptcFitType
1050  if len(dataset.badAmps) == 0:
1051  dataset.badAmps = np.repeat(np.nan, len(list(dataset.rawExpTimes.values())[0]))
1052 
1053  return dataset