# This file is part of cp_pipe.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""Calculation of brighter-fatter effect correlations and kernels."""

__all__ = ['MakeBrighterFatterKernelTaskConfig',
           'MakeBrighterFatterKernelTask',
           'calcBiasCorr']

import os
import copy
from scipy import stats
import numpy as np
import matplotlib.pyplot as plt
# the following import is required for 3d projection
from mpl_toolkits.mplot3d import axes3d  # noqa: F401
from matplotlib.backends.backend_pdf import PdfPages
from dataclasses import dataclass

import lsstDebug
import lsst.afw.image as afwImage
import lsst.afw.math as afwMath
import lsst.afw.display as afwDisp
from lsst.ip.isr import IsrTask
import lsst.log as lsstLog
import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
from .utils import PairedVisitListTaskRunner, checkExpLengthEqual
import lsst.daf.persistence.butlerExceptions as butlerExceptions
from lsst.cp.pipe.ptc import (MeasurePhotonTransferCurveTaskConfig, MeasurePhotonTransferCurveTask,
                              PhotonTransferCurveDataset)


class MakeBrighterFatterKernelTaskConfig(pexConfig.Config):
    """Config class for brighter-fatter effect coefficient calculation."""

    isr = pexConfig.ConfigurableField(
        target=IsrTask,
        doc="""Task to perform instrumental signature removal""",
    )
    isrMandatorySteps = pexConfig.ListField(
        dtype=str,
        doc="isr operations that must be performed for valid results. Raises if any of these are False",
        default=['doAssembleCcd']
    )
    isrForbiddenSteps = pexConfig.ListField(
        dtype=str,
        doc="isr operations that must NOT be performed for valid results. Raises if any of these are True",
        default=['doFlat', 'doFringe', 'doBrighterFatter', 'doUseOpticsTransmission',
                 'doUseFilterTransmission', 'doUseSensorTransmission', 'doUseAtmosphereTransmission']
    )
    isrDesirableSteps = pexConfig.ListField(
        dtype=str,
        doc="isr operations that it is advisable to perform, but are not mission-critical." +
            " WARNs are logged for any of these found to be False.",
        default=['doBias', 'doDark', 'doCrosstalk', 'doDefect', 'doLinearize']
    )
    doCalcGains = pexConfig.Field(
        dtype=bool,
        doc="Measure the per-amplifier gains (using the photon transfer curve method)?",
        default=True,
    )
    doPlotPtcs = pexConfig.Field(
        dtype=bool,
        doc="Plot the PTCs and butler.put() them as defined by the plotBrighterFatterPtc template",
        default=False,
    )
    forceZeroSum = pexConfig.Field(
        dtype=bool,
        doc="Force the correlation matrix to have zero sum by adjusting the (0,0) value?",
        default=False,
    )
    correlationQuadraticFit = pexConfig.Field(
        dtype=bool,
        doc="Use a quadratic fit to find the correlations instead of simple averaging?",
        default=False,
    )
    correlationModelRadius = pexConfig.Field(
        dtype=int,
        doc="Build a model of the correlation coefficients for radii larger than this value in pixels?",
        default=100,
    )
    correlationModelSlope = pexConfig.Field(
        dtype=float,
        doc="Slope of the correlation model for radii larger than correlationModelRadius",
        default=-1.35,
    )
    ccdKey = pexConfig.Field(
        dtype=str,
        doc="The key by which to pull a detector from a dataId, e.g. 'ccd' or 'detector'",
        default='ccd',
    )
    minMeanSignal = pexConfig.DictField(
        keytype=str,
        itemtype=float,
        doc="Minimum values (inclusive) of mean signal (in ADU) above which to consider, per amp."
            " The same cut is applied to all amps if this dictionary is of the form"
            " {'ALL_AMPS': value}",
        default={'ALL_AMPS': 0.0},
    )
    maxMeanSignal = pexConfig.DictField(
        keytype=str,
        itemtype=float,
        doc="Maximum values (inclusive) of mean signal (in ADU) below which to consider, per amp."
            " The same cut is applied to all amps if this dictionary is of the form"
            " {'ALL_AMPS': value}",
        default={'ALL_AMPS': 1e6},
    )
    maxIterRegression = pexConfig.Field(
        dtype=int,
        doc="Maximum number of iterations for the regression fitter",
        default=2
    )
    nSigmaClipGainCalc = pexConfig.Field(
        dtype=int,
        doc="Number of sigma to clip the pixel value distribution to during gain calculation",
        default=5
    )
    nSigmaClipRegression = pexConfig.Field(
        dtype=int,
        doc="Number of sigma to clip outliers from the line of best fit to during iterative regression",
        default=4
    )
    xcorrCheckRejectLevel = pexConfig.Field(
        dtype=float,
        doc="Sanity check level for the sum of the input cross-correlations. Arrays which " +
            "sum to greater than this are discarded before the clipped mean is calculated.",
        default=2.0
    )
    maxIterSuccessiveOverRelaxation = pexConfig.Field(
        dtype=int,
        doc="The maximum number of iterations allowed for the successive over-relaxation method",
        default=10000
    )
    eLevelSuccessiveOverRelaxation = pexConfig.Field(
        dtype=float,
        doc="The target residual error for the successive over-relaxation method",
        default=5.0e-14
    )
    nSigmaClipKernelGen = pexConfig.Field(
        dtype=float,
        doc="Number of sigma to clip to during pixel-wise clipping when generating the kernel. See " +
            "the generateKernel docstring for more info.",
        default=4
    )
    nSigmaClipXCorr = pexConfig.Field(
        dtype=float,
        doc="Number of sigma to clip when calculating means for the cross-correlation",
        default=5
    )
    maxLag = pexConfig.Field(
        dtype=int,
        doc="The maximum lag (in pixels) to use when calculating the cross-correlation/kernel",
        default=8
    )
    nPixBorderGainCalc = pexConfig.Field(
        dtype=int,
        doc="The number of border pixels to exclude when calculating the gain",
        default=10
    )
    nPixBorderXCorr = pexConfig.Field(
        dtype=int,
        doc="The number of border pixels to exclude when calculating the cross-correlation and kernel",
        default=10
    )
    biasCorr = pexConfig.Field(
        dtype=float,
        doc="An empirically determined correction factor, used to correct for the sigma-clipping of" +
            " a non-Gaussian distribution. Post DM-15277, code will exist here to calculate appropriate values",
        default=0.9241
    )
    backgroundBinSize = pexConfig.Field(
        dtype=int,
        doc="Size of the background bins",
        default=128
    )
    fixPtcThroughOrigin = pexConfig.Field(
        dtype=bool,
        doc="Constrain the fit of the photon transfer curve to go through the origin when measuring" +
            " the gain?",
        default=True
    )
    level = pexConfig.ChoiceField(
        doc="The level at which to calculate the brighter-fatter kernels",
        dtype=str,
        default="DETECTOR",
        allowed={
            "AMP": "Every amplifier treated separately",
            "DETECTOR": "One kernel per detector",
        }
    )
    ignoreAmpsForAveraging = pexConfig.ListField(
        dtype=str,
        doc="List of amp names to ignore when averaging the amplifier kernels into the detector" +
            " kernel. Only relevant for level = AMP",
        default=[]
    )
    backgroundWarnLevel = pexConfig.Field(
        dtype=float,
        doc="Log warnings if the mean of the fitted background is found to be above this level after " +
            "differencing the image pair.",
        default=0.1
    )
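
# A minimal usage sketch (hypothetical values) of how the options above are
# typically overridden in a config override file passed to the command line
# task via --configfile:
#
#     config.level = 'AMP'
#     config.maxLag = 10          # kernel will be (2*10 + 1) = 21 pixels on a side
#     config.forceZeroSum = True
#     config.isr.doBias = True    # one of the isrDesirableSteps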


class BrighterFatterKernelTaskDataIdContainer(pipeBase.DataIdContainer):
    """A DataIdContainer for the MakeBrighterFatterKernelTask."""

    def makeDataRefList(self, namespace):
        """Compute refList based on idList.

        This method must be defined as the dataset does not exist before this
        task is run.

        Parameters
        ----------
        namespace
            Results of parsing the command-line.

        Notes
        -----
        Not called if ``add_id_argument`` called
        with ``doMakeDataRefList=False``.
        Note that this is almost a copy-and-paste of the vanilla
        implementation, but without checking if the datasets already exist,
        as this task exists to make them.
        """
        if self.datasetType is None:
            raise RuntimeError("Must call setDatasetType first")
        butler = namespace.butler
        for dataId in self.idList:
            refList = list(butler.subset(datasetType=self.datasetType, level=self.level, dataId=dataId))
            # exclude nonexistent data
            # this is a recursive test, e.g. for the sake of "raw" data
            if not refList:
                namespace.log.warn("No data found for dataId=%s", dataId)
                continue
            self.refList += refList


class BrighterFatterKernel:
    """A simple class to hold the kernel(s) generated and the intermediate
    data products.

    kernel.ampwiseKernels are the kernels for each amplifier in the detector,
    as generated by having LEVEL == 'AMP'

    kernel.detectorKernel is the kernel generated for the detector as a whole,
    as generated by having LEVEL == 'DETECTOR'

    kernel.detectorKernelFromAmpKernels is the kernel for the detector,
    generated by averaging together the amps in the detector

    The originalLevel is the level for which the kernel(s) were generated,
    i.e. the level at which the task was originally run.
    """

    def __init__(self, originalLevel, **kwargs):
        self.__dict__["originalLevel"] = originalLevel
        self.__dict__["ampwiseKernels"] = {}
        self.__dict__["detectorKernel"] = {}
        self.__dict__["detectorKernelFromAmpKernels"] = {}
        self.__dict__["means"] = []
        self.__dict__["rawMeans"] = []
        self.__dict__["rawXcorrs"] = []
        self.__dict__["xCorrs"] = []
        self.__dict__["meanXcorrs"] = []
        self.__dict__["gain"] = None  # will be a dict keyed by amp if set
        self.__dict__["gainErr"] = None  # will be a dict keyed by amp if set
        self.__dict__["noise"] = None  # will be a dict keyed by amp if set
        self.__dict__["noiseErr"] = None  # will be a dict keyed by amp if set

        for key, value in kwargs.items():
            if hasattr(self, key):
                setattr(self, key, value)

    def __setattr__(self, attribute, value):
        """Protect class attributes"""
        if attribute not in self.__dict__:
            print(f"Cannot set {attribute}")
        else:
            self.__dict__[attribute] = value

    def replaceDetectorKernelWithAmpKernel(self, ampName, detectorName):
        self.detectorKernel[detectorName] = self.ampwiseKernels[ampName]

    def makeDetectorKernelFromAmpwiseKernels(self, detectorName, ampsToExclude=[], overwrite=False):
        if detectorName not in self.detectorKernelFromAmpKernels.keys():
            self.detectorKernelFromAmpKernels[detectorName] = {}

        if self.detectorKernelFromAmpKernels[detectorName] != {} and overwrite is False:
            raise RuntimeError('Was told to replace existing detector kernel with overwrite==False')

        ampNames = self.ampwiseKernels.keys()
        ampsToAverage = [amp for amp in ampNames if amp not in ampsToExclude]
        avgKernel = np.zeros_like(self.ampwiseKernels[ampsToAverage[0]])
        for ampName in ampsToAverage:
            avgKernel += self.ampwiseKernels[ampName]
        avgKernel /= len(ampsToAverage)

        self.detectorKernelFromAmpKernels[detectorName] = avgKernel
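
# A short sketch (hypothetical amp names and kernel values) of how the
# container averages per-amp kernels into a detector kernel:
#
#     bfk = BrighterFatterKernel('AMP')
#     bfk.ampwiseKernels = {'C00': np.ones((17, 17)),
#                           'C01': 3 * np.ones((17, 17))}
#     bfk.makeDetectorKernelFromAmpwiseKernels('R00_S00')
#     # bfk.detectorKernelFromAmpKernels['R00_S00'] == 2 * np.ones((17, 17))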


@dataclass
class BrighterFatterGain:
    """The gains and the results of the PTC fits."""
    gains: dict
    ptcResults: dict


class MakeBrighterFatterKernelTask(pipeBase.CmdLineTask):
    """Brighter-fatter effect correction-kernel calculation task.

    A command line task for calculating the brighter-fatter correction
    kernel from pairs of flat-field images (with the same exposure length).

    The following operations are performed:

    - The configurable isr task is called, which unpersists and assembles the
      raw images, and performs the selected instrument signature removal tasks.
      For the purpose of brighter-fatter coefficient calculation it is
      essential that certain components of isr are *not* performed, and
      recommended that certain others are. The task checks the selected isr
      configuration before it is run, and if forbidden components have been
      selected the task will raise, and if recommended ones have not been
      selected, warnings are logged.

    - The gain of each amplifier in the detector is calculated using
      the photon transfer curve (PTC) method and used to correct the images
      so that all calculations are done in units of electrons, and so that the
      level across amplifier boundaries is continuous.
      Outliers in the PTC are iteratively rejected
      before fitting, with the nSigma rejection level set by
      config.nSigmaClipRegression. Outlying pixels in the input images are
      ignored based on config.nSigmaClipGainCalc.

    - Each image is then cross-correlated with the one it's paired with
      (with the pairing defined by the --visit-pairs command line argument),
      which is done either whole-image to whole-image,
      or amplifier-by-amplifier, depending on config.level.

    - Once the cross-correlations have been calculated for each visit pair,
      these are used to generate the correction kernel.
      The maximum lag used, in pixels, and hence the half-size of the
      kernel generated, is given by config.maxLag,
      i.e. a value of maxLag = 10 will result in a kernel of size
      2*maxLag + 1 = 21x21 pixels.
      Outlier values in these cross-correlations are rejected by applying
      pixel-wise sigma-clipped thresholding to each cross-correlation in
      the visit-pairs-length stack of cross-correlations.
      The number of sigma clipped to is set by config.nSigmaClipKernelGen.

    - Once DM-15277 has been completed, a method will exist to calculate the
      empirical correction factor, config.biasCorr.
      TODO: DM-15277 update this part of the docstring once the ticket is done.
    """

    RunnerClass = PairedVisitListTaskRunner
    ConfigClass = MakeBrighterFatterKernelTaskConfig
    _DefaultName = "makeBrighterFatterKernel"

    def __init__(self, *args, **kwargs):
        pipeBase.CmdLineTask.__init__(self, *args, **kwargs)
        self.makeSubtask("isr")

        self.debug = lsstDebug.Info(__name__)
        if self.debug.enabled:
            self.log.info("Running with debug enabled...")
            # If we're displaying, test it works and save displays for later.
            # It's worth testing here as displays are flaky and sometimes
            # can't be contacted, and given processing takes a while,
            # it's a shame to fail late due to display issues.
            if self.debug.display:
                try:
                    afwDisp.setDefaultBackend(self.debug.displayBackend)
                    afwDisp.Display.delAllDisplays()
                    self.disp1 = afwDisp.Display(0, open=True)
                    self.disp2 = afwDisp.Display(1, open=True)

                    im = afwImage.ImageF(1, 1)
                    im.array[:] = [[1]]
                    self.disp1.mtv(im)
                    self.disp1.erase()
                except NameError:
                    self.debug.display = False
                    self.log.warn('Failed to setup/connect to display! Debug display has been disabled')

        plt.interactive(False)  # stop windows popping up when plotting. When headless, use 'agg' backend too
        self.validateIsrConfig()
        self.config.validate()
        self.config.freeze()

    @classmethod
    def _makeArgumentParser(cls):
        """Augment argument parser for the MakeBrighterFatterKernelTask."""
        parser = pipeBase.ArgumentParser(name=cls._DefaultName)
        parser.add_argument("--visit-pairs", dest="visitPairs", nargs="*",
                            help="Visit pairs to use. Each pair must be of the form INT,INT e.g. 123,456")
        parser.add_id_argument("--id", datasetType="brighterFatterKernel",
                               ContainerClass=BrighterFatterKernelTaskDataIdContainer,
                               help="The ccds to use, e.g. --id ccd=0..100")
        return parser
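
    # Example invocation (hypothetical repo path and visit numbers), assuming
    # the usual CmdLineTask command-line entry point for this task:
    #
    #     makeBrighterFatterKernel.py /path/to/repo --rerun bfk \
    #         --id ccd=0..8 --visit-pairs 123,124 125,126 127,128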

    def validateIsrConfig(self):
        """Check that appropriate ISR settings are being used
        for brighter-fatter kernel calculation."""

        # How should we handle saturation/bad regions?
        # 'doSaturationInterpolation': True
        # 'doNanInterpAfterFlat': False
        # 'doSaturation': True
        # 'doSuspect': True
        # 'doWidenSaturationTrails': True
        # 'doSetBadRegions': True

        configDict = self.config.isr.toDict()

        for configParam in self.config.isrMandatorySteps:
            if configDict[configParam] is False:
                raise RuntimeError('Must set config.isr.%s to True '
                                   'for brighter-fatter kernel calculation' % configParam)

        for configParam in self.config.isrForbiddenSteps:
            if configDict[configParam] is True:
                raise RuntimeError('Must set config.isr.%s to False '
                                   'for brighter-fatter kernel calculation' % configParam)

        for configParam in self.config.isrDesirableSteps:
            if configParam not in configDict:
                self.log.info('Failed to find key %s in the isr config dict. You probably want '
                              'to set the equivalent for your obs_package to True.' % configParam)
                continue
            if configDict[configParam] is False:
                self.log.warn('Found config.isr.%s set to False for brighter-fatter kernel calculation. '
                              'It is probably desirable to have this set to True' % configParam)

        # subtask settings
        if not self.config.isr.assembleCcd.doTrim:
            raise RuntimeError('Must trim when assembling CCDs. Set config.isr.assembleCcd.doTrim to True')

    @pipeBase.timeMethod
    def runDataRef(self, dataRef, visitPairs):
        """Run the brighter-fatter measurement task.

        For a dataRef (which is each detector here),
        and given a list of visit pairs, calculate the
        brighter-fatter kernel for the detector.

        Parameters
        ----------
        dataRef : `lsst.daf.persistence.ButlerDataRef`
            dataRef for the detector for the visits to be fit.
        visitPairs : `iterable` of `tuple` of `int`
            Pairs of visit numbers to be processed together
        """
        np.random.seed(0)  # used in the PTC fit bootstrap

        # setup necessary objects
        # NB: don't use dataRef.get('raw_detector')
        # this currently doesn't work for composites because of the way
        # composite objects (i.e. LSST images) are handled/constructed
        # these need to be retrieved from the camera and dereferenced
        # rather than accessed directly
        detNum = dataRef.dataId[self.config.ccdKey]
        detector = dataRef.get('camera')[dataRef.dataId[self.config.ccdKey]]
        amps = detector.getAmplifiers()
        ampNames = [amp.getName() for amp in amps]

        if self.config.level == 'DETECTOR':
            kernels = {detNum: []}
            means = {detNum: []}
            xcorrs = {detNum: []}
            meanXcorrs = {detNum: []}
        elif self.config.level == 'AMP':
            kernels = {key: [] for key in ampNames}
            means = {key: [] for key in ampNames}
            xcorrs = {key: [] for key in ampNames}
            meanXcorrs = {key: [] for key in ampNames}
        else:
            raise RuntimeError("Unsupported level: {}".format(self.config.level))

        # we must be able to get the gains one way or the other, so check early
        if not self.config.doCalcGains:
            deleteMe = None
            try:
                deleteMe = dataRef.get('photonTransferCurveDataset')
            except butlerExceptions.NoResults:
                try:
                    deleteMe = dataRef.get('brighterFatterGain')
                except butlerExceptions.NoResults:
                    pass
            if not deleteMe:
                raise RuntimeError("doCalcGains == False and gains could not be retrieved from"
                                   " the butler") from None
            else:
                del deleteMe

        # if the level is DETECTOR we need to have the gains first so that each
        # amp can be gain corrected in order to treat the detector as a single
        # imaging area. However, if the level is AMP we can wait, calculate
        # the correlations and correct for the gains afterwards
        if self.config.level == 'DETECTOR':
            if self.config.doCalcGains:
                self.log.info('Computing gains for detector %s' % detNum)
                gains, nomGains = self.estimateGains(dataRef, visitPairs)
                dataRef.put(gains, datasetType='brighterFatterGain')
                self.log.debug('Finished gain estimation for detector %s' % detNum)
            else:
                gains = dataRef.get('brighterFatterGain')
                if not gains:
                    raise RuntimeError('Failed to retrieve gains for detector %s' % detNum)
                self.log.info('Retrieved stored gain for detector %s' % detNum)
            self.log.debug('Detector %s has gains %s' % (detNum, gains))
        else:  # we fake the gains as 1 for now, and correct later
            gains = BrighterFatterGain({}, {})
            for ampName in ampNames:
                gains.gains[ampName] = 1.0
        # We'll use the ptc.py code to calculate the gains, so we set this up
        ptcConfig = MeasurePhotonTransferCurveTaskConfig()
        ptcConfig.isrForbiddenSteps = []
        ptcConfig.doFitBootstrap = True
        ptcConfig.ptcFitType = 'POLYNOMIAL'  # default Astier doesn't work for gain correction
        ptcConfig.polynomialFitDegree = 3
        ptcConfig.minMeanSignal = self.config.minMeanSignal
        ptcConfig.maxMeanSignal = self.config.maxMeanSignal
        ptcTask = MeasurePhotonTransferCurveTask(config=ptcConfig)
        ptcDataset = PhotonTransferCurveDataset(ampNames)

        # Loop over pairs of visits
        # calculating the cross-correlations at the required level
        for (v1, v2) in visitPairs:
            dataRef.dataId['expId'] = v1
            exp1 = self.isr.runDataRef(dataRef).exposure
            dataRef.dataId['expId'] = v2
            exp2 = self.isr.runDataRef(dataRef).exposure
            del dataRef.dataId['expId']
            checkExpLengthEqual(exp1, exp2, v1, v2, raiseWithMessage=True)

            self.log.info('Preparing images for cross-correlation calculation for detector %s' % detNum)
            # note the shape of these returns depends on level
            _scaledMaskedIms1, _means1 = self._makeCroppedExposures(exp1, gains, self.config.level)
            _scaledMaskedIms2, _means2 = self._makeCroppedExposures(exp2, gains, self.config.level)

            # Compute the cross-correlation and means
            # at the appropriate config.level:
            # - "DETECTOR": one key, so compare the two visits to each other
            # - "AMP": n_amp keys, comparing each amplifier of one visit
            #          to the same amplifier in the visit it's paired with
            for det_object in _scaledMaskedIms1.keys():  # det_object is ampName or detName per level
                self.log.debug("Calculating correlations for %s" % det_object)
                _xcorr, _mean = self._crossCorrelate(_scaledMaskedIms1[det_object],
                                                     _scaledMaskedIms2[det_object])
                xcorrs[det_object].append(_xcorr)
                means[det_object].append([_means1[det_object], _means2[det_object]])
                if self.config.level != 'DETECTOR':
                    # Populate the ptcDataset for running fitting in the PTC task
                    expTime = exp1.getInfo().getVisitInfo().getExposureTime()
                    ptcDataset.rawExpTimes[det_object].append(expTime)
                    ptcDataset.rawMeans[det_object].append((_means1[det_object]
                                                            + _means2[det_object]) / 2.0)
                    ptcDataset.rawVars[det_object].append(_xcorr[0, 0] / 2.0)

        # TODO: DM-15305 improve debug functionality here.
        # This is position 1 for the removed code.

        # Save the raw means and xcorrs so we can look at them before any modifications
        rawMeans = copy.deepcopy(means)
        rawXcorrs = copy.deepcopy(xcorrs)

        # gains are always and only pre-applied for DETECTOR
        # so for all other levels we now calculate them from the correlations
        # and apply them
        if self.config.level != 'DETECTOR':
            if self.config.doCalcGains:  # Run the PTC task for calculating the gains, put results
                self.log.info('Calculating gains for detector %s using PTC task' % detNum)
                ptcDataset = ptcTask.fitPtc(ptcDataset, ptcConfig.ptcFitType)
                dataRef.put(ptcDataset, datasetType='photonTransferCurveDataset')
                self.log.debug('Finished gain estimation for detector %s' % detNum)
            else:  # load results - confirmed to work much earlier on, so can be relied upon here
                ptcDataset = dataRef.get('photonTransferCurveDataset')

            self._applyGains(means, xcorrs, ptcDataset)

            if self.config.doPlotPtcs:
                dirname = dataRef.getUri(datasetType='cpPipePlotRoot', write=True)
                if not os.path.exists(dirname):
                    os.makedirs(dirname)
                detNum = dataRef.dataId[self.config.ccdKey]
                filename = f"PTC_det{detNum}.pdf"
                filenameFull = os.path.join(dirname, filename)
                with PdfPages(filenameFull) as pdfPages:
                    ptcTask._plotPtc(ptcDataset, ptcConfig.ptcFitType, pdfPages)

        # having calculated and applied the gains for all code-paths we can now
        # generate the kernel(s)
        self.log.info('Generating kernel(s) for %s' % detNum)
        for det_object in xcorrs.keys():  # looping over either detectors or amps
            if self.config.level == 'DETECTOR':
                objId = 'detector %s' % det_object
            elif self.config.level == 'AMP':
                objId = 'detector %s AMP %s' % (detNum, det_object)

            try:
                meanXcorr, kernel = self.generateKernel(xcorrs[det_object], means[det_object], objId)
                kernels[det_object] = kernel
                meanXcorrs[det_object] = meanXcorr
            except RuntimeError:
                # bad amps will cause failures here which we want to ignore
                self.log.warn('RuntimeError during kernel generation for %s' % objId)
                continue

        bfKernel = BrighterFatterKernel(self.config.level)
        bfKernel.means = means
        bfKernel.rawMeans = rawMeans
        bfKernel.rawXcorrs = rawXcorrs
        bfKernel.xCorrs = xcorrs
        bfKernel.meanXcorrs = meanXcorrs
        bfKernel.originalLevel = self.config.level
        try:
            bfKernel.gain = ptcDataset.gain
            bfKernel.gainErr = ptcDataset.gainErr
            bfKernel.noise = ptcDataset.noise
            bfKernel.noiseErr = ptcDataset.noiseErr
        except NameError:  # we don't have a ptcDataset to store results from
            pass

        if self.config.level == 'AMP':
            bfKernel.ampwiseKernels = kernels
            ex = self.config.ignoreAmpsForAveraging
            # this populates bfKernel.detectorKernelFromAmpKernels; it has no return value
            bfKernel.makeDetectorKernelFromAmpwiseKernels(detNum, ampsToExclude=ex)
        elif self.config.level == 'DETECTOR':
            bfKernel.detectorKernel = kernels
        else:
            raise RuntimeError('Invalid level for kernel calculation; this should not be possible.')

        dataRef.put(bfKernel)

        self.log.info('Finished generating kernel(s) for %s' % detNum)
        return pipeBase.Struct(exitStatus=0)

    def _applyGains(self, means, xcorrs, ptcData):
        """Apply the gains calculated by the PtcTask.

        It also removes datapoints that were thrown out in the PTC algorithm.

        Parameters
        ----------
        means : `dict` [`str`, `list` of `tuple`]
            Dictionary, keyed by ampName, containing a list of the means for
            each visit pair.
        xcorrs : `dict` [`str`, `list` of `np.array`]
            Dictionary, keyed by ampName, containing a list of the
            cross-correlations for each visit pair.
        ptcData : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
            The results of running the ptcTask.
        """
        ampNames = means.keys()
        assert set(xcorrs.keys()) == set(ampNames) == set(ptcData.ampNames)

        for ampName in ampNames:
            mask = ptcData.expIdMask[ampName]
            gain = ptcData.gain[ampName]

            fitType = ptcData.ptcFitType[ampName]
            if fitType != 'POLYNOMIAL':
                raise RuntimeError(f"Only polynomial fit types supported currently, found {fitType}")
            ptcFitPars = ptcData.ptcFitPars[ampName]
            # polynomial params go in ascending order, so this is safe w.r.t.
            # the polynomial order, as the constant term is always first,
            # the linear term second etc

            # Adjust xcorrs[0,0] to remove the linear gain part, leaving just the second order part
            for i in range(len(means[ampName])):
                ampMean = np.mean(means[ampName][i])
                xcorrs[ampName][i][0, 0] -= 2.0 * (ampMean * ptcFitPars[1] + ptcFitPars[0])

            # Now adjust the means and xcorrs for the calculated gain and remove the bad indices
            means[ampName] = [[value*gain for value in pair] for pair in np.array(means[ampName])[mask]]
            xcorrs[ampName] = [arr*gain*gain for arr in np.array(xcorrs[ampName])[mask]]
        return
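
    # Dimensional sanity sketch (hypothetical numbers): with gain g in e-/ADU,
    # the means pick up one factor of g, and the correlations (products of two
    # pixel values) pick up g**2:
    #
    #     g = 1.5
    #     means_adu = [100.0, 102.0]                 # per-image means, ADU
    #     xcorr00_adu = 210.0                        # zero-lag correlation, ADU**2
    #     means_el = [m * g for m in means_adu]      # [150.0, 153.0] electrons
    #     xcorr00_el = xcorr00_adu * g * g           # 472.5 electrons**2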

    def _makeCroppedExposures(self, exp, gains, level):
        """Prepare exposure for cross-correlation calculation.

        For each amp, crop by the border amount, specified by
        config.nPixBorderXCorr, then rescale by the gain
        and subtract the sigma-clipped mean.
        If the level is 'DETECTOR' then this is done
        to the whole image so that it can be cross-correlated, with a copy
        being returned.
        If the level is 'AMP' then this is done per-amplifier,
        and a copy of each prepared amp-image returned.

        Parameters
        ----------
        exp : `lsst.afw.image.exposure.ExposureF`
            The exposure to prepare
        gains : `lsst.cp.pipe.makeBrighterFatterKernel.BrighterFatterGain`
            The object holding the amplifier gains, essentially a
            dictionary of the amplifier gain values, keyed by amplifier name
        level : `str`
            Either `AMP` or `DETECTOR`

        Returns
        -------
        scaledMaskedIms : `dict` [`str`, `lsst.afw.image.maskedImage.MaskedImageF`]
            Depending on level, this is either one item, or n_amp items,
            keyed by detectorId or ampName
        means : `dict` [`str`, `float`]
            The sigma-clipped means, keyed the same way as scaledMaskedIms

        Notes
        -----
        This function is controlled by the following config parameters:
        nPixBorderXCorr : `int`
            The number of border pixels to exclude
        nSigmaClipXCorr : `float`
            The number of sigma to be clipped to
        """
        assert(isinstance(exp, afwImage.ExposureF))

        local_exp = exp.clone()  # we don't want to modify the image passed in
        del exp  # ensure we don't make mistakes!

        border = self.config.nPixBorderXCorr
        sigma = self.config.nSigmaClipXCorr

        sctrl = afwMath.StatisticsControl()
        sctrl.setNumSigmaClip(sigma)

        means = {}
        returnAreas = {}

        detector = local_exp.getDetector()
        amps = detector.getAmplifiers()

        mi = local_exp.getMaskedImage()  # makeStatistics does not seem to take exposures
        temp = mi.clone()

        # Rescale each amp by the appropriate gain and subtract the mean.
        # NB these are views modifying the image in-place
        for amp in amps:
            ampName = amp.getName()
            rescaleIm = mi[amp.getBBox()]  # the soon-to-be scaled, mean subtracted, amp image
            rescaleTemp = temp[amp.getBBox()]
            mean = afwMath.makeStatistics(rescaleIm, afwMath.MEANCLIP, sctrl).getValue()
            gain = gains.gains[ampName]
            rescaleIm *= gain
            rescaleTemp *= gain
            self.log.debug("mean*gain = %s, clipped mean = %s" %
                           (mean*gain, afwMath.makeStatistics(rescaleIm, afwMath.MEANCLIP,
                                                              sctrl).getValue()))
            rescaleIm -= mean*gain

            if level == 'AMP':  # build the dicts if doing amp-wise
                means[ampName] = afwMath.makeStatistics(rescaleTemp[border: -border, border: -border,
                                                                    afwImage.LOCAL],
                                                        afwMath.MEANCLIP, sctrl).getValue()
                returnAreas[ampName] = rescaleIm

        if level == 'DETECTOR':  # else just average the whole detector
            detName = local_exp.getDetector().getId()
            means[detName] = afwMath.makeStatistics(temp[border: -border, border: -border, afwImage.LOCAL],
                                                    afwMath.MEANCLIP, sctrl).getValue()
            returnAreas[detName] = mi  # the whole gain-corrected, mean-subtracted image

        return returnAreas, means

    def _crossCorrelate(self, maskedIm0, maskedIm1, runningBiasCorrSim=False, frameId=None, detId=None):
        """Calculate the cross-correlation of an area.

        If the area in question contains multiple amplifiers then they must
        have been gain corrected.

        Parameters
        ----------
        maskedIm0 : `lsst.afw.image.MaskedImageF`
            The first image area
        maskedIm1 : `lsst.afw.image.MaskedImageF`
            The second image area
        frameId : `str`, optional
            The frame identifier for use in the filename
            if writing debug outputs.
        detId : `str`, optional
            The detector identifier (detector, or detector+amp,
            depending on config.level) for use in the filename
            if writing debug outputs.
        runningBiasCorrSim : `bool`
            Set to true when using this function to calculate the amount of
            bias introduced by the sigma clipping. If False, the biasCorr
            parameter is divided by to remove the bias, but this is, of
            course, not appropriate when this is the parameter being measured.

        Returns
        -------
        xcorr : `np.ndarray`
            The quarter-image cross-correlation
        mean : `float`
            The sum of the means of the input images,
            sigma-clipped, and with borders applied.
            This is used when using this function with simulations to
            calculate the biasCorr parameter.

        Notes
        -----
        This function is controlled by the following config parameters:
        maxLag : `int`
            The maximum lag to use in the cross-correlation calculation
        nPixBorderXCorr : `int`
            The number of border pixels to exclude
        nSigmaClipXCorr : `float`
            The number of sigma to be clipped to
        biasCorr : `float`
            Parameter used to correct from the bias introduced
            by the sigma cuts.
        """
        maxLag = self.config.maxLag
        border = self.config.nPixBorderXCorr
        sigma = self.config.nSigmaClipXCorr
        biasCorr = self.config.biasCorr

        sctrl = afwMath.StatisticsControl()
        sctrl.setNumSigmaClip(sigma)

        mean = afwMath.makeStatistics(maskedIm0.getImage()[border: -border, border: -border,
                                                           afwImage.LOCAL],
                                      afwMath.MEANCLIP, sctrl).getValue()
        mean += afwMath.makeStatistics(maskedIm1.getImage()[border: -border, border: -border,
                                                            afwImage.LOCAL],
                                       afwMath.MEANCLIP, sctrl).getValue()

        # Diff the images, and apply border
        diff = maskedIm0.clone()
        diff -= maskedIm1.getImage()
        diff = diff[border: -border, border: -border, afwImage.LOCAL]

        if self.debug.writeDiffImages:
            filename = '_'.join(['diff', 'detector', detId, frameId, '.fits'])
            diff.writeFits(os.path.join(self.debug.debugDataPath, filename))

        # Subtract background. It should be a constant, but it isn't always
        binsize = self.config.backgroundBinSize
        nx = diff.getWidth()//binsize
        ny = diff.getHeight()//binsize
        bctrl = afwMath.BackgroundControl(nx, ny, sctrl, afwMath.MEANCLIP)
        bkgd = afwMath.makeBackground(diff, bctrl)
        bgImg = bkgd.getImageF(afwMath.Interpolate.CUBIC_SPLINE, afwMath.REDUCE_INTERP_ORDER)
        bgMean = np.mean(bgImg.getArray())
        if abs(bgMean) >= self.config.backgroundWarnLevel:
            self.log.warn('Mean of background = %s > config.backgroundWarnLevel' % bgMean)

        diff -= bgImg

        if self.debug.writeDiffImages:
            filename = '_'.join(['bgSub', 'diff', 'detector', detId, frameId, '.fits'])
            diff.writeFits(os.path.join(self.debug.debugDataPath, filename))
        if self.debug.display:
            self.disp1.mtv(diff, title=frameId)

        self.log.debug("Median and variance of diff:")
        self.log.debug("%s" % afwMath.makeStatistics(diff, afwMath.MEDIAN, sctrl).getValue())
        self.log.debug("%s, %s" % (afwMath.makeStatistics(diff, afwMath.VARIANCECLIP, sctrl).getValue(),
                                   np.var(diff.getImage().getArray())))

        # Measure the correlations
        dim0 = diff[0: -maxLag, : -maxLag, afwImage.LOCAL]
        dim0 -= afwMath.makeStatistics(dim0, afwMath.MEANCLIP, sctrl).getValue()
        width, height = dim0.getDimensions()
        xcorr = np.zeros((maxLag + 1, maxLag + 1), dtype=np.float64)

        for xlag in range(maxLag + 1):
            for ylag in range(maxLag + 1):
                dim_xy = diff[xlag:xlag + width, ylag: ylag + height, afwImage.LOCAL].clone()
                dim_xy -= afwMath.makeStatistics(dim_xy, afwMath.MEANCLIP, sctrl).getValue()
                dim_xy *= dim0
                xcorr[xlag, ylag] = afwMath.makeStatistics(dim_xy, afwMath.MEANCLIP, sctrl).getValue()
                if not runningBiasCorrSim:
                    xcorr[xlag, ylag] /= biasCorr

        # TODO: DM-15305 improve debug functionality here.
        # This is position 2 for the removed code.

        return xcorr, mean
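
    # The double loop above computes, for each lag (xlag, ylag) up to maxLag,
    # a sigma-clipped mean of diff[x, y] * diff[x + xlag, y + ylag]. A
    # plain-numpy sketch of the same quantity (hypothetical; no clipping, no
    # afw image classes; note numpy indexes [row, col] = [y, x]):
    #
    #     arr = diff.getImage().getArray()
    #     h, w = arr.shape[0] - maxLag, arr.shape[1] - maxLag
    #     base = arr[:h, :w] - arr[:h, :w].mean()
    #     shift = arr[ylag:ylag + h, xlag:xlag + w]
    #     shift = shift - shift.mean()
    #     xcorr[xlag, ylag] = (base * shift).mean() / biasCorr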

    def estimateGains(self, dataRef, visitPairs):
        """Estimate the amplifier gains using the specified visits.

        Given a dataRef and list of flats of varying intensity,
        calculate the gain for each amplifier in the detector
        using the photon transfer curve (PTC) method.

        The config.fixPtcThroughOrigin option determines whether the iterative
        fitting is forced to go through the origin or not.
        This defaults to True, fitting var=1/gain * mean.
        If set to False then var=1/g * mean + const is fitted.

        This is really a photon transfer curve (PTC) gain measurement task.
        See DM-14063 for results of a comparison between
        this task's numbers and the gain values in the HSC camera model,
        and those measured by the PTC task in eotest.

        Parameters
        ----------
        dataRef : `lsst.daf.persistence.butler.Butler.dataRef`
            dataRef for the detector for the flats to be used
        visitPairs : `list` of `tuple`
            List of visit-pairs to use, as [(v1,v2), (v3,v4)...]

        Returns
        -------
        gains : `lsst.cp.pipe.makeBrighterFatterKernel.BrighterFatterGain`
            Object holding the per-amplifier gains, essentially a
            dict of the as-calculated amplifier gain values, keyed by amp name
        nominalGains : `dict` [`str`, `float`]
            Dict of the amplifier gains, as reported by the `detector` object,
            keyed by amplifier name
        """
        # NB: don't use dataRef.get('raw_detector') due to composites
        detector = dataRef.get('camera')[dataRef.dataId[self.config.ccdKey]]
        amps = detector.getAmplifiers()
        ampNames = [amp.getName() for amp in amps]

        ampMeans = {key: [] for key in ampNames}  # these get turned into np.arrays later
        ampCoVariances = {key: [] for key in ampNames}
        ampVariances = {key: [] for key in ampNames}

        # Loop over the amps in the detector,
        # calculating a PTC for each amplifier.
        # The amplifier iteration is performed in _calcMeansAndVars()
        # NB: no gain correction is applied
        for visPairNum, visPair in enumerate(visitPairs):
            _means, _vars, _covars = self._calcMeansAndVars(dataRef, visPair[0], visPair[1])

            # Do sanity checks; if these are failed more investigation is needed
            breaker = 0
            for amp in detector:
                ampName = amp.getName()
                if _means[ampName]*10 < _vars[ampName] or _means[ampName]*10 < _covars[ampName]:
                    msg = 'Sanity check failed; check visit pair %s amp %s' % (visPair, ampName)
                    self.log.warn(msg)
                    breaker += 1
            if breaker:
                continue

            # having made sanity checks
            # pull the values out into the respective dicts
            for k in _means.keys():  # keys are necessarily the same
                if _vars[k]*1.3 < _covars[k] or _vars[k]*0.7 > _covars[k]:
                    self.log.warn('Dropped a value')
                    continue
                ampMeans[k].append(_means[k])
                ampVariances[k].append(_vars[k])
                ampCoVariances[k].append(_covars[k])

        gains = {}
        nomGains = {}
        ptcResults = {}
        for amp in detector:
            ampName = amp.getName()
            if ampMeans[ampName] == []:  # all the data was dropped, amp is presumed bad
                gains[ampName] = 1.0
                ptcResults[ampName] = (0, 0, 1, 0)
                continue

            nomGains[ampName] = amp.getGain()
            slopeRaw, interceptRaw, rVal, pVal, stdErr = \
                stats.linregress(np.asarray(ampMeans[ampName]), np.asarray(ampCoVariances[ampName]))
            slopeFix, _ = self._iterativeRegression(np.asarray(ampMeans[ampName]),
                                                    np.asarray(ampCoVariances[ampName]),
                                                    fixThroughOrigin=True)
            slopeUnfix, intercept = self._iterativeRegression(np.asarray(ampMeans[ampName]),
                                                              np.asarray(ampCoVariances[ampName]),
                                                              fixThroughOrigin=False)
            self.log.info("Slope of raw fit: %s, intercept: %s p value: %s" % (slopeRaw,
                                                                               interceptRaw, pVal))
            self.log.info("slope of fixed fit: %s, difference vs raw:%s" % (slopeFix,
                                                                            slopeFix - slopeRaw))
            self.log.info("slope of unfixed fit: %s, difference vs fix:%s" % (slopeUnfix,
                                                                              slopeFix - slopeUnfix))
            if self.config.fixPtcThroughOrigin:
                slopeToUse = slopeFix
            else:
                slopeToUse = slopeUnfix

            if self.debug.enabled:
                fig = plt.figure()
                ax = fig.add_subplot(111)
                ax.plot(np.asarray(ampMeans[ampName]),
                        np.asarray(ampCoVariances[ampName]), linestyle='None', marker='x', label='data')
                if self.config.fixPtcThroughOrigin:
                    ax.plot(np.asarray(ampMeans[ampName]),
                            np.asarray(ampMeans[ampName])*slopeToUse, label='Fit through origin')
                else:
                    ax.plot(np.asarray(ampMeans[ampName]),
                            np.asarray(ampMeans[ampName])*slopeToUse + intercept,
                            label='Fit (intercept unconstrained)')

                dataRef.put(fig, "plotBrighterFatterPtc", amp=ampName)
                self.log.info('Saved PTC for detector %s amp %s' % (detector.getId(), ampName))
            gains[ampName] = 1.0/slopeToUse
            # TODO: change the fit to use a cubic and match parameters with the
            # Lage method, or better, use the PTC task here too
            ptcResults[ampName] = (0, 0, 1, 0)

        return BrighterFatterGain(gains, ptcResults), nomGains
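
    # PTC arithmetic sketch (hypothetical numbers): with the fit
    # var = (1/gain) * mean, a fitted slope of 0.625 (variance per unit mean)
    # implies gain = 1 / 0.625 = 1.6 e-/ADU.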

    def _calcMeansAndVars(self, dataRef, v1, v2):
        """Calculate the means, vars, covars, and retrieve the nominal gains,
        for each amp in each detector.

        This code runs using two visit numbers, and for the detector specified.
        It calculates the correlations in the individual amps without
        rescaling any gains. This allows a photon transfer curve
        to be generated and the gains measured.

        Images are assembled using the isrTask, and basic isr is performed.

        Parameters
        ----------
        dataRef : `lsst.daf.persistence.butler.Butler.dataRef`
            dataRef for the detector for the repo containing the flats to be
            used
        v1 : `int`
            First visit of the visit pair
        v2 : `int`
            Second visit of the visit pair

        Returns
        -------
        means, vars, covars : `tuple` of `dict`
            Three dicts, keyed by ampName,
            containing the sum of the image-means,
            the variance, and the quarter-image of the xcorr.
        """
        sigma = self.config.nSigmaClipGainCalc
        maxLag = self.config.maxLag
        border = self.config.nPixBorderGainCalc
        biasCorr = self.config.biasCorr

        # NB: don't use dataRef.get('raw_detector') due to composites
        detector = dataRef.get('camera')[dataRef.dataId[self.config.ccdKey]]

        ampMeans = {}

        # manipulate the dataId to get a postISR exposure for each visit
        # from the detector obj, restoring its original state afterwards
        originalDataId = dataRef.dataId.copy()
        dataRef.dataId['expId'] = v1
        exp1 = self.isr.runDataRef(dataRef).exposure
        dataRef.dataId['expId'] = v2
        exp2 = self.isr.runDataRef(dataRef).exposure
        dataRef.dataId = originalDataId
        exps = [exp1, exp2]
        checkExpLengthEqual(exp1, exp2, v1, v2, raiseWithMessage=True)

        detector = exps[0].getDetector()
        ims = [self._convertImagelikeToFloatImage(exp) for exp in exps]

        if self.debug.display:
            self.disp1.mtv(ims[0], title=str(v1))
            self.disp2.mtv(ims[1], title=str(v2))

        sctrl = afwMath.StatisticsControl()
        sctrl.setNumSigmaClip(sigma)
        for imNum, im in enumerate(ims):

            # calculate the sigma-clipped mean, excluding the borders
            # safest to apply borders to all amps regardless of edges
            # easier, camera-agnostic, and mitigates potentially dodgy
            # overscan-biases around edges as well
            for amp in detector:
                ampName = amp.getName()
                ampIm = im[amp.getBBox()]
                mean = afwMath.makeStatistics(ampIm[border: -border, border: -border, afwImage.LOCAL],
                                              afwMath.MEANCLIP, sctrl).getValue()
                if ampName not in ampMeans.keys():
                    ampMeans[ampName] = []
                ampMeans[ampName].append(mean)
                ampIm -= mean

        diff = ims[0].clone()
        diff -= ims[1]

        temp = diff[border: -border, border: -border, afwImage.LOCAL]

        # Subtract background. It should be a constant,
        # but it isn't always (e.g. some SuprimeCam flats)
        # TODO: Check how this looks, and if this is the "right" way to do this
        binsize = self.config.backgroundBinSize
        nx = temp.getWidth()//binsize
        ny = temp.getHeight()//binsize
        bctrl = afwMath.BackgroundControl(nx, ny, sctrl, afwMath.MEANCLIP)
        bkgd = afwMath.makeBackground(temp, bctrl)

        box = diff.getBBox()
        box.grow(-border)
        diff[box, afwImage.LOCAL] -= bkgd.getImageF(afwMath.Interpolate.CUBIC_SPLINE,
                                                    afwMath.REDUCE_INTERP_ORDER)

        variances = {}
        coVars = {}
        for amp in detector:
            ampName = amp.getName()
            diffAmpIm = diff[amp.getBBox()].clone()
            diffAmpImCrop = diffAmpIm[border: -border - maxLag, border: -border - maxLag, afwImage.LOCAL]
            diffAmpImCrop -= afwMath.makeStatistics(diffAmpImCrop, afwMath.MEANCLIP, sctrl).getValue()
            w, h = diffAmpImCrop.getDimensions()
            xcorr = np.zeros((maxLag + 1, maxLag + 1), dtype=np.float64)

            # calculate the cross-correlation
            for xlag in range(maxLag + 1):
                for ylag in range(maxLag + 1):
                    dim_xy = diffAmpIm[border + xlag: border + xlag + w,
                                       border + ylag: border + ylag + h,
                                       afwImage.LOCAL].clone()
                    dim_xy -= afwMath.makeStatistics(dim_xy, afwMath.MEANCLIP, sctrl).getValue()
                    dim_xy *= diffAmpImCrop
                    xcorr[xlag, ylag] = afwMath.makeStatistics(dim_xy,
                                                               afwMath.MEANCLIP, sctrl).getValue()/(biasCorr)

            variances[ampName] = xcorr[0, 0]
            xcorr_full = self._tileArray(xcorr)
            coVars[ampName] = np.sum(xcorr_full)

            msg = "M1: " + str(ampMeans[ampName][0])
            msg += " M2 " + str(ampMeans[ampName][1])
            msg += " M_sum: " + str((ampMeans[ampName][0]) + ampMeans[ampName][1])
            msg += " Var " + str(variances[ampName])
            msg += " coVar: " + str(coVars[ampName])
            self.log.debug(msg)

        means = {}
        for amp in detector:
            ampName = amp.getName()
            means[ampName] = ampMeans[ampName][0] + ampMeans[ampName][1]

        return means, variances, coVars

    def _plotXcorr(self, xcorr, mean, zmax=0.05, title=None, fig=None, saveToFileName=None):
        """Plot the correlation functions."""
        try:
            xcorr = xcorr.getArray()
        except Exception:
            pass

        xcorr /= float(mean)
        # xcorr.getArray()[0,0]=abs(xcorr.getArray()[0,0]-1)

        if fig is None:
            fig = plt.figure()
        else:
            fig.clf()

        ax = fig.add_subplot(111, projection='3d')
        ax.azim = 30
        ax.elev = 20

        nx, ny = np.shape(xcorr)

        xpos, ypos = np.meshgrid(np.arange(nx), np.arange(ny))
        xpos = xpos.flatten()
        ypos = ypos.flatten()
        zpos = np.zeros(nx*ny)
        dz = xcorr.flatten()
        dz[dz > zmax] = zmax

        ax.bar3d(xpos, ypos, zpos, 1, 1, dz, color='b', zsort='max', sort_zpos=100)
        if xcorr[0, 0] > zmax:
            ax.bar3d([0], [0], [zmax], 1, 1, 1e-4, color='c')

        ax.set_xlabel("row")
        ax.set_ylabel("column")
        ax.set_zlabel(r"$\langle{(F_i - \bar{F})(F_i - \bar{F})}\rangle/\bar{F}$")

        if title:
            fig.suptitle(title)
        if saveToFileName:
            fig.savefig(saveToFileName)

    def _iterativeRegression(self, x, y, fixThroughOrigin=False, nSigmaClip=None, maxIter=None):
        """Use linear regression to fit a line, iteratively removing outliers.

        Useful when you have a sufficiently large number of points on your PTC.
        This function iterates until either there are no outliers of
        config.nSigmaClip magnitude, or until the specified maximum number
        of iterations has been performed.

        Parameters
        ----------
        x : `numpy.array`
            The independent variable. Must be a numpy array, not a list.
        y : `numpy.array`
            The dependent variable. Must be a numpy array, not a list.
        fixThroughOrigin : `bool`, optional
            Whether to fix the PTC through the origin or allow a y-intercept.
        nSigmaClip : `float`, optional
            The number of sigma to clip to.
            Taken from the task config if not specified.
        maxIter : `int`, optional
            The maximum number of iterations allowed.
            Taken from the task config if not specified.

        Returns
        -------
        slope : `float`
            The slope of the line of best fit
        intercept : `float`
            The y-intercept of the line of best fit
        """
        if not maxIter:
            maxIter = self.config.maxIterRegression
        if not nSigmaClip:
            nSigmaClip = self.config.nSigmaClipRegression

        nIter = 0
        sctrl = afwMath.StatisticsControl()
        sctrl.setNumSigmaClip(nSigmaClip)

        if fixThroughOrigin:
            while nIter < maxIter:
                nIter += 1
                self.log.debug("Origin fixed, iteration # %s using %s elements:" % (nIter, np.shape(x)[0]))
                TEST = x[:, np.newaxis]
                slope, _, _, _ = np.linalg.lstsq(TEST, y, rcond=None)
                slope = slope[0]
                res = (y - slope * x) / x
                resMean = afwMath.makeStatistics(res, afwMath.MEANCLIP, sctrl).getValue()
                resStd = np.sqrt(afwMath.makeStatistics(res, afwMath.VARIANCECLIP, sctrl).getValue())
                index = np.where((res > (resMean + nSigmaClip*resStd)) |
                                 (res < (resMean - nSigmaClip*resStd)))
                self.log.debug("%.3f %.3f %.3f %.3f" % (resMean, resStd, np.max(res), nSigmaClip))
                if np.shape(np.where(index))[1] == 0 or (nIter >= maxIter):  # run out of points or iters
                    break
                x = np.delete(x, index)
                y = np.delete(y, index)

            return slope, 0

        while nIter < maxIter:
            nIter += 1
            self.log.debug("Iteration # %s using %s elements:" % (nIter, np.shape(x)[0]))
            xx = np.vstack([x, np.ones(len(x))]).T
            ret, _, _, _ = np.linalg.lstsq(xx, y, rcond=None)
            slope, intercept = ret
            res = y - slope*x - intercept
            resMean = afwMath.makeStatistics(res, afwMath.MEANCLIP, sctrl).getValue()
            resStd = np.sqrt(afwMath.makeStatistics(res, afwMath.VARIANCECLIP, sctrl).getValue())
            index = np.where((res > (resMean + nSigmaClip * resStd)) | (res < resMean - nSigmaClip * resStd))
            self.log.debug("%.3f %.3f %.3f %.3f" % (resMean, resStd, np.max(res), nSigmaClip))
            if np.shape(np.where(index))[1] == 0 or (nIter >= maxIter):  # run out of points, or iterations
                break
            x = np.delete(x, index)
            y = np.delete(y, index)

        return slope, intercept
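
    # A minimal illustration (hypothetical data, given a configured task
    # instance `task`) of the iterative clipping: one gross outlier is
    # rejected on the first pass, leaving the underlying slope untouched:
    #
    #     x = np.arange(1.0, 101.0)
    #     y = 0.5 * x
    #     y[50] += 1000.0                                    # gross outlier
    #     slope, _ = task._iterativeRegression(x, y, fixThroughOrigin=True)
    #     # slope ~= 0.5 once the outlier has been clipped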

    def generateKernel(self, corrs, means, objId, rejectLevel=None):
        """Generate the full kernel from a list of cross-correlations and means.

        Taking a list of quarter-image, gain-corrected cross-correlations,
        do a pixel-wise sigma-clipped mean of each,
        and tile into the full-sized kernel image.

        Each corr in corrs is one quarter of the full cross-correlation,
        and has been gain-corrected. Each mean in means is a tuple of the means
        of the two individual images, corresponding to that corr.

        Parameters
        ----------
        corrs : `list` of `numpy.ndarray`, (Ny, Nx)
            A list of the quarter-image cross-correlations
        means : `list` of `tuples` of `floats`
            The means of the input images for each corr in corrs
        objId : `str`
            The object identifier, used in log messages
        rejectLevel : `float`, optional
            This is essentially a sanity-check parameter.
            If this condition is violated there is something unexpected
            going on in the image, and it is discarded from the stack before
            the clipped-mean is calculated.
            If not provided then config.xcorrCheckRejectLevel is used

        Returns
        -------
        kernel : `numpy.ndarray`, (Ny, Nx)
            The output kernel
        """
        self.log.info('Calculating kernel for %s' % objId)

        if not rejectLevel:
            rejectLevel = self.config.xcorrCheckRejectLevel

        if self.config.correlationQuadraticFit:
            xcorrList = []
            fluxList = []

            for corrNum, ((mean1, mean2), corr) in enumerate(zip(means, corrs)):
                msg = ('For item %s, initial corr[0,0] = %g, corr[1,0] = %g'
                       % (corrNum, corr[0, 0], corr[1, 0]))
                self.log.info(msg)
                if self.config.level == 'DETECTOR':
                    # This is now done in _applyGains() but only if level is not DETECTOR
                    corr[0, 0] -= (mean1 + mean2)
                fullCorr = self._tileArray(corr)

                # Craig Lage says he doesn't understand the negative sign, but it needs to be there
                xcorrList.append(-fullCorr / 2.0)
                flux = (mean1 + mean2) / 2.0
                fluxList.append(flux * flux)
                # We're using the linear fit algorithm to find a quadratic fit,
                # so we square the x-axis.
                # The step below does not need to be done, but is included
                # so that correlations can be compared
                # directly to existing code. We might want to take it out.
                corr /= -1.0*(mean1**2 + mean2**2)

            if not xcorrList:
                raise RuntimeError("Cannot generate kernel because all inputs were discarded. "
                                   "Either the data is bad, or config.xcorrCheckRejectLevel is too low")

            # This method fits a quadratic vs flux and keeps only the quadratic term.
            meanXcorr = np.zeros_like(fullCorr)
            xcorrList = np.asarray(xcorrList)

            for i in range(np.shape(meanXcorr)[0]):
                for j in range(np.shape(meanXcorr)[1]):
                    # Note the i,j inversion. This serves the same function as the transpose step in
                    # the base code. I don't understand why it is there, but I put it in to be consistent.
                    slopeRaw, interceptRaw, rVal, pVal, stdErr = stats.linregress(np.asarray(fluxList),
                                                                                  xcorrList[:, j, i])
                    try:
                        slope, intercept = self._iterativeRegression(np.asarray(fluxList),
                                                                     xcorrList[:, j, i],
                                                                     fixThroughOrigin=True)
                        msg = "(%s,%s):Slope of raw fit: %s, intercept: %s p value: %s" % (i, j, slopeRaw,
                                                                                           interceptRaw,
                                                                                           pVal)
                        self.log.debug(msg)
                        self.log.debug("(%s,%s):Slope of fixed fit: %s" % (i, j, slope))

                        meanXcorr[i, j] = slope
                    except ValueError:
                        meanXcorr[i, j] = slopeRaw

                    msg = f"i={i}, j={j}, slope = {meanXcorr[i, j]:.6g}, slopeRaw = {slopeRaw:.6g}"
                    self.log.debug(msg)
            center = np.shape(meanXcorr)[0] // 2
            self.log.info('Quad Fit meanXcorr[0,0] = %g, meanXcorr[1,0] = %g'
                          % (meanXcorr[center, center], meanXcorr[center + 1, center]))

        else:
            # Try to average over a set of possible inputs.
            # This generates a simple function of the kernel that
            # should be constant across the images, and averages that.
            xcorrList = []
            sctrl = afwMath.StatisticsControl()
            sctrl.setNumSigmaClip(self.config.nSigmaClipKernelGen)

            for corrNum, ((mean1, mean2), corr) in enumerate(zip(means, corrs)):
                corr[0, 0] -= (mean1 + mean2)
                if corr[0, 0] > 0:
                    self.log.warn('Skipped item %s due to unexpected value of (variance-mean)' % corrNum)
                    continue
                corr /= -1.0*(mean1**2 + mean2**2)

                fullCorr = self._tileArray(corr)

                xcorrCheck = np.abs(np.sum(fullCorr))/np.sum(np.abs(fullCorr))
                if xcorrCheck > rejectLevel:
                    self.log.warn("Sum of the xcorr is unexpectedly high. Investigate item num %s for %s. \n"
                                  "value = %s" % (corrNum, objId, xcorrCheck))
                    continue
                xcorrList.append(fullCorr)

            if not xcorrList:
                raise RuntimeError("Cannot generate kernel because all inputs were discarded. "
                                   "Either the data is bad, or config.xcorrCheckRejectLevel is too low")

            # stack the individual xcorrs and apply a per-pixel clipped-mean
            meanXcorr = np.zeros_like(fullCorr)
            xcorrList = np.transpose(xcorrList)
            for i in range(np.shape(meanXcorr)[0]):
                for j in range(np.shape(meanXcorr)[1]):
                    meanXcorr[i, j] = afwMath.makeStatistics(xcorrList[i, j],
                                                             afwMath.MEANCLIP, sctrl).getValue()

        if self.config.correlationModelRadius < (meanXcorr.shape[0] - 1) / 2:
            sumToInfinity = self._buildCorrelationModel(meanXcorr, self.config.correlationModelRadius,
                                                        self.config.correlationModelSlope)
            self.log.info("SumToInfinity = %s" % sumToInfinity)
        else:
            sumToInfinity = 0.0
        if self.config.forceZeroSum:
            self.log.info("Forcing sum of correlation matrix to zero")
            meanXcorr = self._forceZeroSum(meanXcorr, sumToInfinity)

        return meanXcorr, self.successiveOverRelax(meanXcorr)

    def successiveOverRelax(self, source, maxIter=None, eLevel=None):
        """An implementation of the successive over-relaxation (SOR) method.

        A numerical method for solving a system of linear equations
        with faster convergence than the Gauss-Seidel method.

        Parameters
        ----------
        source : `numpy.ndarray`
            The input array.
        maxIter : `int`, optional
            Maximum number of iterations to attempt before aborting.
        eLevel : `float`, optional
            The target error level at which we deem convergence to have
            occurred.

        Returns
        -------
        output : `numpy.ndarray`
            The solution.
        """
        if not maxIter:
            maxIter = self.config.maxIterSuccessiveOverRelaxation
        if not eLevel:
            eLevel = self.config.eLevelSuccessiveOverRelaxation
1432 assert source.shape[0] == source.shape[1], "Input array must be square"
1433 # initialize, and set boundary conditions
1434 func = np.zeros([source.shape[0] + 2, source.shape[1] + 2])
1435 resid = np.zeros([source.shape[0] + 2, source.shape[1] + 2])
1436 rhoSpe = np.cos(np.pi/source.shape[0]) # Here a square grid is assumed
1438 # Calculate the initial error
1439 for i in range(1, func.shape[0] - 1):
1440 for j in range(1, func.shape[1] - 1):
1441 resid[i, j] = (func[i, j - 1] + func[i, j + 1] + func[i - 1, j] +
1442 func[i + 1, j] - 4*func[i, j] - source[i - 1, j - 1])
1443 inError = np.sum(np.abs(resid))
1445 # Iterate until convergence
1446 # We perform two sweeps per cycle,
1447 # updating 'odd' and 'even' points separately
1448 nIter = 0
1449 omega = 1.0
1450 dx = 1.0
1451 while nIter < maxIter*2:
1452 outError = 0
if nIter % 2 == 0:
1454 for i in range(1, func.shape[0] - 1, 2):
1455 for j in range(1, func.shape[1] - 1, 2):
1456 resid[i, j] = float(func[i, j-1] + func[i, j + 1] + func[i - 1, j] +
1457 func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
1458 func[i, j] += omega*resid[i, j]*.25
1459 for i in range(2, func.shape[0] - 1, 2):
1460 for j in range(2, func.shape[1] - 1, 2):
1461 resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j] +
1462 func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
1463 func[i, j] += omega*resid[i, j]*.25
1464 else:
1465 for i in range(1, func.shape[0] - 1, 2):
1466 for j in range(2, func.shape[1] - 1, 2):
1467 resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j] +
1468 func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
1469 func[i, j] += omega*resid[i, j]*.25
1470 for i in range(2, func.shape[0] - 1, 2):
1471 for j in range(1, func.shape[1] - 1, 2):
1472 resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j] +
1473 func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
1474 func[i, j] += omega*resid[i, j]*.25
1475 outError = np.sum(np.abs(resid))
1476 if outError < inError*eLevel:
1477 break
1478 if nIter == 0:
1479 omega = 1.0/(1 - rhoSpe*rhoSpe/2.0)
1480 else:
1481 omega = 1.0/(1 - rhoSpe*rhoSpe*omega/4.0)
1482 nIter += 1
if nIter >= maxIter*2:
    self.log.warn("Failure: SuccessiveOverRelaxation did not converge in %s iterations.\n"
                  "outError: %s, inError: %s", nIter//2, outError, inError*eLevel)
else:
    self.log.info("Success: SuccessiveOverRelaxation converged in %s iterations.\n"
                  "outError: %s, inError: %s", nIter//2, outError, inError*eLevel)
1490 return func[1: -1, 1: -1]
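# Illustrative use of successiveOverRelax (a minimal sketch; `task` is a
# configured MakeBrighterFatterKernelTask instance, and the array size
# and tolerance are made-up values, not pipeline defaults):
#
#     source = np.random.RandomState(0).normal(size=(17, 17))
#     phi = task.successiveOverRelax(source, maxIter=10000, eLevel=5.0e-14)
#     assert phi.shape == source.shape  # border padding stripped on return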
1492 @staticmethod
1493 def _tileArray(in_array):
1494 """Given an input quarter-image, tile/mirror it and return full image.
1496 Given a square input of side-length n, of the form
1498 input = array([[1, 2, 3],
1499 [4, 5, 6],
1500 [7, 8, 9]])
1502 return an array of size 2n-1 as
1504 output = array([[ 9, 8, 7, 8, 9],
1505 [ 6, 5, 4, 5, 6],
1506 [ 3, 2, 1, 2, 3],
1507 [ 6, 5, 4, 5, 6],
1508 [ 9, 8, 7, 8, 9]])
1510 Parameters:
1511 -----------
1512 input : `np.array`
1513 The square input quarter-array
1515 Returns:
1516 --------
1517 output : `np.array`
1518 The full, tiled array
1519 """
assert in_array.shape[0] == in_array.shape[1], "Input array must be square"
1521 length = in_array.shape[0] - 1
1522 output = np.zeros((2*length + 1, 2*length + 1))
1524 for i in range(length + 1):
1525 for j in range(length + 1):
1526 output[i + length, j + length] = in_array[i, j]
1527 output[-i + length, j + length] = in_array[i, j]
1528 output[i + length, -j + length] = in_array[i, j]
1529 output[-i + length, -j + length] = in_array[i, j]
1530 return output
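# Quick check of _tileArray (illustrative only):
#
#     quarter = np.arange(1, 10).reshape(3, 3)
#     full = MakeBrighterFatterKernelTask._tileArray(quarter)
#     assert full.shape == (5, 5)
#     assert full[2, 2] == 1 and full[0, 0] == 9  # mirrored about the centre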
1532 @staticmethod
1533 def _forceZeroSum(inputArray, sumToInfinity):
1534 """Given an array of correlations, adjust the
1535 central value to force the sum of the array to be zero.
1537 Parameters:
1538 -----------
1539 input : `np.array`
1540 The square input array, assumed square and with
1541 shape (2n+1) x (2n+1)
1543 Returns:
1544 --------
1545 output : `np.array`
1546 The same array, with the value of the central value
1547 inputArray[n,n] adjusted to force the array sum to be zero.
1548 """
1549 assert(inputArray.shape[0] == inputArray.shape[1])
1550 assert(inputArray.shape[0] % 2 == 1)
1551 center = int((inputArray.shape[0] - 1) / 2)
1552 outputArray = np.copy(inputArray)
1553 outputArray[center, center] -= inputArray.sum() - sumToInfinity
1554 return outputArray
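# Quick check of _forceZeroSum (made-up 3x3 input):
#
#     arr = np.array([[0., -1., 0.], [-1., 10., -1.], [0., -1., 0.]])
#     out = MakeBrighterFatterKernelTask._forceZeroSum(arr, 0.0)
#     assert abs(out.sum()) < 1e-12  # excess absorbed by the central pixel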
1556 @staticmethod
1557 def _buildCorrelationModel(array, replacementRadius, slope):
1558 """Given an array of correlations, build a model
1559 for correlations beyond replacementRadius pixels from the center
1560 and replace the measured values with the model.
1562 Parameters:
1563 -----------
1564 input : `np.array`
1565 The square input array, assumed square and with
1566 shape (2n+1) x (2n+1)
1568 Returns:
1569 --------
1570 output : `np.array`
1571 The same array, with the outer values
1572 replaced with a smoothed model.
1573 """
1574 assert(array.shape[0] == array.shape[1])
1575 assert(array.shape[0] % 2 == 1)
1576 assert(replacementRadius > 1)
1577 center = int((array.shape[0] - 1) / 2)
# First check that the [0, 1] and [1, 0] correlations are negative.
# If either is positive, the data is seriously corrupted (this has
# happened in some bad amplifiers). In that case leave the array
# unchanged and return a sumToInfinity of zero.
if (array[center, center + 1] >= 0.0) or (array[center + 1, center] >= 0.0):
    return 0.0
1584 intercept = (np.log10(-array[center, center + 1]) + np.log10(-array[center + 1, center])) / 2.0
1585 preFactor = 10**intercept
1586 slopeFactor = 2.0*abs(slope) - 2.0
1587 sumToInfinity = 2.0*np.pi*preFactor / (slopeFactor*(float(center)+0.5)**slopeFactor)
# sumToInfinity is the integral of the model beyond what is measured.
# It is used to adjust C[0, 0] when forcing the kernel sum to zero.
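# Worked form of the tail integral (a sketch of the identity used
# above, not new functionality): with |C|(r) = preFactor * r**(2*slope)
# and slope < -1, integrating over the plane beyond R = center + 0.5,
#
#   integral_R^inf 2*pi*r * preFactor * r**(2*slope) dr
#       = 2*pi*preFactor / (slopeFactor * R**slopeFactor),
#
# where slopeFactor = -(2*slope + 2) = 2*abs(slope) - 2.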
1591 # Now replace the pixels beyond replacementRadius with the model values
1592 for i in range(array.shape[0]):
1593 for j in range(array.shape[1]):
1594 r2 = float((i-center)*(i-center) + (j-center)*(j-center))
1595 if abs(i-center) < replacementRadius and abs(j-center) < replacementRadius:
1596 continue
1597 else:
1598 newCvalue = -preFactor * r2**slope
1599 array[i, j] = newCvalue
1600 return sumToInfinity
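# Illustrative use of _buildCorrelationModel (the 9x9 array and the
# slope value are made up; note the array is modified in place):
#
#     y, x = np.mgrid[-4:5, -4:5]
#     corr = -0.01 / (1.0 + x*x + y*y)
#     tail = MakeBrighterFatterKernelTask._buildCorrelationModel(
#         corr, replacementRadius=3, slope=-1.35)
#     # pixels beyond the 3-pixel radius now follow the power-law model;
#     # tail is the model integral beyond the measured region.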
1602 @staticmethod
1603 def _convertImagelikeToFloatImage(imagelikeObject):
1604 """Turn an exposure or masked image of any type into an ImageF."""
1605 for attr in ("getMaskedImage", "getImage"):
1606 if hasattr(imagelikeObject, attr):
1607 imagelikeObject = getattr(imagelikeObject, attr)()
try:
    floatImage = imagelikeObject.convertF()
except AttributeError as e:
    raise RuntimeError("Failed to convert image to float") from e
1612 return floatImage
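# Illustrative behaviour of _convertImagelikeToFloatImage: given an
# Exposure, it unwraps getMaskedImage() and then getImage() before
# calling convertF(); a plain integer Image is converted directly.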
1615def calcBiasCorr(fluxLevels, imageShape, repeats=1, seed=0, addCorrelations=False,
1616 correlationStrength=0.1, maxLag=10, nSigmaClip=5, border=10, logger=None):
1617 """Calculate the bias induced when sigma-clipping non-Gaussian distributions.
Fill image pairs of the specified size with Poisson-distributed values,
adding correlations as necessary. Then compute the cross-correlation,
and derive the induced bias from the cross-correlation image and the
image means.
Parameters
----------
fluxLevels : `list` of `int`
    The mean flux levels at which to simulate.
    Nominal values might be something like [70000, 90000, 110000].
imageShape : `tuple` of `int`
    The shape of the image array to simulate, nx by ny pixels.
repeats : `int`, optional
    Number of repeats to perform so that results
    can be averaged to improve SNR.
seed : `int`, optional
    The random seed to use for the Poisson points.
addCorrelations : `bool`, optional
    Whether to add brighter-fatter-like correlations to the simulated
    images. If true, a correlation between x_{i,j} and x_{i+1,j+1} is
    introduced by adding a*x_{i,j} to x_{i+1,j+1}.
correlationStrength : `float`, optional
    The strength of the correlations.
    This is the value of the coefficient `a` in the above definition.
maxLag : `int`, optional
    The maximum lag to work to, in pixels.
nSigmaClip : `float`, optional
    Number of sigma to clip to when calculating the sigma-clipped mean.
border : `int`, optional
    Number of border pixels to mask.
logger : `lsst.log.Log`, optional
    Logger to use. Instantiated anew if not provided.
Returns
-------
biases : `dict` [`float`, `list` of `float`]
    A dictionary, keyed by flux level, containing a list of the biases
    for each repeat at that flux level.
means : `dict` [`float`, `list` of `float`]
    A dictionary, keyed by flux level, containing a list of the average
    mean fluxes (average of the mean of the two images)
    for the image pairs at that flux level.
xcorrs : `dict` [`float`, `list` of `np.ndarray`]
    A dictionary, keyed by flux level, containing a list of the xcorr
    images for the image pairs at that flux level.
"""
1665 if logger is None:
1666 logger = lsstLog.Log.getDefaultLogger()
1668 means = {f: [] for f in fluxLevels}
1669 xcorrs = {f: [] for f in fluxLevels}
1670 biases = {f: [] for f in fluxLevels}
1672 config = MakeBrighterFatterKernelTaskConfig()
config.isrMandatorySteps = []  # no ISR is run, but the config validation routine still is
1674 config.isrForbiddenSteps = []
1675 config.nSigmaClipXCorr = nSigmaClip
1676 config.nPixBorderXCorr = border
1677 config.maxLag = maxLag
1678 task = MakeBrighterFatterKernelTask(config=config)
1680 im0 = afwImage.maskedImage.MaskedImageF(imageShape[1], imageShape[0])
1681 im1 = afwImage.maskedImage.MaskedImageF(imageShape[1], imageShape[0])
1683 random = np.random.RandomState(seed)
1685 for rep in range(repeats):
1686 for flux in fluxLevels:
data0 = random.poisson(flux, imageShape).astype(float)
data1 = random.poisson(flux, imageShape).astype(float)
1689 if addCorrelations:
1690 data0[1:, 1:] += correlationStrength*data0[: -1, : -1]
1691 data1[1:, 1:] += correlationStrength*data1[: -1, : -1]
1692 im0.image.array[:, :] = data0
1693 im1.image.array[:, :] = data1
1695 _xcorr, _means = task._crossCorrelate(im0, im1, runningBiasCorrSim=True)
1697 means[flux].append(_means)
1698 xcorrs[flux].append(_xcorr)
if addCorrelations:
    bias = xcorrs[flux][-1][1, 1]/means[flux][-1]*(1 + correlationStrength)/correlationStrength
else:
    bias = xcorrs[flux][-1][0, 0]/means[flux][-1]
logger.info(f"Simulated/expected avg. flux: {flux:.1f}, {(means[flux][-1]/2):.1f}")
logger.info(f"Bias: {bias:.6f}")
biases[flux].append(bias)
1711 return biases, means, xcorrs
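# Illustrative call to calcBiasCorr (a minimal sketch; the flux level,
# image shape and repeat count are made-up values chosen to run quickly,
# not recommended settings):
#
#     biases, means, xcorrs = calcBiasCorr([10000], (200, 200), repeats=2)
#     for flux, vals in biases.items():
#         print(f"flux {flux}: mean bias {np.mean(vals):.6f}")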