Coverage for python / lsst / cp / pipe / cpBfk.py: 11%
301 statements
« prev ^ index » next coverage.py v7.13.5, created at 2026-04-18 09:18 +0000
1# This file is part of cp_pipe.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <https://www.gnu.org/licenses/>.
21#
22"""Calculation of brighter-fatter effect correlations and kernels."""
24__all__ = ['BrighterFatterKernelSolveTask',
25 'BrighterFatterKernelSolveConfig']
27import numpy as np
29import lsst.afw.math as afwMath
30import lsst.pex.config as pexConfig
31import lsst.pipe.base as pipeBase
32import lsst.pipe.base.connectionTypes as cT
34from lsst.ip.isr import (BrighterFatterKernel)
35from .utils import (funcPolynomial, irlsFit, extractCalibDate)
36from .cpLinearitySolve import ptcLookup
class BrighterFatterKernelSolveConnections(pipeBase.PipelineTaskConnections,
                                           dimensions=("instrument", "detector")):
    """Connections for the brighter-fatter kernel solver.

    Depending on the ``useBfkPtc`` configuration option, either the
    ``inputBfkPtc`` connection or the ``inputPtc``/``dummy`` pair is
    used; the unused connections are removed in ``__init__``.
    """
    dummy = cT.Input(
        name="raw",
        doc="Dummy exposure.",
        storageClass='Exposure',
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
        deferLoad=True,
    )
    camera = cT.PrerequisiteInput(
        name="camera",
        doc="Camera associated with this data.",
        storageClass="Camera",
        dimensions=("instrument", ),
        isCalibration=True,
    )
    inputPtc = cT.PrerequisiteInput(
        name="ptc",
        doc="Photon transfer curve dataset.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "detector"),
        isCalibration=True,
        lookupFunction=ptcLookup,
    )
    inputBfkPtc = cT.Input(
        name="bfkPtc",
        doc="Input BFK PTC dataset.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "detector"),
        isCalibration=True,
    )
    outputBFK = cT.Output(
        name="brighterFatterKernel",
        doc="Output measured brighter-fatter kernel.",
        storageClass="BrighterFatterKernel",
        dimensions=("instrument", "detector"),
        isCalibration=True,
    )

    def __init__(self, *, config=None):
        # Keep only the PTC connection appropriate for this config.
        # NOTE(review): no super().__init__ call here — confirm the
        # pipe_base version in use does not require it.
        if config.useBfkPtc:
            del self.inputPtc
            del self.dummy
        else:
            del self.inputBfkPtc
class BrighterFatterKernelSolveConfig(pipeBase.PipelineTaskConfig,
                                      pipelineConnections=BrighterFatterKernelSolveConnections):
    """Configuration for the brighter-fatter kernel solver.

    Several multi-line ``doc`` strings in the original were implicitly
    concatenated without separating spaces (rendering e.g.
    "value?Defaults" and "covariancemodel"); those user-visible help
    strings are repaired here, along with the typo "bsed" -> "based".
    """
    level = pexConfig.ChoiceField(
        doc="The level at which to calculate the brighter-fatter kernels",
        dtype=str,
        default="AMP",
        allowed={
            "AMP": "Every amplifier treated separately",
            "DETECTOR": "One kernel per detector",
        }
    )
    ignoreAmpsForAveraging = pexConfig.ListField(
        dtype=str,
        doc="List of amp names to ignore when averaging the amplifier kernels into the detector"
            " kernel. Only relevant for level = DETECTOR",
        default=[]
    )
    xcorrCheckRejectLevel = pexConfig.Field(
        dtype=float,
        doc="Rejection level for the sum of the input cross-correlations. Arrays which "
            "sum to greater than this are discarded before the clipped mean is calculated.",
        default=2.0
    )
    nSigmaClip = pexConfig.Field(
        dtype=float,
        doc="Number of sigma to clip when calculating means for the cross-correlation",
        default=5
    )
    forceZeroSum = pexConfig.Field(
        dtype=bool,
        doc="Force the correlation matrix to have zero sum by adjusting the (0,0) value? "
            "Defaults to true based on recommendation of Broughton et al. 2024.",
        default=True,
    )
    useAmatrix = pexConfig.Field(
        dtype=bool,
        doc="Use the PTC 'a' matrix (Astier et al. 2019 equation 20) "
            "instead of the average of measured covariances?",
        default=False,
    )
    useCovModelSample = pexConfig.Field(
        dtype=bool,
        doc="Use the covariance matrix sampled from the full covariance model "
            "(Astier et al. 2019 equation 20) instead of the average measured covariances?",
        default=False,
    )
    covModelFluxSample = pexConfig.DictField(
        keytype=str,
        itemtype=float,
        doc="Flux level in electrons at which to sample the full covariance "
            "model if useCovModelSample=True. The same level is applied to all "
            "amps if this parameter [`dict`] is passed as {'ALL_AMPS': value}",
        default={'ALL_AMPS': 25000.0},
    )
    maxIterSuccessiveOverRelaxation = pexConfig.Field(
        dtype=int,
        doc="The maximum number of iterations allowed for the successive over-relaxation method",
        default=10000
    )
    eLevelSuccessiveOverRelaxation = pexConfig.Field(
        dtype=float,
        doc="The target residual error for the successive over-relaxation method",
        default=5.0e-14
    )
    correlationQuadraticFit = pexConfig.Field(
        dtype=bool,
        doc="Use a quadratic fit to find the correlations instead of simple averaging?",
        default=False,
    )
    correlationModelRadius = pexConfig.Field(
        dtype=int,
        doc="Build a model of the correlation coefficients for radii larger than this value in pixels?",
        default=100,
    )
    correlationModelSlope = pexConfig.Field(
        dtype=float,
        doc="Slope of the correlation model for radii larger than correlationModelRadius",
        default=-1.35,
    )
    nSigmaTolForValidEdge = pexConfig.Field(
        dtype=float,
        doc="A valid kernel will have all pixels in a 3px picture frame (edges) "
            "be within +/- nSigmaTolForValidEdge of zero.",
        default=5.0,
    )
    fractionK00ForKernelMax = pexConfig.Field(
        dtype=float,
        doc="All kernel values must be less than value*abs(K_00) to be valid.",
        default=0.025,
    )
    useBfkPtc = pexConfig.Field(
        dtype=bool,
        doc="Use a BFK ptc in a single pipeline?",
        default=False,
    )
    doCheckValidity = pexConfig.Field(
        dtype=bool,
        doc="Check the AMP kernels for basic validity criteria? "
            "Will set bfk.valid for each amp.",
        default=True,
    )
class BrighterFatterKernelSolveTask(pipeBase.PipelineTask):
    """Measure appropriate Brighter-Fatter Kernel from the PTC dataset.
    """

    ConfigClass = BrighterFatterKernelSolveConfig
    _DefaultName = 'cpBfkMeasure'

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        """Ensure that the input and output dimensions are passed along.

        Parameters
        ----------
        butlerQC : `lsst.daf.butler.QuantumContext`
            Butler to operate on.
        inputRefs : `lsst.pipe.base.InputQuantizedConnection`
            Input data refs to load.
        outputRefs : `lsst.pipe.base.OutputQuantizedConnection`
            Output data refs to persist.
        """
        inputs = butlerQC.get(inputRefs)

        # Use the dimensions to set calib/provenance information.
        if self.config.useBfkPtc:
            # Present the BFK PTC to run() under the generic name, and
            # supply an empty dummy list, since those connections were
            # deleted in this configuration.
            inputs["inputDims"] = dict(inputRefs.inputBfkPtc.dataId.required)
            inputs["inputPtc"] = inputs["inputBfkPtc"]
            inputs["dummy"] = []
            del inputs["inputBfkPtc"]
        else:
            inputs["inputDims"] = dict(inputRefs.inputPtc.dataId.required)

        # Add calibration provenance info to header.
        # NOTE(review): when useBfkPtc is set, inputRefs has no
        # "inputPtc" attribute, so no provenance is recorded — confirm
        # this is intended.
        kwargs = dict()
        reference = getattr(inputRefs, "inputPtc", None)
        if reference is not None and hasattr(reference, "run"):
            kwargs["PTC_RUN"] = reference.run
            kwargs["PTC_UUID"] = str(reference.id)
            calib = inputs.get("inputPtc", None)
            kwargs["PTC_DATE"] = extractCalibDate(calib)

            # Lazy %-formatting instead of eager concatenation.
            self.log.info("Using %s", reference.run)

        outputs = self.run(**inputs)
        outputs.outputBFK.updateMetadata(setDate=False, **kwargs)
        butlerQC.put(outputs, outputRefs)

    def run(self, inputPtc, dummy, camera, inputDims):
        """Combine covariance information from PTC into brighter-fatter
        kernels.

        Parameters
        ----------
        inputPtc : `lsst.ip.isr.PhotonTransferCurveDataset`
            PTC data containing per-amplifier covariance measurements.
        dummy : `lsst.afw.image.Exposure`
            The exposure used to select the appropriate PTC dataset.
            In almost all circumstances, one of the input exposures
            used to generate the PTC dataset is the best option.
        camera : `lsst.afw.cameraGeom.Camera`
            Camera to use for camera geometry information.
        inputDims : `lsst.daf.butler.DataCoordinate` or `dict`
            DataIds to use to populate the output calibration.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The results struct containing:

            ``outputBFK``
                Resulting Brighter-Fatter Kernel
                (`lsst.ip.isr.BrighterFatterKernel`).

        Raises
        ------
        ValueError
            Raised if ``useCovModelSample`` is set but the PTC was not
            fit with the FULLCOVARIANCE model.
        RuntimeError
            Raised if validity checking is requested on a kernel that
            is too small to check.
        """
        detector = camera[inputDims['detector']]
        detName = detector.getName()

        if self.config.level == 'DETECTOR':
            detectorCorrList = list()
            detectorFluxes = list()

        if self.config.useCovModelSample and inputPtc.ptcFitType != "FULLCOVARIANCE":
            raise ValueError("ptcFitType must be FULLCOVARIANCE if useCovModelSample=True.")

        # Get flux sample dictionary: a single 'ALL_AMPS' entry applies
        # to every amplifier; otherwise per-amp entries are used (amps
        # absent from the config default to 0.0).
        fluxSampleDict = {ampName: 0.0 for ampName in inputPtc.ampNames}
        for ampName in inputPtc.ampNames:
            if 'ALL_AMPS' in self.config.covModelFluxSample:
                fluxSampleDict[ampName] = self.config.covModelFluxSample['ALL_AMPS']
            elif ampName in self.config.covModelFluxSample:
                fluxSampleDict[ampName] = self.config.covModelFluxSample[ampName]

        bfk = BrighterFatterKernel(camera=camera, detectorId=detector.getId(), level=self.config.level)
        bfk.rawMeans = inputPtc.rawMeans  # ADU
        bfk.rawVariances = inputPtc.rawVars  # ADU^2
        bfk.expIdMask = inputPtc.expIdMask

        # Use the PTC covariances as the cross-correlations.  These
        # are scaled before the kernel is generated, which performs
        # the conversion.  The input covariances are in (x, y) index
        # ordering, as is the aMatrix.
        bfk.badAmps = inputPtc.badAmps
        matrixSide = int(inputPtc.covMatrixSide)
        if self.config.useAmatrix:
            matrixSide = inputPtc.covMatrixSideFullCovFit
        bfk.shape = (matrixSide*2 + 1, matrixSide*2 + 1)
        # If useAmatrix is true and the fit side is smaller than the
        # side used to calculate the covariances we need to trim them.
        for amp in detector:
            ampName = amp.getName()
            bfk.rawXcorrs[ampName] = inputPtc.covariances[ampName][:, :matrixSide, :matrixSide]
        bfk.gain = inputPtc.gain
        bfk.noise = inputPtc.noise
        bfk.meanXcorrs = dict()
        bfk.valid = dict()
        bfk.updateMetadataFromExposures([inputPtc])

        for amp in detector:
            ampName = amp.getName()
            gain = bfk.gain[ampName]
            noiseMatrix = inputPtc.noiseMatrix[ampName]
            mask = inputPtc.expIdMask[ampName]
            if gain <= 0:
                # We've received very bad data.
                self.log.warning("Impossible gain received from PTC for %s: %f. Skipping bad amplifier.",
                                 ampName, gain)
                bfk.meanXcorrs[ampName] = np.zeros(bfk.shape)
                bfk.ampKernels[ampName] = np.zeros(bfk.shape)
                bfk.rawXcorrs[ampName] = np.zeros((len(mask), matrixSide, matrixSide))
                bfk.valid[ampName] = False
                continue

            # Use inputPtc.expIdMask to get the means, variances, and
            # covariances that were not masked after PTC.  The
            # covariances may now have the mask already applied.
            fluxes = np.array(bfk.rawMeans[ampName])[mask]
            variances = np.array(bfk.rawVariances[ampName])[mask]
            covModelList = np.array(inputPtc.covariancesModel[ampName])

            xCorrList = np.array([np.array(xcorr) for xcorr in bfk.rawXcorrs[ampName]])
            if np.sum(mask) < len(xCorrList):
                # Only apply the mask if needed.
                xCorrList = xCorrList[mask]

            # Vectorized unit conversion (was a per-element loop).
            fluxes = fluxes*gain  # Now in e^-
            variances = variances*gain*gain  # Now in e^2-

            # This should duplicate Coulton et al. 2017 Equation 22-29
            # (arxiv:1711.06273)
            scaledCorrList = list()
            corrList = list()
            truncatedFluxes = list()
            for xcorrNum, (xcorr, flux, var) in enumerate(zip(xCorrList, fluxes, variances), 1):
                q = np.array(xcorr) * gain * gain  # xcorr now in e^-
                q *= 2.0  # Remove factor of 1/2 applied in PTC.
                self.log.info("Amp: %s %d/%d Flux: %f Var: %f Q(0,0): %g Q(1,0): %g Q(0,1): %g",
                              ampName, xcorrNum, len(xCorrList), flux, var, q[0][0], q[1][0], q[0][1])

                # Normalize by the flux, which removes the (0,0)
                # component attributable to Poisson noise.  This
                # contains the two "t I delta(x - x')" terms in
                # Coulton et al. 2017 equation 29
                q[0][0] -= 2.0*(flux)

                if q[0][0] > 0.0:
                    self.log.warning("Amp: %s %d skipped due to value of (variance-mean)=%f",
                                     ampName, xcorrNum, q[0][0])
                    # If we drop an element of ``scaledCorrList``
                    # (which is what this does), we need to ensure we
                    # drop the flux entry as well.
                    continue

                # This removes the "t (I_a^2 + I_b^2)" factor in
                # Coulton et al. 2017 equation 29.
                # The quadratic fit option needs the correlations unscaled.
                q /= -2.0
                unscaled = self._tileArray(q)
                q /= flux**2
                scaled = self._tileArray(q)
                xcorrCheck = np.abs(np.sum(scaled))/np.sum(np.abs(scaled))
                if (xcorrCheck > self.config.xcorrCheckRejectLevel) or not (np.isfinite(xcorrCheck)):
                    self.log.warning("Amp: %s %d skipped due to value of triangle-inequality sum %f",
                                     ampName, xcorrNum, xcorrCheck)
                    continue

                scaledCorrList.append(scaled)
                corrList.append(unscaled)
                truncatedFluxes.append(flux)
                self.log.info("Amp: %s %d/%d Final: %g XcorrCheck: %f",
                              ampName, xcorrNum, len(xCorrList), q[0][0], xcorrCheck)

            fluxes = np.array(truncatedFluxes)

            if len(scaledCorrList) == 0:
                self.log.warning("Amp: %s All inputs rejected for amp!", ampName)
                bfk.meanXcorrs[ampName] = np.zeros(bfk.shape)
                bfk.ampKernels[ampName] = np.zeros(bfk.shape)
                bfk.valid[ampName] = False
                continue

            if self.config.useAmatrix:
                # Use the aMatrix, ignoring the meanXcorr generated above.
                preKernel = np.pad(self._tileArray(-1.0 * np.array(inputPtc.aMatrix[ampName])), ((1, 1)))
            elif self.config.correlationQuadraticFit:
                # Use a quadratic fit to the correlations as a
                # function of flux.
                preKernel = self.quadraticCorrelations(corrList, fluxes, f"Amp: {ampName}")
            elif self.config.useCovModelSample:
                # Sample the full covariance model at a given flux.
                # Use the non-truncated fluxes for this.
                mu = bfk.rawMeans[ampName]
                covTilde = self.sampleCovModel(mu, noiseMatrix, gain,
                                               covModelList, fluxSampleDict[ampName],
                                               f"Amp: {ampName}")
                preKernel = np.pad(self._tileArray(-1.0 * covTilde), ((1, 1)))
            else:
                # Use a simple average of the measured correlations.
                preKernel = self.averageCorrelations(scaledCorrList, f"Amp: {ampName}")

            center = int((bfk.shape[0] - 1) / 2)

            if self.config.forceZeroSum:
                totalSum = np.sum(preKernel)

                if self.config.correlationModelRadius < (preKernel.shape[0] - 1) / 2:
                    # Assume a correlation model of
                    # Corr(r) = -preFactor * r^(2 * slope)
                    preFactor = np.sqrt(preKernel[center, center + 1] * preKernel[center + 1, center])
                    slopeFactor = 2.0 * np.abs(self.config.correlationModelSlope)
                    totalSum += 2.0*np.pi*(preFactor / (slopeFactor*(center + 0.5))**slopeFactor)

                preKernel[center, center] -= totalSum
                self.log.info("%s Zero-Sum Scale: %g", ampName, totalSum)

            finalSum = np.sum(preKernel)
            bfk.meanXcorrs[ampName] = preKernel

            postKernel = self.successiveOverRelax(preKernel)
            bfk.ampKernels[ampName] = postKernel
            if self.config.level == 'DETECTOR' and ampName not in self.config.ignoreAmpsForAveraging:
                detectorCorrList.extend(scaledCorrList)
                detectorFluxes.extend(fluxes)

            # Check for validity.
            if self.config.doCheckValidity:
                bfk.valid[ampName] = self._checkAmpKernelValidity(postKernel, ampName)
            else:
                # The kernel at this point will be considered valid.
                bfk.valid[ampName] = True

            self.log.info("Amp: %s Sum: %g Center Info Pre: %g Post: %g",
                          ampName, finalSum, preKernel[center, center], postKernel[center, center])

        # Assemble a detector kernel?
        if self.config.level == 'DETECTOR':
            if self.config.correlationQuadraticFit:
                # Bug fix: label with the detector name; the original
                # reused the last amplifier's name here (and would
                # raise NameError on an amp-less detector).
                preKernel = self.quadraticCorrelations(detectorCorrList, detectorFluxes,
                                                       f"Det: {detName}")
            else:
                preKernel = self.averageCorrelations(detectorCorrList, f"Det: {detName}")
            finalSum = np.sum(preKernel)
            center = int((bfk.shape[0] - 1) / 2)

            postKernel = self.successiveOverRelax(preKernel)
            bfk.detKernels[detName] = postKernel
            self.log.info("Det: %s Sum: %g Center Info Pre: %g Post: %g",
                          detName, finalSum, preKernel[center, center], postKernel[center, center])

        return pipeBase.Struct(
            outputBFK=bfk,
        )

    def _checkAmpKernelValidity(self, kernel, ampName):
        """Check basic validity criteria for a single amplifier kernel.

        Three boundary conditions are tested:
        (1) the edge of the kernel should be consistent with zero,
        (2) the kernel should be negative (below a small positive
            threshold scaled from the central value), and
        (3) the center of the kernel should be the absolute minimum.

        Since there are typically too few pixels along the outermost
        edge, the stddev of the 3px picture frame of the kernel sets
        the tolerance of the edge condition.

        Parameters
        ----------
        kernel : `numpy.ndarray`, (N, N)
            The solved amplifier kernel.
        ampName : `str`
            Amplifier name for log messages.

        Returns
        -------
        valid : `bool`
            True if all three conditions pass.

        Raises
        ------
        RuntimeError
            Raised if the kernel is smaller than 7x7.
        """
        kernelSize = kernel.shape[0]
        if kernelSize < 7:
            # Bug fix: the original passed ``ampName`` as a second
            # positional argument to RuntimeError, which is never
            # interpolated into the "%s" placeholder.
            raise RuntimeError(f"{ampName}: The kernel is too small for validity check.")

        kernelCenterValue = kernel[kernelSize//2, kernelSize//2]
        xv, yv = np.meshgrid(range(kernelSize), range(kernelSize))

        # Masks for the outermost 1px ring and the 3px picture frame.
        edge = (xv == 0) | (xv == kernelSize - 1) | (yv == 0) | (yv == kernelSize - 1)
        edges = (xv < 3) | (xv > kernelSize - 3 - 1) | (yv < 3) | (yv > kernelSize - 3 - 1)

        kernelEdge = kernel[edge].ravel()
        kernelEdges = kernel[edges].ravel()
        threshold = self.config.nSigmaTolForValidEdge*np.std(kernelEdges)

        # (1) Edge should converge to zero.  Bug fix: the comparison
        # reference is 0, so the tolerance must be passed as ``atol``;
        # the original ``rtol=threshold`` was a no-op because the rtol
        # term is multiplied by the (zero) reference value.
        valid = bool(np.all(np.isclose(kernelEdge, 0.0, atol=threshold)))
        if not valid:
            self.log.warning("%s: the kernel edges did not converge to 0 within "
                             "+/- %s sigma.", ampName, threshold)

        # (3) The center should be the absolute minimum.
        centerIsMinimum = bool(np.all(kernel >= kernelCenterValue))
        if not centerIsMinimum:
            self.log.warning("%s: the kernel center is not the absolute minimum.", ampName)
        valid &= centerIsMinimum

        # (2) The kernel should be negative, up to a small allowance
        # proportional to |K_00|.
        negativityThreshold = self.config.fractionK00ForKernelMax*np.abs(kernelCenterValue)
        isNegative = bool(np.all(kernel <= negativityThreshold))
        if not isNegative:
            self.log.warning("%s: the kernel is not negative", ampName)
        valid &= isNegative

        return valid

    def averageCorrelations(self, xCorrList, name):
        """Average input correlations.

        Parameters
        ----------
        xCorrList : `list` [`numpy.array`]
            List of cross-correlations.  These are expected to be
            square arrays.
        name : `str`
            Name for log messages (currently unused).

        Returns
        -------
        meanXcorr : `numpy.array`, (N, N)
            The averaged cross-correlation, padded by one element on
            each side.
        """
        meanXcorr = np.zeros_like(xCorrList[0])
        xCorrList = np.array(xCorrList)

        sctrl = afwMath.StatisticsControl()
        sctrl.setNumSigmaClip(self.config.nSigmaClip)
        # Sigma-clipped mean over the stack, element by element.
        for i in range(np.shape(meanXcorr)[0]):
            for j in range(np.shape(meanXcorr)[1]):
                meanXcorr[i, j] = afwMath.makeStatistics(xCorrList[:, i, j],
                                                         afwMath.MEANCLIP, sctrl).getValue()

        # To match previous definitions, pad by one element.
        meanXcorr = np.pad(meanXcorr, ((1, 1)))

        return meanXcorr

    def quadraticCorrelations(self, xCorrList, fluxList, name):
        """Measure a quadratic correlation model.

        Parameters
        ----------
        xCorrList : `list` [`numpy.array`]
            List of cross-correlations.  These are expected to be
            square arrays.
        fluxList : `numpy.array`, (Nflux,)
            Associated list of fluxes.
        name : `str`
            Name for log messages (currently unused).

        Returns
        -------
        meanXcorr : `numpy.array`, (N, N)
            The fitted cross-correlation, padded by one element on
            each side.
        """
        meanXcorr = np.zeros_like(xCorrList[0])
        fluxList = np.square(fluxList)
        xCorrList = np.array(xCorrList)

        for i in range(np.shape(meanXcorr)[0]):
            for j in range(np.shape(meanXcorr)[1]):
                # Fit correlation_i(x, y) = a0 + a1 * (flux_i)^2.  We
                # do not want to transpose, so use (i, j) without
                # inversion.
                linearFit, linearFitErr, chiSq, weights = irlsFit([0.0, 1e-4], fluxList,
                                                                  xCorrList[:, i, j], funcPolynomial,
                                                                  scaleResidual=False)
                meanXcorr[i, j] = linearFit[1]  # Discard the intercept.
                self.log.info("Quad fit meanXcorr[%d,%d] = %g", i, j, linearFit[1])

        # To match previous definitions, pad by one element.
        meanXcorr = np.pad(meanXcorr, ((1, 1)))

        return meanXcorr

    def sampleCovModel(self, fluxes, noiseMatrix, gain, covModelList, flux, name):
        """Sample the correlation model and measure
        widetilde{C}_{ij} from Broughton et al. 2023 (eq. 4).

        Parameters
        ----------
        fluxes : `numpy.ndarray`, (Nflux,)
            Measured fluxes (in ADU).
        noiseMatrix : `numpy.array`, (N, N)
            Noise matrix.
        gain : `float`
            Amplifier gain (e-/ADU).
        covModelList : `numpy.ndarray`, (Nflux, N, N)
            Per-flux covariance model matrices; each is expected to be
            a square array.
        flux : `float`
            Flux in electrons at which to sample the covariance model.
        name : `str`
            Name for log messages (currently unused).

        Returns
        -------
        covTilde : `numpy.array`, (N, N)
            The calculated C-tilde from Broughton et al. 2023 (eq. 4),
            in ADU units.

        Raises
        ------
        ValueError
            Raised if ``fluxes`` and ``covModelList`` have different
            lengths.
        """
        # Find the index of the measured flux nearest the requested
        # sample flux (comparison must be done in electron units).
        ix = np.argmin((fluxes*gain - flux)**2)
        if len(fluxes) != len(covModelList):
            # Raise rather than assert so the check survives `-O`.
            raise ValueError("fluxes and covModelList must have the same length.")

        # Nearest measured flux level and the covariance model there.
        nearestFlux = fluxes[ix]
        covModelSample = covModelList[ix]

        # Calculate the flux sample; covTilde is returned in ADU.
        covTilde = (covModelSample - noiseMatrix/gain**2)/(nearestFlux**2)
        # Remove the Poisson term from the (0,0) element.
        covTilde[0][0] -= (nearestFlux/gain)/(nearestFlux**2)

        return covTilde

    @staticmethod
    def _tileArray(in_array):
        """Given an input quarter-image, tile/mirror it and return full image.

        Given a square input of side-length n, of the form

        input = array([[1, 2, 3],
                       [4, 5, 6],
                       [7, 8, 9]])

        return an array of size 2n-1 as

        output = array([[ 9, 8, 7, 8, 9],
                        [ 6, 5, 4, 5, 6],
                        [ 3, 2, 1, 2, 3],
                        [ 6, 5, 4, 5, 6],
                        [ 9, 8, 7, 8, 9]])

        Parameters
        ----------
        in_array : `np.array`, (N, N)
            The square input quarter-array.

        Returns
        -------
        output : `np.array`, (2*N - 1, 2*N - 1)
            The full, tiled array.  (Doc fix: the original docstring
            claimed 2*N + 1, but the code produces side 2*N - 1.)
        """
        assert in_array.shape[0] == in_array.shape[1]
        length = in_array.shape[0] - 1
        output = np.zeros((2*length + 1, 2*length + 1))

        # Mirror the quarter-array into all four quadrants about the
        # central element.
        for i in range(length + 1):
            for j in range(length + 1):
                output[i + length, j + length] = in_array[i, j]
                output[-i + length, j + length] = in_array[i, j]
                output[i + length, -j + length] = in_array[i, j]
                output[-i + length, -j + length] = in_array[i, j]
        return output

    def successiveOverRelax(self, source, maxIter=None, eLevel=None):
        """An implementation of the successive over relaxation (SOR) method.

        A numerical method for solving a system of linear equations
        with faster convergence than the Gauss-Seidel method.

        Parameters
        ----------
        source : `numpy.ndarray`, (N, N)
            The input array.
        maxIter : `int`, optional
            Maximum number of iterations to attempt before aborting.
        eLevel : `float`, optional
            The target error level at which we deem convergence to have
            occurred.

        Returns
        -------
        output : `numpy.ndarray`, (N, N)
            The solution.
        """
        # Bug fix: use `is None` so an explicit 0 is honored instead
        # of being silently replaced by the config default.
        if maxIter is None:
            maxIter = self.config.maxIterSuccessiveOverRelaxation
        if eLevel is None:
            eLevel = self.config.eLevelSuccessiveOverRelaxation

        assert source.shape[0] == source.shape[1], "Input array must be square"
        # Initialize, and set boundary conditions.
        func = np.zeros([source.shape[0] + 2, source.shape[1] + 2])
        resid = np.zeros([source.shape[0] + 2, source.shape[1] + 2])
        rhoSpe = np.cos(np.pi/source.shape[0])  # Here a square grid is assumed

        # Calculate the initial error.
        for i in range(1, func.shape[0] - 1):
            for j in range(1, func.shape[1] - 1):
                resid[i, j] = (func[i, j - 1] + func[i, j + 1] + func[i - 1, j]
                               + func[i + 1, j] - 4*func[i, j] - source[i - 1, j - 1])
        inError = np.sum(np.abs(resid))

        # Iterate until convergence.
        # We perform two sweeps per cycle,
        # updating 'odd' and 'even' points separately.
        nIter = 0
        omega = 1.0
        dx = 1.0
        while nIter < maxIter*2:
            outError = 0
            if nIter%2 == 0:
                for i in range(1, func.shape[0] - 1, 2):
                    for j in range(1, func.shape[1] - 1, 2):
                        resid[i, j] = float(func[i, j-1] + func[i, j + 1] + func[i - 1, j]
                                            + func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
                        func[i, j] += omega*resid[i, j]*.25
                for i in range(2, func.shape[0] - 1, 2):
                    for j in range(2, func.shape[1] - 1, 2):
                        resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j]
                                            + func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
                        func[i, j] += omega*resid[i, j]*.25
            else:
                for i in range(1, func.shape[0] - 1, 2):
                    for j in range(2, func.shape[1] - 1, 2):
                        resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j]
                                            + func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
                        func[i, j] += omega*resid[i, j]*.25
                for i in range(2, func.shape[0] - 1, 2):
                    for j in range(1, func.shape[1] - 1, 2):
                        resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j]
                                            + func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
                        func[i, j] += omega*resid[i, j]*.25
            outError = np.sum(np.abs(resid))
            if outError < inError*eLevel:
                break
            # Chebyshev acceleration of the relaxation parameter.
            if nIter == 0:
                omega = 1.0/(1 - rhoSpe*rhoSpe/2.0)
            else:
                omega = 1.0/(1 - rhoSpe*rhoSpe*omega/4.0)
            nIter += 1

        if nIter >= maxIter*2:
            self.log.warning("Failure: SuccessiveOverRelaxation did not converge in %s iterations."
                             "\noutError: %s, inError: %s,", nIter//2, outError, inError*eLevel)
        else:
            self.log.info("Success: SuccessiveOverRelaxation converged in %s iterations."
                          "\noutError: %s, inError: %s", nIter//2, outError, inError*eLevel)
        return func[1: -1, 1: -1]