# This file is part of cp_pipe.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""Calculation of brighter-fatter effect correlations and kernels."""

__all__ = ['BrighterFatterKernelSolveTask',
           'BrighterFatterKernelSolveConfig']

import numpy as np

import lsst.afw.math as afwMath
import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
import lsst.pipe.base.connectionTypes as cT

from lsst.ip.isr import (BrighterFatterKernel)
from .utils import (funcPolynomial, irlsFit, extractCalibDate)


class BrighterFatterKernelSolveConnections(pipeBase.PipelineTaskConnections,
                                           dimensions=("instrument", "exposure", "detector")):
    dummy = cT.Input(
        name="raw",
        doc="Dummy exposure.",
        storageClass='Exposure',
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
        deferLoad=True,
    )
    camera = cT.PrerequisiteInput(
        name="camera",
        doc="Camera associated with this data.",
        storageClass="Camera",
        dimensions=("instrument", ),
        isCalibration=True,
    )
    inputPtc = cT.PrerequisiteInput(
        name="ptc",
        doc="Photon transfer curve dataset.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "detector"),
        isCalibration=True,
    )

    outputBFK = cT.Output(
        name="brighterFatterKernel",
        doc="Output measured brighter-fatter kernel.",
        storageClass="BrighterFatterKernel",
        dimensions=("instrument", "detector"),
        isCalibration=True,
    )


class BrighterFatterKernelSolveConfig(pipeBase.PipelineTaskConfig,
                                      pipelineConnections=BrighterFatterKernelSolveConnections):
    level = pexConfig.ChoiceField(
        doc="The level at which to calculate the brighter-fatter kernels",
        dtype=str,
        default="AMP",
        allowed={
            "AMP": "Every amplifier treated separately",
            "DETECTOR": "One kernel per detector",
        }
    )
    ignoreAmpsForAveraging = pexConfig.ListField(
        dtype=str,
        doc="List of amp names to ignore when averaging the amplifier kernels into the detector"
            " kernel. Only relevant for level = DETECTOR",
        default=[]
    )
    xcorrCheckRejectLevel = pexConfig.Field(
        dtype=float,
        doc="Rejection level for the sum of the input cross-correlations. Arrays which "
            "sum to greater than this are discarded before the clipped mean is calculated.",
        default=2.0
    )
    nSigmaClip = pexConfig.Field(
        dtype=float,
        doc="Number of sigma to clip when calculating means for the cross-correlation",
        default=5
    )
    forceZeroSum = pexConfig.Field(
        dtype=bool,
        doc="Force the correlation matrix to have zero sum by adjusting the (0,0) value?",
        default=False,
    )
    useAmatrix = pexConfig.Field(
        dtype=bool,
        doc="Use the PTC 'a' matrix (Astier et al. 2019 equation 20) "
            "instead of the average of measured covariances?",
        default=False,
    )

    useCovModelSample = pexConfig.Field(
        dtype=bool,
        doc="Use the covariance matrix sampled from the full covariance model "
            "(Astier et al. 2019 equation 20) instead of the average measured covariances?",
        default=False,
    )

    covModelFluxSample = pexConfig.DictField(
        keytype=str,
        itemtype=float,
        doc="Flux level in electrons at which to sample the full covariance "
            "model if useCovModelSample=True. The same level is applied to all "
            "amps if this parameter [`dict`] is passed as {'ALL_AMPS': value}.",
        default={'ALL_AMPS': 25000.0},
    )
    maxIterSuccessiveOverRelaxation = pexConfig.Field(
        dtype=int,
        doc="The maximum number of iterations allowed for the successive over-relaxation method",
        default=10000
    )
    eLevelSuccessiveOverRelaxation = pexConfig.Field(
        dtype=float,
        doc="The target residual error for the successive over-relaxation method",
        default=5.0e-14
    )
    correlationQuadraticFit = pexConfig.Field(
        dtype=bool,
        doc="Use a quadratic fit to find the correlations instead of simple averaging?",
        default=False,
    )
    correlationModelRadius = pexConfig.Field(
        dtype=int,
        doc="Build a model of the correlation coefficients for radii larger than this value in pixels?",
        default=100,
    )
    correlationModelSlope = pexConfig.Field(
        dtype=float,
        doc="Slope of the correlation model for radii larger than correlationModelRadius",
        default=-1.35,
    )


class BrighterFatterKernelSolveTask(pipeBase.PipelineTask):
    """Measure appropriate Brighter-Fatter Kernel from the PTC dataset.
    """

    ConfigClass = BrighterFatterKernelSolveConfig
    _DefaultName = 'cpBfkMeasure'

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        """Ensure that the input and output dimensions are passed along.

        Parameters
        ----------
        butlerQC : `lsst.daf.butler.QuantumContext`
            Butler to operate on.
        inputRefs : `lsst.pipe.base.InputQuantizedConnection`
            Input data refs to load.
        outputRefs : `lsst.pipe.base.OutputQuantizedConnection`
            Output data refs to persist.
        """
        inputs = butlerQC.get(inputRefs)

        # Use the dimensions to set calib/provenance information.
        inputs['inputDims'] = dict(inputRefs.inputPtc.dataId.required)

        # Add calibration provenance info to header.
        kwargs = dict()
        reference = getattr(inputRefs, "inputPtc", None)

        if reference is not None and hasattr(reference, "run"):
            runKey = "PTC_RUN"
            runValue = reference.run
            idKey = "PTC_UUID"
            idValue = str(reference.id)
            dateKey = "PTC_DATE"
            calib = inputs.get("inputPtc", None)
            dateValue = extractCalibDate(calib)

            kwargs[runKey] = runValue
            kwargs[idKey] = idValue
            kwargs[dateKey] = dateValue

            self.log.info("Using %s", reference.run)

        outputs = self.run(**inputs)
        outputs.outputBFK.updateMetadata(setDate=False, **kwargs)

        butlerQC.put(outputs, outputRefs)

    def run(self, inputPtc, dummy, camera, inputDims):
        """Combine covariance information from PTC into brighter-fatter
        kernels.

        Parameters
        ----------
        inputPtc : `lsst.ip.isr.PhotonTransferCurveDataset`
            PTC data containing per-amplifier covariance measurements.
        dummy : `lsst.afw.image.Exposure`
            The exposure used to select the appropriate PTC dataset.
            In almost all circumstances, one of the input exposures
            used to generate the PTC dataset is the best option.
        camera : `lsst.afw.cameraGeom.Camera`
            Camera to use for camera geometry information.
        inputDims : `lsst.daf.butler.DataCoordinate` or `dict`
            DataIds to use to populate the output calibration.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The results struct containing:

            ``outputBFK``
                Resulting Brighter-Fatter Kernel
                (`lsst.ip.isr.BrighterFatterKernel`).
        """
        if len(dummy) == 0:
            self.log.warning("No dummy exposure found.")

        detector = camera[inputDims['detector']]
        detName = detector.getName()

        if self.config.level == 'DETECTOR':
            detectorCorrList = list()
            detectorFluxes = list()

        if not inputPtc.ptcFitType == "FULLCOVARIANCE" and self.config.useCovModelSample:
            raise ValueError("ptcFitType must be FULLCOVARIANCE if useCovModelSample=True.")

        # Get flux sample dictionary
        fluxSampleDict = {ampName: 0.0 for ampName in inputPtc.ampNames}
        for ampName in inputPtc.ampNames:
            if 'ALL_AMPS' in self.config.covModelFluxSample:
                fluxSampleDict[ampName] = self.config.covModelFluxSample['ALL_AMPS']
            elif ampName in self.config.covModelFluxSample:
                fluxSampleDict[ampName] = self.config.covModelFluxSample[ampName]

        bfk = BrighterFatterKernel(camera=camera, detectorId=detector.getId(), level=self.config.level)
        bfk.rawMeans = inputPtc.rawMeans  # ADU
        bfk.rawVariances = inputPtc.rawVars  # ADU^2
        bfk.expIdMask = inputPtc.expIdMask

        # Use the PTC covariances as the cross-correlations. These
        # are scaled before the kernel is generated, which performs
        # the conversion. The input covariances are in (x, y) index
        # ordering, as is the aMatrix.
        bfk.rawXcorrs = inputPtc.covariances  # ADU^2
        bfk.badAmps = inputPtc.badAmps
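        # The kernel footprint covers lags -covMatrixSide..+covMatrixSide
        # in both x and y, so each side has 2*covMatrixSide + 1 elements.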
        bfk.shape = (inputPtc.covMatrixSide*2 + 1, inputPtc.covMatrixSide*2 + 1)
        bfk.gain = inputPtc.gain
        bfk.noise = inputPtc.noise
        bfk.meanXcorrs = dict()
        bfk.valid = dict()
        bfk.updateMetadataFromExposures([inputPtc])

        for amp in detector:
            ampName = amp.getName()
            gain = bfk.gain[ampName]
            noiseMatrix = inputPtc.noiseMatrix[ampName]
            mask = inputPtc.expIdMask[ampName]
            if gain <= 0:
                # We've received very bad data.
                self.log.warning("Impossible gain received from PTC for %s: %f. Skipping bad amplifier.",
                                 ampName, gain)
                bfk.meanXcorrs[ampName] = np.zeros(bfk.shape)
                bfk.ampKernels[ampName] = np.zeros(bfk.shape)
                bfk.rawXcorrs[ampName] = np.zeros((len(mask), inputPtc.covMatrixSide,
                                                   inputPtc.covMatrixSide))
                bfk.valid[ampName] = False
                continue

            # Use inputPtc.expIdMask to get the means, variances, and
            # covariances that were not masked after PTC. The
            # covariances may now have the mask already applied.
            fluxes = np.array(bfk.rawMeans[ampName])[mask]
            variances = np.array(bfk.rawVariances[ampName])[mask]
            covModelList = np.array(inputPtc.covariancesModel[ampName])

            xCorrList = np.array([np.array(xcorr) for xcorr in bfk.rawXcorrs[ampName]])
            if np.sum(mask) < len(xCorrList):
                # Only apply the mask if needed.
                xCorrList = xCorrList[mask]

            fluxes = np.array([flux*gain for flux in fluxes])  # Now in e^-
            variances = np.array([variance*gain*gain for variance in variances])  # Now in (e^-)^2

            # This should duplicate Coulton et al. 2017 Equations 22-29
            # (arXiv:1711.06273).
            scaledCorrList = list()
            corrList = list()
            truncatedFluxes = list()
            for xcorrNum, (xcorr, flux, var) in enumerate(zip(xCorrList, fluxes, variances), 1):
                q = np.array(xcorr) * gain * gain  # xcorr now in (e^-)^2
                q *= 2.0  # Remove factor of 1/2 applied in PTC.
                self.log.info("Amp: %s %d/%d Flux: %f Var: %f Q(0,0): %g Q(1,0): %g Q(0,1): %g",
                              ampName, xcorrNum, len(xCorrList), flux, var, q[0][0], q[1][0], q[0][1])

                # Normalize by the flux, which removes the (0,0)
                # component attributable to Poisson noise. This
                # contains the two "t I delta(x - x')" terms in
                # Coulton et al. 2017 equation 29.
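                # With the factor of 2 restored above, the Poisson
                # contribution at zero lag amounts to 2*flux (in
                # electrons), so subtracting it leaves only the
                # brighter-fatter signal.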
                q[0][0] -= 2.0*(flux)

                if q[0][0] > 0.0:
                    self.log.warning("Amp: %s %d skipped due to value of (variance-mean)=%f",
                                     ampName, xcorrNum, q[0][0])
                    # If we drop an element of ``scaledCorrList``
                    # (which is what this does), we need to ensure we
                    # drop the flux entry as well.
                    continue

                # This removes the "t (I_a^2 + I_b^2)" factor in
                # Coulton et al. 2017 equation 29.
                # The quadratic fit option needs the correlations unscaled.
                q /= -2.0
                unscaled = self._tileArray(q)
                q /= flux**2
                scaled = self._tileArray(q)
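                # xcorrCheck compares the net sum of the scaled
                # correlations to the sum of their absolute values;
                # arrays dominated by noise or bad data are rejected
                # against config.xcorrCheckRejectLevel below.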
                xcorrCheck = np.abs(np.sum(scaled))/np.sum(np.abs(scaled))
                if (xcorrCheck > self.config.xcorrCheckRejectLevel) or not (np.isfinite(xcorrCheck)):
                    self.log.warning("Amp: %s %d skipped due to value of triangle-inequality sum %f",
                                     ampName, xcorrNum, xcorrCheck)
                    continue

                scaledCorrList.append(scaled)
                corrList.append(unscaled)
                truncatedFluxes.append(flux)
                self.log.info("Amp: %s %d/%d Final: %g XcorrCheck: %f",
                              ampName, xcorrNum, len(xCorrList), q[0][0], xcorrCheck)

            fluxes = np.array(truncatedFluxes)

            if len(scaledCorrList) == 0:
                self.log.warning("Amp: %s All inputs rejected for amp!", ampName)
                bfk.meanXcorrs[ampName] = np.zeros(bfk.shape)
                bfk.ampKernels[ampName] = np.zeros(bfk.shape)
                bfk.valid[ampName] = False
                continue

            if self.config.useAmatrix:
                # Use the aMatrix, ignoring the meanXcorr generated above.
                preKernel = np.pad(self._tileArray(-1.0 * np.array(inputPtc.aMatrix[ampName])), ((1, 1)))
            elif self.config.correlationQuadraticFit:
                # Use a quadratic fit to the correlations as a
                # function of flux.
                preKernel = self.quadraticCorrelations(corrList, fluxes, f"Amp: {ampName}")
            elif self.config.useCovModelSample:
                # Sample the full covariance model at a given flux.
                # Use the non-truncated fluxes for this.
                mu = bfk.rawMeans[ampName]
                covTilde = self.sampleCovModel(mu, noiseMatrix, gain,
                                               covModelList, fluxSampleDict[ampName],
                                               f"Amp: {ampName}")
                preKernel = np.pad(self._tileArray(-1.0 * covTilde), ((1, 1)))
            else:
                # Use a simple average of the measured correlations.
                preKernel = self.averageCorrelations(scaledCorrList, f"Amp: {ampName}")

            center = int((bfk.shape[0] - 1) / 2)

            if self.config.forceZeroSum:
                totalSum = np.sum(preKernel)

                if self.config.correlationModelRadius < (preKernel.shape[0] - 1) / 2:
                    # Assume a correlation model of
                    # Corr(r) = -preFactor * r^(2 * slope)
                    preFactor = np.sqrt(preKernel[center, center + 1] * preKernel[center + 1, center])
                    slopeFactor = 2.0 * np.abs(self.config.correlationModelSlope)
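                    # Estimate the model's tail beyond the measured
                    # footprint and include it before the center value
                    # is adjusted to force a zero sum.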
                    totalSum += 2.0*np.pi*(preFactor / (slopeFactor*(center + 0.5))**slopeFactor)

                preKernel[center, center] -= totalSum
                self.log.info("%s Zero-Sum Scale: %g", ampName, totalSum)

            finalSum = np.sum(preKernel)
            bfk.meanXcorrs[ampName] = preKernel

            postKernel = self.successiveOverRelax(preKernel)
            bfk.ampKernels[ampName] = postKernel
            if self.config.level == 'DETECTOR' and ampName not in self.config.ignoreAmpsForAveraging:
                detectorCorrList.extend(scaledCorrList)
                detectorFluxes.extend(fluxes)
            bfk.valid[ampName] = True
            self.log.info("Amp: %s Sum: %g Center Info Pre: %g Post: %g",
                          ampName, finalSum, preKernel[center, center], postKernel[center, center])

        # Assemble a detector kernel?
        if self.config.level == 'DETECTOR':
            if self.config.correlationQuadraticFit:
                preKernel = self.quadraticCorrelations(detectorCorrList, detectorFluxes, f"Det: {detName}")
            else:
                preKernel = self.averageCorrelations(detectorCorrList, f"Det: {detName}")
            finalSum = np.sum(preKernel)
            center = int((bfk.shape[0] - 1) / 2)

            postKernel = self.successiveOverRelax(preKernel)
            bfk.detKernels[detName] = postKernel
            self.log.info("Det: %s Sum: %g Center Info Pre: %g Post: %g",
                          detName, finalSum, preKernel[center, center], postKernel[center, center])

        return pipeBase.Struct(
            outputBFK=bfk,
        )

    def averageCorrelations(self, xCorrList, name):
        """Average input correlations.

        Parameters
        ----------
        xCorrList : `list` [`numpy.array`]
            List of cross-correlations. These are expected to be
            square arrays.
        name : `str`
            Name for log messages.

        Returns
        -------
        meanXcorr : `numpy.array`, (N, N)
            The averaged cross-correlation.
        """
        meanXcorr = np.zeros_like(xCorrList[0])
        xCorrList = np.array(xCorrList)

        sctrl = afwMath.StatisticsControl()
        sctrl.setNumSigmaClip(self.config.nSigmaClip)
        for i in range(np.shape(meanXcorr)[0]):
            for j in range(np.shape(meanXcorr)[1]):
                meanXcorr[i, j] = afwMath.makeStatistics(xCorrList[:, i, j],
                                                         afwMath.MEANCLIP, sctrl).getValue()

        # To match previous definitions, pad by one element.
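        # The tiled correlation arrays have side 2*covMatrixSide - 1;
        # padding a single zero on each edge brings the result to
        # 2*covMatrixSide + 1, matching the kernel shape set in run().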
        meanXcorr = np.pad(meanXcorr, ((1, 1)))

        return meanXcorr

    def quadraticCorrelations(self, xCorrList, fluxList, name):
        """Measure a quadratic correlation model.

        Parameters
        ----------
        xCorrList : `list` [`numpy.array`]
            List of cross-correlations. These are expected to be
            square arrays.
        fluxList : `numpy.array`, (Nflux,)
            Associated list of fluxes.
        name : `str`
            Name for log messages.

        Returns
        -------
        meanXcorr : `numpy.array`, (N, N)
            The quadratic-fit cross-correlation.
        """
        meanXcorr = np.zeros_like(xCorrList[0])
        fluxList = np.square(fluxList)
        xCorrList = np.array(xCorrList)

        for i in range(np.shape(meanXcorr)[0]):
            for j in range(np.shape(meanXcorr)[1]):
                # Fit correlation_i(x, y) = a0 + a1 * (flux_i)^2.  We
                # do not want to transpose, so use (i, j) without
                # inversion.
                linearFit, linearFitErr, chiSq, weights = irlsFit([0.0, 1e-4], fluxList,
                                                                  xCorrList[:, i, j], funcPolynomial,
                                                                  scaleResidual=False)
                meanXcorr[i, j] = linearFit[1]  # Discard the intercept.
                self.log.info("Quad fit meanXcorr[%d,%d] = %g", i, j, linearFit[1])

        # To match previous definitions, pad by one element.
        meanXcorr = np.pad(meanXcorr, ((1, 1)))

        return meanXcorr

    def sampleCovModel(self, fluxes, noiseMatrix, gain, covModelList, flux, name):
        """Sample the covariance model and measure the normalized
        covariance (C-tilde) of Broughton et al. 2023 (eq. 4).

        Parameters
        ----------
        fluxes : `list` [`float`]
            List of fluxes (in ADU).
        noiseMatrix : `numpy.array`, (N, N)
            Noise matrix.
        gain : `float`
            Amplifier gain.
        covModelList : `numpy.array`, (Nflux, N, N)
            List of covariance model matrices. These are
            expected to be square arrays.
        flux : `float`
            Flux in electrons at which to sample the
            covariance model.
        name : `str`
            Name for log messages.

        Returns
        -------
        covTilde : `numpy.array`, (N, N)
            The calculated C-tilde from Broughton et al. 2023 (eq. 4).
        """

        # Get the index of the flux sample
        # (this must be done in electron units).
        ix = np.argmin((fluxes*gain - flux)**2)
        assert len(fluxes) == len(covModelList)

        # Find the nearest measured flux level
        # and the full covariance model at that point.
        nearestFlux = fluxes[ix]
        covModelSample = covModelList[ix]
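
        # As implemented below (Broughton et al. 2023, eq. 4): remove
        # the electronic-noise term N_ij/g^2 from the sampled model
        # covariance, normalize by the squared flux, and subtract the
        # Poisson term mu/g from the zero lag.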
        # Calculate flux sample.
        # covTilde is returned in ADU units.
        covTilde = (covModelSample - noiseMatrix/gain**2)/(nearestFlux**2)
        covTilde[0][0] -= (nearestFlux/gain)/(nearestFlux**2)

        return covTilde

    @staticmethod
    def _tileArray(in_array):
        """Given an input quarter-image, tile/mirror it and return full image.

        Given a square input of side-length n, of the form

        input = array([[1, 2, 3],
                       [4, 5, 6],
                       [7, 8, 9]])

        return an array of size 2n-1 as

        output = array([[ 9, 8, 7, 8, 9],
                        [ 6, 5, 4, 5, 6],
                        [ 3, 2, 1, 2, 3],
                        [ 6, 5, 4, 5, 6],
                        [ 9, 8, 7, 8, 9]])

        Parameters
        ----------
        in_array : `np.array`, (N, N)
            The square input quarter-array.

        Returns
        -------
        output : `np.array`, (2*N - 1, 2*N - 1)
            The full, tiled array.
        """
        assert in_array.shape[0] == in_array.shape[1]
        length = in_array.shape[0] - 1
        output = np.zeros((2*length + 1, 2*length + 1))

        for i in range(length + 1):
            for j in range(length + 1):
                output[i + length, j + length] = in_array[i, j]
                output[-i + length, j + length] = in_array[i, j]
                output[i + length, -j + length] = in_array[i, j]
                output[-i + length, -j + length] = in_array[i, j]
        return output

    def successiveOverRelax(self, source, maxIter=None, eLevel=None):
        """An implementation of the successive over-relaxation (SOR) method.

        A numerical method for solving a system of linear equations
        with faster convergence than the Gauss-Seidel method.

        Parameters
        ----------
        source : `numpy.ndarray`, (N, N)
            The input array.
        maxIter : `int`, optional
            Maximum number of iterations to attempt before aborting.
        eLevel : `float`, optional
            The target error level at which we deem convergence to have
            occurred.

        Returns
        -------
        output : `numpy.ndarray`, (N, N)
            The solution.
        """
        if not maxIter:
            maxIter = self.config.maxIterSuccessiveOverRelaxation
        if not eLevel:
            eLevel = self.config.eLevelSuccessiveOverRelaxation

        assert source.shape[0] == source.shape[1], "Input array must be square"
        # Initialize, and set boundary conditions.
        func = np.zeros([source.shape[0] + 2, source.shape[1] + 2])
        resid = np.zeros([source.shape[0] + 2, source.shape[1] + 2])
        rhoSpe = np.cos(np.pi/source.shape[0])  # Here a square grid is assumed

        # Calculate the initial error.
        for i in range(1, func.shape[0] - 1):
            for j in range(1, func.shape[1] - 1):
                resid[i, j] = (func[i, j - 1] + func[i, j + 1] + func[i - 1, j]
                               + func[i + 1, j] - 4*func[i, j] - source[i - 1, j - 1])
        inError = np.sum(np.abs(resid))

        # Iterate until convergence.
        # We perform two sweeps per cycle,
        # updating 'odd' and 'even' points separately.
        nIter = 0
        omega = 1.0
        dx = 1.0
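        # Each sweep applies the Gauss-Seidel update for the discrete
        # Poisson problem (residual/4 at each interior point), boosted
        # by the over-relaxation factor omega.  omega is updated each
        # half-sweep from the assumed spectral radius rhoSpe
        # (Chebyshev-style acceleration), which speeds convergence
        # relative to plain Gauss-Seidel.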
        while nIter < maxIter*2:
            outError = 0
            if nIter%2 == 0:
                for i in range(1, func.shape[0] - 1, 2):
                    for j in range(1, func.shape[1] - 1, 2):
                        resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j]
                                            + func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
                        func[i, j] += omega*resid[i, j]*.25
                for i in range(2, func.shape[0] - 1, 2):
                    for j in range(2, func.shape[1] - 1, 2):
                        resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j]
                                            + func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
                        func[i, j] += omega*resid[i, j]*.25
            else:
                for i in range(1, func.shape[0] - 1, 2):
                    for j in range(2, func.shape[1] - 1, 2):
                        resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j]
                                            + func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
                        func[i, j] += omega*resid[i, j]*.25
                for i in range(2, func.shape[0] - 1, 2):
                    for j in range(1, func.shape[1] - 1, 2):
                        resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j]
                                            + func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
                        func[i, j] += omega*resid[i, j]*.25
            outError = np.sum(np.abs(resid))
            if outError < inError*eLevel:
                break
            if nIter == 0:
                omega = 1.0/(1 - rhoSpe*rhoSpe/2.0)
            else:
                omega = 1.0/(1 - rhoSpe*rhoSpe*omega/4.0)
            nIter += 1

        if nIter >= maxIter*2:
            self.log.warning("Failure: SuccessiveOverRelaxation did not converge in %s iterations."
                             "\noutError: %s, inError: %s", nIter//2, outError, inError*eLevel)
        else:
            self.log.info("Success: SuccessiveOverRelaxation converged in %s iterations."
                          "\noutError: %s, inError: %s", nIter//2, outError, inError*eLevel)
        return func[1: -1, 1: -1]