from collections import defaultdict

import numpy as np

import lsst.afw.math as afwMath
import lsst.daf.base as dafBase
import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
import lsst.pipe.base.connectionTypes as cT

from ._lookupStaticCalibration import lookupStaticCalibration

__all__ = ["CpFlatMeasureTask", "CpFlatMeasureTaskConfig",
           "CpFlatNormalizationTask", "CpFlatNormalizationTaskConfig"]


class CpFlatMeasureConnections(pipeBase.PipelineTaskConnections,
                               dimensions=("instrument", "exposure", "detector")):
    inputExp = cT.Input(
        doc="Input exposure to measure statistics from.",
        storageClass="Exposure",
        dimensions=("instrument", "exposure", "detector"),
    )
    outputStats = cT.Output(
        doc="Output statistics to write.",
        storageClass="PropertyList",
        dimensions=("instrument", "exposure", "detector"),
    )


class CpFlatMeasureTaskConfig(pipeBase.PipelineTaskConfig,
                              pipelineConnections=CpFlatMeasureConnections):
    maskNameList = pexConfig.ListField(
        dtype=str,
        doc="Mask list to exclude from statistics calculations.",
        default=['DETECTED', 'BAD', 'NO_DATA'],
    )
    doVignette = pexConfig.Field(
        dtype=bool,
        doc="Mask vignetted regions?",
    )
    numSigmaClip = pexConfig.Field(
        dtype=float,
        doc="Rejection threshold (sigma) for statistics clipping.",
    )
    clipMaxIter = pexConfig.Field(
        dtype=int,
        doc="Max number of clipping iterations to apply.",
    )


class CpFlatMeasureTask(pipeBase.PipelineTask,
                        pipeBase.CmdLineTask):
    """Apply extra masking and measure image statistics.
    """

    ConfigClass = CpFlatMeasureTaskConfig
    _DefaultName = "cpFlatMeasure"

    def run(self, inputExp):
        """Mask ISR processed FLAT exposures to ensure consistent statistics.

        Parameters
        ----------
        inputExp : `lsst.afw.image.Exposure`
            Post-ISR processed exposure to measure.

        Returns
        -------
        outputStats : `lsst.daf.base.PropertyList`
            List containing the statistics.
        """
        # Optionally mask vignetted regions before measuring statistics.
        if self.config.doVignette:
            VignetteExposure(inputExp,
                             doSetValue=False, log=self.log)
        mask = inputExp.getMask()
        maskVal = mask.getPlaneBitMask(self.config.maskNameList)
        statsControl = afwMath.StatisticsControl(self.config.numSigmaClip,
                                                 self.config.clipMaxIter,
                                                 maskVal)
        statsControl.setAndMask(maskVal)

        outputStats = dafBase.PropertyList()
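
        # Full-detector clipped statistics.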
        stats = afwMath.makeStatistics(inputExp.getMaskedImage(),
                                       afwMath.MEANCLIP | afwMath.STDEVCLIP | afwMath.NPOINT,
                                       statsControl)
        outputStats['DETECTOR_MEDIAN'] = stats.getValue(afwMath.MEANCLIP)
        outputStats['DETECTOR_SIGMA'] = stats.getValue(afwMath.STDEVCLIP)
        outputStats['DETECTOR_N'] = stats.getValue(afwMath.NPOINT)
        self.log.info("Stats: median=%f sigma=%f n=%d",
                      outputStats['DETECTOR_MEDIAN'],
                      outputStats['DETECTOR_SIGMA'],
                      outputStats['DETECTOR_N'])
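
        # Repeat the clipped statistics for each amplifier segment.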
        for ampIdx, amp in enumerate(inputExp.getDetector()):
            ampName = amp.getName()
            ampExp = inputExp.Factory(inputExp, amp.getBBox())
            stats = afwMath.makeStatistics(ampExp.getMaskedImage(),
                                           afwMath.MEANCLIP | afwMath.STDEVCLIP | afwMath.NPOINT,
                                           statsControl)
            outputStats[f'AMP_NAME_{ampIdx}'] = ampName
            outputStats[f'AMP_MEDIAN_{ampIdx}'] = stats.getValue(afwMath.MEANCLIP)
            outputStats[f'AMP_SIGMA_{ampIdx}'] = stats.getValue(afwMath.STDEVCLIP)
            outputStats[f'AMP_N_{ampIdx}'] = stats.getValue(afwMath.NPOINT)

        return pipeBase.Struct(
            outputStats=outputStats,
        )


class CpFlatNormalizationConnections(pipeBase.PipelineTaskConnections,
                                     dimensions=("instrument", "physical_filter")):
    inputMDs = cT.Input(
        name="cpFlatProc_metadata",
        doc="Input metadata for each visit/detector in input set.",
        storageClass="PropertyList",
        dimensions=("instrument", "physical_filter", "detector", "exposure"),
    )
    camera = cT.PrerequisiteInput(
        doc="Input camera to use for gain lookup.",
        storageClass="Camera",
        dimensions=("instrument",),
        lookupFunction=lookupStaticCalibration,
    )

    outputScales = cT.Output(
        name="cpFlatNormScales",
        doc="Output combined proposed calibration.",
        storageClass="StructuredDataDict",
        dimensions=("instrument", "physical_filter"),
    )


class CpFlatNormalizationTaskConfig(pipeBase.PipelineTaskConfig,
                                    pipelineConnections=CpFlatNormalizationConnections):
    level = pexConfig.ChoiceField(
        dtype=str,
        doc="Which level to apply normalizations.",
        allowed={
            'DETECTOR': "Correct using full detector statistics.",
            'AMP': "Correct using individual amplifiers.",
        },
    )
    scaleMaxIter = pexConfig.Field(
        dtype=int,
        doc="Max number of iterations to use in scale solver.",
    )


class CpFlatNormalizationTask(pipeBase.PipelineTask,
                              pipeBase.CmdLineTask):
    """Rescale merged flat frames to remove unequal screen illumination.
    """

    ConfigClass = CpFlatNormalizationTaskConfig
    _DefaultName = "cpFlatNorm"

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        inputs = butlerQC.get(inputRefs)
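
        # Record the dimension values (exposure, detector) of each input
        # metadata dataset so run() can associate each PropertyList with
        # its row and column in the background matrix.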
        dimensions = [exp.dataId.byName() for exp in inputRefs.inputMDs]
        inputs['inputDims'] = dimensions

        outputs = self.run(**inputs)
        butlerQC.put(outputs, outputRefs)

    def run(self, inputMDs, inputDims, camera):
        """Normalize FLAT exposures to a consistent level.

        Parameters
        ----------
        inputMDs : `list` [`lsst.daf.base.PropertyList`]
            Amplifier-level metadata used to construct scales.
        inputDims : `list` [`dict`]
            List of dictionaries of input data dimensions/values.
            Each list entry should contain:

            ``"exposure"``
                Exposure id value (`int`).
            ``"detector"``
                Detector id value (`int`).
        camera : `lsst.afw.cameraGeom.Camera`
            Camera matching the input data; used to identify the
            amplifiers belonging to each detector.

        Returns
        -------
        outputScales : `dict` [`dict` [`dict` [`float`]]]
            Dictionary of scales, indexed by detector (`int`),
            amplifier name (`str`), and exposure (`int`).

        Raises
        ------
        KeyError
            Raised if the input dimensions do not contain detector and
            exposure, or if the metadata does not contain the expected
            keys.
        """
        expSet = sorted(set([d['exposure'] for d in inputDims]))
        detSet = sorted(set([d['detector'] for d in inputDims]))

        expMap = {exposureId: idx for idx, exposureId in enumerate(expSet)}
        detMap = {detectorId: idx for idx, detectorId in enumerate(detSet)}

        nExp = len(expSet)
        nDet = len(detSet)
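
        # Build a background matrix with one row per detector (or per
        # detector/amplifier segment at AMP level) and one column per
        # exposure; bgCounts holds the matching pixel counts, used as
        # weights by the scale solver.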
        if self.config.level == 'DETECTOR':
            bgMatrix = np.zeros((nDet, nExp))
            bgCounts = np.ones((nDet, nExp))
        elif self.config.level == 'AMP':
            nAmp = len(camera[detSet[0]])
            bgMatrix = np.zeros((nDet * nAmp, nExp))
            bgCounts = np.ones((nDet * nAmp, nExp))
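
        # Populate the matrices from the per-exposure statistics written
        # by CpFlatMeasureTask (DETECTOR_* and AMP_* entries).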
        for inMetadata, inDimensions in zip(inputMDs, inputDims):
            try:
                exposureId = inDimensions['exposure']
                detectorId = inDimensions['detector']
            except Exception as e:
                raise KeyError("Cannot find expected dimensions in %s" % (inDimensions, )) from e
            if self.config.level == 'DETECTOR':
                detIdx = detMap[detectorId]
                expIdx = expMap[exposureId]

                try:
                    value = inMetadata.get('DETECTOR_MEDIAN')
                    count = inMetadata.get('DETECTOR_N')
                except Exception as e:
                    raise KeyError("Cannot read expected metadata string.") from e

                if np.isfinite(value):
                    bgMatrix[detIdx][expIdx] = value
                    bgCounts[detIdx][expIdx] = count
                else:
                    bgMatrix[detIdx][expIdx] = np.nan
                    bgCounts[detIdx][expIdx] = 1
            elif self.config.level == 'AMP':
                detector = camera[detectorId]

                detIdx = detMap[detectorId] * nAmp
                expIdx = expMap[exposureId]

                for ampIdx, amp in enumerate(detector):
                    try:
                        value = inMetadata.get(f'AMP_MEDIAN_{ampIdx}')
                        count = inMetadata.get(f'AMP_N_{ampIdx}')
                    except Exception as e:
                        raise KeyError("Cannot read expected metadata string.") from e

                    detAmpIdx = detIdx + ampIdx
                    if np.isfinite(value):
                        bgMatrix[detAmpIdx][expIdx] = value
                        bgCounts[detAmpIdx][expIdx] = count
                    else:
                        bgMatrix[detAmpIdx][expIdx] = np.nan
                        bgCounts[detAmpIdx][expIdx] = 1

        scaleResult = self.measureScales(bgMatrix, bgCounts, iterations=self.config.scaleMaxIter)

        expScales = scaleResult.expScales
        detScales = scaleResult.detScales
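
        # Repackage the scale vectors into nested dictionaries keyed by
        # scale type ('expScale'/'detScale'), detector id, amplifier name,
        # and exposure id, matching the StructuredDataDict output.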
        outputScales = defaultdict(
            lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(float))))
        if self.config.level == 'DETECTOR':
            for detIdx, det in enumerate(detSet):
                for amp in camera[det]:
                    for expIdx, exp in enumerate(expSet):
                        outputScales['expScale'][det][amp.getName()][exp] = expScales[expIdx].tolist()

                outputScales['detScale'][det] = detScales[detIdx].tolist()
        elif self.config.level == 'AMP':
            for detIdx, det in enumerate(detSet):
                for ampIdx, amp in enumerate(camera[det]):
                    for expIdx, exp in enumerate(expSet):
                        outputScales['expScale'][det][amp.getName()][exp] = expScales[expIdx].tolist()

                    # detScales rows are ordered detector-major (detector
                    # index * nAmp + amplifier index), matching how
                    # bgMatrix was filled above.
                    detAmpIdx = detIdx * nAmp + ampIdx
                    outputScales['detScale'][det][amp.getName()] = detScales[detAmpIdx].tolist()

        return pipeBase.Struct(
            outputScales=outputScales,
        )
318 """Convert backgrounds to exposure and detector components.
322 bgMatrix : `np.ndarray`, (nDetectors, nExposures)
323 Input backgrounds indexed by exposure (axis=0) and
325 bgCounts : `np.ndarray`, (nDetectors, nExposures), optional
326 Input pixel counts used to in measuring bgMatrix, indexed
328 iterations : `int`, optional
329 Number of iterations to use in decomposition.
333 scaleResult : `lsst.pipe.base.Struct`
334 Result struct containing fields:
337 Output E vector of exposure level scalings
338 (`np.array`, (nExposures)).
340 Output G vector of detector level scalings
341 (`np.array`, (nExposures)).
343 Expected model bgMatrix values, calculated from E and G
344 (`np.ndarray`, (nDetectors, nExposures)).
349 The set of background measurements B[exposure, detector] of
350 flat frame data should be defined by a "Cartesian" product of
351 two vectors, E[exposure] and G[detector]. The E vector
352 represents the total flux incident on the focal plane. In a
353 perfect camera, this is simply the sum along the columns of B
356 However, this simple model ignores differences in detector
357 gains, the vignetting of the detectors, and the illumination
358 pattern of the source lamp. The G vector describes these
359 detector dependent differences, which should be identical over
360 different exposures. For a perfect lamp of unit total
361 intensity, this is simply the sum along the rows of B
362 (np.sum(B, axis=1)). This algorithm divides G by the total
363 flux level, to provide the relative (not absolute) scales
366 The algorithm here, from pipe_drivers/constructCalibs.py and
367 from there from Eugene Magnier/PanSTARRS [1]_, attempts to
368 iteratively solve this decomposition from initial "perfect" E
369 and G vectors. The operation is performed in log space to
370 reduce the multiply and divides to linear additions and
375 .. [1] https://svn.pan-starrs.ifa.hawaii.edu/trac/ipp/browser/trunk/psModules/src/detrend/pmFlatNormalize.c # noqa: E501
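
        Examples
        --------
        A minimal synthetic sketch of the decomposition in plain numpy,
        illustrative only; it mirrors the algorithm rather than calling
        this method.  Build ``B`` from known exposure and detector
        scales, alternate the log-space averages, and check that the
        recovered product reproduces ``B``.

        >>> import numpy as np
        >>> eTrue = np.array([1.0, 2.0, 4.0])   # per-exposure flux
        >>> gTrue = np.array([0.5, 1.0])        # per-detector response
        >>> B = gTrue[:, np.newaxis] * eTrue[np.newaxis, :]
        >>> logB = np.log(B)
        >>> logG = np.zeros(2)
        >>> logE = (logB - logG[:, np.newaxis]).mean(axis=0)
        >>> for _ in range(10):
        ...     logG = (logB - logE[np.newaxis, :]).mean(axis=1)
        ...     logE = (logB - logG[:, np.newaxis]).mean(axis=0)
        >>> bool(np.allclose(np.exp(logG[:, np.newaxis] + logE[np.newaxis, :]), B))
        True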
        """
        numExps = bgMatrix.shape[1]
        numChips = bgMatrix.shape[0]
        if bgCounts is None:
            bgCounts = np.ones_like(bgMatrix)

        logMeas = np.log(bgMatrix)
        logMeas = np.ma.masked_array(logMeas, ~np.isfinite(logMeas))
        logG = np.zeros(numChips)
        logE = np.array([np.average(logMeas[:, iexp] - logG,
                                    weights=bgCounts[:, iexp])
                         for iexp in range(numExps)])
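
        # Alternately re-estimate the detector scales (G) and exposure
        # scales (E) as count-weighted averages in log space, rescaling G
        # by the weighted mean flux level each iteration so it remains a
        # relative scale.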
        for iter in range(iterations):
            logG = np.array([np.average(logMeas[ichip, :] - logE,
                                        weights=bgCounts[ichip, :])
                             for ichip in range(numChips)])

            # Replace any non-finite detector scales with the mean of the
            # finite ones.
            bad = ~np.isfinite(logG)
            if np.any(bad):
                logG[bad] = logG[~bad].mean()

            logE = np.array([np.average(logMeas[:, iexp] - logG,
                                        weights=bgCounts[:, iexp])
                             for iexp in range(numExps)])

            fluxLevel = np.average(np.exp(logG), weights=np.sum(bgCounts, axis=1))
            logG -= np.log(fluxLevel)
            self.log.debug(f"ITER {iter}: Flux: {fluxLevel}")
            self.log.debug(f"Exps: {np.exp(logE)}")
            self.log.debug(f"Mean logG: {np.mean(logG)}")

        logE = np.array([np.average(logMeas[:, iexp] - logG,
                                    weights=bgCounts[:, iexp])
                         for iexp in range(numExps)])
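
        # Reconstruct the model background implied by the fitted scales:
        # since log(B) ~ log(E) + log(G), the model is exp(logE + logG)
        # broadcast over the (detector, exposure) grid.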
        bgModel = np.exp(logE[np.newaxis, :] + logG[:, np.newaxis])

        return pipeBase.Struct(
            expScales=np.exp(logE),
            detScales=np.exp(logG),
            bgModel=bgModel,
        )