from collections import defaultdict

import numpy as np

import lsst.afw.math as afwMath
import lsst.daf.base as dafBase
import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
import lsst.pipe.base.connectionTypes as cT


__all__ = ["CpFlatMeasureTask", "CpFlatMeasureTaskConfig",
           "CpFlatNormalizationTask", "CpFlatNormalizationTaskConfig"]


class CpFlatMeasureConnections(pipeBase.PipelineTaskConnections,
                               dimensions=("instrument", "exposure", "detector")):
    inputExp = cT.Input(
        doc="Input exposure to measure statistics from.",
        storageClass="Exposure",
        dimensions=("instrument", "exposure", "detector"),
    )
    outputStats = cT.Output(
        doc="Output statistics to write.",
        storageClass="PropertyList",
        dimensions=("instrument", "exposure", "detector"),
    )


class CpFlatMeasureTaskConfig(pipeBase.PipelineTaskConfig,
                              pipelineConnections=CpFlatMeasureConnections):
    maskNameList = pexConfig.ListField(
        dtype=str,
        doc="Mask list to exclude from statistics calculations.",
        default=['DETECTED', 'BAD', 'NO_DATA'],
    )
    doVignette = pexConfig.Field(
        dtype=bool,
        doc="Mask vignetted regions?",
    )
    numSigmaClip = pexConfig.Field(
        dtype=float,
        doc="Rejection threshold (sigma) for statistics clipping.",
    )
    clipMaxIter = pexConfig.Field(
        dtype=int,
        doc="Max number of clipping iterations to apply.",
    )


class CpFlatMeasureTask(pipeBase.PipelineTask,
                        pipeBase.CmdLineTask):
77 """Apply extra masking and measure image statistics.
79 ConfigClass = CpFlatMeasureTaskConfig
80 _DefaultName =
"cpFlatMeasure"

    def run(self, inputExp):
        """Mask ISR processed FLAT exposures to ensure consistent statistics.

        Parameters
        ----------
        inputExp : `lsst.afw.image.Exposure`
            Post-ISR processed exposure to measure.

        Returns
        -------
        outputStats : `lsst.daf.base.PropertyList`
            List containing the statistics.
        """
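        # For illustration, the statistics are written as flat PropertyList
        # entries (the values shown are hypothetical); the normalization
        # task later reads back the MEDIAN and N entries:
        #
        #     DETECTOR_MEDIAN = 10234.5   # clipped mean over the detector
        #     DETECTOR_SIGMA = 52.1
        #     DETECTOR_N = 4095876
        #     AMP_NAME_0 = "C00"          # plus MEDIAN/SIGMA/N per amplifier
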
        if self.config.doVignette:
            # The vignetting helper called here could not be recovered from
            # the damaged source; ``VignetteExposure`` is a placeholder name.
            VignetteExposure(inputExp, doSetValue=False, log=self.log)
        mask = inputExp.getMask()
        maskVal = mask.getPlaneBitMask(self.config.maskNameList)
        statsControl = afwMath.StatisticsControl(self.config.numSigmaClip,
                                                 self.config.clipMaxIter)
        statsControl.setAndMask(maskVal)

        outputStats = dafBase.PropertyList()
        # Detector-level clipped statistics.
        stats = afwMath.makeStatistics(inputExp.getMaskedImage(),
                                       afwMath.MEANCLIP | afwMath.STDEVCLIP | afwMath.NPOINT,
                                       statsControl)
        outputStats['DETECTOR_MEDIAN'] = stats.getValue(afwMath.MEANCLIP)
        outputStats['DETECTOR_SIGMA'] = stats.getValue(afwMath.STDEVCLIP)
        outputStats['DETECTOR_N'] = stats.getValue(afwMath.NPOINT)
        self.log.info("Stats: median=%f sigma=%f n=%d",
                      outputStats['DETECTOR_MEDIAN'],
                      outputStats['DETECTOR_SIGMA'],
                      outputStats['DETECTOR_N'])
        # Repeat the measurement for each amplifier segment.
        for ampIdx, amp in enumerate(inputExp.getDetector()):
            ampName = amp.getName()
            ampExp = inputExp.Factory(inputExp, amp.getBBox())
            stats = afwMath.makeStatistics(ampExp.getMaskedImage(),
                                           afwMath.MEANCLIP | afwMath.STDEVCLIP | afwMath.NPOINT,
                                           statsControl)
            outputStats[f'AMP_NAME_{ampIdx}'] = ampName
            outputStats[f'AMP_MEDIAN_{ampIdx}'] = stats.getValue(afwMath.MEANCLIP)
            outputStats[f'AMP_SIGMA_{ampIdx}'] = stats.getValue(afwMath.STDEVCLIP)
            outputStats[f'AMP_N_{ampIdx}'] = stats.getValue(afwMath.NPOINT)

        return pipeBase.Struct(
            outputStats=outputStats,
        )


class CpFlatNormalizationConnections(pipeBase.PipelineTaskConnections,
                                     dimensions=("instrument", "physical_filter")):
    inputMDs = cT.Input(
        name="cpFlatProc_metadata",
        doc="Input metadata for each visit/detector in input set.",
        storageClass="PropertyList",
        dimensions=("instrument", "physical_filter", "detector", "exposure"),
        multiple=True,
    )
    camera = cT.PrerequisiteInput(
        doc="Input camera to use for gain lookup.",
        storageClass="Camera",
        dimensions=("instrument", "calibration_label"),
    )
    outputScales = cT.Output(
        name="cpFlatNormScales",
        doc="Output combined proposed calibration.",
        storageClass="StructuredDataDict",
        dimensions=("instrument", "physical_filter"),
    )


class CpFlatNormalizationTaskConfig(pipeBase.PipelineTaskConfig,
                                    pipelineConnections=CpFlatNormalizationConnections):
    level = pexConfig.ChoiceField(
        dtype=str,
        doc="Which level to apply normalizations.",
        allowed={
            'DETECTOR': "Correct using full detector statistics.",
            'AMP': "Correct using individual amplifiers.",
        },
    )
    scaleMaxIter = pexConfig.Field(
        dtype=int,
        doc="Max number of iterations to use in scale solver.",
    )


class CpFlatNormalizationTask(pipeBase.PipelineTask,
                              pipeBase.CmdLineTask):
180 """Rescale merged flat frames to remove unequal screen illumination.
182 ConfigClass = CpFlatNormalizationTaskConfig
183 _DefaultName =
"cpFlatNorm"

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        inputs = butlerQC.get(inputRefs)

        # Record the exposure/detector ids of each input so the metadata can
        # be matched back to its source exposure in run().
        dimensions = [exp.dataId.byName() for exp in inputRefs.inputMDs]
        inputs['inputDims'] = dimensions

        outputs = self.run(**inputs)
        butlerQC.put(outputs, outputRefs)

    def run(self, inputMDs, inputDims, camera):
        """Normalize FLAT exposures to a consistent level.

        Parameters
        ----------
        inputMDs : `list` [`lsst.daf.base.PropertyList`]
            Amplifier-level metadata used to construct scales.
        inputDims : `list` [`dict`]
            List of dictionaries of input data dimensions/values.
            Each list entry should contain:

            ``"exposure"``
                exposure id value (`int`)
            ``"detector"``
                detector id value (`int`)
        camera : `lsst.afw.cameraGeom.Camera`
            Camera definition, used to determine the amplifiers on each
            detector.

        Returns
        -------
        outputScales : `dict` [`dict` [`dict` [`float`]]]
            Dictionary of scales, indexed by detector (`int`),
            amplifier (`str`), and exposure (`int`).

        Raises
        ------
        KeyError
            Raised if the input dimensions do not contain detector and
            exposure, or if the metadata does not contain the expected
            statistics entries.
        """
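        # Shape of the returned scales, for illustration only (the detector
        # id, amplifier name, exposure id, and values are hypothetical):
        #
        #     outputScales['expScale'][20]['C00'][12345] = 0.98
        #     outputScales['detScale'][20] = 1.02          # DETECTOR level
        #     outputScales['detScale'][20]['C00'] = 1.02   # AMP level
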
        # Sort the exposures and detectors present in the inputs, and build
        # maps from id value to matrix index.
        expSet = sorted(set([d['exposure'] for d in inputDims]))
        detSet = sorted(set([d['detector'] for d in inputDims]))

        expMap = {exposureId: idx for idx, exposureId in enumerate(expSet)}
        detMap = {detectorId: idx for idx, detectorId in enumerate(detSet)}

        nExp = len(expSet)
        nDet = len(detSet)

        if self.config.level == 'DETECTOR':
            bgMatrix = np.zeros((nDet, nExp))
            bgCounts = np.ones((nDet, nExp))
        elif self.config.level == 'AMP':
            nAmp = len(camera[0])
            bgMatrix = np.zeros((nDet * nAmp, nExp))
            bgCounts = np.ones((nDet * nAmp, nExp))
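        # bgMatrix holds one row per detector at DETECTOR level, or nAmp
        # consecutive rows per detector at AMP level (row index
        # detMap[detectorId] * nAmp + ampIdx), with one column per exposure.
        # bgCounts holds the matching pixel counts, used below as weights.
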
        # Fill the background matrix from the per-exposure metadata.
        for inMetadata, inDimensions in zip(inputMDs, inputDims):
            try:
                exposureId = inDimensions['exposure']
                detectorId = inDimensions['detector']
            except Exception as e:
                raise KeyError("Cannot find expected dimensions in %s" % (inDimensions, )) from e

            if self.config.level == 'DETECTOR':
                detId = detMap[detectorId]
                expId = expMap[exposureId]
                try:
                    value = inMetadata.get('DETECTOR_MEDIAN')
                    count = inMetadata.get('DETECTOR_N')
                except Exception as e:
                    raise KeyError("Cannot read expected metadata string.") from e

                if np.isfinite(value):
                    bgMatrix[detId][expId] = value
                    bgCounts[detId][expId] = count
                else:
                    bgMatrix[detId][expId] = np.nan
                    bgCounts[detId][expId] = 1
            elif self.config.level == 'AMP':
                detector = camera[detectorId]

                detId = detMap[detectorId] * nAmp
                expId = expMap[exposureId]

                for ampIdx, amp in enumerate(detector):
                    try:
                        value = inMetadata.get(f'AMP_MEDIAN_{ampIdx}')
                        count = inMetadata.get(f'AMP_N_{ampIdx}')
                    except Exception as e:
                        raise KeyError("Cannot read expected metadata string.") from e

                    detAmpId = detId + ampIdx
                    if np.isfinite(value):
                        bgMatrix[detAmpId][expId] = value
                        bgCounts[detAmpId][expId] = count
                    else:
                        bgMatrix[detAmpId][expId] = np.nan
                        bgCounts[detAmpId][expId] = 1
        scaleResult = self.measureScales(bgMatrix, bgCounts,
                                         iterations=self.config.scaleMaxIter)
        expScales = scaleResult.expScales
        detScales = scaleResult.detScales

        outputScales = defaultdict(lambda: defaultdict(
            lambda: defaultdict(lambda: defaultdict(float))))
        if self.config.level == 'DETECTOR':
            for detId, det in enumerate(detSet):
                for amp in camera[detId]:
                    for expId, exp in enumerate(expSet):
                        outputScales['expScale'][det][amp.getName()][exp] = expScales[expId].tolist()
                outputScales['detScale'][det] = detScales[detId].tolist()
        elif self.config.level == 'AMP':
            for detId, det in enumerate(detSet):
                for ampIdx, amp in enumerate(camera[detId]):
                    for expId, exp in enumerate(expSet):
                        outputScales['expScale'][det][amp.getName()][exp] = expScales[expId].tolist()
                    detAmpId = detId * nAmp + ampIdx
                    outputScales['detScale'][det][amp.getName()] = detScales[detAmpId].tolist()

        return pipeBase.Struct(
            outputScales=outputScales,
        )
311 """Convert backgrounds to exposure and detector components.
315 bgMatrix : `np.ndarray`, (nDetectors, nExposures)
316 Input backgrounds indexed by exposure (axis=0) and
318 bgCounts : `np.ndarray`, (nDetectors, nExposures), optional
319 Input pixel counts used to in measuring bgMatrix, indexed
321 iterations : `int`, optional
322 Number of iterations to use in decomposition.
326 scaleResult : `lsst.pipe.base.Struct`
327 Result struct containing fields:
330 Output E vector of exposure level scalings
331 (`np.array`, (nExposures)).
333 Output G vector of detector level scalings
334 (`np.array`, (nExposures)).
336 Expected model bgMatrix values, calculated from E and G
337 (`np.ndarray`, (nDetectors, nExposures)).
342 The set of background measurements B[exposure, detector] of
343 flat frame data should be defined by a "Cartesian" product of
344 two vectors, E[exposure] and G[detector]. The E vector
345 represents the total flux incident on the focal plane. In a
346 perfect camera, this is simply the sum along the columns of B
349 However, this simple model ignores differences in detector
350 gains, the vignetting of the detectors, and the illumination
351 pattern of the source lamp. The G vector describes these
352 detector dependent differences, which should be identical over
353 different exposures. For a perfect lamp of unit total
354 intensity, this is simply the sum along the rows of B
355 (np.sum(B, axis=1)). This algorithm divides G by the total
356 flux level, to provide the relative (not absolute) scales
359 The algorithm here, from pipe_drivers/constructCalibs.py and
360 from there from Eugene Magnier/PanSTARRS [1]_, attempts to
361 iteratively solve this decomposition from initial "perfect" E
362 and G vectors. The operation is performed in log space to
363 reduce the multiply and divides to linear additions and
368 .. [1] https://svn.pan-starrs.ifa.hawaii.edu/trac/ipp/browser/trunk/psModules/src/detrend/pmFlatNormalize.c # noqa: E501
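        # Toy worked example (hypothetical numbers): two detectors with
        # relative responses G = [1.0, 0.5] observing three exposures of
        # flux E = [100, 200, 400] give
        #
        #     B = [[100., 200., 400.],
        #          [ 50., 100., 200.]]
        #
        # The alternating updates below recover E = [75, 150, 300] and
        # G = [4/3, 2/3], i.e. G normalized so its weighted mean is one;
        # for this exactly separable B the solution is reached after a
        # single iteration.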
        numExps = bgMatrix.shape[1]
        numChips = bgMatrix.shape[0]
        if bgCounts is None:
            bgCounts = np.ones_like(bgMatrix)

        # Work in log space so the multiplicative model B = E * G becomes
        # additive, masking non-finite measurements.
        logMeas = np.log(bgMatrix)
        logMeas = np.ma.masked_array(logMeas, ~np.isfinite(logMeas))
        logG = np.zeros(numChips)
        logE = np.array([np.average(logMeas[:, iexp] - logG,
                                    weights=bgCounts[:, iexp])
                         for iexp in range(numExps)])
        for iteration in range(iterations):
            # Update the detector vector G from the current exposure vector E.
            logG = np.array([np.average(logMeas[ichip, :] - logE,
                                        weights=bgCounts[ichip, :])
                             for ichip in range(numChips)])

            # Replace unconstrained detectors with the mean of the others.
            bad = np.isnan(logG)
            if np.any(bad):
                logG[bad] = logG[~bad].mean()

            # Update the exposure vector E from the new G.
            logE = np.array([np.average(logMeas[:, iexp] - logG,
                                        weights=bgCounts[:, iexp])
                             for iexp in range(numExps)])

            # Normalize G to a relative scale by dividing out the weighted
            # mean flux level.
            fluxLevel = np.average(np.exp(logG), weights=np.sum(bgCounts, axis=1))
            logG -= np.log(fluxLevel)
            self.log.debug(f"ITER {iteration}: Flux: {fluxLevel}")
            self.log.debug(f"Exps: {np.exp(logE)}")
            self.log.debug(f"{np.mean(logG)}")

        # Recompute the exposure scales with the final, normalized G.
        logE = np.array([np.average(logMeas[:, iexp] - logG,
                                    weights=bgCounts[:, iexp])
                         for iexp in range(numExps)])

        bgModel = np.exp(logE[np.newaxis, :] + logG[:, np.newaxis])
        return pipeBase.Struct(
            expScales=np.exp(logE),
            detScales=np.exp(logG),
            bgModel=bgModel,
        )