from collections import defaultdict

import numpy as np

import lsst.afw.math as afwMath
import lsst.daf.base as dafBase
import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
import lsst.pipe.base.connectionTypes as cT

from ._lookupStaticCalibration import lookupStaticCalibration
__all__ = ["CpFlatMeasureTask", "CpFlatMeasureTaskConfig",
           "CpFlatNormalizationTask", "CpFlatNormalizationTaskConfig"]


class CpFlatMeasureConnections(pipeBase.PipelineTaskConnections,
                               dimensions=("instrument", "exposure", "detector")):
    inputExp = cT.Input(
        doc="Input exposure to measure statistics from.",
        storageClass="Exposure",
        dimensions=("instrument", "exposure", "detector"),
    )
    outputStats = cT.Output(
        doc="Output statistics to write.",
        storageClass="PropertyList",
        dimensions=("instrument", "exposure", "detector"),
    )


class CpFlatMeasureTaskConfig(pipeBase.PipelineTaskConfig,
                              pipelineConnections=CpFlatMeasureConnections):
    maskNameList = pexConfig.ListField(
        dtype=str,
        doc="Mask list to exclude from statistics calculations.",
        default=['DETECTED', 'BAD', 'NO_DATA'],
    )
    doVignette = pexConfig.Field(
        dtype=bool,
        doc="Mask vignetted regions?",
    )
    numSigmaClip = pexConfig.Field(
        dtype=float,
        doc="Rejection threshold (sigma) for statistics clipping.",
    )
    clipMaxIter = pexConfig.Field(
        dtype=int,
        doc="Max number of clipping iterations to apply.",
    )


class CpFlatMeasureTask(pipeBase.PipelineTask,
                        pipeBase.CmdLineTask):
    """Apply extra masking and measure image statistics.
    """

    ConfigClass = CpFlatMeasureTaskConfig
    _DefaultName = "cpFlatMeasure"

    def run(self, inputExp):
        """Mask ISR processed FLAT exposures to ensure consistent statistics.

        Parameters
        ----------
        inputExp : `lsst.afw.image.Exposure`
            Post-ISR processed exposure to measure.

        Returns
        -------
        outputStats : `lsst.daf.base.PropertyList`
            List containing the statistics.
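
        A sketch of reading the result (key names follow the body below;
        ``task`` and the amplifier index ``0`` are illustrative only):

        >>> stats = task.run(inputExp).outputStats  # doctest: +SKIP
        >>> stats['DETECTOR_MEDIAN'], stats['DETECTOR_N']  # doctest: +SKIP
        >>> stats['AMP_NAME_0'], stats['AMP_MEDIAN_0']  # doctest: +SKIP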
        """
        if self.config.doVignette:
            VignetteExposure(inputExp, doUpdateMask=True, maskPlane='BAD',
                             doSetValue=False, log=self.log)
        mask = inputExp.getMask()
        maskVal = mask.getPlaneBitMask(self.config.maskNameList)
        statsControl = afwMath.StatisticsControl(self.config.numSigmaClip,
                                                 self.config.clipMaxIter)
        statsControl.setAndMask(maskVal)
        outputStats = dafBase.PropertyList()
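
        # Detector-level statistics.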
        stats = afwMath.makeStatistics(inputExp.getMaskedImage(),
                                       afwMath.MEANCLIP | afwMath.STDEVCLIP | afwMath.NPOINT,
                                       statsControl)
        outputStats['DETECTOR_MEDIAN'] = stats.getValue(afwMath.MEANCLIP)
        outputStats['DETECTOR_SIGMA'] = stats.getValue(afwMath.STDEVCLIP)
        outputStats['DETECTOR_N'] = stats.getValue(afwMath.NPOINT)
        self.log.info("Stats: median=%f sigma=%f n=%d",
                      outputStats['DETECTOR_MEDIAN'],
                      outputStats['DETECTOR_SIGMA'],
                      outputStats['DETECTOR_N'])
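
        # Per-amplifier statistics.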
        for ampIdx, amp in enumerate(inputExp.getDetector()):
            ampName = amp.getName()
            ampExp = inputExp.Factory(inputExp, amp.getBBox())
            stats = afwMath.makeStatistics(ampExp.getMaskedImage(),
                                           afwMath.MEANCLIP | afwMath.STDEVCLIP | afwMath.NPOINT,
                                           statsControl)
            outputStats[f'AMP_NAME_{ampIdx}'] = ampName
            outputStats[f'AMP_MEDIAN_{ampIdx}'] = stats.getValue(afwMath.MEANCLIP)
            outputStats[f'AMP_SIGMA_{ampIdx}'] = stats.getValue(afwMath.STDEVCLIP)
            outputStats[f'AMP_N_{ampIdx}'] = stats.getValue(afwMath.NPOINT)

        return pipeBase.Struct(
            outputStats=outputStats,
        )


class CpFlatNormalizationConnections(pipeBase.PipelineTaskConnections,
                                     dimensions=("instrument", "physical_filter")):
    inputMDs = cT.Input(
        name="cpFlatProc_metadata",
        doc="Input metadata for each visit/detector in input set.",
        storageClass="PropertyList",
        dimensions=("instrument", "physical_filter", "detector", "exposure"),
        multiple=True,
    )
    camera = cT.PrerequisiteInput(
        name="camera",
        doc="Input camera to use for gain lookup.",
        storageClass="Camera",
        dimensions=("instrument",),
        lookupFunction=lookupStaticCalibration,
    )
    outputScales = cT.Output(
        name="cpFlatNormScales",
        doc="Output combined proposed calibration.",
        storageClass="StructuredDataDict",
        dimensions=("instrument", "physical_filter"),
    )


class CpFlatNormalizationTaskConfig(pipeBase.PipelineTaskConfig,
                                    pipelineConnections=CpFlatNormalizationConnections):
    level = pexConfig.ChoiceField(
        dtype=str,
        doc="Which level to apply normalizations.",
        allowed={
            'DETECTOR': "Correct using full detector statistics.",
            'AMP': "Correct using individual amplifiers.",
        },
    )
    scaleMaxIter = pexConfig.Field(
        dtype=int,
        doc="Max number of iterations to use in scale solver.",
    )


class CpFlatNormalizationTask(pipeBase.PipelineTask,
                              pipeBase.CmdLineTask):
    """Rescale merged flat frames to remove unequal screen illumination.
    """

    ConfigClass = CpFlatNormalizationTaskConfig
    _DefaultName = "cpFlatNorm"

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        inputs = butlerQC.get(inputRefs)

        dimensions = [exp.dataId.byName() for exp in inputRefs.inputMDs]
        inputs['inputDims'] = dimensions

        outputs = self.run(**inputs)
        butlerQC.put(outputs, outputRefs)

    def run(self, inputMDs, inputDims, camera):
        """Normalize FLAT exposures to a consistent level.

        Parameters
        ----------
        inputMDs : `list` [`lsst.daf.base.PropertyList`]
            Amplifier-level metadata used to construct scales.
        inputDims : `list` [`dict`]
            List of dictionaries of input data dimensions/values.
            Each list entry should contain:

            ``"exposure"``
                exposure id value (`int`)
            ``"detector"``
                detector id value (`int`)

        Returns
        -------
        outputScales : `dict` [`dict` [`dict` [`float`]]]
            Dictionary of scales, indexed by detector (`int`),
            amplifier name (`str`), and exposure (`int`).
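
            A sketch of the layout for ``level='AMP'`` (the detector id,
            amplifier name, exposure ids, and values are illustrative
            only)::

                {'expScale': {20: {'C00': {123456: 1.02, 123457: 0.98}}},
                 'detScale': {20: {'C00': 0.97}}}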

        Raises
        ------
        KeyError
            Raised if the input dimensions do not contain detector and
            exposure, or if the metadata does not contain the expected
            statistics.
        """
        expSet = sorted(set([d['exposure'] for d in inputDims]))
        detSet = sorted(set([d['detector'] for d in inputDims]))
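
        # Map each exposure/detector id onto a column/row index in the
        # background matrix solved below.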
        expMap = {exposureId: idx for idx, exposureId in enumerate(expSet)}
        detMap = {detectorId: idx for idx, detectorId in enumerate(detSet)}

        nExp = len(expSet)
        nDet = len(detSet)

        if self.config.level == 'DETECTOR':
            bgMatrix = np.zeros((nDet, nExp))
            bgCounts = np.ones((nDet, nExp))
        elif self.config.level == 'AMP':
            nAmp = len(camera[0])
            bgMatrix = np.zeros((nDet * nAmp, nExp))
            bgCounts = np.ones((nDet * nAmp, nExp))

        for inMetadata, inDimensions in zip(inputMDs, inputDims):
            try:
                exposureId = inDimensions['exposure']
                detectorId = inDimensions['detector']
            except Exception as e:
                raise KeyError("Cannot find expected dimensions in %s" % (inDimensions, )) from e

            if self.config.level == 'DETECTOR':
                detId = detMap[detectorId]
                expId = expMap[exposureId]
                try:
                    value = inMetadata.get('DETECTOR_MEDIAN')
                    count = inMetadata.get('DETECTOR_N')
                except Exception as e:
                    raise KeyError("Cannot read expected metadata string.") from e

                if np.isfinite(value):
                    bgMatrix[detId][expId] = value
                    bgCounts[detId][expId] = count
                else:
                    bgMatrix[detId][expId] = np.nan
                    bgCounts[detId][expId] = 1

            elif self.config.level == 'AMP':
                detector = camera[detectorId]
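                # Flatten the (detector, amplifier) pair into a single row
                # index: detId * nAmp + ampIdx.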
                detId = detMap[detectorId] * nAmp
                expId = expMap[exposureId]

                for ampIdx, amp in enumerate(detector):
                    try:
                        value = inMetadata.get(f'AMP_MEDIAN_{ampIdx}')
                        count = inMetadata.get(f'AMP_N_{ampIdx}')
                    except Exception as e:
                        raise KeyError("Cannot read expected metadata string.") from e

                    detAmpId = detId + ampIdx
                    if np.isfinite(value):
                        bgMatrix[detAmpId][expId] = value
                        bgCounts[detAmpId][expId] = count
                    else:
                        bgMatrix[detAmpId][expId] = np.nan
                        bgCounts[detAmpId][expId] = 1

        scaleResult = self.measureScales(bgMatrix, bgCounts, iterations=self.config.scaleMaxIter)
        expScales = scaleResult.expScales
        detScales = scaleResult.detScales

        outputScales = defaultdict(lambda: defaultdict(
            lambda: defaultdict(lambda: defaultdict(float))))
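        # Layout: outputScales['expScale'][detector][ampName][exposure] and
        # outputScales['detScale'][detector] (keyed further by ampName for
        # level='AMP').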

        if self.config.level == 'DETECTOR':
            for detId, det in enumerate(detSet):
                for amp in camera[detId]:
                    for expId, exp in enumerate(expSet):
                        outputScales['expScale'][det][amp.getName()][exp] = expScales[expId].tolist()
                outputScales['detScale'][det] = detScales[detId].tolist()
        elif self.config.level == 'AMP':
            for detId, det in enumerate(detSet):
                for ampIdx, amp in enumerate(camera[detId]):
                    for expId, exp in enumerate(expSet):
                        outputScales['expScale'][det][amp.getName()][exp] = expScales[expId].tolist()
                    detAmpId = detId * nAmp + ampIdx
                    outputScales['detScale'][det][amp.getName()] = detScales[detAmpId].tolist()

        return pipeBase.Struct(
            outputScales=outputScales,
        )
316 """Convert backgrounds to exposure and detector components.
320 bgMatrix : `np.ndarray`, (nDetectors, nExposures)
321 Input backgrounds indexed by exposure (axis=0) and
323 bgCounts : `np.ndarray`, (nDetectors, nExposures), optional
324 Input pixel counts used to in measuring bgMatrix, indexed
326 iterations : `int`, optional
327 Number of iterations to use in decomposition.
331 scaleResult : `lsst.pipe.base.Struct`
332 Result struct containing fields:
335 Output E vector of exposure level scalings
336 (`np.array`, (nExposures)).
338 Output G vector of detector level scalings
339 (`np.array`, (nExposures)).
341 Expected model bgMatrix values, calculated from E and G
342 (`np.ndarray`, (nDetectors, nExposures)).

        Notes
        -----
        The set of background measurements B[exposure, detector] of
        flat frame data should be defined by a "Cartesian" product of
        two vectors, E[exposure] and G[detector].  The E vector
        represents the total flux incident on the focal plane.  In a
        perfect camera, this is simply the sum along the columns of B
        (np.sum(B, axis=0)).

        However, this simple model ignores differences in detector
        gains, the vignetting of the detectors, and the illumination
        pattern of the source lamp.  The G vector describes these
        detector dependent differences, which should be identical over
        different exposures.  For a perfect lamp of unit total
        intensity, this is simply the sum along the rows of B
        (np.sum(B, axis=1)).  This algorithm divides G by the total
        flux level, to provide the relative (not absolute) scales for
        each detector.

        The algorithm here, from pipe_drivers/constructCalibs.py and
        from there from Eugene Magnier/PanSTARRS [1]_, attempts to
        iteratively solve this decomposition from initial "perfect" E
        and G vectors.  The operation is performed in log space to
        reduce the multiplies and divides to linear additions and
        subtractions.

        References
        ----------
        .. [1] https://svn.pan-starrs.ifa.hawaii.edu/trac/ipp/browser/trunk/psModules/src/detrend/pmFlatNormalize.c  # noqa: E501
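
        A minimal sketch of the model on synthetic data (the vectors below
        are illustrative, not taken from real exposures):

        >>> import numpy as np
        >>> E = np.array([1.0, 2.0, 4.0])   # per-exposure flux
        >>> G = np.array([0.5, 1.0, 2.0])   # per-detector response
        >>> B = np.outer(G, E)              # backgrounds, (nDetectors, nExposures)
        >>> np.allclose(np.log(B), np.log(G)[:, np.newaxis] + np.log(E)[np.newaxis, :])
        True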
        """
        numExps = bgMatrix.shape[1]
        numChips = bgMatrix.shape[0]
        if bgCounts is None:
            bgCounts = np.ones_like(bgMatrix)

        logMeas = np.log(bgMatrix)
        logMeas = np.ma.masked_array(logMeas, ~np.isfinite(logMeas))
        logG = np.zeros(numChips)
        logE = np.array([np.average(logMeas[:, iexp] - logG,
                                    weights=bgCounts[:, iexp])
                         for iexp in range(numExps)])

        for iter in range(iterations):
            logG = np.array([np.average(logMeas[ichip, :] - logE,
                                        weights=bgCounts[ichip, :])
                             for ichip in range(numChips)])
            bad = np.isnan(logG)
            logG[bad] = logG[~bad].mean()

            logE = np.array([np.average(logMeas[:, iexp] - logG,
                                        weights=bgCounts[:, iexp])
                             for iexp in range(numExps)])

            fluxLevel = np.average(np.exp(logG), weights=np.sum(bgCounts, axis=1))
            logG -= np.log(fluxLevel)

            self.log.debug(f"ITER {iter}: Flux: {fluxLevel}")
            self.log.debug(f"Exps: {np.exp(logE)}")
            self.log.debug(f"{np.mean(logG)}")

        logE = np.array([np.average(logMeas[:, iexp] - logG,
                                    weights=bgCounts[:, iexp])
                         for iexp in range(numExps)])

        bgModel = np.exp(logE[np.newaxis, :] + logG[:, np.newaxis])
        return pipeBase.Struct(
            expScales=np.exp(logE),
            detScales=np.exp(logG),
            bgModel=bgModel,
        )