Coverage for python/lsst/cp/pipe/utils.py: 11%
# This file is part of cp_pipe.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#

__all__ = ['PairedVisitListTaskRunner', 'SingleVisitListTaskRunner',
           'NonexistentDatasetTaskDataIdContainer', 'parseCmdlineNumberString',
           'countMaskedPixels', 'checkExpLengthEqual', 'ddict2dict']

import re
import numpy as np
from scipy.optimize import leastsq
import numpy.polynomial.polynomial as poly
from scipy.stats import norm
import logging

import lsst.pipe.base as pipeBase
import lsst.ip.isr as ipIsr
from lsst.ip.isr import isrMock
import lsst.afw.image

import galsim


def sigmaClipCorrection(nSigClip):
    """Correct measured sigma to account for clipping.

    If we clip our input data and then measure sigma, then the
    measured sigma is smaller than the true value because real
    points beyond the clip threshold have been removed. This is a
    small (1.5% at nSigClip=3) effect when nSigClip >~ 3, but the
    default parameters for measure crosstalk use nSigClip=2.0.
    This causes the measured sigma to be about 15% smaller than
    real. This formula corrects the issue, for the symmetric case
    (upper clip threshold equal to lower clip threshold).

    Parameters
    ----------
    nSigClip : `float`
        Number of sigma the measurement was clipped by.

    Returns
    -------
    scaleFactor : `float`
        Scale factor to increase the measured sigma by.
    """
    varFactor = 1.0 - (2 * nSigClip * norm.pdf(nSigClip)) / (norm.cdf(nSigClip) - norm.cdf(-nSigClip))
    return 1.0 / np.sqrt(varFactor)
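
# Illustrative usage sketch (not part of the original module; `clippedSigma`
# is a hypothetical sigma measured from 2-sigma-clipped data). The correction
# factor at nSigClip=2.0 is roughly 1.14.
#
#     correctedSigma = clippedSigma * sigmaClipCorrection(2.0)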


def calculateWeightedReducedChi2(measured, model, weightsMeasured, nData, nParsModel):
    """Calculate weighted reduced chi2.

    Parameters
    ----------
    measured : `list`
        List with measured data.
    model : `list`
        List with modeled data.
    weightsMeasured : `list`
        List with weights for the measured data.
    nData : `int`
        Number of data points.
    nParsModel : `int`
        Number of parameters in the model.

    Returns
    -------
    redWeightedChi2 : `float`
        Reduced weighted chi2.
    """
    wRes = (measured - model)*weightsMeasured
    return ((wRes*wRes).sum())/(nData-nParsModel)
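
# Illustrative usage sketch (hypothetical values, not from the original module):
# three measurements with uncertainty 0.1 compared to a two-parameter model.
#
#     measured = np.array([1.0, 2.1, 2.9])
#     model = np.array([1.0, 2.0, 3.0])
#     weights = np.full(3, 1.0 / 0.1)
#     redChi2 = calculateWeightedReducedChi2(measured, model, weights, 3, 2)  # -> 2.0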


def makeMockFlats(expTime, gain=1.0, readNoiseElectrons=5, fluxElectrons=1000,
                  randomSeedFlat1=1984, randomSeedFlat2=666, powerLawBfParams=[],
                  expId1=0, expId2=1):
    """Create a pair of mock flats with isrMock.

    Parameters
    ----------
    expTime : `float`
        Exposure time of the flats.
    gain : `float`, optional
        Gain, in e/ADU.
    readNoiseElectrons : `float`, optional
        Read noise rms, in electrons.
    fluxElectrons : `float`, optional
        Flux of flats, in electrons per second.
    randomSeedFlat1 : `int`, optional
        Random seed for the normal distributions for the mean signal
        and noise (flat1).
    randomSeedFlat2 : `int`, optional
        Random seed for the normal distributions for the mean signal
        and noise (flat2).
    powerLawBfParams : `list`, optional
        Parameters for `galsim.cdmodel.PowerLawCD` to simulate the
        brighter-fatter effect.
    expId1 : `int`, optional
        Exposure ID for first flat.
    expId2 : `int`, optional
        Exposure ID for second flat.

    Returns
    -------
    flatExp1 : `lsst.afw.image.exposure.ExposureF`
        First exposure of flat field pair.
    flatExp2 : `lsst.afw.image.exposure.ExposureF`
        Second exposure of flat field pair.

    Notes
    -----
    The parameters of `galsim.cdmodel.PowerLawCD` are `n, r0, t0, rx,
    tx, r, t, alpha`. For more information about their meaning, see
    the Galsim documentation
    https://galsim-developers.github.io/GalSim/_build/html/_modules/galsim/cdmodel.html  # noqa: W505
    and Gruen+15 (1501.02802).

    Example: galsim.cdmodel.PowerLawCD(8, 1.1e-7, 1.1e-7, 1.0e-8,
    1.0e-8, 1.0e-9, 1.0e-9, 2.0)
    """
    flatFlux = fluxElectrons  # e/s
    flatMean = flatFlux*expTime  # e
    readNoise = readNoiseElectrons  # e

    mockImageConfig = isrMock.IsrMock.ConfigClass()

    mockImageConfig.flatDrop = 0.99999
    mockImageConfig.isTrimmed = True

    flatExp1 = isrMock.FlatMock(config=mockImageConfig).run()
    flatExp2 = flatExp1.clone()
    (shapeY, shapeX) = flatExp1.getDimensions()
    flatWidth = np.sqrt(flatMean)

    rng1 = np.random.RandomState(randomSeedFlat1)
    flatData1 = rng1.normal(flatMean, flatWidth, (shapeX, shapeY)) + rng1.normal(0.0, readNoise,
                                                                                 (shapeX, shapeY))
    rng2 = np.random.RandomState(randomSeedFlat2)
    flatData2 = rng2.normal(flatMean, flatWidth, (shapeX, shapeY)) + rng2.normal(0.0, readNoise,
                                                                                 (shapeX, shapeY))
    # Simulate the brighter-fatter effect with a power law model in galsim.
    if len(powerLawBfParams):
        if not len(powerLawBfParams) == 8:
            raise RuntimeError("Wrong number of parameters for `galsim.cdmodel.PowerLawCD`. "
                               f"Expected 8; passed {len(powerLawBfParams)}.")
        cd = galsim.cdmodel.PowerLawCD(*powerLawBfParams)
        tempFlatData1 = galsim.Image(flatData1)
        temp2FlatData1 = cd.applyForward(tempFlatData1)

        tempFlatData2 = galsim.Image(flatData2)
        temp2FlatData2 = cd.applyForward(tempFlatData2)

        flatExp1.image.array[:] = temp2FlatData1.array/gain   # ADU
        flatExp2.image.array[:] = temp2FlatData2.array/gain   # ADU
    else:
        flatExp1.image.array[:] = flatData1/gain   # ADU
        flatExp2.image.array[:] = flatData2/gain   # ADU

    visitInfoExp1 = lsst.afw.image.VisitInfo(exposureId=expId1, exposureTime=expTime)
    visitInfoExp2 = lsst.afw.image.VisitInfo(exposureId=expId2, exposureTime=expTime)

    flatExp1.info.id = expId1
    flatExp1.getInfo().setVisitInfo(visitInfoExp1)
    flatExp2.info.id = expId2
    flatExp2.getInfo().setVisitInfo(visitInfoExp2)

    return flatExp1, flatExp2
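
# Illustrative usage sketch (hypothetical parameter values): generate a pair
# of 15 s mock flats with the brighter-fatter simulation switched off.
#
#     flat1, flat2 = makeMockFlats(expTime=15.0, gain=1.5, fluxElectrons=1000)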


def countMaskedPixels(maskedIm, maskPlane):
    """Count the number of pixels in a given mask plane.

    Parameters
    ----------
    maskedIm : `~lsst.afw.image.MaskedImage`
        Masked image to examine.
    maskPlane : `str`
        Name of the mask plane to examine.

    Returns
    -------
    nPix : `int`
        Number of pixels in the requested mask plane.
    """
    maskBit = maskedIm.mask.getPlaneBitMask(maskPlane)
    nPix = np.where(np.bitwise_and(maskedIm.mask.array, maskBit))[0].flatten().size
    return nPix
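
# Illustrative usage sketch (`exposure` is a hypothetical lsst.afw.image.Exposure):
# count the pixels flagged in the SAT mask plane.
#
#     nSat = countMaskedPixels(exposure.maskedImage, "SAT")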


class PairedVisitListTaskRunner(pipeBase.TaskRunner):
    """Subclass of TaskRunner for handling intrinsically paired visits.

    This transforms the processed arguments generated by the ArgumentParser
    into the arguments expected by tasks which take visit pairs for their
    run() methods.

    Such tasks' run() methods tend to take two arguments,
    one of which is the dataRef (as usual), and the other is the list
    of visit-pairs, in the form of a list of tuples.
    This list is supplied on the command line as documented,
    and this class parses that, and passes the parsed version
    to the run() method.

    See pipeBase.TaskRunner for more information.
    """

    @staticmethod
    def getTargetList(parsedCmd, **kwargs):
        """Parse the visit list and pass through explicitly."""
        visitPairs = []
        for visitStringPair in parsedCmd.visitPairs:
            visitStrings = visitStringPair.split(",")
            if len(visitStrings) != 2:
                raise RuntimeError("Found {} visits in {} instead of 2".format(len(visitStrings),
                                                                               visitStringPair))
            try:
                visits = [int(visit) for visit in visitStrings]
            except Exception:
                raise RuntimeError("Could not parse {} as two integer visit numbers".format(visitStringPair))
            visitPairs.append(visits)

        return pipeBase.TaskRunner.getTargetList(parsedCmd, visitPairs=visitPairs, **kwargs)


def parseCmdlineNumberString(inputString):
    """Parse command line numerical expression syntax and return as a list of ints.

    Take an input of the form "'1..5:2^123..126'" as a string, and return
    a list of ints as [1, 3, 5, 123, 124, 125, 126].

    Parameters
    ----------
    inputString : `str`
        String to be parsed.

    Returns
    -------
    outList : `list` [`int`]
        List of integers identified in the string.
    """
    outList = []
    for subString in inputString.split("^"):
        mat = re.search(r"^(\d+)\.\.(\d+)(?::(\d+))?$", subString)
        if mat:
            v1 = int(mat.group(1))
            v2 = int(mat.group(2))
            v3 = mat.group(3)
            v3 = int(v3) if v3 else 1
            for v in range(v1, v2 + 1, v3):
                outList.append(int(v))
        else:
            outList.append(int(subString))
    return outList
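
# Example taken from the docstring above: ranges are "start..end[:stride]" and
# "^" separates groups.
#
#     parseCmdlineNumberString("1..5:2^123..126")
#     # -> [1, 3, 5, 123, 124, 125, 126]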


class SingleVisitListTaskRunner(pipeBase.TaskRunner):
    """Subclass of TaskRunner for tasks requiring a list of visits per dataRef.

    This transforms the processed arguments generated by the ArgumentParser
    into the arguments expected by tasks which require a list of visits
    to be supplied for each dataRef, as is common in `lsst.cp.pipe` code.

    Such tasks' run() methods tend to take two arguments,
    one of which is the dataRef (as usual), and the other is the list
    of visits.
    This list is supplied on the command line as documented,
    and this class parses that, and passes the parsed version
    to the run() method.

    See `lsst.pipe.base.TaskRunner` for more information.
    """

    @staticmethod
    def getTargetList(parsedCmd, **kwargs):
        """Parse the visit list and pass through explicitly."""
        # If this has been pre-parsed and therefore doesn't have length of one
        # then something has gone wrong, so execution should stop here.
        assert len(parsedCmd.visitList) == 1, 'visitList parsing assumptions violated'
        visits = parseCmdlineNumberString(parsedCmd.visitList[0])

        return pipeBase.TaskRunner.getTargetList(parsedCmd, visitList=visits, **kwargs)


class NonexistentDatasetTaskDataIdContainer(pipeBase.DataIdContainer):
    """A DataIdContainer for the tasks for which the output does
    not yet exist."""

    def makeDataRefList(self, namespace):
        """Compute refList based on idList.

        This method must be defined as the dataset does not exist before this
        task is run.

        Parameters
        ----------
        namespace
            Results of parsing the command-line.

        Notes
        -----
        Not called if ``add_id_argument`` is called
        with ``doMakeDataRefList=False``.
        Note that this is almost a copy-and-paste of the vanilla
        implementation, but without checking if the datasets already exist,
        as this task exists to make them.
        """
        if self.datasetType is None:
            raise RuntimeError("Must call setDatasetType first")
        butler = namespace.butler
        for dataId in self.idList:
            refList = list(butler.subset(datasetType=self.datasetType, level=self.level, dataId=dataId))
            # Exclude nonexistent data.
            # This is a recursive test, e.g. for the sake of "raw" data.
            if not refList:
                namespace.log.warning("No data found for dataId=%s", dataId)
                continue
            self.refList += refList


def irlsFit(initialParams, dataX, dataY, function, weightsY=None, weightType='Cauchy'):
    """Iteratively reweighted least squares fit.

    This uses `lsst.cp.pipe.utils.fitLeastSq`, but applies weights
    based on the Cauchy distribution by default. Other weight options
    are implemented. See e.g. Holland and Welsch, 1977,
    doi:10.1080/03610927708827533

    Parameters
    ----------
    initialParams : `list` [`float`]
        Starting parameters.
    dataX : `numpy.array`, (N,)
        Abscissa data.
    dataY : `numpy.array`, (N,)
        Ordinate data.
    function : callable
        Function to fit.
    weightsY : `numpy.array`, (N,), optional
        Weights to apply to the data.
    weightType : `str`, optional
        Type of weighting to use. One of Cauchy, Anderson, bisquare,
        box, Welsch, Huber, logistic, or Fair.

    Returns
    -------
    polyFit : `list` [`float`]
        Final best fit parameters.
    polyFitErr : `list` [`float`]
        Final errors on fit parameters.
    chiSq : `float`
        Reduced chi squared.
    weightsY : `list` [`float`]
        Final weights used for each point.

    Raises
    ------
    RuntimeError :
        Raised if an unknown weightType string is passed.
    """
    if weightsY is None:
        weightsY = np.ones_like(dataX)

    polyFit, polyFitErr, chiSq = fitLeastSq(initialParams, dataX, dataY, function, weightsY=weightsY)
    for iteration in range(10):
        resid = np.abs(dataY - function(polyFit, dataX)) / np.sqrt(dataY)
        if weightType == 'Cauchy':
            # Use Cauchy weighting. This is a soft weight.
            # At [2, 3, 5, 10] sigma, weights are [.59, .39, .19, .05].
            Z = resid / 2.385
            weightsY = 1.0 / (1.0 + np.square(Z))
        elif weightType == 'Anderson':
            # Anderson+1972 weighting. This is a hard weight.
            # At [2, 3, 5, 10] sigma, weights are [.67, .35, 0.0, 0.0].
            Z = resid / (1.339 * np.pi)
            weightsY = np.where(Z < 1.0, np.sinc(Z), 0.0)
        elif weightType == 'bisquare':
            # Beaton and Tukey (1974) biweight. This is a hard weight.
            # At [2, 3, 5, 10] sigma, weights are [.81, .59, 0.0, 0.0].
            Z = resid / 4.685
            weightsY = np.where(Z < 1.0, 1.0 - np.square(Z), 0.0)
        elif weightType == 'box':
            # Hinich and Talwar (1975). This is a hard weight.
            # At [2, 3, 5, 10] sigma, weights are [1.0, 0.0, 0.0, 0.0].
            weightsY = np.where(resid < 2.795, 1.0, 0.0)
        elif weightType == 'Welsch':
            # Dennis and Welsch (1976). This is a hard weight.
            # At [2, 3, 5, 10] sigma, weights are [.64, .36, .06, 1e-5].
            Z = resid / 2.985
            weightsY = np.exp(-1.0 * np.square(Z))
        elif weightType == 'Huber':
            # Huber (1964) weighting. This is a soft weight.
            # At [2, 3, 5, 10] sigma, weights are [.67, .45, .27, .13].
            Z = resid / 1.345
            weightsY = np.where(Z < 1.0, 1.0, 1 / Z)
        elif weightType == 'logistic':
            # Logistic weighting. This is a soft weight.
            # At [2, 3, 5, 10] sigma, weights are [.56, .40, .24, .12].
            Z = resid / 1.205
            weightsY = np.tanh(Z) / Z
        elif weightType == 'Fair':
            # Fair (1974) weighting. This is a soft weight.
            # At [2, 3, 5, 10] sigma, weights are [.41, .32, .22, .12].
            Z = resid / 1.4
            weightsY = 1.0 / (1.0 + Z)
        else:
            raise RuntimeError(f"Unknown weighting type: {weightType}")
        polyFit, polyFitErr, chiSq = fitLeastSq(initialParams, dataX, dataY, function, weightsY=weightsY)

    return polyFit, polyFitErr, chiSq, weightsY
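
# Illustrative usage sketch (hypothetical data; assumes dataY is strictly
# positive, since the residuals above are scaled by sqrt(dataY)): robustly fit
# a straight line with the default Cauchy weighting.
#
#     xData = np.arange(1.0, 11.0)
#     yData = 100.0 + 5.0*xData + np.random.normal(0.0, 1.0, xData.size)
#     pars, parErrs, redChi2, weights = irlsFit([100.0, 5.0], xData, yData, funcPolynomial)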


def fitLeastSq(initialParams, dataX, dataY, function, weightsY=None):
    """Do a fit and estimate the parameter errors using
    scipy.optimize.leastsq.

    optimize.leastsq returns the fractional covariance matrix. To
    estimate the standard deviation of the fit parameters, multiply
    the entries of this matrix by the unweighted reduced chi squared
    and take the square root of the diagonal elements.

    Parameters
    ----------
    initialParams : `list` [`float`]
        Initial values for fit parameters. For ptcFitType=POLYNOMIAL,
        its length determines the degree of the polynomial.
    dataX : `numpy.array`, (N,)
        Data in the abscissa axis.
    dataY : `numpy.array`, (N,)
        Data in the ordinate axis.
    function : callable object (function)
        Function to fit the data with.
    weightsY : `numpy.array`, (N,), optional
        Weights of the data in the ordinate axis.

    Returns
    -------
    pFitSingleLeastSquares : `list` [`float`]
        List with fitted parameters.
    pErrSingleLeastSquares : `list` [`float`]
        List with errors for fitted parameters.
    reducedChiSqSingleLeastSquares : `float`
        Reduced chi squared, unweighted if weightsY is not provided.
    """
    if weightsY is None:
        weightsY = np.ones(len(dataX))

    def errFunc(p, x, y, weightsY=None):
        if weightsY is None:
            weightsY = np.ones(len(x))
        return (function(p, x) - y)*weightsY

    pFit, pCov, infoDict, errMessage, success = leastsq(errFunc, initialParams,
                                                        args=(dataX, dataY, weightsY), full_output=1,
                                                        epsfcn=0.0001)

    if (len(dataY) > len(initialParams)) and pCov is not None:
        reducedChiSq = calculateWeightedReducedChi2(dataY, function(pFit, dataX), weightsY, len(dataY),
                                                    len(initialParams))
        pCov *= reducedChiSq
    else:
        pCov = np.zeros((len(initialParams), len(initialParams)))
        pCov[:, :] = np.nan
        reducedChiSq = np.nan

    errorVec = []
    for i in range(len(pFit)):
        errorVec.append(np.fabs(pCov[i][i])**0.5)

    pFitSingleLeastSquares = pFit
    pErrSingleLeastSquares = np.array(errorVec)

    return pFitSingleLeastSquares, pErrSingleLeastSquares, reducedChiSq
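
# Illustrative usage sketch (hypothetical data): fit a quadratic; the length of
# the initial-parameter list sets the polynomial degree for funcPolynomial.
#
#     pars, parErrs, redChi2 = fitLeastSq([0.0, 1.0, 0.1], xData, yData, funcPolynomial)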


def fitBootstrap(initialParams, dataX, dataY, function, weightsY=None, confidenceSigma=1.):
    """Do a fit using least squares and bootstrap to estimate parameter errors.

    The bootstrap error bars are calculated by fitting 100 random data sets.

    Parameters
    ----------
    initialParams : `list` [`float`]
        Initial values for fit parameters. For ptcFitType=POLYNOMIAL,
        its length determines the degree of the polynomial.
    dataX : `numpy.array`, (N,)
        Data in the abscissa axis.
    dataY : `numpy.array`, (N,)
        Data in the ordinate axis.
    function : callable object (function)
        Function to fit the data with.
    weightsY : `numpy.array`, (N,), optional
        Weights of the data in the ordinate axis.
    confidenceSigma : `float`, optional
        Number of sigmas that determine confidence interval for the
        bootstrap errors.

    Returns
    -------
    pFitBootstrap : `list` [`float`]
        List with fitted parameters.
    pErrBootstrap : `list` [`float`]
        List with errors for fitted parameters.
    reducedChiSqBootstrap : `float`
        Reduced chi squared, unweighted if weightsY is not provided.
    """
    if weightsY is None:
        weightsY = np.ones(len(dataX))

    def errFunc(p, x, y, weightsY):
        if weightsY is None:
            weightsY = np.ones(len(x))
        return (function(p, x) - y)*weightsY

    # Fit first time
    pFit, _ = leastsq(errFunc, initialParams, args=(dataX, dataY, weightsY), full_output=0)

    # Get the stdev of the residuals
    residuals = errFunc(pFit, dataX, dataY, weightsY)
    # 100 random data sets are generated and fitted
    pars = []
    for i in range(100):
        randomDelta = np.random.normal(0., np.fabs(residuals), len(dataY))
        randomDataY = dataY + randomDelta
        randomFit, _ = leastsq(errFunc, initialParams,
                               args=(dataX, randomDataY, weightsY), full_output=0)
        pars.append(randomFit)
    pars = np.array(pars)
    meanPfit = np.mean(pars, 0)

    # Confidence interval for parameter estimates
    errPfit = confidenceSigma*np.std(pars, 0)
    pFitBootstrap = meanPfit
    pErrBootstrap = errPfit

    reducedChiSq = calculateWeightedReducedChi2(dataY, function(pFitBootstrap, dataX), weightsY, len(dataY),
                                                len(initialParams))
    return pFitBootstrap, pErrBootstrap, reducedChiSq
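
# Illustrative usage sketch (hypothetical data): same call pattern as fitLeastSq,
# but the parameter errors come from refitting 100 resampled data sets.
#
#     pars, parErrs, redChi2 = fitBootstrap([0.0, 1.0], xData, yData, funcPolynomial)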


def funcPolynomial(pars, x):
    """Polynomial function definition.

    Parameters
    ----------
    pars : `list`
        Polynomial coefficients. Its length determines the polynomial order.
    x : `numpy.array`, (N,)
        Abscissa array.

    Returns
    -------
    y : `numpy.array`, (N,)
        Ordinate array after evaluating polynomial of order
        len(pars)-1 at `x`.
    """
    return poly.polyval(x, [*pars])
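
# Illustrative evaluation (hypothetical coefficients): pars are in increasing
# order of degree, so [2.0, 0.5] is y = 2.0 + 0.5*x.
#
#     y = funcPolynomial([2.0, 0.5], np.array([0.0, 1.0, 2.0]))  # -> [2.0, 2.5, 3.0]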


def funcAstier(pars, x):
    """Single brighter-fatter parameter model for PTC; Equation 16 of
    Astier+19.

    Parameters
    ----------
    pars : `list`
        Parameters of the model: a00 (brighter-fatter), gain (e/ADU),
        and noise (e^2).
    x : `numpy.array`, (N,)
        Signal mu (ADU).

    Returns
    -------
    y : `numpy.array`, (N,)
        C_00 (variance) in ADU^2.
    """
    a00, gain, noise = pars
    return 0.5/(a00*gain*gain)*(np.exp(2*a00*x*gain)-1) + noise/(gain*gain)  # C_00
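
# Illustrative evaluation (hypothetical parameter values): the PTC variance for
# a00 = -1.2e-6, gain = 1.5 e/ADU, and noise = 8.0 e^2 over a range of signals.
#
#     mu = np.linspace(100.0, 50000.0, 10)   # ADU
#     var = funcAstier([-1.2e-6, 1.5, 8.0], mu)   # ADU^2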


def arrangeFlatsByExpTime(exposureList, exposureIdList):
    """Arrange exposures by exposure time.

    Parameters
    ----------
    exposureList : `list` [`lsst.pipe.base.connections.DeferredDatasetRef`]
        Input list of exposure references.
    exposureIdList : `list` [`int`]
        List of exposure ids as obtained by dataId[`exposure`].

    Returns
    -------
    flatsAtExpTime : `dict` [`float`,
                     `list` [(`lsst.pipe.base.connections.DeferredDatasetRef`, `int`)]]
        Dictionary that groups references to flat-field exposures
        (and their IDs) that have the same exposure time (seconds).
    """
    flatsAtExpTime = {}
    assert len(exposureList) == len(exposureIdList), "Different lengths for exp. list and exp. ID lists"
    for expRef, expId in zip(exposureList, exposureIdList):
        expTime = expRef.get(component='visitInfo').exposureTime
        listAtExpTime = flatsAtExpTime.setdefault(expTime, [])
        listAtExpTime.append((expRef, expId))

    return flatsAtExpTime
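
# Illustrative usage sketch (`refs` and `ids` are hypothetical, matched-length
# lists of dataset references and exposure ids): group flats by exposure time.
#
#     flatsByTime = arrangeFlatsByExpTime(refs, ids)
#     # e.g. {15.0: [(refA, 101), (refB, 102)], 30.0: [(refC, 103), ...]}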


def arrangeFlatsByExpId(exposureList, exposureIdList):
    """Arrange exposures by exposure ID.

    There is no guarantee that this will properly group exposures, but
    allows a sequence of flats that have different illumination
    (despite having the same exposure time) to be processed.

    Parameters
    ----------
    exposureList : `list` [`lsst.pipe.base.connections.DeferredDatasetRef`]
        Input list of exposure references.
    exposureIdList : `list` [`int`]
        List of exposure ids as obtained by dataId[`exposure`].

    Returns
    -------
    flatsAtExpId : `dict` [`int`,
                   `list` [(`lsst.pipe.base.connections.DeferredDatasetRef`, `int`)]]
        Dictionary that groups references to flat-field exposures (and their
        IDs) into sequential pairs, keyed by pair index.

    Notes
    -----
    This algorithm sorts the input exposure references by their exposure
    id, and then assigns each pair of exposure references (exp_j, exp_{j+1})
    to pair k, such that 2*k = j, where j is the python index of one of the
    exposure references (starting from zero). By checking for the IndexError
    while appending, we can ensure that there will only ever be fully
    populated pairs.
    """
    flatsAtExpId = {}
    assert len(exposureList) == len(exposureIdList), "Different lengths for exp. list and exp. ID lists"
    # Sort exposures by expIds, which are in the second list `exposureIdList`.
    sortedExposures = sorted(zip(exposureList, exposureIdList), key=lambda pair: pair[1])

    for jPair, expTuple in enumerate(sortedExposures):
        if (jPair + 1) % 2:
            kPair = jPair // 2
            listAtExpId = flatsAtExpId.setdefault(kPair, [])
            try:
                listAtExpId.append(expTuple)
                listAtExpId.append(sortedExposures[jPair + 1])
            except IndexError:
                pass

    return flatsAtExpId
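
# Illustrative usage sketch (`refs` and `ids` as above): pair flats sequentially
# by exposure id; the keys are pair indices 0, 1, 2, ...
#
#     flatPairs = arrangeFlatsByExpId(refs, ids)
#     # e.g. {0: [(refA, 101), (refB, 102)], 1: [(refC, 103), (refD, 104)]}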


def checkExpLengthEqual(exp1, exp2, v1=None, v2=None, raiseWithMessage=False):
    """Check the exposure lengths of two exposures are equal.

    Parameters
    ----------
    exp1 : `lsst.afw.image.Exposure`
        First exposure to check.
    exp2 : `lsst.afw.image.Exposure`
        Second exposure to check.
    v1 : `int` or `str`, optional
        First visit of the visit pair.
    v2 : `int` or `str`, optional
        Second visit of the visit pair.
    raiseWithMessage : `bool`, optional
        If True, instead of returning a bool, raise a RuntimeError if
        exposure times are not equal, with a message about which
        visits mismatch if the information is available.

    Returns
    -------
    success : `bool`
        True if the exposures have equal exposure times.

    Raises
    ------
    RuntimeError
        Raised if the exposure lengths of the two exposures are not equal.
    """
    expTime1 = exp1.getInfo().getVisitInfo().getExposureTime()
    expTime2 = exp2.getInfo().getVisitInfo().getExposureTime()
    if expTime1 != expTime2:
        if raiseWithMessage:
            msg = "Exposure lengths for visit pairs must be equal. " + \
                  "Found %s and %s" % (expTime1, expTime2)
            if v1 and v2:
                msg += " for visit pair %s, %s" % (v1, v2)
            raise RuntimeError(msg)
        else:
            return False
    return True
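
# Illustrative usage sketch (`exp1` and `exp2` are hypothetical exposures):
# raise with an informative message if the exposure times of a visit pair differ.
#
#     checkExpLengthEqual(exp1, exp2, v1=101, v2=102, raiseWithMessage=True)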


def validateIsrConfig(isrTask, mandatory=None, forbidden=None, desirable=None, undesirable=None,
                      checkTrim=True, logName=None):
    """Check that appropriate ISR settings have been selected for the task.

    Note that this checks that the task itself is configured correctly rather
    than checking a config.

    Parameters
    ----------
    isrTask : `lsst.ip.isr.IsrTask`
        The task whose config is to be validated.
    mandatory : `iterable` [`str`]
        isr steps that must be set to True. Raises if False or missing.
    forbidden : `iterable` [`str`]
        isr steps that must be set to False. Raises if True, warns if missing.
    desirable : `iterable` [`str`]
        isr steps that should probably be set to True. Warns if False,
        logs at info level if missing.
    undesirable : `iterable` [`str`]
        isr steps that should probably be set to False. Warns if True,
        logs at info level if missing.
    checkTrim : `bool`
        Check to ensure the isrTask's assembly subtask is trimming the
        images. This is a separate config as it is very ugly to do
        this within the normal configuration lists as it is an option
        of a sub task.
    logName : `str`, optional
        Name of the logger to use for warnings; if not supplied, an
        "lsst.isrValidation" logger is used.

    Raises
    ------
    RuntimeError
        Raised if ``mandatory`` config parameters are False,
        or if ``forbidden`` parameters are True.
    TypeError
        Raised if parameter ``isrTask`` is an invalid type.

    Notes
    -----
    Logs warnings using an isrValidation logger for desirable/undesirable
    options that are of the wrong polarity or if keys are missing.
    """
    if not isinstance(isrTask, ipIsr.IsrTask):
        raise TypeError(f'Must supply an instance of lsst.ip.isr.IsrTask not {type(isrTask)}')

    configDict = isrTask.config.toDict()

    if logName and isinstance(logName, str):
        log = logging.getLogger(logName)
    else:
        log = logging.getLogger("lsst.isrValidation")

    if mandatory:
        for configParam in mandatory:
            if configParam not in configDict:
                raise RuntimeError(f"Mandatory parameter {configParam} not found in the isr configuration.")
            if configDict[configParam] is False:
                raise RuntimeError(f"Must set config.isr.{configParam} to True for this task.")

    if forbidden:
        for configParam in forbidden:
            if configParam not in configDict:
                log.warning("Failed to find forbidden key %s in the isr config. The keys in the"
                            " forbidden list should each have an associated Field in IsrConfig:"
                            " check that there is not a typo in this case.", configParam)
                continue
            if configDict[configParam] is True:
                raise RuntimeError(f"Must set config.isr.{configParam} to False for this task.")

    if desirable:
        for configParam in desirable:
            if configParam not in configDict:
                log.info("Failed to find key %s in the isr config. You probably want"
                         " to set the equivalent for your obs_package to True.", configParam)
                continue
            if configDict[configParam] is False:
                log.warning("Found config.isr.%s set to False for this task."
                            " The cp_pipe Config recommends setting this to True.", configParam)
    if undesirable:
        for configParam in undesirable:
            if configParam not in configDict:
                log.info("Failed to find key %s in the isr config. You probably want"
                         " to set the equivalent for your obs_package to False.", configParam)
                continue
            if configDict[configParam] is True:
                log.warning("Found config.isr.%s set to True for this task."
                            " The cp_pipe Config recommends setting this to False.", configParam)

    if checkTrim:  # subtask setting, seems non-trivial to combine with above lists
        if not isrTask.assembleCcd.config.doTrim:
            raise RuntimeError("Must trim when assembling CCDs. Set config.isr.assembleCcd.doTrim to True")
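
# Illustrative usage sketch (assumes an lsst.ip.isr.IsrTask instance `isrTask`;
# the listed field names are examples of boolean IsrConfig steps, not a
# recommendation from this module):
#
#     validateIsrConfig(isrTask,
#                       mandatory=['doOverscan'],
#                       forbidden=['doFlat'],
#                       desirable=['doBias', 'doDark'])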


def ddict2dict(d):
    """Convert nested default dictionaries to regular dictionaries.

    This is needed to prevent yaml persistence issues.

    Parameters
    ----------
    d : `defaultdict`
        A possibly nested set of `defaultdict`.

    Returns
    -------
    dict : `dict`
        A possibly nested set of `dict`.
    """
    for k, v in d.items():
        if isinstance(v, dict):
            d[k] = ddict2dict(v)
    return dict(d)
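
# Illustrative usage sketch: convert a nested defaultdict (e.g. per-amp results)
# into plain dicts before persisting to YAML.
#
#     from collections import defaultdict
#     d = defaultdict(lambda: defaultdict(float))
#     d["C00"]["gain"] = 1.5
#     plain = ddict2dict(d)  # -> {'C00': {'gain': 1.5}}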