# This file is part of cp_pipe.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#

__all__ = ['PairedVisitListTaskRunner', 'SingleVisitListTaskRunner',
           'NonexistentDatasetTaskDataIdContainer', 'parseCmdlineNumberString',
           'countMaskedPixels', 'checkExpLengthEqual', 'ddict2dict']

import re
import numpy as np
from scipy.optimize import leastsq
import numpy.polynomial.polynomial as poly
from scipy.stats import norm

import lsst.pipe.base as pipeBase
import lsst.ip.isr as ipIsr
from lsst.ip.isr import isrMock
import lsst.log
import lsst.afw.image

import galsim


def sigmaClipCorrection(nSigClip):
    """Correct measured sigma to account for clipping.

    If we clip our input data and then measure sigma, then the
    measured sigma is smaller than the true value because real
    points beyond the clip threshold have been removed. This is a
    small (1.5% at nSigClip=3) effect when nSigClip >~ 3, but the
    default parameters for measuring crosstalk use nSigClip=2.0.
    This causes the measured sigma to be about 15% smaller than
    real. This formula corrects the issue, for the symmetric case
    (upper clip threshold equal to lower clip threshold).

    Parameters
    ----------
    nSigClip : `float`
        Number of sigma the measurement was clipped by.

    Returns
    -------
    scaleFactor : `float`
        Scale factor to increase the measured sigma by.
    """
    varFactor = 1.0 - (2 * nSigClip * norm.pdf(nSigClip)) / (norm.cdf(nSigClip) - norm.cdf(-nSigClip))
    return 1.0 / np.sqrt(varFactor)
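
# Example (an illustrative sketch, not part of the original module): undoing
# the bias of a sigma that was measured after a symmetric 2-sigma clip; the
# input value below is made up.
#
#     measuredSigma = 10.0
#     correctedSigma = measuredSigma * sigmaClipCorrection(2.0)  # ~11.4
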

def calculateWeightedReducedChi2(measured, model, weightsMeasured, nData, nParsModel):
    """Calculate weighted reduced chi2.

    Parameters
    ----------
    measured : `list`
        List with measured data.
    model : `list`
        List with modeled data.
    weightsMeasured : `list`
        List with weights for the measured data.
    nData : `int`
        Number of data points.
    nParsModel : `int`
        Number of parameters in the model.

    Returns
    -------
    redWeightedChi2 : `float`
        Reduced weighted chi2.
    """
    wRes = (measured - model)*weightsMeasured
    return ((wRes*wRes).sum())/(nData-nParsModel)
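
# Example (illustrative, values invented): reduced chi2 of a constant model
# fit to three equally weighted points.
#
#     y = np.array([1.0, 2.0, 3.0])
#     model = np.array([2.0, 2.0, 2.0])
#     weights = np.array([1.0, 1.0, 1.0])
#     chi2 = calculateWeightedReducedChi2(y, model, weights, nData=3, nParsModel=1)  # = 1.0
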

def makeMockFlats(expTime, gain=1.0, readNoiseElectrons=5, fluxElectrons=1000,
                  randomSeedFlat1=1984, randomSeedFlat2=666, powerLawBfParams=[],
                  expId1=0, expId2=1):
    """Create a pair of mock flats with isrMock.

    Parameters
    ----------
    expTime : `float`
        Exposure time of the flats.
    gain : `float`, optional
        Gain, in e/ADU.
    readNoiseElectrons : `float`, optional
        Read noise rms, in electrons.
    fluxElectrons : `float`, optional
        Flux of flats, in electrons per second.
    randomSeedFlat1 : `int`, optional
        Random seed for the normal distributions for the mean signal and noise (flat1).
    randomSeedFlat2 : `int`, optional
        Random seed for the normal distributions for the mean signal and noise (flat2).
    powerLawBfParams : `list`, optional
        Parameters for `galsim.cdmodel.PowerLawCD` to simulate the brighter-fatter effect.
    expId1 : `int`, optional
        Exposure ID for first flat.
    expId2 : `int`, optional
        Exposure ID for second flat.

    Returns
    -------
    flatExp1 : `lsst.afw.image.exposure.exposure.ExposureF`
        First exposure of flat field pair.
    flatExp2 : `lsst.afw.image.exposure.exposure.ExposureF`
        Second exposure of flat field pair.

    Notes
    -----
    The parameters of `galsim.cdmodel.PowerLawCD` are `n, r0, t0, rx, tx, r, t, alpha`. For more
    information about their meaning, see the Galsim documentation
    https://galsim-developers.github.io/GalSim/_build/html/_modules/galsim/cdmodel.html
    and Gruen+15 (1501.02802).

    Example: galsim.cdmodel.PowerLawCD(8, 1.1e-7, 1.1e-7, 1.0e-8, 1.0e-8, 1.0e-9, 1.0e-9, 2.0)
    """
    flatFlux = fluxElectrons  # e/s
    flatMean = flatFlux*expTime  # e
    readNoise = readNoiseElectrons  # e

    mockImageConfig = isrMock.IsrMock.ConfigClass()

    mockImageConfig.flatDrop = 0.99999
    mockImageConfig.isTrimmed = True

    flatExp1 = isrMock.FlatMock(config=mockImageConfig).run()
    flatExp2 = flatExp1.clone()
    (shapeY, shapeX) = flatExp1.getDimensions()
    flatWidth = np.sqrt(flatMean)

    rng1 = np.random.RandomState(randomSeedFlat1)
    flatData1 = rng1.normal(flatMean, flatWidth, (shapeX, shapeY)) + rng1.normal(0.0, readNoise,
                                                                                 (shapeX, shapeY))
    rng2 = np.random.RandomState(randomSeedFlat2)
    flatData2 = rng2.normal(flatMean, flatWidth, (shapeX, shapeY)) + rng2.normal(0.0, readNoise,
                                                                                 (shapeX, shapeY))
    # Simulate the brighter-fatter effect with a power law model in galsim.
    if len(powerLawBfParams):
        if not len(powerLawBfParams) == 8:
            raise RuntimeError("Wrong number of parameters for `galsim.cdmodel.PowerLawCD`. "
                               f"Expected 8; passed {len(powerLawBfParams)}.")
        cd = galsim.cdmodel.PowerLawCD(*powerLawBfParams)
        tempFlatData1 = galsim.Image(flatData1)
        temp2FlatData1 = cd.applyForward(tempFlatData1)

        tempFlatData2 = galsim.Image(flatData2)
        temp2FlatData2 = cd.applyForward(tempFlatData2)

        flatExp1.image.array[:] = temp2FlatData1.array/gain  # ADU
        flatExp2.image.array[:] = temp2FlatData2.array/gain  # ADU
    else:
        flatExp1.image.array[:] = flatData1/gain  # ADU
        flatExp2.image.array[:] = flatData2/gain  # ADU

    visitInfoExp1 = lsst.afw.image.VisitInfo(exposureId=expId1, exposureTime=expTime)
    visitInfoExp2 = lsst.afw.image.VisitInfo(exposureId=expId2, exposureTime=expTime)

    flatExp1.getInfo().setVisitInfo(visitInfoExp1)
    flatExp2.getInfo().setVisitInfo(visitInfoExp2)

    return flatExp1, flatExp2
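
# Example (illustrative, not part of the original module): building a mock
# flat pair with the brighter-fatter effect switched off; the parameter
# values are arbitrary.
#
#     flat1, flat2 = makeMockFlats(expTime=5.0, gain=1.5, readNoiseElectrons=5,
#                                  fluxElectrons=1000)
#     meanAdu = np.mean(flat1.image.array)  # roughly 1000*5/1.5 ADU
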

def countMaskedPixels(maskedIm, maskPlane):
    """Count the number of pixels in a given mask plane."""
    maskBit = maskedIm.mask.getPlaneBitMask(maskPlane)
    nPix = np.where(np.bitwise_and(maskedIm.mask.array, maskBit))[0].flatten().size
    return nPix
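
# Example (illustrative): counting pixels flagged in the standard "BAD" plane
# of a mock flat's masked image (zero for a freshly made mock).
#
#     flat, _ = makeMockFlats(expTime=1.0)
#     nBad = countMaskedPixels(flat.maskedImage, "BAD")
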

class PairedVisitListTaskRunner(pipeBase.TaskRunner):
    """Subclass of TaskRunner for handling intrinsically paired visits.

    This transforms the processed arguments generated by the ArgumentParser
    into the arguments expected by tasks which take visit pairs for their
    run() methods.

    Such tasks' run() methods tend to take two arguments,
    one of which is the dataRef (as usual), and the other is the list
    of visit-pairs, in the form of a list of tuples.
    This list is supplied on the command line as documented,
    and this class parses that, and passes the parsed version
    to the run() method.

    See pipeBase.TaskRunner for more information.
    """

    @staticmethod
    def getTargetList(parsedCmd, **kwargs):
        """Parse the visit list and pass through explicitly."""
        visitPairs = []
        for visitStringPair in parsedCmd.visitPairs:
            visitStrings = visitStringPair.split(",")
            if len(visitStrings) != 2:
                raise RuntimeError("Found {} visits in {} instead of 2".format(len(visitStrings),
                                                                               visitStringPair))
            try:
                visits = [int(visit) for visit in visitStrings]
            except Exception:
                raise RuntimeError("Could not parse {} as two integer visit numbers".format(visitStringPair))
            visitPairs.append(visits)

        return pipeBase.TaskRunner.getTargetList(parsedCmd, visitPairs=visitPairs, **kwargs)

def parseCmdlineNumberString(inputString):
    """Parse command line numerical expression syntax and return as list of int.

    Take an input of the form "'1..5:2^123..126'" as a string, and return
    a list of ints as [1, 3, 5, 123, 124, 125, 126].
    """
    outList = []
    for subString in inputString.split("^"):
        mat = re.search(r"^(\d+)\.\.(\d+)(?::(\d+))?$", subString)
        if mat:
            v1 = int(mat.group(1))
            v2 = int(mat.group(2))
            v3 = mat.group(3)
            v3 = int(v3) if v3 else 1
            for v in range(v1, v2 + 1, v3):
                outList.append(int(v))
        else:
            outList.append(int(subString))
    return outList

class SingleVisitListTaskRunner(pipeBase.TaskRunner):
    """Subclass of TaskRunner for tasks requiring a list of visits per dataRef.

    This transforms the processed arguments generated by the ArgumentParser
    into the arguments expected by tasks which require a list of visits
    to be supplied for each dataRef, as is common in `lsst.cp.pipe` code.

    Such tasks' run() methods tend to take two arguments,
    one of which is the dataRef (as usual), and the other is the list
    of visits.
    This list is supplied on the command line as documented,
    and this class parses that, and passes the parsed version
    to the run() method.

    See `lsst.pipe.base.TaskRunner` for more information.
    """

    @staticmethod
    def getTargetList(parsedCmd, **kwargs):
        """Parse the visit list and pass through explicitly."""
        # If this has been pre-parsed and therefore doesn't have a length of
        # one, then something has gone wrong, so execution should stop here.
        assert len(parsedCmd.visitList) == 1, 'visitList parsing assumptions violated'
        visits = parseCmdlineNumberString(parsedCmd.visitList[0])

        return pipeBase.TaskRunner.getTargetList(parsedCmd, visitList=visits, **kwargs)

class NonexistentDatasetTaskDataIdContainer(pipeBase.DataIdContainer):
    """A DataIdContainer for the tasks for which the output does
    not yet exist."""

    def makeDataRefList(self, namespace):
        """Compute refList based on idList.

        This method must be defined as the dataset does not exist before this
        task is run.

        Parameters
        ----------
        namespace
            Results of parsing the command-line.

        Notes
        -----
        Not called if ``add_id_argument`` was called
        with ``doMakeDataRefList=False``.
        Note that this is almost a copy-and-paste of the vanilla
        implementation, but without checking if the datasets already exist,
        as this task exists to make them.
        """
        if self.datasetType is None:
            raise RuntimeError("Must call setDatasetType first")
        butler = namespace.butler
        for dataId in self.idList:
            refList = list(butler.subset(datasetType=self.datasetType, level=self.level, dataId=dataId))
            # Exclude nonexistent data.
            # This is a recursive test, e.g. for the sake of "raw" data.
            if not refList:
                namespace.log.warn("No data found for dataId=%s", dataId)
                continue
            self.refList += refList

def irlsFit(initialParams, dataX, dataY, function, weightsY=None, weightType='Cauchy'):
    """Iteratively reweighted least squares fit.

    This uses the `lsst.cp.pipe.utils.fitLeastSq`, but applies weights
    based on the Cauchy distribution by default. Other weight options
    are implemented. See e.g. Holland and Welsch, 1977,
    doi:10.1080/03610927708827533

    Parameters
    ----------
    initialParams : `list` [`float`]
        Starting parameters.
    dataX : `numpy.array` [`float`]
        Abscissa data.
    dataY : `numpy.array` [`float`]
        Ordinate data.
    function : callable
        Function to fit.
    weightsY : `numpy.array` [`float`], optional
        Weights to apply to the data.
    weightType : `str`, optional
        Type of weighting to use. One of Cauchy, Anderson, bisquare,
        box, Welsch, Huber, logistic, or Fair.

    Returns
    -------
    polyFit : `list` [`float`]
        Final best fit parameters.
    polyFitErr : `list` [`float`]
        Final errors on fit parameters.
    chiSq : `float`
        Reduced chi squared.
    weightsY : `list` [`float`]
        Final weights used for each point.

    Raises
    ------
    RuntimeError
        Raised if an unknown weightType string is passed.
    """
    if weightsY is None:
        weightsY = np.ones_like(dataX)

    polyFit, polyFitErr, chiSq = fitLeastSq(initialParams, dataX, dataY, function, weightsY=weightsY)
    for iteration in range(10):
        resid = np.abs(dataY - function(polyFit, dataX)) / np.sqrt(dataY)
        if weightType == 'Cauchy':
            # Use Cauchy weighting. This is a soft weight.
            # At [2, 3, 5, 10] sigma, weights are [.59, .39, .19, .05].
            Z = resid / 2.385
            weightsY = 1.0 / (1.0 + np.square(Z))
        elif weightType == 'Anderson':
            # Anderson+1972 weighting. This is a hard weight.
            # At [2, 3, 5, 10] sigma, weights are [.67, .35, 0.0, 0.0].
            Z = resid / (1.339 * np.pi)
            weightsY = np.where(Z < 1.0, np.sinc(Z), 0.0)
        elif weightType == 'bisquare':
            # Beaton and Tukey (1974) biweight. This is a hard weight.
            # At [2, 3, 5, 10] sigma, weights are [.81, .59, 0.0, 0.0].
            Z = resid / 4.685
            weightsY = np.where(Z < 1.0, 1.0 - np.square(Z), 0.0)
        elif weightType == 'box':
            # Hinich and Talwar (1975). This is a hard weight.
            # At [2, 3, 5, 10] sigma, weights are [1.0, 0.0, 0.0, 0.0].
            weightsY = np.where(resid < 2.795, 1.0, 0.0)
        elif weightType == 'Welsch':
            # Dennis and Welsch (1976). This is a hard weight.
            # At [2, 3, 5, 10] sigma, weights are [.64, .36, .06, 1e-5].
            Z = resid / 2.985
            weightsY = np.exp(-1.0 * np.square(Z))
        elif weightType == 'Huber':
            # Huber (1964) weighting. This is a soft weight.
            # At [2, 3, 5, 10] sigma, weights are [.67, .45, .27, .13].
            Z = resid / 1.345
            weightsY = np.where(Z < 1.0, 1.0, 1 / Z)
        elif weightType == 'logistic':
            # Logistic weighting. This is a soft weight.
            # At [2, 3, 5, 10] sigma, weights are [.56, .40, .24, .12].
            Z = resid / 1.205
            weightsY = np.tanh(Z) / Z
        elif weightType == 'Fair':
            # Fair (1974) weighting. This is a soft weight.
            # At [2, 3, 5, 10] sigma, weights are [.41, .32, .22, .12].
            Z = resid / 1.4
            weightsY = (1.0 / (1.0 + (Z)))
        else:
            raise RuntimeError(f"Unknown weighting type: {weightType}")
        polyFit, polyFitErr, chiSq = fitLeastSq(initialParams, dataX, dataY, function, weightsY=weightsY)

    return polyFit, polyFitErr, chiSq, weightsY
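
# Example (illustrative sketch, not part of the original module): robust
# straight-line fit with the default Cauchy weighting; the data below are
# invented.
#
#     x = np.linspace(1.0, 100.0, 100)
#     y = 2.0 + 3.0*x
#     y[50] += 500.0  # an outlier that plain least squares would chase
#     bestFit, bestFitErr, chiSq, weights = irlsFit([1.0, 1.0], x, y, funcPolynomial)
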

def fitLeastSq(initialParams, dataX, dataY, function, weightsY=None):
    """Do a fit and estimate the parameter errors using scipy.optimize.leastsq.

    optimize.leastsq returns the fractional covariance matrix. To estimate the
    standard deviation of the fit parameters, multiply the entries of this matrix
    by the unweighted reduced chi squared and take the square root of the diagonal elements.

    Parameters
    ----------
    initialParams : `list` of `float`
        Initial values for fit parameters. For ptcFitType=POLYNOMIAL, its length
        determines the degree of the polynomial.
    dataX : `numpy.array` of `float`
        Data in the abscissa axis.
    dataY : `numpy.array` of `float`
        Data in the ordinate axis.
    function : callable object (function)
        Function to fit the data with.
    weightsY : `numpy.array` of `float`, optional
        Weights of the data in the ordinate axis.

    Returns
    -------
    pFitSingleLeastSquares : `list` of `float`
        List with fitted parameters.
    pErrSingleLeastSquares : `list` of `float`
        List with errors for fitted parameters.
    reducedChiSqSingleLeastSquares : `float`
        Reduced chi squared, unweighted if weightsY is not provided.
    """
    if weightsY is None:
        weightsY = np.ones(len(dataX))

    def errFunc(p, x, y, weightsY=None):
        if weightsY is None:
            weightsY = np.ones(len(x))
        return (function(p, x) - y)*weightsY

    pFit, pCov, infoDict, errMessage, success = leastsq(errFunc, initialParams,
                                                        args=(dataX, dataY, weightsY), full_output=1,
                                                        epsfcn=0.0001)

    if (len(dataY) > len(initialParams)) and pCov is not None:
        reducedChiSq = calculateWeightedReducedChi2(dataY, function(pFit, dataX), weightsY, len(dataY),
                                                    len(initialParams))
        pCov *= reducedChiSq
    else:
        pCov = np.zeros((len(initialParams), len(initialParams)))
        pCov[:, :] = np.nan
        reducedChiSq = np.nan

    errorVec = []
    for i in range(len(pFit)):
        errorVec.append(np.fabs(pCov[i][i])**0.5)

    pFitSingleLeastSquares = pFit
    pErrSingleLeastSquares = np.array(errorVec)

    return pFitSingleLeastSquares, pErrSingleLeastSquares, reducedChiSq
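
# Example (illustrative, values invented): fitting a quadratic PTC-style
# polynomial; three initial parameters select a second-order polynomial in
# funcPolynomial.
#
#     mu = np.linspace(100.0, 50000.0, 20)
#     var = 10.0 + 0.7*mu - 2.0e-6*mu*mu
#     pars, parErrs, redChi2 = fitLeastSq([1.0, 1.0, 1.0], mu, var, funcPolynomial)
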

def fitBootstrap(initialParams, dataX, dataY, function, weightsY=None, confidenceSigma=1.):
    """Do a fit using least squares and bootstrap to estimate parameter errors.

    The bootstrap error bars are calculated by fitting 100 random data sets.

    Parameters
    ----------
    initialParams : `list` of `float`
        Initial values for fit parameters. For ptcFitType=POLYNOMIAL, its length
        determines the degree of the polynomial.
    dataX : `numpy.array` of `float`
        Data in the abscissa axis.
    dataY : `numpy.array` of `float`
        Data in the ordinate axis.
    function : callable object (function)
        Function to fit the data with.
    weightsY : `numpy.array` of `float`, optional
        Weights of the data in the ordinate axis.
    confidenceSigma : `float`, optional
        Number of sigmas that determine confidence interval for the bootstrap errors.

    Returns
    -------
    pFitBootstrap : `list` of `float`
        List with fitted parameters.
    pErrBootstrap : `list` of `float`
        List with errors for fitted parameters.
    reducedChiSqBootstrap : `float`
        Reduced chi squared, unweighted if weightsY is not provided.
    """
    if weightsY is None:
        weightsY = np.ones(len(dataX))

    def errFunc(p, x, y, weightsY):
        if weightsY is None:
            weightsY = np.ones(len(x))
        return (function(p, x) - y)*weightsY

    # Fit first time
    pFit, _ = leastsq(errFunc, initialParams, args=(dataX, dataY, weightsY), full_output=0)

    # Get the residuals of this first fit; their absolute values set the
    # scatter used to generate the random data sets below.
    residuals = errFunc(pFit, dataX, dataY, weightsY)
    # 100 random data sets are generated and fitted
    pars = []
    for i in range(100):
        randomDelta = np.random.normal(0., np.fabs(residuals), len(dataY))
        randomDataY = dataY + randomDelta
        randomFit, _ = leastsq(errFunc, initialParams,
                               args=(dataX, randomDataY, weightsY), full_output=0)
        pars.append(randomFit)
    pars = np.array(pars)
    meanPfit = np.mean(pars, 0)

    # Confidence interval for parameter estimates
    errPfit = confidenceSigma*np.std(pars, 0)
    pFitBootstrap = meanPfit
    pErrBootstrap = errPfit

    reducedChiSq = calculateWeightedReducedChi2(dataY, function(pFitBootstrap, dataX), weightsY, len(dataY),
                                                len(initialParams))
    return pFitBootstrap, pErrBootstrap, reducedChiSq
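
# Example (illustrative): same call pattern as the fitLeastSq sketch above,
# but the parameter errors come from refitting 100 perturbed realizations of
# the data.
#
#     pars, parErrs, redChi2 = fitBootstrap([1.0, 1.0, 1.0], mu, var, funcPolynomial)
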

def funcPolynomial(pars, x):
    """Polynomial function definition.

    Parameters
    ----------
    pars : `list`
        Polynomial coefficients. Its length determines the polynomial order.
    x : `numpy.array`
        Abscissa array.

    Returns
    -------
    Ordinate array after evaluating polynomial of order len(pars)-1 at `x`.
    """
    return poly.polyval(x, [*pars])

def funcAstier(pars, x):
    """Single brighter-fatter parameter model for PTC; Equation 16 of Astier+19.

    Parameters
    ----------
    pars : `list`
        Parameters of the model: a00 (brighter-fatter), gain (e/ADU), and noise (e^2).
    x : `numpy.array`
        Signal mu (ADU).

    Returns
    -------
    C_00 (variance) in ADU^2.
    """
    a00, gain, noise = pars
    return 0.5/(a00*gain*gain)*(np.exp(2*a00*x*gain)-1) + noise/(gain*gain)  # C_00
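
# Example (illustrative, parameter values invented): evaluating the Astier+19
# PTC model for a 1.5 e/ADU gain, a00 = -1e-6, and 25 e^2 of read noise.
#
#     mu = np.linspace(100.0, 80000.0, 50)  # mean signal, ADU
#     variance = funcAstier([-1.0e-6, 1.5, 25.0], mu)  # C_00, ADU^2
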

def arrangeFlatsByExpTime(exposureList):
    """Arrange exposures by exposure time.

    Parameters
    ----------
    exposureList : `list` [`lsst.afw.image.exposure.exposure.ExposureF`]
        Input list of exposures.

    Returns
    -------
    flatsAtExpTime : `dict` [`float`,
                     `list` [`lsst.afw.image.exposure.exposure.ExposureF`]]
        Dictionary that groups flat-field exposures that have the same
        exposure time (seconds).
    """
    flatsAtExpTime = {}
    for exp in exposureList:
        tempFlat = exp
        expTime = tempFlat.getInfo().getVisitInfo().getExposureTime()
        listAtExpTime = flatsAtExpTime.setdefault(expTime, [])
        listAtExpTime.append(tempFlat)

    return flatsAtExpTime
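
# Example (illustrative): grouping mock flats by exposure time; two 1 s flats
# and two 2 s flats end up under the keys 1.0 and 2.0.
#
#     flats = [*makeMockFlats(1.0), *makeMockFlats(2.0)]
#     groups = arrangeFlatsByExpTime(flats)  # {1.0: [flat, flat], 2.0: [flat, flat]}
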

def arrangeFlatsByExpId(exposureList):
    """Arrange exposures by exposure ID.

    There is no guarantee that this will properly group exposures, but
    it allows a sequence of flats that have different illumination
    (despite having the same exposure time) to be processed.

    Parameters
    ----------
    exposureList : `list` [`lsst.afw.image.exposure.exposure.ExposureF`]
        Input list of exposures.

    Returns
    -------
    flatsAtExpId : `dict` [`int`,
                   `list` [`lsst.afw.image.exposure.exposure.ExposureF`]]
        Dictionary that groups flat-field exposures sequentially by
        their exposure id.

    Notes
    -----
    This algorithm sorts the input exposures by their exposure id, and
    then assigns each pair of exposures (exp_j, exp_{j+1}) to pair k,
    such that 2*k = j, where j is the python index of one of the
    exposures (starting from zero). By checking for the IndexError
    before appending, we can ensure that there will only ever be fully
    populated pairs.
    """
    flatsAtExpId = {}
    sortedExposures = sorted(exposureList, key=lambda exp: exp.getInfo().getVisitInfo().getExposureId())

    for jPair, exp in enumerate(sortedExposures):
        if (jPair + 1) % 2:
            kPair = jPair // 2
            try:
                # Fetch the partner first, so an unpaired final exposure does
                # not leave a half-populated pair behind.
                nextExp = sortedExposures[jPair + 1]
            except IndexError:
                continue
            listAtExpId = flatsAtExpId.setdefault(kPair, [])
            listAtExpId.append(exp)
            listAtExpId.append(nextExp)

    return flatsAtExpId

def checkExpLengthEqual(exp1, exp2, v1=None, v2=None, raiseWithMessage=False):
    """Check the exposure lengths of two exposures are equal.

    Parameters
    ----------
    exp1 : `lsst.afw.image.exposure.ExposureF`
        First exposure to check.
    exp2 : `lsst.afw.image.exposure.ExposureF`
        Second exposure to check.
    v1 : `int` or `str`, optional
        First visit of the visit pair.
    v2 : `int` or `str`, optional
        Second visit of the visit pair.
    raiseWithMessage : `bool`
        If True, instead of returning a bool, raise a RuntimeError if exposure
        times are not equal, with a message about which visits mismatch if the
        information is available.

    Returns
    -------
    success : `bool`
        True if the exposure times are equal, False otherwise (only returned
        when ``raiseWithMessage`` is False).

    Raises
    ------
    RuntimeError
        Raised if the exposure lengths of the two exposures are not equal
        and ``raiseWithMessage`` is True.
    """
    expTime1 = exp1.getInfo().getVisitInfo().getExposureTime()
    expTime2 = exp2.getInfo().getVisitInfo().getExposureTime()
    if expTime1 != expTime2:
        if raiseWithMessage:
            msg = "Exposure lengths for visit pairs must be equal. " + \
                  "Found %s and %s" % (expTime1, expTime2)
            if v1 and v2:
                msg += " for visit pair %s, %s" % (v1, v2)
            raise RuntimeError(msg)
        else:
            return False
    return True
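
# Example (illustrative): the two mock flats from makeMockFlats share an
# exposure time, so the check passes.
#
#     flat1, flat2 = makeMockFlats(expTime=5.0)
#     assert checkExpLengthEqual(flat1, flat2)
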

def validateIsrConfig(isrTask, mandatory=None, forbidden=None, desirable=None, undesirable=None,
                      checkTrim=True, logName=None):
    """Check that appropriate ISR settings have been selected for the task.

    Note that this checks that the task itself is configured correctly rather
    than checking a config.

    Parameters
    ----------
    isrTask : `lsst.ip.isr.IsrTask`
        The task whose config is to be validated.
    mandatory : `iterable` of `str`
        isr steps that must be set to True. Raises if False or missing.
    forbidden : `iterable` of `str`
        isr steps that must be set to False. Raises if True, warns if missing.
    desirable : `iterable` of `str`
        isr steps that should probably be set to True. Warns if False, info if
        missing.
    undesirable : `iterable` of `str`
        isr steps that should probably be set to False. Warns if True, info if
        missing.
    checkTrim : `bool`
        Check to ensure the isrTask's assembly subtask is trimming the images.
        This is a separate config as it is very ugly to do this within the
        normal configuration lists as it is an option of a subtask.
    logName : `str`, optional
        Name of the logger to issue the warnings with; an "isrValidation"
        logger is used if this is not supplied.

    Raises
    ------
    RuntimeError
        Raised if ``mandatory`` config parameters are False,
        or if ``forbidden`` parameters are True.
    TypeError
        Raised if parameter ``isrTask`` is an invalid type.

    Notes
    -----
    Logs warnings using an isrValidation logger for desirable/undesirable
    options that are of the wrong polarity or if keys are missing.
    """
    if not isinstance(isrTask, ipIsr.IsrTask):
        raise TypeError(f'Must supply an instance of lsst.ip.isr.IsrTask not {type(isrTask)}')

    configDict = isrTask.config.toDict()

    if logName and isinstance(logName, str):
        log = lsst.log.getLogger(logName)
    else:
        log = lsst.log.getLogger("isrValidation")

    if mandatory:
        for configParam in mandatory:
            if configParam not in configDict:
                raise RuntimeError(f"Mandatory parameter {configParam} not found in the isr configuration.")
            if configDict[configParam] is False:
                raise RuntimeError(f"Must set config.isr.{configParam} to True for this task.")

    if forbidden:
        for configParam in forbidden:
            if configParam not in configDict:
                log.warn(f"Failed to find forbidden key {configParam} in the isr config. The keys in the"
                         " forbidden list should each have an associated Field in IsrConfig:"
                         " check that there is not a typo in this case.")
                continue
            if configDict[configParam] is True:
                raise RuntimeError(f"Must set config.isr.{configParam} to False for this task.")

    if desirable:
        for configParam in desirable:
            if configParam not in configDict:
                log.info(f"Failed to find key {configParam} in the isr config. You probably want"
                         " to set the equivalent for your obs_package to True.")
                continue
            if configDict[configParam] is False:
                log.warn(f"Found config.isr.{configParam} set to False for this task."
                         " The cp_pipe Config recommends setting this to True.")
    if undesirable:
        for configParam in undesirable:
            if configParam not in configDict:
                log.info(f"Failed to find key {configParam} in the isr config. You probably want"
                         " to set the equivalent for your obs_package to False.")
                continue
            if configDict[configParam] is True:
                log.warn(f"Found config.isr.{configParam} set to True for this task."
                         " The cp_pipe Config recommends setting this to False.")

    if checkTrim:  # subtask setting, seems non-trivial to combine with above lists
        if not isrTask.assembleCcd.config.doTrim:
            raise RuntimeError("Must trim when assembling CCDs. Set config.isr.assembleCcd.doTrim to True")
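
# Example (illustrative sketch; `isrTask` is assumed to be a constructed
# lsst.ip.isr.IsrTask and the step names are typical IsrConfig flags):
# requiring overscan and bias correction while forbidding dark correction.
#
#     validateIsrConfig(isrTask, mandatory=['doOverscan', 'doBias'],
#                       forbidden=['doDark'], desirable=['doLinearize'])
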

def ddict2dict(d):
    """Convert nested default dictionaries to regular dictionaries.

    This is needed to prevent yaml persistence issues.

    Parameters
    ----------
    d : `defaultdict`
        A possibly nested set of `defaultdict`.

    Returns
    -------
    dict : `dict`
        A possibly nested set of `dict`.
    """
    for k, v in d.items():
        if isinstance(v, dict):
            d[k] = ddict2dict(v)
    return dict(d)
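
# Example (illustrative): collapsing a nested defaultdict into plain dicts so
# it can be persisted cleanly.
#
#     from collections import defaultdict
#     d = defaultdict(lambda: defaultdict(list))
#     d['amp1']['means'].append(1000.0)
#     plain = ddict2dict(d)  # {'amp1': {'means': [1000.0]}}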