Coverage for python/lsst/cp/pipe/cpCombine.py: 21%

237 statements  

coverage.py v7.4.3, created at 2024-03-01 14:30 +0000

# This file is part of cp_pipe.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import time

import lsst.geom as geom
import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
import lsst.pipe.base.connectionTypes as cT
import lsst.afw.math as afwMath
import lsst.afw.image as afwImage

from lsst.ip.isr.vignette import maskVignettedRegion

from astro_metadata_translator import merge_headers, ObservationGroup
from astro_metadata_translator.serialize import dates_to_fits


__all__ = ["CalibStatsConfig", "CalibStatsTask",
           "CalibCombineConfig", "CalibCombineConnections", "CalibCombineTask",
           "CalibCombineByFilterConfig", "CalibCombineByFilterConnections", "CalibCombineByFilterTask"]


# CalibStatsConfig/CalibStatsTask from pipe_base/constructCalibs.py
class CalibStatsConfig(pexConfig.Config):
    """Parameters controlling the measurement of background
    statistics.
    """

    stat = pexConfig.Field(
        dtype=str,
        default="MEANCLIP",
        doc="Statistic name to use to estimate background (from `~lsst.afw.math.Property`)",
    )
    clip = pexConfig.Field(
        dtype=float,
        default=3.0,
        doc="Clipping threshold for background",
    )
    nIter = pexConfig.Field(
        dtype=int,
        default=3,
        doc="Clipping iterations for background",
    )
    mask = pexConfig.ListField(
        dtype=str,
        default=["DETECTED", "BAD", "NO_DATA"],
        doc="Mask planes to reject",
    )


class CalibStatsTask(pipeBase.Task):
    """Measure statistics on the background.

    This can be useful for scaling the background, e.g., for flats and
    fringe frames.
    """

    ConfigClass = CalibStatsConfig

    def run(self, exposureOrImage):
        """Measure a particular statistic on an image (of some sort).

        Parameters
        ----------
        exposureOrImage : `lsst.afw.image.Exposure`,
                          `lsst.afw.image.MaskedImage`, or
                          `lsst.afw.image.Image`
            Exposure or image to calculate statistics on.

        Returns
        -------
        results : `float`
            Resulting statistic value.
        """
        stats = afwMath.StatisticsControl(self.config.clip, self.config.nIter,
                                          afwImage.Mask.getPlaneBitMask(self.config.mask))
        try:
            image = exposureOrImage.getMaskedImage()
        except Exception:
            try:
                image = exposureOrImage.getImage()
            except Exception:
                image = exposureOrImage
        statType = afwMath.stringToStatisticsProperty(self.config.stat)
        return afwMath.makeStatistics(image, statType, stats).getValue()

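# A minimal usage sketch of CalibStatsTask, assuming an in-memory
# ``lsst.afw.image.Exposure`` named ``exposure`` (a hypothetical object, not
# one of the pipeline inputs defined below); it returns the clipped-mean
# background level as a `float`:
#
#     statsConfig = CalibStatsConfig()
#     statsConfig.stat = "MEANCLIP"
#     statsTask = CalibStatsTask(config=statsConfig)
#     background = statsTask.run(exposure)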

class CalibCombineConnections(pipeBase.PipelineTaskConnections,
                              dimensions=("instrument", "detector")):
    inputExpHandles = cT.Input(
        name="cpInputs",
        doc="Input pre-processed exposures to combine.",
        storageClass="Exposure",
        dimensions=("instrument", "detector", "exposure"),
        multiple=True,
        deferLoad=True,
    )
    inputScales = cT.Input(
        name="cpScales",
        doc="Input scale factors to use.",
        storageClass="StructuredDataDict",
        dimensions=("instrument", ),
        multiple=False,
    )

    outputData = cT.Output(
        name="cpProposal",
        doc="Output combined proposed calibration to be validated and certified.",
        storageClass="ExposureF",
        dimensions=("instrument", "detector"),
        isCalibration=True,
    )

    def __init__(self, *, config=None):
        super().__init__(config=config)

        if config and config.exposureScaling != "InputList":
            self.inputs.discard("inputScales")


# CalibCombineConfig/CalibCombineTask from pipe_base/constructCalibs.py
class CalibCombineConfig(pipeBase.PipelineTaskConfig,
                         pipelineConnections=CalibCombineConnections):
    """Configuration for combining calib exposures.
    """

    calibrationType = pexConfig.Field(
        dtype=str,
        default="calibration",
        doc="Name of calibration to be generated.",
    )

    exposureScaling = pexConfig.ChoiceField(
        dtype=str,
        allowed={
            "Unity": "Do not scale inputs. Scale factor is 1.0.",
            "ExposureTime": "Scale inputs by their exposure time.",
            "DarkTime": "Scale inputs by their dark time.",
            "MeanStats": "Scale inputs based on their mean values.",
            "InputList": "Scale inputs based on a list of values.",
        },
        default="Unity",
        doc="Scaling to be applied to each input exposure.",
    )
    scalingLevel = pexConfig.ChoiceField(
        dtype=str,
        allowed={
            "DETECTOR": "Scale by detector.",
            "AMP": "Scale by amplifier.",
        },
        default="DETECTOR",
        doc="Region to scale.",
    )
    maxVisitsToCalcErrorFromInputVariance = pexConfig.Field(
        dtype=int,
        default=5,
        doc="Maximum number of visits to estimate variance from input variance, not per-pixel spread",
    )
    subregionSize = pexConfig.ListField(
        dtype=int,
        doc="Width, height of subregion size.",
        length=2,
        # This is 200 rows for all detectors smaller than 10k in width.
        default=(10000, 200),
    )

    doVignette = pexConfig.Field(
        dtype=bool,
        default=False,
        doc="Copy vignette polygon to output and censor vignetted pixels?"
    )

    distributionPercentiles = pexConfig.ListField(
        dtype=float,
        default=[0, 5, 16, 50, 84, 95, 100],
        doc="Percentile levels to measure on the final combined calibration.",
    )
    mask = pexConfig.ListField(
        dtype=str,
        default=["SAT", "DETECTED", "INTRP"],
        doc="Mask planes to respect",
    )
    combine = pexConfig.Field(
        dtype=str,
        default="MEANCLIP",
        doc="Statistic name to use for combination (from `~lsst.afw.math.Property`)",
    )
    clip = pexConfig.Field(
        dtype=float,
        default=3.0,
        doc="Clipping threshold for combination",
    )
    nIter = pexConfig.Field(
        dtype=int,
        default=3,
        doc="Clipping iterations for combination",
    )
    stats = pexConfig.ConfigurableField(
        target=CalibStatsTask,
        doc="Background statistics configuration",
    )

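# A hedged example of overriding this config, e.g. from a pipeline task
# configuration block or an obs-package config override file (the values
# are illustrative, not recommended defaults):
#
#     config.calibrationType = "dark"
#     config.exposureScaling = "DarkTime"
#     config.combine = "MEDIAN"
#     config.mask = ["SAT", "DETECTED", "INTRP", "BAD"]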

class CalibCombineTask(pipeBase.PipelineTask):
    """Task to combine calib exposures."""

    ConfigClass = CalibCombineConfig
    _DefaultName = "cpCombine"

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.makeSubtask("stats")

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        inputs = butlerQC.get(inputRefs)

        dimensions = [dict(expHandle.dataId.required) for expHandle in inputRefs.inputExpHandles]
        inputs["inputDims"] = dimensions

        outputs = self.run(**inputs)
        butlerQC.put(outputs, outputRefs)

    def run(self, inputExpHandles, inputScales=None, inputDims=None):
        """Combine calib exposures for a single detector.

        Parameters
        ----------
        inputExpHandles : `list` [`lsst.daf.butler.DeferredDatasetHandle`]
            Input list of exposure handles to combine.
        inputScales : `dict` [`dict` [`dict` [`float`]]], optional
            Dictionary of scales, indexed by detector (`int`),
            amplifier (`int`), and exposure (`int`). Used for
            'inputExps' scaling.
        inputDims : `list` [`dict`]
            List of dictionaries of input data dimensions/values.
            Each list entry should contain:

            ``"exposure"``
                exposure id value (`int`)
            ``"detector"``
                detector id value (`int`)

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The results struct containing:

            ``outputData``
                Final combined exposure generated from the inputs
                (`lsst.afw.image.Exposure`).

        Raises
        ------
        RuntimeError
            Raised if no input data is found. Also raised if
            config.exposureScaling == InputList, and a necessary scale
            was not found.
        """
        width, height = self.getDimensions(inputExpHandles)
        stats = afwMath.StatisticsControl(self.config.clip, self.config.nIter,
                                          afwImage.Mask.getPlaneBitMask(self.config.mask))
        numExps = len(inputExpHandles)
        if numExps < 1:
            raise RuntimeError("No valid input data")
        if numExps < self.config.maxVisitsToCalcErrorFromInputVariance:
            stats.setCalcErrorFromInputVariance(True)

        inputDetector = inputExpHandles[0].get(component="detector")

        # Create output exposure for combined data.
        combined = afwImage.MaskedImageF(width, height)
        combinedExp = afwImage.makeExposure(combined)

        # Apply scaling:
        expScales = []
        if inputDims is None:
            inputDims = [dict() for i in inputExpHandles]

        for index, (expHandle, dims) in enumerate(zip(inputExpHandles, inputDims)):
            scale = 1.0
            visitInfo = expHandle.get(component="visitInfo")
            if self.config.exposureScaling == "ExposureTime":
                scale = visitInfo.getExposureTime()
            elif self.config.exposureScaling == "DarkTime":
                scale = visitInfo.getDarkTime()
            elif self.config.exposureScaling == "MeanStats":

                # Note: there may be a bug freeing memory here. TBD.
                exp = expHandle.get()
                scale = self.stats.run(exp)
                del exp
            elif self.config.exposureScaling == "InputList":
                visitId = dims.get("exposure", None)
                detectorId = dims.get("detector", None)
                if visitId is None or detectorId is None:
                    raise RuntimeError(f"Could not identify scaling for input {index} ({dims})")
                if detectorId not in inputScales["expScale"]:
                    raise RuntimeError(f"Could not identify a scaling for input {index}"
                                       f" detector {detectorId}")

                if self.config.scalingLevel == "DETECTOR":
                    if visitId not in inputScales["expScale"][detectorId]:
                        raise RuntimeError(f"Could not identify a scaling for input {index}"
                                           f" detector {detectorId} visit {visitId}")

                    scale = inputScales["expScale"][detectorId][visitId]
                elif self.config.scalingLevel == "AMP":
                    scale = [inputScales["expScale"][detectorId][amp.getName()][visitId]
                             for amp in inputDetector]
                else:
                    raise RuntimeError(f"Unknown scaling level: {self.config.scalingLevel}")
            elif self.config.exposureScaling == "Unity":
                scale = 1.0
            else:
                raise RuntimeError(f"Unknown scaling type: {self.config.exposureScaling}.")

            expScales.append(scale)
            self.log.info("Scaling input %d by %s", index, scale)

        self.combine(combinedExp, inputExpHandles, expScales, stats)

        self.interpolateNans(combined)

        if self.config.doVignette:
            polygon = inputExpHandles[0].get(component="validPolygon")
            maskVignettedRegion(combined, polygon=polygon, vignetteValue=0.0)

        # Combine headers
        self.combineHeaders(inputExpHandles, combinedExp,
                            calibType=self.config.calibrationType, scales=expScales)

        # Set the detector
        combinedExp.setDetector(inputDetector)

        # Do we need to set a filter?
        filterLabel = inputExpHandles[0].get(component="filter")
        self.setFilter(combinedExp, filterLabel)

        # Set QA headers
        self.calibStats(combinedExp, self.config.calibrationType)

        # Return
        return pipeBase.Struct(
            outputData=combinedExp,
        )

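    # For ``exposureScaling="InputList"``, ``inputScales`` must be nested as
    # ``inputScales["expScale"][detectorId][...]``.  A sketch with
    # illustrative detector/exposure ids and amplifier names:
    #
    #     inputScales = {
    #         "expScale": {
    #             # scalingLevel == "DETECTOR": exposure id -> scale
    #             20: {12345: 1.02, 12346: 0.98},
    #             # scalingLevel == "AMP": amp name -> {exposure id -> scale}
    #             21: {"C00": {12345: 1.01}, "C01": {12345: 0.99}},
    #         },
    #     }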

    def getDimensions(self, expHandleList):
        """Get dimensions of the inputs.

        Parameters
        ----------
        expHandleList : `list` [`lsst.daf.butler.DeferredDatasetHandle`]
            Exposure handles to check the sizes of.

        Returns
        -------
        width, height : `int`
            Unique set of input dimensions.
        """
        dimList = [expHandle.get(component="bbox").getDimensions() for expHandle in expHandleList]

        return self.getSize(dimList)

    def getSize(self, dimList):
        """Determine a consistent size, given a list of image sizes.

        Parameters
        ----------
        dimList : `list` [`tuple` [`int`, `int`]]
            List of dimensions.

        Raises
        ------
        RuntimeError
            If input dimensions are inconsistent.

        Returns
        -------
        width, height : `int`
            Common dimensions.
        """
        dim = set((w, h) for w, h in dimList)
        if len(dim) != 1:
            raise RuntimeError("Inconsistent dimensions: %s" % dim)
        return dim.pop()

    def applyScale(self, exposure, bbox=None, scale=None):
        """Apply scale to input exposure.

        This implementation applies a flux scaling: the input exposure is
        divided by the provided scale.

        Parameters
        ----------
        exposure : `lsst.afw.image.Exposure`
            Exposure to scale.
        bbox : `lsst.geom.Box2I`
            BBox matching the segment of the exposure passed in.
        scale : `float` or `list` [`float`], optional
            Constant scale to divide the exposure by.
        """
        if scale is not None:
            mi = exposure.getMaskedImage()
            if isinstance(scale, list):
                # Create a realization of the per-amp scales as an
                # image we can take a subset of. This may be slightly
                # slower than only populating the region we care
                # about, but this avoids needing to do arbitrary
                # numbers of offsets, etc.
                scaleExp = afwImage.MaskedImageF(exposure.getDetector().getBBox())
                for amp, ampScale in zip(exposure.getDetector(), scale):
                    scaleExp.image[amp.getBBox()] = ampScale
                scale = scaleExp[bbox]
            mi /= scale

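    # ``applyScale`` divides in place: a float scales the whole subregion,
    # while a list is interpreted as one value per amplifier, in detector
    # order.  Hypothetical sketch (``task`` and ``exposure`` constructed
    # elsewhere; a two-amp detector is assumed):
    #
    #     task.applyScale(exposure, bbox=exposure.getBBox(), scale=2.0)
    #     task.applyScale(exposure, bbox=exposure.getBBox(), scale=[1.9, 2.1])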

    @staticmethod
    def _subBBoxIter(bbox, subregionSize):
        """Iterate over subregions of a bbox.

        Parameters
        ----------
        bbox : `lsst.geom.Box2I`
            Bounding box over which to iterate.
        subregionSize : `lsst.geom.Extent2I`
            Size of sub-bboxes.

        Yields
        ------
        subBBox : `lsst.geom.Box2I`
            Next sub-bounding box of size ``subregionSize`` or
            smaller; each ``subBBox`` is contained within ``bbox``, so
            it may be smaller than ``subregionSize`` at the edges of
            ``bbox``, but it will never be empty.
        """
        if bbox.isEmpty():
            raise RuntimeError("bbox %s is empty" % (bbox,))
        if subregionSize[0] < 1 or subregionSize[1] < 1:
            raise RuntimeError("subregionSize %s must be nonzero" % (subregionSize,))

        for rowShift in range(0, bbox.getHeight(), subregionSize[1]):
            for colShift in range(0, bbox.getWidth(), subregionSize[0]):
                subBBox = geom.Box2I(bbox.getMin() + geom.Extent2I(colShift, rowShift), subregionSize)
                subBBox.clip(bbox)
                if subBBox.isEmpty():
                    raise RuntimeError("Bug: empty bbox! bbox=%s, subregionSize=%s, "
                                       "colShift=%s, rowShift=%s" %
                                       (bbox, subregionSize, colShift, rowShift))
                yield subBBox

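    # Iteration example with the default ``subregionSize`` of (10000, 200):
    # a 4096 x 4000 pixel detector bbox yields 20 sub-boxes, each clipped to
    # 4096 x 200 pixels (the requested 10000-pixel width never fits, so it
    # is trimmed to the bbox).
    #
    #     bbox = geom.Box2I(geom.Point2I(0, 0), geom.Extent2I(4096, 4000))
    #     subBoxes = list(CalibCombineTask._subBBoxIter(bbox, geom.Extent2I(10000, 200)))
    #     assert len(subBoxes) == 20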

    def combine(self, target, expHandleList, expScaleList, stats):
        """Combine multiple images.

        Parameters
        ----------
        target : `lsst.afw.image.Exposure`
            Output exposure to construct.
        expHandleList : `list` [`lsst.daf.butler.DeferredDatasetHandle`]
            Input exposure handles to combine.
        expScaleList : `list` [`float`]
            List of scales to apply to each input image.
        stats : `lsst.afw.math.StatisticsControl`
            Control explaining how to combine the input images.
        """
        combineType = afwMath.stringToStatisticsProperty(self.config.combine)

        subregionSizeArr = self.config.subregionSize
        subregionSize = geom.Extent2I(subregionSizeArr[0], subregionSizeArr[1])
        for subBbox in self._subBBoxIter(target.getBBox(), subregionSize):
            images = []
            for expHandle, expScale in zip(expHandleList, expScaleList):
                inputExp = expHandle.get(parameters={"bbox": subBbox})
                self.applyScale(inputExp, subBbox, expScale)
                images.append(inputExp.getMaskedImage())

            combinedSubregion = afwMath.statisticsStack(images, combineType, stats)
            target.maskedImage.assign(combinedSubregion, subBbox)


    def combineHeaders(self, expHandleList, calib, calibType="CALIB", scales=None):
        """Combine input headers to determine the set of common headers,
        supplemented by calibration inputs. The calibration header is
        set in-place.

        Parameters
        ----------
        expHandleList : `list` [`lsst.daf.butler.DeferredDatasetHandle`]
            Input list of exposure handles to combine.
        calib : `lsst.afw.image.Exposure`
            Output calibration to construct headers for.
        calibType : `str`, optional
            OBSTYPE the output should claim.
        scales : `list` [`float`], optional
            Scale values applied to each input, to be recorded in the header.

        Returns
        -------
        header : `lsst.daf.base.PropertyList`
            Constructed header.
        """
        # Header
        header = calib.getMetadata()
        header.set("OBSTYPE", calibType)

        # Keywords we care about
        comments = {"TIMESYS": "Time scale for all dates",
                    "DATE-OBS": "Start date of earliest input observation",
                    "MJD-OBS": "[d] Start MJD of earliest input observation",
                    "DATE-END": "End date of latest input observation",
                    "MJD-END": "[d] End MJD of latest input observation",
                    "MJD-AVG": "[d] MJD midpoint of all input observations",
                    "DATE-AVG": "Midpoint date of all input observations"}

        # Creation date
        now = time.localtime()
        calibDate = time.strftime("%Y-%m-%d", now)
        calibTime = time.strftime("%X %Z", now)
        header.set("CALIB_CREATION_DATE", calibDate)
        header.set("CALIB_CREATION_TIME", calibTime)

        # Merge input headers
        inputHeaders = [expHandle.get(component="metadata") for expHandle in expHandleList]
        merged = merge_headers(inputHeaders, mode="drop")

        # Scan the first header for items that were dropped due to
        # conflict, and replace them.
        for k, v in merged.items():
            if k not in header:
                md = inputHeaders[0]
                comment = md.getComment(k) if k in md else None
                header.set(k, v, comment=comment)

        # Construct list of visits
        visitInfoList = [expHandle.get(component="visitInfo") for expHandle in expHandleList]
        for i, visit in enumerate(visitInfoList):
            if visit is None:
                continue
            header.set("CPP_INPUT_%d" % (i,), visit.id)
            header.set("CPP_INPUT_DATE_%d" % (i,), str(visit.getDate()))
            header.set("CPP_INPUT_EXPT_%d" % (i,), visit.getExposureTime())
            if scales is not None:
                header.set("CPP_INPUT_SCALE_%d" % (i,), scales[i])

        # Populate a visitInfo. Set the exposure time and dark time
        # to 0.0 or 1.0 as appropriate, and copy the instrument name
        # from one of the inputs.
        expTime = 1.0
        if self.config.connections.outputData.lower() == 'bias':
            expTime = 0.0
        inputVisitInfo = visitInfoList[0]
        visitInfo = afwImage.VisitInfo(exposureTime=expTime, darkTime=expTime,
                                       instrumentLabel=inputVisitInfo.instrumentLabel)
        calib.getInfo().setVisitInfo(visitInfo)

        # Not yet working: DM-22302
        # Create an observation group so we can add some standard headers
        # independent of the form in the input files.
        # Use try block in case we are dealing with unexpected data headers
        try:
            group = ObservationGroup(visitInfoList, pedantic=False)
        except Exception:
            self.log.warning("Exception making an obs group for headers. Continuing.")
            # Fall back to setting a DATE-OBS from the calibDate
            dateCards = {"DATE-OBS": "{}T00:00:00.00".format(calibDate)}
            comments["DATE-OBS"] = "Date of start of day of calibration creation"
        else:
            oldest, newest = group.extremes()
            dateCards = dates_to_fits(oldest.datetime_begin, newest.datetime_end)

        for k, v in dateCards.items():
            header.set(k, v, comment=comments.get(k, None))

        return header

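    # ``combineHeaders`` records one block of cards per input (index ``i``);
    # the values below are purely illustrative:
    #
    #     CPP_INPUT_0       = 2023103100042
    #     CPP_INPUT_DATE_0  = "2023-10-31T05:12:34.000"
    #     CPP_INPUT_EXPT_0  = 30.0
    #     CPP_INPUT_SCALE_0 = 1.0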

    def interpolateNans(self, exp):
        """Interpolate over NANs in the combined image.

        NANs can result from masked areas on the CCD. We don't want
        them getting into our science images, so we replace them with
        the median of the image.

        Parameters
        ----------
        exp : `lsst.afw.image.Exposure`
            Exposure to check for NaNs.
        """
        array = exp.getImage().getArray()
        bad = np.isnan(array)
        if np.any(bad):
            median = np.median(array[np.logical_not(bad)])
            count = np.sum(bad)
            array[bad] = median
            self.log.warning("Found and fixed %s NAN pixels", count)

    @staticmethod
    def setFilter(exp, filterLabel):
        """Dummy function that will not assign a filter.

        Parameters
        ----------
        exp : `lsst.afw.image.Exposure`
            Exposure to assign filter to.
        filterLabel : `lsst.afw.image.FilterLabel`
            Filter to assign.
        """
        pass

    def calibStats(self, exp, calibrationType):
        """Measure bulk statistics for the calibration.

        Parameters
        ----------
        exp : `lsst.afw.image.Exposure`
            Exposure to calculate statistics for.
        calibrationType : `str`
            Type of calibration to record in header.
        """
        metadata = exp.getMetadata()

        # percentiles
        for amp in exp.getDetector():
            ampImage = exp[amp.getBBox()]
            percentileValues = np.nanpercentile(ampImage.image.array,
                                                self.config.distributionPercentiles)
            for level, value in zip(self.config.distributionPercentiles, percentileValues):
                key = f"LSST CALIB {calibrationType.upper()} {amp.getName()} DISTRIBUTION {level}-PCT"
                metadata[key] = value

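    # ``calibStats`` writes one key per amplifier and percentile level, e.g.
    # for a bias and an amp hypothetically named "C00" (value illustrative):
    #
    #     LSST CALIB BIAS C00 DISTRIBUTION 50.0-PCT = 0.02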


# Create versions of the Connections, Config, and Task that support
# filter constraints.
class CalibCombineByFilterConnections(CalibCombineConnections,
                                      dimensions=("instrument", "detector", "physical_filter")):
    inputScales = cT.Input(
        name="cpFilterScales",
        doc="Input scale factors to use.",
        storageClass="StructuredDataDict",
        dimensions=("instrument", "physical_filter"),
        multiple=False,
    )

    outputData = cT.Output(
        name="cpFilterProposal",
        doc="Output combined proposed calibration to be validated and certified.",
        storageClass="ExposureF",
        dimensions=("instrument", "detector", "physical_filter"),
        isCalibration=True,
    )

    def __init__(self, *, config=None):
        super().__init__(config=config)

        if config and config.exposureScaling != "InputList":
            self.inputs.discard("inputScales")


class CalibCombineByFilterConfig(CalibCombineConfig,
                                 pipelineConnections=CalibCombineByFilterConnections):
    pass


class CalibCombineByFilterTask(CalibCombineTask):
    """Task to combine calib exposures that are constrained by filter."""

    ConfigClass = CalibCombineByFilterConfig
    _DefaultName = "cpFilterCombine"

    @staticmethod
    def setFilter(exp, filterLabel):
        """Assign the filter to the combined exposure, if one is supplied.

        Parameters
        ----------
        exp : `lsst.afw.image.Exposure`
            Exposure to assign filter to.
        filterLabel : `lsst.afw.image.FilterLabel`
            Filter to assign.
        """
        if filterLabel:
            exp.setFilter(filterLabel)
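
# Unlike the base-class ``setFilter`` (a deliberate no-op), the by-filter
# variant copies the input filter onto the combined calibration.  Sketch
# (the physical filter name is hypothetical):
#
#     label = afwImage.FilterLabel(band="r", physical="r_03")
#     CalibCombineByFilterTask.setFilter(combinedExp, label)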