Coverage for python/lsst/cp/pipe/cpCombine.py: 21%

221 statements  

coverage.py v6.5.0, created at 2022-11-06 13:44 -0800

# This file is part of cp_pipe.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import time

import lsst.geom as geom
import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
import lsst.pipe.base.connectionTypes as cT
import lsst.afw.math as afwMath
import lsst.afw.image as afwImage

from lsst.ip.isr.vignette import maskVignettedRegion

from astro_metadata_translator import merge_headers, ObservationGroup
from astro_metadata_translator.serialize import dates_to_fits


__all__ = ["CalibStatsConfig", "CalibStatsTask",
           "CalibCombineConfig", "CalibCombineConnections", "CalibCombineTask",
           "CalibCombineByFilterConfig", "CalibCombineByFilterConnections", "CalibCombineByFilterTask"]


# CalibStatsConfig/CalibStatsTask from pipe_base/constructCalibs.py
class CalibStatsConfig(pexConfig.Config):
    """Parameters controlling the measurement of background
    statistics.
    """

    stat = pexConfig.Field(
        dtype=str,
        default="MEANCLIP",
        doc="Statistic name to use to estimate background (from `~lsst.afw.math.Property`)",
    )
    clip = pexConfig.Field(
        dtype=float,
        default=3.0,
        doc="Clipping threshold for background",
    )
    nIter = pexConfig.Field(
        dtype=int,
        default=3,
        doc="Clipping iterations for background",
    )
    mask = pexConfig.ListField(
        dtype=str,
        default=["DETECTED", "BAD", "NO_DATA"],
        doc="Mask planes to reject",
    )


class CalibStatsTask(pipeBase.Task):
    """Measure statistics on the background.

    This can be useful for scaling the background, e.g., for flats and
    fringe frames.
    """

    ConfigClass = CalibStatsConfig

    def run(self, exposureOrImage):
        """Measure a particular statistic on an image (of some sort).

        Parameters
        ----------
        exposureOrImage : `lsst.afw.image.Exposure`,
                          `lsst.afw.image.MaskedImage`, or
                          `lsst.afw.image.Image`
            Exposure or image to calculate statistics on.

        Returns
        -------
        results : `float`
            Resulting statistic value.
        """
        stats = afwMath.StatisticsControl(self.config.clip, self.config.nIter,
                                          afwImage.Mask.getPlaneBitMask(self.config.mask))
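        # The input may be an Exposure, a MaskedImage, or a bare Image;
        # fall back through the accessors below until one succeeds.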

        try:
            image = exposureOrImage.getMaskedImage()
        except Exception:
            try:
                image = exposureOrImage.getImage()
            except Exception:
                image = exposureOrImage
        statType = afwMath.stringToStatisticsProperty(self.config.stat)
        return afwMath.makeStatistics(image, statType, stats).getValue()


class CalibCombineConnections(pipeBase.PipelineTaskConnections,
                              dimensions=("instrument", "detector")):
    inputExpHandles = cT.Input(
        name="cpInputs",
        doc="Input pre-processed exposures to combine.",
        storageClass="Exposure",
        dimensions=("instrument", "detector", "exposure"),
        multiple=True,
        deferLoad=True,
    )
    inputScales = cT.Input(
        name="cpScales",
        doc="Input scale factors to use.",
        storageClass="StructuredDataDict",
        dimensions=("instrument", ),
        multiple=False,
    )

    outputData = cT.Output(
        name="cpProposal",
        doc="Output combined proposed calibration to be validated and certified.",
        storageClass="ExposureF",
        dimensions=("instrument", "detector"),
        isCalibration=True,
    )

    def __init__(self, *, config=None):
        super().__init__(config=config)
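        # The per-input scales are only consumed when exposureScaling is
        # "InputList"; dropping the connection below means the quantum
        # does not require a cpScales dataset in the other modes.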

        if config and config.exposureScaling != "InputList":
            self.inputs.discard("inputScales")


# CalibCombineConfig/CalibCombineTask from pipe_base/constructCalibs.py
class CalibCombineConfig(pipeBase.PipelineTaskConfig,
                         pipelineConnections=CalibCombineConnections):
    """Configuration for combining calib exposures.
    """

    calibrationType = pexConfig.Field(
        dtype=str,
        default="calibration",
        doc="Name of calibration to be generated.",
    )

    exposureScaling = pexConfig.ChoiceField(
        dtype=str,
        allowed={
            "Unity": "Do not scale inputs. Scale factor is 1.0.",
            "ExposureTime": "Scale inputs by their exposure time.",
            "DarkTime": "Scale inputs by their dark time.",
            "MeanStats": "Scale inputs based on their mean values.",
            "InputList": "Scale inputs based on a list of values.",
        },
        default="Unity",
        doc="Scaling to be applied to each input exposure.",
    )
    scalingLevel = pexConfig.ChoiceField(
        dtype=str,
        allowed={
            "DETECTOR": "Scale by detector.",
            "AMP": "Scale by amplifier.",
        },
        default="DETECTOR",
        doc="Region to scale.",
    )
    maxVisitsToCalcErrorFromInputVariance = pexConfig.Field(
        dtype=int,
        default=5,
        doc="Maximum number of visits to estimate variance from input variance, not per-pixel spread",
    )
    subregionSize = pexConfig.ListField(
        dtype=int,
        doc="Width, height of subregion size.",
        length=2,
        # This is 200 rows for all detectors smaller than 10k in width.
        default=(10000, 200),
    )

    doVignette = pexConfig.Field(
        dtype=bool,
        default=False,
        doc="Copy vignette polygon to output and censor vignetted pixels?"
    )

    mask = pexConfig.ListField(
        dtype=str,
        default=["SAT", "DETECTED", "INTRP"],
        doc="Mask planes to respect",
    )
    combine = pexConfig.Field(
        dtype=str,
        default="MEANCLIP",
        doc="Statistic name to use for combination (from `~lsst.afw.math.Property`)",
    )
    clip = pexConfig.Field(
        dtype=float,
        default=3.0,
        doc="Clipping threshold for combination",
    )
    nIter = pexConfig.Field(
        dtype=int,
        default=3,
        doc="Clipping iterations for combination",
    )
    stats = pexConfig.ConfigurableField(
        target=CalibStatsTask,
        doc="Background statistics configuration",
    )


class CalibCombineTask(pipeBase.PipelineTask,
                       pipeBase.CmdLineTask):
    """Task to combine calib exposures."""

    ConfigClass = CalibCombineConfig
    _DefaultName = "cpCombine"

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.makeSubtask("stats")

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        inputs = butlerQC.get(inputRefs)

        dimensions = [expHandle.dataId.byName() for expHandle in inputRefs.inputExpHandles]
        inputs["inputDims"] = dimensions

        outputs = self.run(**inputs)
        butlerQC.put(outputs, outputRefs)

    def run(self, inputExpHandles, inputScales=None, inputDims=None):
        """Combine calib exposures for a single detector.

        Parameters
        ----------
        inputExpHandles : `list` [`lsst.daf.butler.DeferredDatasetHandle`]
            Input list of exposure handles to combine.
        inputScales : `dict` [`dict` [`dict` [`float`]]], optional
            Dictionary of scales, indexed by detector (`int`),
            amplifier (`str`), and exposure (`int`).  Used when
            ``exposureScaling`` is set to "InputList".
        inputDims : `list` [`dict`], optional
            List of dictionaries of input data dimensions/values.
            Each list entry should contain:

            ``"exposure"``
                exposure id value (`int`)
            ``"detector"``
                detector id value (`int`)

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The results struct containing:

            ``outputData``
                Final combined exposure generated from the inputs
                (`lsst.afw.image.Exposure`).

        Raises
        ------
        RuntimeError
            Raised if no input data is found, or if
            ``config.exposureScaling`` is "InputList" and a necessary
            scale was not found.
        """
        width, height = self.getDimensions(inputExpHandles)
        stats = afwMath.StatisticsControl(self.config.clip, self.config.nIter,
                                          afwImage.Mask.getPlaneBitMask(self.config.mask))
        numExps = len(inputExpHandles)
        if numExps < 1:
            raise RuntimeError("No valid input data")
        if numExps < self.config.maxVisitsToCalcErrorFromInputVariance:
            stats.setCalcErrorFromInputVariance(True)
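        # With only a few inputs the per-pixel spread is a poor error
        # estimate, so propagate the input variance planes instead
        # (see maxVisitsToCalcErrorFromInputVariance).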

        inputDetector = inputExpHandles[0].get(component="detector")

        # Create output exposure for combined data.
        combined = afwImage.MaskedImageF(width, height)
        combinedExp = afwImage.makeExposure(combined)

        # Apply scaling:
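        # Each input is divided by its scale before stacking, so the
        # combined image is in "per scale unit" terms (e.g. counts per
        # second when scaling by exposure time).  For the "InputList"
        # option, the indexing below implies a structure like
        #     {"expScale": {detectorId: {exposureId: scale}}}              # DETECTOR level
        #     {"expScale": {detectorId: {ampName: {exposureId: scale}}}}   # AMP level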

        expScales = []
        if inputDims is None:
            inputDims = [dict() for i in inputExpHandles]

        for index, (expHandle, dims) in enumerate(zip(inputExpHandles, inputDims)):
            scale = 1.0
            visitInfo = expHandle.get(component="visitInfo")
            if self.config.exposureScaling == "ExposureTime":
                scale = visitInfo.getExposureTime()
            elif self.config.exposureScaling == "DarkTime":
                scale = visitInfo.getDarkTime()
            elif self.config.exposureScaling == "MeanStats":
                # Note: there may be a bug freeing memory here. TBD.
                exp = expHandle.get()
                scale = self.stats.run(exp)
                del exp
            elif self.config.exposureScaling == "InputList":
                visitId = dims.get("exposure", None)
                detectorId = dims.get("detector", None)
                if visitId is None or detectorId is None:
                    raise RuntimeError(f"Could not identify scaling for input {index} ({dims})")
                if detectorId not in inputScales["expScale"]:
                    raise RuntimeError(f"Could not identify a scaling for input {index}"
                                       f" detector {detectorId}")

                if self.config.scalingLevel == "DETECTOR":
                    if visitId not in inputScales["expScale"][detectorId]:
                        raise RuntimeError(f"Could not identify a scaling for input {index}"
                                           f" detector {detectorId} visit {visitId}")
                    scale = inputScales["expScale"][detectorId][visitId]
                elif self.config.scalingLevel == "AMP":
                    scale = [inputScales["expScale"][detectorId][amp.getName()][visitId]
                             for amp in inputDetector]
                else:
                    raise RuntimeError(f"Unknown scaling level: {self.config.scalingLevel}")
            elif self.config.exposureScaling == "Unity":
                scale = 1.0
            else:
                raise RuntimeError(f"Unknown scaling type: {self.config.exposureScaling}.")

            expScales.append(scale)
            self.log.info("Scaling input %d by %s", index, scale)

        self.combine(combinedExp, inputExpHandles, expScales, stats)

        self.interpolateNans(combined)

        if self.config.doVignette:
            polygon = inputExpHandles[0].get(component="validPolygon")
            maskVignettedRegion(combined, polygon=polygon, vignetteValue=0.0)

        # Combine headers
        self.combineHeaders(inputExpHandles, combinedExp,
                            calibType=self.config.calibrationType, scales=expScales)

        # Set the detector
        combinedExp.setDetector(inputDetector)

        # Do we need to set a filter?
        filterLabel = inputExpHandles[0].get(component="filterLabel")
        self.setFilter(combinedExp, filterLabel)

        # Return
        return pipeBase.Struct(
            outputData=combinedExp,
        )

    def getDimensions(self, expHandleList):
        """Get dimensions of the inputs.

        Parameters
        ----------
        expHandleList : `list` [`lsst.daf.butler.DeferredDatasetHandle`]
            Exposure handles to check the sizes of.

        Returns
        -------
        width, height : `int`
            Unique set of input dimensions.
        """
        dimList = [expHandle.get(component="bbox").getDimensions() for expHandle in expHandleList]

        return self.getSize(dimList)

    def getSize(self, dimList):
        """Determine a consistent size, given a list of image sizes.

        Parameters
        ----------
        dimList : `list` [`tuple` [`int`, `int`]]
            List of dimensions.

        Raises
        ------
        RuntimeError
            If input dimensions are inconsistent.

        Returns
        -------
        width, height : `int`
            Common dimensions.
        """
        dim = set((w, h) for w, h in dimList)
        if len(dim) != 1:
            raise RuntimeError("Inconsistent dimensions: %s" % dim)
        return dim.pop()

    def applyScale(self, exposure, bbox=None, scale=None):
        """Apply scale to input exposure.

        This implementation applies a flux scaling: the input exposure is
        divided by the provided scale.

        Parameters
        ----------
        exposure : `lsst.afw.image.Exposure`
            Exposure to scale.
        bbox : `lsst.geom.Box2I`, optional
            BBox matching the segment of the exposure passed in.
        scale : `float` or `list` [`float`], optional
            Constant scale to divide the exposure by.
        """
        if scale is not None:
            mi = exposure.getMaskedImage()
            if isinstance(scale, list):
                # Create a realization of the per-amp scales as an
                # image we can take a subset of.  This may be slightly
                # slower than only populating the region we care
                # about, but this avoids needing to do arbitrary
                # numbers of offsets, etc.
                scaleExp = afwImage.MaskedImageF(exposure.getDetector().getBBox())
                for amp, ampScale in zip(exposure.getDetector(), scale):
                    scaleExp.image[amp.getBBox()] = ampScale
                scale = scaleExp[bbox]
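                # ``scale`` is now a MaskedImage cutout matching ``bbox``,
                # so the division below is applied pixel-by-pixel.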

            mi /= scale

    @staticmethod
    def _subBBoxIter(bbox, subregionSize):
        """Iterate over subregions of a bbox.

        Parameters
        ----------
        bbox : `lsst.geom.Box2I`
            Bounding box over which to iterate.
        subregionSize : `lsst.geom.Extent2I`
            Size of sub-bboxes.

        Yields
        ------
        subBBox : `lsst.geom.Box2I`
            Next sub-bounding box of size ``subregionSize`` or
            smaller; each ``subBBox`` is contained within ``bbox``, so
            it may be smaller than ``subregionSize`` at the edges of
            ``bbox``, but it will never be empty.
        """
        if bbox.isEmpty():
            raise RuntimeError("bbox %s is empty" % (bbox,))
        if subregionSize[0] < 1 or subregionSize[1] < 1:
            raise RuntimeError("subregionSize %s must be nonzero" % (subregionSize,))

        for rowShift in range(0, bbox.getHeight(), subregionSize[1]):
            for colShift in range(0, bbox.getWidth(), subregionSize[0]):
                subBBox = geom.Box2I(bbox.getMin() + geom.Extent2I(colShift, rowShift), subregionSize)
                subBBox.clip(bbox)
                if subBBox.isEmpty():
                    raise RuntimeError("Bug: empty bbox! bbox=%s, subregionSize=%s, "
                                       "colShift=%s, rowShift=%s" %
                                       (bbox, subregionSize, colShift, rowShift))
                yield subBBox

    def combine(self, target, expHandleList, expScaleList, stats):
        """Combine multiple images.

        Parameters
        ----------
        target : `lsst.afw.image.Exposure`
            Output exposure to construct.
        expHandleList : `list` [`lsst.daf.butler.DeferredDatasetHandle`]
            Input exposure handles to combine.
        expScaleList : `list` [`float`]
            List of scales to apply to each input image.
        stats : `lsst.afw.math.StatisticsControl`
            Control explaining how to combine the input images.
        """
        combineType = afwMath.stringToStatisticsProperty(self.config.combine)

        subregionSizeArr = self.config.subregionSize
        subregionSize = geom.Extent2I(subregionSizeArr[0], subregionSizeArr[1])
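        # Work subregion by subregion so that only a small slice of each
        # (deferred-load) input is in memory at once: each slice is scaled,
        # stacked with ``combineType``, and assigned into the matching
        # region of the output.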

        for subBbox in self._subBBoxIter(target.getBBox(), subregionSize):
            images = []
            for expHandle, expScale in zip(expHandleList, expScaleList):
                inputExp = expHandle.get(parameters={"bbox": subBbox})
                self.applyScale(inputExp, subBbox, expScale)
                images.append(inputExp.getMaskedImage())

            combinedSubregion = afwMath.statisticsStack(images, combineType, stats)
            target.maskedImage.assign(combinedSubregion, subBbox)

    def combineHeaders(self, expHandleList, calib, calibType="CALIB", scales=None):
        """Combine input headers to determine the set of common headers,
        supplemented by calibration inputs.

        Parameters
        ----------
        expHandleList : `list` [`lsst.daf.butler.DeferredDatasetHandle`]
            Input list of exposure handles to combine.
        calib : `lsst.afw.image.Exposure`
            Output calibration to construct headers for.
        calibType : `str`, optional
            OBSTYPE the output should claim.
        scales : `list` [`float`], optional
            Scale values applied to each input to record.

        Returns
        -------
        header : `lsst.daf.base.PropertyList`
            Constructed header.
        """
        # Header
        header = calib.getMetadata()
        header.set("OBSTYPE", calibType)

        # Keywords we care about
        comments = {"TIMESYS": "Time scale for all dates",
                    "DATE-OBS": "Start date of earliest input observation",
                    "MJD-OBS": "[d] Start MJD of earliest input observation",
                    "DATE-END": "End date of latest input observation",
                    "MJD-END": "[d] End MJD of latest input observation",
                    "MJD-AVG": "[d] MJD midpoint of all input observations",
                    "DATE-AVG": "Midpoint date of all input observations"}

        # Creation date
        now = time.localtime()
        calibDate = time.strftime("%Y-%m-%d", now)
        calibTime = time.strftime("%X %Z", now)
        header.set("CALIB_CREATE_DATE", calibDate)
        header.set("CALIB_CREATE_TIME", calibTime)

        # Merge input headers
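        # merge_headers with mode="drop" is expected to keep only keywords
        # whose values agree across all inputs, dropping conflicting ones.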

        inputHeaders = [expHandle.get(component="metadata") for expHandle in expHandleList]
        merged = merge_headers(inputHeaders, mode="drop")

        # Scan the first header for items that were dropped due to
        # conflict, and replace them.
        for k, v in merged.items():
            if k not in header:
                md = inputHeaders[0]
                comment = md.getComment(k) if k in md else None
                header.set(k, v, comment=comment)

        # Construct list of visits
        visitInfoList = [expHandle.get(component="visitInfo") for expHandle in expHandleList]
        for i, visit in enumerate(visitInfoList):
            if visit is None:
                continue
            header.set("CPP_INPUT_%d" % (i,), visit.id)
            header.set("CPP_INPUT_DATE_%d" % (i,), str(visit.getDate()))
            header.set("CPP_INPUT_EXPT_%d" % (i,), visit.getExposureTime())
            if scales is not None:
                header.set("CPP_INPUT_SCALE_%d" % (i,), scales[i])

        # Not yet working: DM-22302
        # Create an observation group so we can add some standard headers
        # independent of the form in the input files.
        # Use try block in case we are dealing with unexpected data headers.
        try:
            group = ObservationGroup(visitInfoList, pedantic=False)
        except Exception:
            self.log.warning("Exception making an obs group for headers. Continuing.")
            # Fall back to setting a DATE-OBS from the calibDate
            dateCards = {"DATE-OBS": "{}T00:00:00.00".format(calibDate)}
            comments["DATE-OBS"] = "Date of start of day of calibration midpoint"
        else:
            oldest, newest = group.extremes()
            dateCards = dates_to_fits(oldest.datetime_begin, newest.datetime_end)

        for k, v in dateCards.items():
            header.set(k, v, comment=comments.get(k, None))

        return header

    def interpolateNans(self, exp):
        """Interpolate over NANs in the combined image.

        NANs can result from masked areas on the CCD.  We don't want
        them getting into our science images, so we replace them with
        the median of the image.

        Parameters
        ----------
        exp : `lsst.afw.image.Exposure`
            Exposure to check for NaNs.
        """
        array = exp.getImage().getArray()
        bad = np.isnan(array)
        if np.any(bad):
            median = np.median(array[np.logical_not(bad)])
            count = np.sum(bad)
            array[bad] = median
            self.log.warning("Found and fixed %s NAN pixels", count)

    @staticmethod
    def setFilter(exp, filterLabel):
        """Dummy function that will not assign a filter.

        Parameters
        ----------
        exp : `lsst.afw.image.Exposure`
            Exposure to assign filter to.
        filterLabel : `lsst.afw.image.FilterLabel`
            Filter to assign.
        """
        pass


# Create versions of the Connections, Config, and Task that support
# filter constraints.
class CalibCombineByFilterConnections(CalibCombineConnections,
                                      dimensions=("instrument", "detector", "physical_filter")):
    inputScales = cT.Input(
        name="cpFilterScales",
        doc="Input scale factors to use.",
        storageClass="StructuredDataDict",
        dimensions=("instrument", "physical_filter"),
        multiple=False,
    )

    outputData = cT.Output(
        name="cpFilterProposal",
        doc="Output combined proposed calibration to be validated and certified.",
        storageClass="ExposureF",
        dimensions=("instrument", "detector", "physical_filter"),
        isCalibration=True,
    )

    def __init__(self, *, config=None):
        super().__init__(config=config)

        if config and config.exposureScaling != "InputList":
            self.inputs.discard("inputScales")


class CalibCombineByFilterConfig(CalibCombineConfig,
                                 pipelineConnections=CalibCombineByFilterConnections):
    pass


class CalibCombineByFilterTask(CalibCombineTask):
    """Task to combine calib exposures."""

    ConfigClass = CalibCombineByFilterConfig
    _DefaultName = "cpFilterCombine"

    @staticmethod
    def setFilter(exp, filterLabel):
        """Set the filter label on the output exposure, if one is provided.

        Parameters
        ----------
        exp : `lsst.afw.image.Exposure`
            Exposure to assign filter to.
        filterLabel : `lsst.afw.image.FilterLabel`
            Filter to assign.
        """
        if filterLabel:
            exp.setFilter(filterLabel)