Coverage for python/lsst/cp/pipe/linearity.py: 12%

260 statements  

coverage.py v7.5.1, created at 2024-05-09 04:05 -0700

# This file is part of cp_pipe.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#

__all__ = ["LinearitySolveTask", "LinearitySolveConfig"]

import numpy as np
import lsst.afw.image as afwImage
import lsst.pipe.base as pipeBase
import lsst.pipe.base.connectionTypes as cT
import lsst.pex.config as pexConfig

from lsstDebug import getDebugFrame
from lsst.ip.isr import (Linearizer, IsrProvenance)

from .utils import (funcPolynomial, irlsFit, AstierSplineLinearityFitter,
                    extractCalibDate)

def ptcLookup(datasetType, registry, quantumDataId, collections):
    """Butler lookup function to allow PTC to be found.

    Parameters
    ----------
    datasetType : `lsst.daf.butler.DatasetType`
        Dataset type to look up.
    registry : `lsst.daf.butler.Registry`
        Registry for the data repository being searched.
    quantumDataId : `lsst.daf.butler.DataCoordinate`
        Data ID for the quantum of the task this dataset will be passed to.
        This must include an "instrument" key, and should also include any
        keys that are present in ``datasetType.dimensions``. If it has an
        ``exposure`` or ``visit`` key, that's a sign that this function is
        not actually needed, as those come with the temporal information that
        would allow a real validity-range lookup.
    collections : `lsst.daf.butler.registry.CollectionSearch`
        Collections passed by the user when generating a QuantumGraph. Ignored
        by this function (see notes below).

    Returns
    -------
    refs : `list` [`DatasetRef`]
        A zero- or single-element list containing the matching
        dataset, if one was found.

    Raises
    ------
    RuntimeError
        Raised if more than one PTC reference is found.
    """

    refs = list(registry.queryDatasets(datasetType, dataId=quantumDataId, collections=collections,
                                       findFirst=False))
    if len(refs) >= 2:
        raise RuntimeError("Too many PTC connections found. Incorrect collections supplied?")

    return refs
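# Editorial note (illustrative sketch): ``ptcLookup`` implements the butler
# lookup-function interface so the PTC prerequisite can be resolved without
# exposure/visit temporal information. It is attached via the
# ``lookupFunction`` argument of the ``inputPtc`` connection, as in the full
# definition inside LinearitySolveConnections below:
#
#     inputPtc = cT.PrerequisiteInput(
#         name="ptc",
#         doc="Input PTC dataset.",
#         storageClass="PhotonTransferCurveDataset",
#         dimensions=("instrument", "detector"),
#         isCalibration=True,
#         lookupFunction=ptcLookup,
#     )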

class LinearitySolveConnections(pipeBase.PipelineTaskConnections,
                                dimensions=("instrument", "detector")):
    dummy = cT.Input(
        name="raw",
        doc="Dummy exposure.",
        storageClass='Exposure',
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
        deferLoad=True,
    )

    camera = cT.PrerequisiteInput(
        name="camera",
        doc="Camera Geometry definition.",
        storageClass="Camera",
        dimensions=("instrument", ),
        isCalibration=True,
    )

    inputPtc = cT.PrerequisiteInput(
        name="ptc",
        doc="Input PTC dataset.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "detector"),
        isCalibration=True,
        lookupFunction=ptcLookup,
    )

    inputPhotodiodeCorrection = cT.Input(
        name="pdCorrection",
        doc="Input photodiode correction.",
        storageClass="IsrCalib",
        dimensions=("instrument", ),
        isCalibration=True,
    )

    outputLinearizer = cT.Output(
        name="linearity",
        doc="Output linearity measurements.",
        storageClass="Linearizer",
        dimensions=("instrument", "detector"),
        isCalibration=True,
    )

    def __init__(self, *, config=None):
        if not config.applyPhotodiodeCorrection:
            del self.inputPhotodiodeCorrection

class LinearitySolveConfig(pipeBase.PipelineTaskConfig,
                           pipelineConnections=LinearitySolveConnections):
    """Configuration for solving the linearity from PTC dataset.
    """
    linearityType = pexConfig.ChoiceField(
        dtype=str,
        doc="Type of linearizer to construct.",
        default="Squared",
        allowed={
            "LookupTable": "Create a lookup table solution.",
            "Polynomial": "Create an arbitrary polynomial solution.",
            "Squared": "Create a single order squared solution.",
            "Spline": "Create a spline based solution.",
            "None": "Create a dummy solution.",
        }
    )
    polynomialOrder = pexConfig.RangeField(
        dtype=int,
        doc="Degree of polynomial to fit. Must be at least 2.",
        default=3,
        min=2,
    )
    splineKnots = pexConfig.Field(
        dtype=int,
        doc="Number of spline knots to use in fit.",
        default=10,
    )
    maxLookupTableAdu = pexConfig.Field(
        dtype=int,
        doc="Maximum DN value for a LookupTable linearizer.",
        default=2**18,
    )
    maxLinearAdu = pexConfig.Field(
        dtype=float,
        doc="Maximum DN value to use to estimate linear term.",
        default=20000.0,
    )
    minLinearAdu = pexConfig.Field(
        dtype=float,
        doc="Minimum DN value to use to estimate linear term.",
        default=30.0,
    )
    nSigmaClipLinear = pexConfig.Field(
        dtype=float,
        doc="Maximum deviation from linear solution for Poissonian noise.",
        default=5.0,
    )
    ignorePtcMask = pexConfig.Field(
        dtype=bool,
        doc="Ignore the expIdMask set by the PTC solver?",
        default=False,
    )
    usePhotodiode = pexConfig.Field(
        dtype=bool,
        doc="Use the photodiode info instead of the raw expTimes?",
        default=False,
    )
    applyPhotodiodeCorrection = pexConfig.Field(
        dtype=bool,
        doc="Calculate and apply a correction to the photodiode readings?",
        default=False,
    )
    splineGroupingColumn = pexConfig.Field(
        dtype=str,
        doc="Column to use for grouping together points for Spline mode, to allow "
            "for different proportionality constants. If not set, no grouping "
            "will be done.",
        default=None,
        optional=True,
    )
    splineGroupingMinPoints = pexConfig.Field(
        dtype=int,
        doc="Minimum number of linearity points to allow grouping together points "
            "for Spline mode with splineGroupingColumn. This configuration is here "
            "to prevent misuse of the Spline code to avoid over-fitting.",
        default=100,
    )
    splineFitMinIter = pexConfig.Field(
        dtype=int,
        doc="Minimum number of iterations for spline fit.",
        default=3,
    )
    splineFitMaxIter = pexConfig.Field(
        dtype=int,
        doc="Maximum number of iterations for spline fit.",
        default=20,
    )
    splineFitMaxRejectionPerIteration = pexConfig.Field(
        dtype=int,
        doc="Maximum number of rejections per iteration for spline fit.",
        default=5,
    )
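# Editorial example (illustrative only, not part of the pipeline): overriding
# this configuration for a photodiode-based spline fit, e.g. from Python or in
# a pipeline config override. All field names are defined above; the grouping
# column name is hypothetical and must exist in the PTC auxValues.
#
#     config = LinearitySolveConfig()
#     config.linearityType = "Spline"
#     config.splineKnots = 10
#     config.usePhotodiode = True
#     config.splineGroupingColumn = "CCOBCURR"  # hypothetical aux column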

class LinearitySolveTask(pipeBase.PipelineTask):
    """Fit the linearity from the PTC dataset.
    """

    ConfigClass = LinearitySolveConfig
    _DefaultName = 'cpLinearitySolve'

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        """Ensure that the input and output dimensions are passed along.

        Parameters
        ----------
        butlerQC : `lsst.daf.butler.QuantumContext`
            Butler to operate on.
        inputRefs : `lsst.pipe.base.InputQuantizedConnection`
            Input data refs to load.
        outputRefs : `lsst.pipe.base.OutputQuantizedConnection`
            Output data refs to persist.
        """

        inputs = butlerQC.get(inputRefs)

        # Use the dimensions to set calib/provenance information.
        inputs['inputDims'] = dict(inputRefs.inputPtc.dataId.required)

        # Add calibration provenance info to header.
        kwargs = dict()
        reference = getattr(inputRefs, "inputPtc", None)

        if reference is not None and hasattr(reference, "run"):
            runKey = "PTC_RUN"
            runValue = reference.run
            idKey = "PTC_UUID"
            idValue = str(reference.id)
            dateKey = "PTC_DATE"
            calib = inputs.get("inputPtc", None)
            dateValue = extractCalibDate(calib)

            kwargs[runKey] = runValue
            kwargs[idKey] = idValue
            kwargs[dateKey] = dateValue

            self.log.info("Using " + str(reference.run))

        outputs = self.run(**inputs)
        outputs.outputLinearizer.updateMetadata(setDate=False, **kwargs)

        butlerQC.put(outputs, outputRefs)

    def run(self, inputPtc, dummy, camera, inputDims,
            inputPhotodiodeCorrection=None):
        """Fit non-linearity to PTC data, returning the correct Linearizer
        object.

        Parameters
        ----------
        inputPtc : `lsst.ip.isr.PtcDataset`
            Pre-measured PTC dataset.
        dummy : `list` [`lsst.afw.image.Exposure`]
            The exposure used to select the appropriate PTC dataset.
            In almost all circumstances, one of the input exposures
            used to generate the PTC dataset is the best option.
        camera : `lsst.afw.cameraGeom.Camera`
            Camera geometry.
        inputDims : `lsst.daf.butler.DataCoordinate` or `dict`
            DataIds to use to populate the output calibration.
        inputPhotodiodeCorrection : `lsst.ip.isr.PhotodiodeCorrection`, optional
            Pre-measured photodiode correction used in the case when
            applyPhotodiodeCorrection=True.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The results struct containing:

            ``outputLinearizer``
                Final linearizer calibration (`lsst.ip.isr.Linearizer`).
            ``outputProvenance``
                Provenance data for the new calibration
                (`lsst.ip.isr.IsrProvenance`).

        Notes
        -----
        This task currently fits only polynomial-defined corrections,
        where the correction coefficients are defined such that:
        :math:`corrImage = uncorrImage + \\sum_i c_i uncorrImage^{2 + i}`.
        These :math:`c_i` are defined in terms of the direct polynomial fit:
        :math:`meanVector \\sim P(x=timeVector) = \\sum_j k_j x^j`
        such that :math:`c_{j-2} = -k_j/(k_1^j)` in units of DN^(1-j) (cf.
        Eq. 37 of arXiv:2003.05978). The `config.polynomialOrder` or
        `config.splineKnots` defines the maximum order of :math:`x^j` to fit.
        As :math:`k_0` and :math:`k_1` are degenerate with bias level and gain,
        they are not included in the non-linearity correction.
        """
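        # Editorial sketch (illustrative only, not used by the task): the
        # coefficient conversion described in the Notes above, for a
        # hypothetical quadratic fit k = [k0, k1, k2].
        #
        #     import numpy as np
        #     k = np.array([10.0, 1500.0, -1.2e-6])  # hypothetical k_j values
        #     c = np.array([-coeff / (k[1]**order)
        #                   for order, coeff in enumerate(k)])[2:]
        #     # c[0] == 1.2e-6 / 1500.0**2, and the correction is then
        #     # corrected = flux + c[0] * flux**2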

        if len(dummy) == 0:
            self.log.warning("No dummy exposure found.")

        detector = camera[inputDims['detector']]
        if self.config.linearityType == 'LookupTable':
            table = np.zeros((len(detector), self.config.maxLookupTableAdu), dtype=np.float32)
            tableIndex = 0
        else:
            table = None
            tableIndex = None  # This will fail if we increment it.

        # Initialize the linearizer.
        linearizer = Linearizer(detector=detector, table=table, log=self.log)
        linearizer.updateMetadataFromExposures([inputPtc])
        if self.config.usePhotodiode and self.config.applyPhotodiodeCorrection:
            abscissaCorrections = inputPhotodiodeCorrection.abscissaCorrections

        if self.config.linearityType == 'Spline':
            if self.config.splineGroupingColumn is not None:
                if self.config.splineGroupingColumn not in inputPtc.auxValues:
                    raise ValueError(f"Config requests grouping by {self.config.splineGroupingColumn}, "
                                     "but this column is not available in inputPtc.auxValues.")
                groupingValue = inputPtc.auxValues[self.config.splineGroupingColumn]
            else:
                groupingValue = np.ones(len(inputPtc.rawMeans[inputPtc.ampNames[0]]), dtype=int)
            # We set this to have a value to fill the bad amps.
            fitOrder = self.config.splineKnots
        else:
            fitOrder = self.config.polynomialOrder

        for i, amp in enumerate(detector):
            ampName = amp.getName()
            if ampName in inputPtc.badAmps:
                linearizer = self.fillBadAmp(linearizer, fitOrder, inputPtc, amp)
                self.log.warning("Amp %s in detector %s has no usable PTC information. Skipping!",
                                 ampName, detector.getName())
                continue

            # Check for too few points.
            if self.config.linearityType == "Spline" \
               and self.config.splineGroupingColumn is not None \
               and len(inputPtc.inputExpIdPairs[ampName]) < self.config.splineGroupingMinPoints:
                raise RuntimeError(
                    "The input PTC has too few points to reliably run with PD grouping. "
                    "The recommended course of action is to set splineGroupingColumn to None. "
                    "If you really know what you are doing, you may reduce "
                    "config.splineGroupingMinPoints.")

            if (len(inputPtc.expIdMask[ampName]) == 0) or self.config.ignorePtcMask:
                self.log.warning("Mask not found for %s in detector %s in fit. Using all points.",
                                 ampName, detector.getName())
                mask = np.ones(len(inputPtc.expIdMask[ampName]), dtype=bool)
            else:
                mask = inputPtc.expIdMask[ampName].copy()

            if self.config.usePhotodiode:
                modExpTimes = inputPtc.photoCharges[ampName].copy()
                # Make sure any exposure pairs that do not have photodiode data
                # are masked.
                mask[~np.isfinite(modExpTimes)] = False

                # Get the photodiode correction.
                if self.config.applyPhotodiodeCorrection:
                    for j, pair in enumerate(inputPtc.inputExpIdPairs[ampName]):
                        try:
                            correction = abscissaCorrections[str(pair)]
                        except KeyError:
                            correction = 0.0
                        modExpTimes[j] += correction

                inputAbscissa = modExpTimes
            else:
                inputAbscissa = inputPtc.rawExpTimes[ampName].copy()

            inputOrdinate = inputPtc.rawMeans[ampName].copy()

            mask &= (inputOrdinate < self.config.maxLinearAdu)
            mask &= (inputOrdinate > self.config.minLinearAdu)

            if mask.sum() < 2:
                linearizer = self.fillBadAmp(linearizer, fitOrder, inputPtc, amp)
                self.log.warning("Amp %s in detector %s does not have enough points for the fit. "
                                 "Skipping!", ampName, detector.getName())
                continue

            if self.config.linearityType != 'Spline':
                linearFit, linearFitErr, chiSq, weights = irlsFit([0.0, 100.0], inputAbscissa[mask],
                                                                  inputOrdinate[mask], funcPolynomial)

                # Convert this proxy-to-flux fit into an expected linear flux.
                linearOrdinate = linearFit[0] + linearFit[1] * inputAbscissa
                # Exclude low end outliers.
                # This is compared to the original values.
                threshold = self.config.nSigmaClipLinear * np.sqrt(abs(inputOrdinate))

                mask[np.abs(inputOrdinate - linearOrdinate) >= threshold] = False

                if mask.sum() < 2:
                    linearizer = self.fillBadAmp(linearizer, fitOrder, inputPtc, amp)
                    self.log.warning("Amp %s in detector %s does not have enough points in the linear "
                                     "ordinate. Skipping!", ampName, detector.getName())
                    continue

                self.debugFit('linearFit', inputAbscissa, inputOrdinate, linearOrdinate, mask, ampName)

            # Do fits
            if self.config.linearityType in ['Polynomial', 'Squared', 'LookupTable']:
                polyFit = np.zeros(fitOrder + 1)
                polyFit[1] = 1.0
                polyFit, polyFitErr, chiSq, weights = irlsFit(polyFit, linearOrdinate[mask],
                                                              inputOrdinate[mask], funcPolynomial)

                # Truncate the polynomial fit to the squared term.
                k1 = polyFit[1]
                linearityCoeffs = np.array(
                    [-coeff/(k1**order) for order, coeff in enumerate(polyFit)]
                )[2:]
                significant = np.where(np.abs(linearityCoeffs) > 1e-10)
                self.log.info("Significant polynomial fits: %s", significant)

                modelOrdinate = funcPolynomial(polyFit, linearOrdinate)

                self.debugFit(
                    'polyFit',
                    inputAbscissa[mask],
                    inputOrdinate[mask],
                    modelOrdinate[mask],
                    None,
                    ampName,
                )

                if self.config.linearityType == 'Squared':
                    # The first term is the squared term.
                    linearityCoeffs = linearityCoeffs[0: 1]
                elif self.config.linearityType == 'LookupTable':
                    # Use linear part to get time at which signal is
                    # maxLookupTableAdu DN.
                    tMax = (self.config.maxLookupTableAdu - polyFit[0])/polyFit[1]
                    timeRange = np.linspace(0, tMax, self.config.maxLookupTableAdu)
                    signalIdeal = polyFit[0] + polyFit[1]*timeRange
                    signalUncorrected = funcPolynomial(polyFit, timeRange)
                    lookupTableRow = signalIdeal - signalUncorrected  # LinearizerLookupTable has correction

                    linearizer.tableData[tableIndex, :] = lookupTableRow
                    linearityCoeffs = np.array([tableIndex, 0])
                    tableIndex += 1

            elif self.config.linearityType in ['Spline']:
                # This is a spline fit with photodiode data based on a model
                # from Pierre Astier.
                # This model fits a spline with (optional) nuisance parameters
                # to allow for different linearity coefficients with different
                # photodiode settings. The minimization is a least-squares
                # fit with the residual of
                # Sum[(S(mu_i) + mu_i)/(k_j * D_i) - 1]**2, where S(mu_i) is
                # an Akima Spline function of mu_i, the observed flat-pair
                # mean; D_i is the photodiode measurement corresponding to
                # that flat pair; and k_j is a constant of proportionality
                # that carries the index j because it is allowed to
                # be different based on different photodiode settings (e.g.
                # CCOBCURR).

                # The fit has additional constraints to ensure that the spline
                # goes through the (0, 0) point, as well as a normalization
                # condition so that the average of the spline over the full
                # range is 0. The normalization ensures that the spline only
                # fits deviations from linearity, rather than the linear
                # function itself which is degenerate with the gain.

                nodes = np.linspace(0.0, np.max(inputOrdinate[mask]), self.config.splineKnots)

                fitter = AstierSplineLinearityFitter(
                    nodes,
                    groupingValue,
                    inputAbscissa,
                    inputOrdinate,
                    mask=mask,
                    log=self.log,
                )
                p0 = fitter.estimate_p0()
                pars = fitter.fit(
                    p0,
                    min_iter=self.config.splineFitMinIter,
                    max_iter=self.config.splineFitMaxIter,
                    max_rejection_per_iteration=self.config.splineFitMaxRejectionPerIteration,
                    n_sigma_clip=self.config.nSigmaClipLinear,
                )

                # Confirm that the first parameter is 0, and set it to
                # exactly zero.
                if not np.isclose(pars[0], 0):
                    raise RuntimeError("Programmer error! First spline parameter must "
                                       "be consistent with zero.")
                pars[0] = 0.0

                linearityCoeffs = np.concatenate([nodes, pars[0: len(nodes)]])
                linearFit = np.array([0.0, np.mean(pars[len(nodes):])])

                # We modify the inputOrdinate according to the linearity fits
                # here, for proper residual computation.
                for j, group_index in enumerate(fitter.group_indices):
                    inputOrdinate[group_index] /= (pars[len(nodes) + j] / linearFit[1])

                linearOrdinate = linearFit[1] * inputOrdinate
                # For the spline fit, reuse the "polyFit -> fitParams"
                # field to record the linear coefficients for the groups.
                polyFit = pars[len(nodes):]
                polyFitErr = np.zeros_like(polyFit)
                chiSq = np.nan

                # Update mask based on what the fitter rejected.
                mask = fitter.mask

            else:
                polyFit = np.zeros(1)
                polyFitErr = np.zeros(1)
                chiSq = np.nan
                linearityCoeffs = np.zeros(1)

            linearizer.linearityType[ampName] = self.config.linearityType
            linearizer.linearityCoeffs[ampName] = linearityCoeffs
            linearizer.linearityBBox[ampName] = amp.getBBox()
            linearizer.fitParams[ampName] = polyFit
            linearizer.fitParamsErr[ampName] = polyFitErr
            linearizer.fitChiSq[ampName] = chiSq
            linearizer.linearFit[ampName] = linearFit

            image = afwImage.ImageF(len(inputOrdinate), 1)
            image.array[:, :] = inputOrdinate
            linearizeFunction = linearizer.getLinearityTypeByName(linearizer.linearityType[ampName])
            linearizeFunction()(
                image,
                **{'coeffs': linearizer.linearityCoeffs[ampName],
                   'table': linearizer.tableData,
                   'log': linearizer.log}
            )
            linearizeModel = image.array[0, :]

            # The residuals that we record are the final residuals compared to
            # a linear model, after everything has been linearized.
            if mask.sum() < 2:
                self.log.warning("Amp %s in detector %s does not have enough points in the linear "
                                 "ordinate for residuals. Skipping!", ampName, detector.getName())
                residuals = np.full_like(linearizeModel, np.nan)
            else:
                postLinearFit, _, _, _ = irlsFit(
                    [0.0, 100.0],
                    inputAbscissa[mask],
                    linearizeModel[mask],
                    funcPolynomial,
                )
                residuals = linearizeModel - (postLinearFit[0] + postLinearFit[1] * inputAbscissa)
                # We set masked residuals to nan.
                residuals[~mask] = np.nan

            linearizer.fitResiduals[ampName] = residuals

            self.debugFit(
                'solution',
                inputOrdinate[mask],
                linearOrdinate[mask],
                linearizeModel[mask],
                None,
                ampName,
            )

        linearizer.hasLinearity = True
        linearizer.validate()
        linearizer.updateMetadata(camera=camera, detector=detector, filterName='NONE')
        linearizer.updateMetadata(setDate=True, setCalibId=True)
        provenance = IsrProvenance(calibType='linearizer')

        return pipeBase.Struct(
            outputLinearizer=linearizer,
            outputProvenance=provenance,
        )

    def fillBadAmp(self, linearizer, fitOrder, inputPtc, amp):
        # Need to fill the linearizer with empty values
        # if the amp is non-functional.
        ampName = amp.getName()
        nEntries = 1
        pEntries = 1
        if self.config.linearityType in ['Polynomial']:
            nEntries = fitOrder + 1
            pEntries = fitOrder + 1
        elif self.config.linearityType in ['Spline']:
            nEntries = fitOrder * 2
        elif self.config.linearityType in ['Squared', 'None']:
            nEntries = 1
            pEntries = fitOrder + 1
        elif self.config.linearityType in ['LookupTable']:
            nEntries = 2
            pEntries = fitOrder + 1

        linearizer.linearityType[ampName] = "None"
        linearizer.linearityCoeffs[ampName] = np.zeros(nEntries)
        linearizer.linearityBBox[ampName] = amp.getBBox()
        linearizer.fitParams[ampName] = np.zeros(pEntries)
        linearizer.fitParamsErr[ampName] = np.zeros(pEntries)
        linearizer.fitChiSq[ampName] = np.nan
        linearizer.fitResiduals[ampName] = np.zeros(len(inputPtc.expIdMask[ampName]))
        linearizer.linearFit[ampName] = np.zeros(2)
        return linearizer

    def debugFit(self, stepname, xVector, yVector, yModel, mask, ampName):
        """Debug method for linearity fitting.

        Parameters
        ----------
        stepname : `str`
            A label to use to check if we care to debug at a given
            line of code.
        xVector : `numpy.array`, (N,)
            The values to use as the independent variable in the
            linearity fit.
        yVector : `numpy.array`, (N,)
            The values to use as the dependent variable in the
            linearity fit.
        yModel : `numpy.array`, (N,)
            The values to use as the linearized result.
        mask : `numpy.array` [`bool`], (N,), optional
            A mask to indicate which entries of ``xVector`` and
            ``yVector`` to keep.
        ampName : `str`
            Amplifier name to look up linearity correction values.
        """
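        # Editorial note (illustrative sketch): these debug frames follow the
        # usual lsstDebug pattern. One way to enable them is a debug.py on the
        # import path that overrides lsstDebug.Info, mapping the step names
        # used below ('linearFit', 'polyFit', 'solution') to frame numbers;
        # the exact behavior of getDebugFrame is assumed, not defined here.
        #
        #     import lsstDebug
        #
        #     def DebugInfo(name):
        #         di = lsstDebug.getInfo(name)
        #         if name == "lsst.cp.pipe.linearity":
        #             di.display = dict(linearFit=1, polyFit=2, solution=3)
        #         return di
        #
        #     lsstDebug.Info = DebugInfo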

        frame = getDebugFrame(self._display, stepname)
        if frame:
            import matplotlib.pyplot as plt
            fig, axs = plt.subplots(2)

            if mask is None:
                mask = np.ones_like(xVector, dtype=bool)

            fig.suptitle(f"{stepname} {ampName} {self.config.linearityType}")
            if stepname == 'linearFit':
                axs[0].set_xlabel("Input Abscissa (time or mondiode)")
                axs[0].set_ylabel("Input Ordinate (flux)")
                axs[1].set_xlabel("Linear Ordinate (linear flux)")
                axs[1].set_ylabel("Flux Difference: (input - linear)")
            elif stepname in ('polyFit', 'splineFit'):
                axs[0].set_xlabel("Linear Abscissa (linear flux)")
                axs[0].set_ylabel("Input Ordinate (flux)")
                axs[1].set_xlabel("Linear Ordinate (linear flux)")
                axs[1].set_ylabel("Flux Difference: (input - full model fit)")
            elif stepname == 'solution':
                axs[0].set_xlabel("Input Abscissa (time or mondiode)")
                axs[0].set_ylabel("Linear Ordinate (linear flux)")
                axs[1].set_xlabel("Model flux (linear flux)")
                axs[1].set_ylabel("Flux Difference: (linear - model)")

            axs[0].set_yscale('log')
            axs[0].set_xscale('log')
            axs[0].scatter(xVector, yVector)
            axs[0].scatter(xVector[~mask], yVector[~mask], c='red', marker='x')
            axs[1].set_xscale('log')

            axs[1].scatter(yModel, yVector[mask] - yModel)
            fig.tight_layout()
            fig.show()

            prompt = "Press Enter or c to continue [chpx]..."
            while True:
                ans = input(prompt).lower()
                if ans in ("", " ", "c",):
                    break
                elif ans in ("p", ):
                    import pdb
                    pdb.set_trace()
                elif ans in ("h", ):
                    print("[h]elp [c]ontinue [p]db")
                elif ans in ('x', ):
                    exit()
            plt.close()