Coverage for python/lsst/cp/pipe/linearity.py: 11% (262 statements)
coverage.py v6.5.0, created at 2023-02-23 03:48 -0800

# This file is part of cp_pipe.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#

__all__ = ["LinearitySolveTask", "LinearitySolveConfig"]

import numpy as np
import lsst.afw.image as afwImage
import lsst.afw.math as afwMath
import lsst.pipe.base as pipeBase
import lsst.pipe.base.connectionTypes as cT
import lsst.pex.config as pexConfig

from lsstDebug import getDebugFrame
from lsst.ip.isr import (Linearizer, IsrProvenance)

from .utils import (funcPolynomial, irlsFit)
from ._lookupStaticCalibration import lookupStaticCalibration


def ptcLookup(datasetType, registry, quantumDataId, collections):
    """Butler lookup function to allow PTC to be found.

    Parameters
    ----------
    datasetType : `lsst.daf.butler.DatasetType`
        Dataset type to look up.
    registry : `lsst.daf.butler.Registry`
        Registry for the data repository being searched.
    quantumDataId : `lsst.daf.butler.DataCoordinate`
        Data ID for the quantum of the task this dataset will be passed to.
        This must include an "instrument" key, and should also include any
        keys that are present in ``datasetType.dimensions``.  If it has an
        ``exposure`` or ``visit`` key, that's a sign that this function is
        not actually needed, as those come with the temporal information that
        would allow a real validity-range lookup.
    collections : `lsst.daf.butler.registry.CollectionSearch`
        Collections passed by the user when generating a QuantumGraph.
        Ignored by this function (see notes below).

    Returns
    -------
    refs : `list` [`DatasetRef`]
        A zero- or single-element list containing the matching
        dataset, if one was found.

    Raises
    ------
    RuntimeError
        Raised if more than one PTC reference is found.
    """
    refs = list(registry.queryDatasets(datasetType, dataId=quantumDataId, collections=collections,
                                       findFirst=False))
    if len(refs) >= 2:
        raise RuntimeError("Too many PTC connections found. Incorrect collections supplied?")

    return refs

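# Illustrative sketch (not part of the task): how the ``ptcLookup`` contract
# behaves, using a hypothetical stand-in for the butler registry.  Only the
# ``queryDatasets`` call is exercised here; the real arguments are daf_butler
# primitives.
def _ptcLookupSketch():
    class _StubRegistry:
        # Hypothetical registry returning canned dataset references.
        def __init__(self, refs):
            self._refs = refs

        def queryDatasets(self, datasetType, dataId=None, collections=None, findFirst=False):
            return list(self._refs)

    # A single match comes back as a one-element list.
    assert ptcLookup("ptc", _StubRegistry(["ref"]), {"instrument": "cam"}, []) == ["ref"]

    # Two or more matches are ambiguous and raise RuntimeError.
    try:
        ptcLookup("ptc", _StubRegistry(["refA", "refB"]), {"instrument": "cam"}, [])
    except RuntimeError:
        pass
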

class LinearitySolveConnections(pipeBase.PipelineTaskConnections,
                                dimensions=("instrument", "detector")):
    dummy = cT.Input(
        name="raw",
        doc="Dummy exposure.",
        storageClass='Exposure',
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
        deferLoad=True,
    )

    camera = cT.PrerequisiteInput(
        name="camera",
        doc="Camera Geometry definition.",
        storageClass="Camera",
        dimensions=("instrument", ),
        isCalibration=True,
        lookupFunction=lookupStaticCalibration,
    )

    inputPtc = cT.PrerequisiteInput(
        name="ptc",
        doc="Input PTC dataset.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "detector"),
        isCalibration=True,
        lookupFunction=ptcLookup,
    )

    inputPhotodiodeData = cT.PrerequisiteInput(
        name="photodiode",
        doc="Photodiode readings data.",
        storageClass="IsrCalib",
        dimensions=("instrument", "exposure"),
        multiple=True,
        deferLoad=True,
        minimum=0,
    )

    inputPhotodiodeCorrection = cT.Input(
        name="pdCorrection",
        doc="Input photodiode correction.",
        storageClass="IsrCalib",
        dimensions=("instrument", ),
        isCalibration=True,
    )

    outputLinearizer = cT.Output(
        name="linearity",
        doc="Output linearity measurements.",
        storageClass="Linearizer",
        dimensions=("instrument", "detector"),
        isCalibration=True,
    )

    def __init__(self, *, config=None):
        super().__init__(config=config)

        if config.applyPhotodiodeCorrection is not True:
            self.inputs.discard("inputPhotodiodeCorrection")

        if config.usePhotodiode is not True:
            self.inputs.discard("inputPhotodiodeData")


class LinearitySolveConfig(pipeBase.PipelineTaskConfig,
                           pipelineConnections=LinearitySolveConnections):
    """Configuration for solving the linearity from PTC dataset.
    """
    linearityType = pexConfig.ChoiceField(
        dtype=str,
        doc="Type of linearizer to construct.",
        default="Squared",
        allowed={
            "LookupTable": "Create a lookup table solution.",
            "Polynomial": "Create an arbitrary polynomial solution.",
            "Squared": "Create a single order squared solution.",
            "Spline": "Create a spline based solution.",
            "None": "Create a dummy solution.",
        }
    )
    polynomialOrder = pexConfig.Field(
        dtype=int,
        doc="Degree of polynomial to fit.",
        default=3,
    )
    splineKnots = pexConfig.Field(
        dtype=int,
        doc="Number of spline knots to use in fit.",
        default=10,
    )
    maxLookupTableAdu = pexConfig.Field(
        dtype=int,
        doc="Maximum DN value for a LookupTable linearizer.",
        default=2**18,
    )
    maxLinearAdu = pexConfig.Field(
        dtype=float,
        doc="Maximum DN value to use to estimate linear term.",
        default=20000.0,
    )
    minLinearAdu = pexConfig.Field(
        dtype=float,
        doc="Minimum DN value to use to estimate linear term.",
        default=30.0,
    )
    nSigmaClipLinear = pexConfig.Field(
        dtype=float,
        doc="Maximum deviation from linear solution for Poissonian noise.",
        default=5.0,
    )
    ignorePtcMask = pexConfig.Field(
        dtype=bool,
        doc="Ignore the expIdMask set by the PTC solver?",
        default=False,
    )
    usePhotodiode = pexConfig.Field(
        dtype=bool,
        doc="Use the photodiode info instead of the raw expTimes?",
        default=False,
    )
    photodiodeIntegrationMethod = pexConfig.ChoiceField(
        dtype=str,
        doc="Integration method for photodiode monitoring data.",
        default="DIRECT_SUM",
        allowed={
            "DIRECT_SUM": ("Use numpy's trapz integrator on all photodiode "
                           "readout entries."),
            "TRIMMED_SUM": ("Use numpy's trapz integrator, clipping the "
                            "leading and trailing entries, which are "
                            "nominally at zero baseline level."),
        }
    )
    applyPhotodiodeCorrection = pexConfig.Field(
        dtype=bool,
        doc="Calculate and apply a correction to the photodiode readings?",
        default=False,
    )

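# Illustrative sketch (not used by the pipeline): overriding the configuration
# defined above, e.g. from a pipeline definition or a notebook.  The field
# names are those of ``LinearitySolveConfig``; the values are only examples.
def _configSketch():
    config = LinearitySolveConfig()
    config.linearityType = "Spline"                     # one of the allowed choices above
    config.splineKnots = 12                             # number of knots for the spline fit
    config.usePhotodiode = True                         # use photodiode charge instead of expTime
    config.photodiodeIntegrationMethod = "TRIMMED_SUM"  # clip zero-baseline readings
    config.validate()                                   # pex_config consistency check
    return config
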

class LinearitySolveTask(pipeBase.PipelineTask):
    """Fit the linearity from the PTC dataset.
    """

    ConfigClass = LinearitySolveConfig
    _DefaultName = 'cpLinearitySolve'

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        """Ensure that the input and output dimensions are passed along.

        Parameters
        ----------
        butlerQC : `lsst.daf.butler.butlerQuantumContext.ButlerQuantumContext`
            Butler to operate on.
        inputRefs : `lsst.pipe.base.connections.InputQuantizedConnection`
            Input data refs to load.
        outputRefs : `lsst.pipe.base.connections.OutputQuantizedConnection`
            Output data refs to persist.
        """
        inputs = butlerQC.get(inputRefs)

        # Use the dimensions to set calib/provenance information.
        inputs['inputDims'] = inputRefs.inputPtc.dataId.byName()

        outputs = self.run(**inputs)
        butlerQC.put(outputs, outputRefs)


    def run(self, inputPtc, dummy, camera, inputDims, inputPhotodiodeData=None,
            inputPhotodiodeCorrection=None):
        """Fit non-linearity to PTC data, returning the correct Linearizer
        object.

        Parameters
        ----------
        inputPtc : `lsst.ip.isr.PtcDataset`
            Pre-measured PTC dataset.
        dummy : `lsst.afw.image.Exposure`
            The exposure used to select the appropriate PTC dataset.
            In almost all circumstances, one of the input exposures
            used to generate the PTC dataset is the best option.
        camera : `lsst.afw.cameraGeom.Camera`
            Camera geometry.
        inputDims : `lsst.daf.butler.DataCoordinate` or `dict`
            DataIds to use to populate the output calibration.
        inputPhotodiodeData : `dict` [`str`, `lsst.ip.isr.PhotodiodeCalib`], optional
            Photodiode readings data.
        inputPhotodiodeCorrection : `lsst.ip.isr.PhotodiodeCorrection`, optional
            Pre-measured photodiode correction used in the case when
            ``applyPhotodiodeCorrection=True``.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The results struct containing:

            ``outputLinearizer``
                Final linearizer calibration (`lsst.ip.isr.Linearizer`).
            ``outputProvenance``
                Provenance data for the new calibration
                (`lsst.ip.isr.IsrProvenance`).

        Notes
        -----
        This task currently fits only polynomial-defined corrections,
        where the correction coefficients are defined such that:
        :math:`corrImage = uncorrImage + \\sum_i c_i uncorrImage^{2 + i}`
        These :math:`c_i` are defined in terms of the direct polynomial fit:
        :math:`meanVector \\sim P(x=timeVector) = \\sum_j k_j x^j`
        such that :math:`c_{j-2} = -k_j/(k_1^j)` in units of DN^(1-j)
        (cf. Eq. 37 of arXiv:2003.05978).  The ``config.polynomialOrder`` or
        ``config.splineKnots`` define the maximum order of :math:`x^j` to fit.
        As :math:`k_0` and :math:`k_1` are degenerate with bias level and
        gain, they are not included in the non-linearity correction.
        A numerical sketch of this coefficient transformation is given at
        the end of this module.
        """
        if len(dummy) == 0:
            self.log.warning("No dummy exposure found.")

        detector = camera[inputDims['detector']]
        if self.config.linearityType == 'LookupTable':
            table = np.zeros((len(detector), self.config.maxLookupTableAdu), dtype=np.float32)
            tableIndex = 0
        else:
            table = None
            tableIndex = None  # This will fail if we increment it.

        if self.config.linearityType == 'Spline':
            fitOrder = self.config.splineKnots
        else:
            fitOrder = self.config.polynomialOrder

        # Initialize the linearizer.
        linearizer = Linearizer(detector=detector, table=table, log=self.log)
        linearizer.updateMetadataFromExposures([inputPtc])
        if self.config.usePhotodiode:
            # Compute the photodiode integrals once, outside the loop
            # over amps.
            monDiodeCharge = {}
            for handle in inputPhotodiodeData:
                expId = handle.dataId['exposure']
                pd_calib = handle.get()
                pd_calib.integrationMethod = self.config.photodiodeIntegrationMethod
                monDiodeCharge[expId] = pd_calib.integrate()[0]
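                # Note (added): per ``photodiodeIntegrationMethod`` above,
                # the integral is a trapezoidal sum of the monitoring-diode
                # readings (optionally trimmed of the zero-baseline ends);
                # the leading element of the result is used here as the
                # exposure's flux proxy.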

            if self.config.applyPhotodiodeCorrection:
                abscissaCorrections = inputPhotodiodeCorrection.abscissaCorrections

        for i, amp in enumerate(detector):
            ampName = amp.getName()
            if ampName in inputPtc.badAmps:
                linearizer = self.fillBadAmp(linearizer, fitOrder, inputPtc, amp)
                self.log.warning("Amp %s in detector %s has no usable PTC information. Skipping!",
                                 ampName, detector.getName())
                continue

            if (len(inputPtc.expIdMask[ampName]) == 0) or self.config.ignorePtcMask:
                self.log.warning("Mask not found for %s in detector %s in fit. Using all points.",
                                 ampName, detector.getName())
                mask = np.repeat(True, len(inputPtc.expIdMask[ampName]))
            else:
                mask = np.array(inputPtc.expIdMask[ampName], dtype=bool)

            if self.config.usePhotodiode:
                modExpTimes = []
                for i, pair in enumerate(inputPtc.inputExpIdPairs[ampName]):
                    pair = pair[0]
                    modExpTime = 0.0
                    nExps = 0
                    for j in range(2):
                        expId = pair[j]
                        if expId in monDiodeCharge:
                            modExpTime += monDiodeCharge[expId]
                            nExps += 1
                    if nExps > 0:
                        modExpTime = modExpTime / nExps
                    else:
                        mask[i] = False

                    # Get the photodiode correction
                    if self.config.applyPhotodiodeCorrection:
                        try:
                            correction = abscissaCorrections[str(pair)]
                        except KeyError:
                            correction = 0.0
                    else:
                        correction = 0.0
                    modExpTimes.append(modExpTime + correction)
                inputAbscissa = np.array(modExpTimes)[mask]
            else:
                inputAbscissa = np.array(inputPtc.rawExpTimes[ampName])[mask]

            inputOrdinate = np.array(inputPtc.rawMeans[ampName])[mask]
            # Determine proxy-to-linear-flux transformation
            fluxMask = inputOrdinate < self.config.maxLinearAdu
            lowMask = inputOrdinate > self.config.minLinearAdu
            fluxMask = fluxMask & lowMask
            linearAbscissa = inputAbscissa[fluxMask]
            linearOrdinate = inputOrdinate[fluxMask]
            if len(linearAbscissa) < 2:
                linearizer = self.fillBadAmp(linearizer, fitOrder, inputPtc, amp)
                self.log.warning("Amp %s in detector %s does not have enough points for a linear fit. "
                                 "Skipping!", ampName, detector.getName())
                continue


            linearFit, linearFitErr, chiSq, weights = irlsFit([0.0, 100.0], linearAbscissa,
                                                              linearOrdinate, funcPolynomial)
            # Convert this proxy-to-flux fit into an expected linear flux
            linearOrdinate = linearFit[0] + linearFit[1] * inputAbscissa
            # Exclude low end outliers
            threshold = self.config.nSigmaClipLinear * np.sqrt(abs(linearOrdinate))
            fluxMask = np.abs(inputOrdinate - linearOrdinate) < threshold
            linearOrdinate = linearOrdinate[fluxMask]
            fitOrdinate = inputOrdinate[fluxMask]
            fitAbscissa = inputAbscissa[fluxMask]
            if len(linearOrdinate) < 2:
                linearizer = self.fillBadAmp(linearizer, fitOrder, inputPtc, amp)
                self.log.warning("Amp %s in detector %s does not have enough points in the linear "
                                 "ordinate. Skipping!", ampName, detector.getName())
                continue

            self.debugFit('linearFit', inputAbscissa, inputOrdinate, linearOrdinate, fluxMask, ampName)
            # Do fits
            if self.config.linearityType in ['Polynomial', 'Squared', 'LookupTable']:
                polyFit = np.zeros(fitOrder + 1)
                polyFit[1] = 1.0
                polyFit, polyFitErr, chiSq, weights = irlsFit(polyFit, linearOrdinate,
                                                              fitOrdinate, funcPolynomial)

                # Truncate the polynomial fit
                k1 = polyFit[1]
                linearityFit = [-coeff/(k1**order) for order, coeff in enumerate(polyFit)]
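                # Descriptive note (added): the list comprehension above
                # rescales the direct-fit coefficients k_j (``polyFit``) into
                # the correction coefficients of the class Notes,
                # c_(j-2) = -k_j / k_1**j; the j = 0, 1 entries are degenerate
                # with bias and gain and carry no non-linearity information.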

                significant = np.where(np.abs(linearityFit) > 1e-10, True, False)
                self.log.info("Significant polynomial fits: %s", significant)

                modelOrdinate = funcPolynomial(polyFit, fitAbscissa)

                self.debugFit('polyFit', linearAbscissa, fitOrdinate, modelOrdinate, None, ampName)

                if self.config.linearityType == 'Squared':
                    linearityFit = [linearityFit[2]]
                elif self.config.linearityType == 'LookupTable':
                    # Use linear part to get time at which signal is
                    # self.config.maxLookupTableAdu DN.
                    tMax = (self.config.maxLookupTableAdu - polyFit[0])/polyFit[1]
                    timeRange = np.linspace(0, tMax, self.config.maxLookupTableAdu)
                    signalIdeal = polyFit[0] + polyFit[1]*timeRange
                    signalUncorrected = funcPolynomial(polyFit, timeRange)
                    lookupTableRow = signalIdeal - signalUncorrected  # The LookupTable linearizer stores the correction.

                    linearizer.tableData[tableIndex, :] = lookupTableRow
                    linearityFit = [tableIndex, 0]
                    tableIndex += 1
            elif self.config.linearityType in ['Spline']:
                # See discussion in `lsst.ip.isr.linearize.py` before
                # modifying.
                numPerBin, binEdges = np.histogram(linearOrdinate, bins=fitOrder)
                with np.errstate(invalid="ignore"):
                    # Algorithm note: With the counts of points per
                    # bin above, the next histogram calculates the
                    # values to put in each bin by weighting each
                    # point by the correction value.
                    values = np.histogram(linearOrdinate, bins=fitOrder,
                                          weights=(inputOrdinate[fluxMask] - linearOrdinate))[0]/numPerBin

                    # After this is done, the binCenters are
                    # calculated by weighting by the value we're
                    # binning over.  This ensures that widely
                    # spaced/poorly sampled data aren't assigned to
                    # the midpoint of the bin (as could be done using
                    # the binEdges above), but to the weighted mean of
                    # the inputs.  Note that both histograms are
                    # scaled by the count per bin to normalize what
                    # the histogram returns (a sum of the points
                    # inside) into an average.
                    binCenters = np.histogram(linearOrdinate, bins=fitOrder,
                                              weights=linearOrdinate)[0]/numPerBin
                    values = values[numPerBin > 0]
                    binCenters = binCenters[numPerBin > 0]

                self.debugFit('splineFit', binCenters, np.abs(values), values, None, ampName)
                interp = afwMath.makeInterpolate(binCenters.tolist(), values.tolist(),
                                                 afwMath.stringToInterpStyle("AKIMA_SPLINE"))
                modelOrdinate = linearOrdinate + interp.interpolate(linearOrdinate)
                self.debugFit('splineFit', linearOrdinate, fitOrdinate, modelOrdinate, None, ampName)

                # If we exclude a lot of points, we may end up with
                # fewer than fitOrder points.  Pad out the low-flux end
                # to ensure equal lengths.
                if len(binCenters) != fitOrder:
                    padN = fitOrder - len(binCenters)
                    binCenters = np.pad(binCenters, (padN, 0), 'linear_ramp',
                                        end_values=(binCenters.min() - 1.0, ))
                    # This stores the correction, which is zero at low values.
                    values = np.pad(values, (padN, 0))

                # Pack the spline into a single array.
                linearityFit = np.concatenate((binCenters.tolist(), values.tolist())).tolist()
                polyFit = [0.0]
                polyFitErr = [0.0]
                chiSq = np.nan
            else:
                polyFit = [0.0]
                polyFitErr = [0.0]
                chiSq = np.nan
                linearityFit = [0.0]

            linearizer.linearityType[ampName] = self.config.linearityType
            linearizer.linearityCoeffs[ampName] = np.array(linearityFit)
            linearizer.linearityBBox[ampName] = amp.getBBox()
            linearizer.fitParams[ampName] = np.array(polyFit)
            linearizer.fitParamsErr[ampName] = np.array(polyFitErr)
            linearizer.fitChiSq[ampName] = chiSq
            linearizer.linearFit[ampName] = linearFit
            residuals = fitOrdinate - modelOrdinate

            # The residuals only include flux values which are
            # not masked out.  To be able to access this later and
            # associate it with the PTC flux values, we need to
            # fill out the residuals with NaNs where the flux
            # value is masked.

            # First convert mask to a composite of the two masks:
            mask[mask] = fluxMask
            fullResiduals = np.full(len(mask), np.nan)
            fullResiduals[mask] = residuals
            linearizer.fitResiduals[ampName] = fullResiduals
            image = afwImage.ImageF(len(inputOrdinate), 1)
            image.getArray()[:, :] = inputOrdinate
            linearizeFunction = linearizer.getLinearityTypeByName(linearizer.linearityType[ampName])
            linearizeFunction()(image,
                                **{'coeffs': linearizer.linearityCoeffs[ampName],
                                   'table': linearizer.tableData,
                                   'log': linearizer.log})
            linearizeModel = image.getArray()[0, :]

            self.debugFit('solution', inputOrdinate[fluxMask], linearOrdinate,
                          linearizeModel[fluxMask], None, ampName)

        linearizer.hasLinearity = True
        linearizer.validate()
        linearizer.updateMetadata(camera=camera, detector=detector, filterName='NONE')
        linearizer.updateMetadata(setDate=True, setCalibId=True)
        provenance = IsrProvenance(calibType='linearizer')

        return pipeBase.Struct(
            outputLinearizer=linearizer,
            outputProvenance=provenance,
        )

    def fillBadAmp(self, linearizer, fitOrder, inputPtc, amp):
        """Fill the linearizer with default (no-op) values for a bad amp.

        Parameters
        ----------
        linearizer : `lsst.ip.isr.Linearizer`
            Linearizer to update in place.
        fitOrder : `int`
            Fit order (polynomial degree, or number of spline knots).
        inputPtc : `lsst.ip.isr.PtcDataset`
            Pre-measured PTC dataset, used to size the stored residuals.
        amp : `lsst.afw.cameraGeom.Amplifier`
            Amplifier to fill.

        Returns
        -------
        linearizer : `lsst.ip.isr.Linearizer`
            The updated linearizer.
        """
        # Need to fill the linearizer with empty values
        # if the amp is non-functional.
        ampName = amp.getName()
        nEntries = 1
        pEntries = 1
        if self.config.linearityType in ['Polynomial']:
            nEntries = fitOrder + 1
            pEntries = fitOrder + 1
        elif self.config.linearityType in ['Spline']:
            nEntries = fitOrder * 2
        elif self.config.linearityType in ['Squared', 'None']:
            nEntries = 1
            pEntries = fitOrder + 1
        elif self.config.linearityType in ['LookupTable']:
            nEntries = 2
            pEntries = fitOrder + 1

        linearizer.linearityType[ampName] = "None"
        linearizer.linearityCoeffs[ampName] = np.zeros(nEntries)
        linearizer.linearityBBox[ampName] = amp.getBBox()
        linearizer.fitParams[ampName] = np.zeros(pEntries)
        linearizer.fitParamsErr[ampName] = np.zeros(pEntries)
        linearizer.fitChiSq[ampName] = np.nan
        linearizer.fitResiduals[ampName] = np.zeros(len(inputPtc.expIdMask[ampName]))
        linearizer.linearFit[ampName] = np.zeros(2)
        return linearizer

    def debugFit(self, stepname, xVector, yVector, yModel, mask, ampName):
        """Debug method for linearity fitting.

        Parameters
        ----------
        stepname : `str`
            A label to use to check if we care to debug at a given
            line of code.
        xVector : `numpy.array`, (N,)
            The values to use as the independent variable in the
            linearity fit.
        yVector : `numpy.array`, (N,)
            The values to use as the dependent variable in the
            linearity fit.
        yModel : `numpy.array`, (N,)
            The values to use as the linearized result.
        mask : `numpy.array` [`bool`], (N,), optional
            A mask to indicate which entries of ``xVector`` and
            ``yVector`` to keep.
        ampName : `str`
            Amplifier name to lookup linearity correction values.
        """
        frame = getDebugFrame(self._display, stepname)
        if frame:
            import matplotlib.pyplot as plt
            fig, axs = plt.subplots(2)

            if mask is None:
                mask = np.ones_like(xVector, dtype=bool)

            fig.suptitle(f"{stepname} {ampName} {self.config.linearityType}")
            if stepname == 'linearFit':
                axs[0].set_xlabel("Input Abscissa (time or mondiode)")
                axs[0].set_ylabel("Input Ordinate (flux)")
                axs[1].set_xlabel("Linear Ordinate (linear flux)")
                axs[1].set_ylabel("Flux Difference: (input - linear)")
            elif stepname in ('polyFit', 'splineFit'):
                axs[0].set_xlabel("Linear Abscissa (linear flux)")
                axs[0].set_ylabel("Input Ordinate (flux)")
                axs[1].set_xlabel("Linear Ordinate (linear flux)")
                axs[1].set_ylabel("Flux Difference: (input - full model fit)")
            elif stepname == 'solution':
                axs[0].set_xlabel("Input Abscissa (time or mondiode)")
                axs[0].set_ylabel("Linear Ordinate (linear flux)")
                axs[1].set_xlabel("Model flux (linear flux)")
                axs[1].set_ylabel("Flux Difference: (linear - model)")

            axs[0].set_yscale('log')
            axs[0].set_xscale('log')
            axs[0].scatter(xVector, yVector)
            axs[0].scatter(xVector[~mask], yVector[~mask], c='red', marker='x')
            axs[1].set_xscale('log')

            axs[1].scatter(yModel, yVector[mask] - yModel)
            fig.show()

            prompt = "Press Enter or c to continue [chpx]..."
            while True:
                ans = input(prompt).lower()
                if ans in ("", " ", "c",):
                    break
                elif ans in ("p", ):
                    import pdb
                    pdb.set_trace()
                elif ans in ("h", ):
                    print("[h]elp [c]ontinue [p]db")
                elif ans in ('x', ):
                    exit()
            plt.close()
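

# Illustrative, numpy-only sketch (not used by the task) of the coefficient
# transformation described in the Notes of `LinearitySolveTask.run`: fit
# mean ~ sum_j k_j t**j, convert to c_(j-2) = -k_j / k_1**j, and apply
# corrected = uncorrected + sum_i c_i * uncorrected**(2 + i).  The k values
# below are toy numbers chosen only for demonstration.
def _polynomialCorrectionSketch():
    # Toy linear flux and a small quadratic non-linearity (k_0 = 0).
    k1, k2 = 1000.0, -2.0e-9
    time = np.linspace(1.0, 100.0, 50)
    linearFlux = k1 * time
    measured = linearFlux + k2 * linearFlux**2   # uncorrected signal

    # Direct polynomial fit of measured signal vs. time recovers the k_j.
    kFit = np.polynomial.polynomial.polyfit(time, measured, 2)

    # Convert to correction coefficients c_(j-2) = -k_j / k_1**j.
    c = [-kFit[j] / kFit[1]**j for j in range(2, len(kFit))]

    # Apply the correction to the uncorrected signal.
    corrected = measured + sum(ci * measured**(2 + i) for i, ci in enumerate(c))

    # The corrected signal is linear in time to good approximation.
    residual = corrected - linearFlux
    assert np.max(np.abs(residual)) < 1.0e-3 * np.max(linearFlux)
    return residual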