Coverage for python/lsst/cp/pipe/linearity.py: 11%
272 statements
# This file is part of cp_pipe.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#

__all__ = ["LinearitySolveTask", "LinearitySolveConfig"]

import numpy as np
import lsst.afw.image as afwImage
import lsst.afw.math as afwMath
import lsst.pipe.base as pipeBase
import lsst.pipe.base.connectionTypes as cT
import lsst.pex.config as pexConfig

from lsstDebug import getDebugFrame
from lsst.ip.isr import (Linearizer, IsrProvenance)

from .utils import (funcPolynomial, irlsFit)
from ._lookupStaticCalibration import lookupStaticCalibration


def ptcLookup(datasetType, registry, quantumDataId, collections):
    """Butler lookup function to allow PTC to be found.

    Parameters
    ----------
    datasetType : `lsst.daf.butler.DatasetType`
        Dataset type to look up.
    registry : `lsst.daf.butler.Registry`
        Registry for the data repository being searched.
    quantumDataId : `lsst.daf.butler.DataCoordinate`
        Data ID for the quantum of the task this dataset will be passed to.
        This must include an "instrument" key, and should also include any
        keys that are present in ``datasetType.dimensions``. If it has an
        ``exposure`` or ``visit`` key, that's a sign that this function is
        not actually needed, as those come with the temporal information that
        would allow a real validity-range lookup.
    collections : `lsst.daf.butler.registry.CollectionSearch`
        Collections passed by the user when generating a QuantumGraph. Ignored
        by this function (see notes below).

    Returns
    -------
    refs : `list` [`DatasetRef`]
        A zero- or single-element list containing the matching
        dataset, if one was found.

    Raises
    ------
    RuntimeError
        Raised if more than one PTC reference is found.
    """
    refs = list(registry.queryDatasets(datasetType, dataId=quantumDataId, collections=collections,
                                       findFirst=False))
    if len(refs) >= 2:
        raise RuntimeError("Too many PTC connections found. Incorrect collections supplied?")

    return refs
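
# Illustrative sketch of how the middleware invokes this lookup function when
# building a QuantumGraph (the data ID values below are hypothetical):
#
#     refs = ptcLookup(ptcDatasetType, butler.registry,
#                      quantumDataId,  # e.g. {"instrument": "LSSTCam", "detector": 42}
#                      collections)
#
# A single matching PTC reference (or none) is returned and attached to the
# quantum as a prerequisite input; see ``lookupFunction=ptcLookup`` below.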


class LinearitySolveConnections(pipeBase.PipelineTaskConnections,
                                dimensions=("instrument", "detector")):
    dummy = cT.Input(
        name="raw",
        doc="Dummy exposure.",
        storageClass='Exposure',
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
        deferLoad=True,
    )

    camera = cT.PrerequisiteInput(
        name="camera",
        doc="Camera Geometry definition.",
        storageClass="Camera",
        dimensions=("instrument", ),
        isCalibration=True,
        lookupFunction=lookupStaticCalibration,
    )

    inputPtc = cT.PrerequisiteInput(
        name="ptc",
        doc="Input PTC dataset.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "detector"),
        isCalibration=True,
        lookupFunction=ptcLookup,
    )

    inputPhotodiodeData = cT.PrerequisiteInput(
        name="photodiode",
        doc="Photodiode readings data.",
        storageClass="IsrCalib",
        dimensions=("instrument", "exposure"),
        multiple=True,
        deferLoad=True,
        minimum=0,
    )

    inputPhotodiodeCorrection = cT.Input(
        name="pdCorrection",
        doc="Input photodiode correction.",
        storageClass="IsrCalib",
        dimensions=("instrument", ),
        isCalibration=True,
    )

    outputLinearizer = cT.Output(
        name="linearity",
        doc="Output linearity measurements.",
        storageClass="Linearizer",
        dimensions=("instrument", "detector"),
        isCalibration=True,
    )

    def __init__(self, *, config=None):
        super().__init__(config=config)

        if config.applyPhotodiodeCorrection is not True:
            self.inputs.discard("inputPhotodiodeCorrection")

        if config.usePhotodiode is not True:
            self.inputs.discard("inputPhotodiodeData")


class LinearitySolveConfig(pipeBase.PipelineTaskConfig,
                           pipelineConnections=LinearitySolveConnections):
    """Configuration for solving the linearity from PTC dataset.
    """
    linearityType = pexConfig.ChoiceField(
        dtype=str,
        doc="Type of linearizer to construct.",
        default="Squared",
        allowed={
            "LookupTable": "Create a lookup table solution.",
            "Polynomial": "Create an arbitrary polynomial solution.",
            "Squared": "Create a single order squared solution.",
            "Spline": "Create a spline based solution.",
            "None": "Create a dummy solution.",
        }
    )
    polynomialOrder = pexConfig.Field(
        dtype=int,
        doc="Degree of polynomial to fit.",
        default=3,
    )
    splineKnots = pexConfig.Field(
        dtype=int,
        doc="Number of spline knots to use in fit.",
        default=10,
    )
    maxLookupTableAdu = pexConfig.Field(
        dtype=int,
        doc="Maximum DN value for a LookupTable linearizer.",
        default=2**18,
    )
    maxLinearAdu = pexConfig.Field(
        dtype=float,
        doc="Maximum DN value to use to estimate linear term.",
        default=20000.0,
    )
    minLinearAdu = pexConfig.Field(
        dtype=float,
        doc="Minimum DN value to use to estimate linear term.",
        default=30.0,
    )
    nSigmaClipLinear = pexConfig.Field(
        dtype=float,
        doc="Maximum deviation from linear solution for Poissonian noise.",
        default=5.0,
    )
    ignorePtcMask = pexConfig.Field(
        dtype=bool,
        doc="Ignore the expIdMask set by the PTC solver?",
        default=False,
    )
    usePhotodiode = pexConfig.Field(
        dtype=bool,
        doc="Use the photodiode info instead of the raw expTimes?",
        default=False,
    )
    photodiodeIntegrationMethod = pexConfig.ChoiceField(
        dtype=str,
        doc="Integration method for photodiode monitoring data.",
        default="DIRECT_SUM",
        allowed={
            "DIRECT_SUM": ("Use numpy's trapz integrator on all photodiode "
                           "readout entries."),
            "TRIMMED_SUM": ("Use numpy's trapz integrator, clipping the "
                            "leading and trailing entries, which are "
                            "nominally at zero baseline level."),
            "CHARGE_SUM": ("Treat the current values as integrated charge "
                           "over the sampling interval and simply sum "
                           "the values, after subtracting a baseline level."),
        }
    )
    photodiodeCurrentScale = pexConfig.Field(
        dtype=float,
        doc="Scale factor to apply to photodiode current values for the "
            "``CHARGE_SUM`` integration method.",
        default=-1.0,
    )
    applyPhotodiodeCorrection = pexConfig.Field(
        dtype=bool,
        doc="Calculate and apply a correction to the photodiode readings?",
        default=False,
    )
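
# Illustrative config overrides (hypothetical pipeline snippet, not part of
# this module): a spline solution driven by the photodiode could be selected
# with, e.g.,
#
#     config.linearityType = "Spline"
#     config.splineKnots = 10
#     config.usePhotodiode = True
#     config.applyPhotodiodeCorrection = True
#
# With ``usePhotodiode=False`` (the default) the raw exposure times from the
# PTC dataset are used as the fit abscissa instead.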


class LinearitySolveTask(pipeBase.PipelineTask):
    """Fit the linearity from the PTC dataset.
    """

    ConfigClass = LinearitySolveConfig
    _DefaultName = 'cpLinearitySolve'

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        """Ensure that the input and output dimensions are passed along.

        Parameters
        ----------
        butlerQC : `lsst.daf.butler.butlerQuantumContext.ButlerQuantumContext`
            Butler to operate on.
        inputRefs : `lsst.pipe.base.connections.InputQuantizedConnection`
            Input data refs to load.
        outputRefs : `lsst.pipe.base.connections.OutputQuantizedConnection`
            Output data refs to persist.
        """
        inputs = butlerQC.get(inputRefs)

        # Use the dimensions to set calib/provenance information.
        inputs['inputDims'] = inputRefs.inputPtc.dataId.byName()

        outputs = self.run(**inputs)
        butlerQC.put(outputs, outputRefs)

    def run(self, inputPtc, dummy, camera, inputDims, inputPhotodiodeData=None,
            inputPhotodiodeCorrection=None):
        """Fit non-linearity to PTC data, returning the correct Linearizer
        object.

        Parameters
        ----------
        inputPtc : `lsst.ip.isr.PtcDataset`
            Pre-measured PTC dataset.
        dummy : `lsst.afw.image.Exposure`
            The exposure used to select the appropriate PTC dataset.
            In almost all circumstances, one of the input exposures
            used to generate the PTC dataset is the best option.
        camera : `lsst.afw.cameraGeom.Camera`
            Camera geometry.
        inputDims : `lsst.daf.butler.DataCoordinate` or `dict`
            DataIds to use to populate the output calibration.
        inputPhotodiodeData : `dict` [`str`, `lsst.ip.isr.PhotodiodeCalib`], optional
            Photodiode readings data.
        inputPhotodiodeCorrection : `lsst.ip.isr.PhotodiodeCorrection`, optional
            Pre-measured photodiode correction used in the case when
            applyPhotodiodeCorrection=True.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The results struct containing:

            ``outputLinearizer``
                Final linearizer calibration (`lsst.ip.isr.Linearizer`).
            ``outputProvenance``
                Provenance data for the new calibration
                (`lsst.ip.isr.IsrProvenance`).

        Notes
        -----
        This task currently fits only polynomial-defined corrections,
        where the correction coefficients are defined such that:
        :math:`corrImage = uncorrImage + \\sum_i c_i uncorrImage^{2 + i}`
        These :math:`c_i` are defined in terms of the direct polynomial fit:
        :math:`meanVector ~ P(x=timeVector) = \\sum_j k_j x^j`
        such that :math:`c_{j - 2} = -k_j/(k_1^j)` in units of DN^(1 - j)
        (cf. Eq. 37 of arXiv:2003.05978). The ``config.polynomialOrder`` or
        ``config.splineKnots`` define the maximum order of :math:`x^j` to fit.
        As :math:`k_0` and :math:`k_1` are degenerate with bias level and gain,
        they are not included in the non-linearity correction.
        """
        if len(dummy) == 0:
            self.log.warning("No dummy exposure found.")

        detector = camera[inputDims['detector']]
        if self.config.linearityType == 'LookupTable':
            table = np.zeros((len(detector), self.config.maxLookupTableAdu), dtype=np.float32)
            tableIndex = 0
        else:
            table = None
            tableIndex = None  # This will fail if we increment it.

        if self.config.linearityType == 'Spline':
            fitOrder = self.config.splineKnots
        else:
            fitOrder = self.config.polynomialOrder

        # Initialize the linearizer.
        linearizer = Linearizer(detector=detector, table=table, log=self.log)
        linearizer.updateMetadataFromExposures([inputPtc])

        if self.config.usePhotodiode:
            # Compute the photodiode integrals once, outside the loop
            # over amps.
            monDiodeCharge = {}
            for handle in inputPhotodiodeData:
                expId = handle.dataId['exposure']
                pd_calib = handle.get()
                pd_calib.integrationMethod = self.config.photodiodeIntegrationMethod
                pd_calib.currentScale = self.config.photodiodeCurrentScale
                monDiodeCharge[expId] = pd_calib.integrate()[0]
            if self.config.applyPhotodiodeCorrection:
                abscissaCorrections = inputPhotodiodeCorrection.abscissaCorrections
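        # Illustrative note on the abscissa choice (made-up values): with
        # usePhotodiode=True each flat pair is placed on the x axis by its
        # integrated monitor-diode charge rather than its exposure time.
        # DIRECT_SUM/TRIMMED_SUM integrate the photodiode current over the
        # sample timestamps with a trapezoidal rule (a constant 2e-9 A over
        # 10 s integrates to 2e-8 C), while CHARGE_SUM sums
        # baseline-subtracted samples scaled by photodiodeCurrentScale.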
        for i, amp in enumerate(detector):
            ampName = amp.getName()
            if ampName in inputPtc.badAmps:
                linearizer = self.fillBadAmp(linearizer, fitOrder, inputPtc, amp)
                self.log.warning("Amp %s in detector %s has no usable PTC information. Skipping!",
                                 ampName, detector.getName())
                continue

            if (len(inputPtc.expIdMask[ampName]) == 0) or self.config.ignorePtcMask:
                self.log.warning("Mask not found for %s in detector %s in fit. Using all points.",
                                 ampName, detector.getName())
                mask = np.repeat(True, len(inputPtc.expIdMask[ampName]))
            else:
                mask = np.array(inputPtc.expIdMask[ampName], dtype=bool)

            if self.config.usePhotodiode:
                modExpTimes = []
                for i, pair in enumerate(inputPtc.inputExpIdPairs[ampName]):
                    pair = pair[0]
                    modExpTime = 0.0
                    nExps = 0
                    for j in range(2):
                        expId = pair[j]
                        if expId in monDiodeCharge:
                            modExpTime += monDiodeCharge[expId]
                            nExps += 1
                    if nExps > 0:
                        modExpTime = modExpTime / nExps
                    else:
                        mask[i] = False

                    # Get the photodiode correction.
                    if self.config.applyPhotodiodeCorrection:
                        try:
                            correction = abscissaCorrections[str(pair)]
                        except KeyError:
                            correction = 0.0
                    else:
                        correction = 0.0
                    modExpTimes.append(modExpTime + correction)
                inputAbscissa = np.array(modExpTimes)[mask]
            else:
                inputAbscissa = np.array(inputPtc.rawExpTimes[ampName])[mask]

            inputOrdinate = np.array(inputPtc.rawMeans[ampName])[mask]

            # Determine the proxy-to-linear-flux transformation.
            fluxMask = inputOrdinate < self.config.maxLinearAdu
            lowMask = inputOrdinate > self.config.minLinearAdu
            fluxMask = fluxMask & lowMask
            linearAbscissa = inputAbscissa[fluxMask]
            linearOrdinate = inputOrdinate[fluxMask]

            if len(linearAbscissa) < 2:
                linearizer = self.fillBadAmp(linearizer, fitOrder, inputPtc, amp)
                self.log.warning("Amp %s in detector %s has too few points for linear fit. Skipping!",
                                 ampName, detector.getName())
                continue

            linearFit, linearFitErr, chiSq, weights = irlsFit([0.0, 100.0], linearAbscissa,
                                                              linearOrdinate, funcPolynomial)
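            # Note on the call above: irlsFit performs an iteratively
            # reweighted least-squares polynomial fit, and the two-element
            # starting vector [0.0, 100.0] seeds the intercept and slope of
            # the straight-line proxy-to-flux model. Illustrative call with
            # made-up data:
            #     irlsFit([0.0, 100.0], np.array([1.0, 2.0, 4.0]),
            #             np.array([105.0, 198.0, 402.0]), funcPolynomial)
            # would return an intercept near 3, a slope near 100, their
            # errors, a chi-squared value, and the final per-point weights.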
            # Convert this proxy-to-flux fit into an expected linear flux.
            linearOrdinate = linearFit[0] + linearFit[1] * inputAbscissa
            # Exclude low-end outliers.
            threshold = self.config.nSigmaClipLinear * np.sqrt(abs(linearOrdinate))
            fluxMask = np.abs(inputOrdinate - linearOrdinate) < threshold
            linearOrdinate = linearOrdinate[fluxMask]
            fitOrdinate = inputOrdinate[fluxMask]
            fitAbscissa = inputAbscissa[fluxMask]

            if len(linearOrdinate) < 2:
                linearizer = self.fillBadAmp(linearizer, fitOrder, inputPtc, amp)
                self.log.warning("Amp %s in detector %s has too few points in linear ordinate. Skipping!",
                                 ampName, detector.getName())
                continue

            self.debugFit('linearFit', inputAbscissa, inputOrdinate, linearOrdinate, fluxMask, ampName)

            # Do fits.
            if self.config.linearityType in ['Polynomial', 'Squared', 'LookupTable']:
                polyFit = np.zeros(fitOrder + 1)
                polyFit[1] = 1.0
                polyFit, polyFitErr, chiSq, weights = irlsFit(polyFit, linearOrdinate,
                                                              fitOrdinate, funcPolynomial)

                # Truncate the polynomial fit.
                k1 = polyFit[1]
                linearityFit = [-coeff/(k1**order) for order, coeff in enumerate(polyFit)]
                significant = np.where(np.abs(linearityFit) > 1e-10, True, False)
                self.log.info("Significant polynomial fits: %s", significant)

                modelOrdinate = funcPolynomial(polyFit, fitAbscissa)
                self.debugFit('polyFit', linearAbscissa, fitOrdinate, modelOrdinate, None, ampName)

                if self.config.linearityType == 'Squared':
                    linearityFit = [linearityFit[2]]
                elif self.config.linearityType == 'LookupTable':
                    # Use the linear part to get the time at which the
                    # signal is maxLookupTableAdu DN.
                    tMax = (self.config.maxLookupTableAdu - polyFit[0])/polyFit[1]
                    timeRange = np.linspace(0, tMax, self.config.maxLookupTableAdu)
                    signalIdeal = polyFit[0] + polyFit[1]*timeRange
                    signalUncorrected = funcPolynomial(polyFit, timeRange)
                    # LinearizeLookupTable applies the correction.
                    lookupTableRow = signalIdeal - signalUncorrected

                    linearizer.tableData[tableIndex, :] = lookupTableRow
                    linearityFit = [tableIndex, 0]
                    tableIndex += 1
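                    # Sketch of how this row is consumed downstream
                    # (made-up numbers): a pixel with an uncorrected value
                    # of 1000 DN receives tableData[tableIndex, 1000] added
                    # to it, i.e. the difference between the ideal linear
                    # signal and the fitted polynomial at that level.
                    # linearityFit = [tableIndex, 0] records which table row
                    # (and column offset) this amp should use.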
            elif self.config.linearityType in ['Spline']:
                # See discussion in `lsst.ip.isr.linearize.py` before
                # modifying.
                numPerBin, binEdges = np.histogram(linearOrdinate, bins=fitOrder)
                with np.errstate(invalid="ignore"):
                    # Algorithm note: With the counts of points per
                    # bin above, the next histogram calculates the
                    # values to put in each bin by weighting each
                    # point by the correction value.
                    values = np.histogram(linearOrdinate, bins=fitOrder,
                                          weights=(inputOrdinate[fluxMask] - linearOrdinate))[0]/numPerBin

                    # After this is done, the binCenters are
                    # calculated by weighting by the value we're
                    # binning over. This ensures that widely
                    # spaced/poorly sampled data aren't assigned to
                    # the midpoint of the bin (as could be done using
                    # the binEdges above), but to the weighted mean of
                    # the inputs. Note that both histograms are
                    # scaled by the count per bin to normalize what
                    # the histogram returns (a sum of the points
                    # inside) into an average.
                    binCenters = np.histogram(linearOrdinate, bins=fitOrder,
                                              weights=linearOrdinate)[0]/numPerBin
                    values = values[numPerBin > 0]
                    binCenters = binCenters[numPerBin > 0]

                self.debugFit('splineFit', binCenters, np.abs(values), values, None, ampName)

                # Anchor the spline to have zero correction at zero
                # flux as well as at the lowest measured flux bin.
                if np.any(np.array(binCenters) < 0):
                    raise ValueError("Linearity correction has negative flux values!")

                if binCenters[0] != 0.0:
                    if values[0] != 0.0:
                        offset = values[0]
                        values -= offset
                    np.concatenate(([0.0], binCenters))
                    np.concatenate(([0.0], values))

                interp = afwMath.makeInterpolate(binCenters.tolist(), values.tolist(),
                                                 afwMath.stringToInterpStyle("AKIMA_SPLINE"))
                modelOrdinate = linearOrdinate + interp.interpolate(linearOrdinate)
                self.debugFit('splineFit', linearOrdinate, fitOrdinate, modelOrdinate, None, ampName)

                # If we exclude a lot of points, we may end up with
                # fewer than fitOrder points. Pad out the low-flux end
                # to ensure equal lengths.
                if len(binCenters) != fitOrder:
                    padN = fitOrder - len(binCenters)
                    binCenters = np.pad(binCenters, (padN, 0), 'linear_ramp',
                                        end_values=(binCenters.min() - 1.0, ))
                    # This stores the correction, which is zero at low values.
                    values = np.pad(values, (padN, 0))

                # Pack the spline into a single array.
                linearityFit = np.concatenate((binCenters.tolist(), values.tolist())).tolist()
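                # The packed array is split in half by the spline linearizer
                # in lsst.ip.isr.linearize (see the reference above): the
                # first fitOrder entries are the knot positions (flux bin
                # centers) and the remaining entries are the correction
                # values at those knots. For example (made-up numbers), with
                # fitOrder=3 a packed array [0, 1e4, 2e4, 0, -5, -20]
                # encodes a -20 DN correction at 2e4 DN.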
                polyFit = [0.0]
                polyFitErr = [0.0]
                chiSq = np.nan
            else:
                polyFit = [0.0]
                polyFitErr = [0.0]
                chiSq = np.nan
                linearityFit = [0.0]

            linearizer.linearityType[ampName] = self.config.linearityType
            linearizer.linearityCoeffs[ampName] = np.array(linearityFit)
            linearizer.linearityBBox[ampName] = amp.getBBox()
            linearizer.fitParams[ampName] = np.array(polyFit)
            linearizer.fitParamsErr[ampName] = np.array(polyFitErr)
            linearizer.fitChiSq[ampName] = chiSq
            linearizer.linearFit[ampName] = linearFit

            residuals = fitOrdinate - modelOrdinate
            # The residuals only include flux values which are
            # not masked out. To be able to access this later and
            # associate it with the PTC flux values, we need to
            # fill out the residuals with NaNs where the flux
            # value is masked.

            # First convert mask to a composite of the two masks:
            mask[mask] = fluxMask
            fullResiduals = np.full(len(mask), np.nan)
            fullResiduals[mask] = residuals
            linearizer.fitResiduals[ampName] = fullResiduals

            image = afwImage.ImageF(len(inputOrdinate), 1)
            image.getArray()[:, :] = inputOrdinate
            linearizeFunction = linearizer.getLinearityTypeByName(linearizer.linearityType[ampName])
            linearizeFunction()(image,
                                **{'coeffs': linearizer.linearityCoeffs[ampName],
                                   'table': linearizer.tableData,
                                   'log': linearizer.log})
            linearizeModel = image.getArray()[0, :]

            self.debugFit('solution', inputOrdinate[fluxMask], linearOrdinate,
                          linearizeModel[fluxMask], None, ampName)

        linearizer.hasLinearity = True
        linearizer.validate()
        linearizer.updateMetadata(camera=camera, detector=detector, filterName='NONE')
        linearizer.updateMetadata(setDate=True, setCalibId=True)
        provenance = IsrProvenance(calibType='linearizer')

        return pipeBase.Struct(
            outputLinearizer=linearizer,
            outputProvenance=provenance,
        )

    def fillBadAmp(self, linearizer, fitOrder, inputPtc, amp):
        """Fill the linearizer with empty values if the amp is
        non-functional.
        """
        ampName = amp.getName()
        nEntries = 1
        pEntries = 1
        if self.config.linearityType in ['Polynomial']:
            nEntries = fitOrder + 1
            pEntries = fitOrder + 1
        elif self.config.linearityType in ['Spline']:
            nEntries = fitOrder * 2
        elif self.config.linearityType in ['Squared', 'None']:
            nEntries = 1
            pEntries = fitOrder + 1
        elif self.config.linearityType in ['LookupTable']:
            nEntries = 2
            pEntries = fitOrder + 1

        linearizer.linearityType[ampName] = "None"
        linearizer.linearityCoeffs[ampName] = np.zeros(nEntries)
        linearizer.linearityBBox[ampName] = amp.getBBox()
        linearizer.fitParams[ampName] = np.zeros(pEntries)
        linearizer.fitParamsErr[ampName] = np.zeros(pEntries)
        linearizer.fitChiSq[ampName] = np.nan
        linearizer.fitResiduals[ampName] = np.zeros(len(inputPtc.expIdMask[ampName]))
        linearizer.linearFit[ampName] = np.zeros(2)
        return linearizer

    def debugFit(self, stepname, xVector, yVector, yModel, mask, ampName):
        """Debug method for linearity fitting.

        Parameters
        ----------
        stepname : `str`
            A label to use to check if we care to debug at a given
            line of code.
        xVector : `numpy.array`, (N,)
            The values to use as the independent variable in the
            linearity fit.
        yVector : `numpy.array`, (N,)
            The values to use as the dependent variable in the
            linearity fit.
        yModel : `numpy.array`, (N,)
            The values to use as the linearized result.
        mask : `numpy.array` [`bool`], (N,), optional
            A mask to indicate which entries of ``xVector`` and
            ``yVector`` to keep.
        ampName : `str`
            Amplifier name to lookup linearity correction values.
        """
        frame = getDebugFrame(self._display, stepname)
        if frame:
            import matplotlib.pyplot as plt
            fig, axs = plt.subplots(2)

            if mask is None:
                mask = np.ones_like(xVector, dtype=bool)

            fig.suptitle(f"{stepname} {ampName} {self.config.linearityType}")
            if stepname == 'linearFit':
                axs[0].set_xlabel("Input Abscissa (time or mondiode)")
                axs[0].set_ylabel("Input Ordinate (flux)")
                axs[1].set_xlabel("Linear Ordinate (linear flux)")
                axs[1].set_ylabel("Flux Difference: (input - linear)")
            elif stepname in ('polyFit', 'splineFit'):
                axs[0].set_xlabel("Linear Abscissa (linear flux)")
                axs[0].set_ylabel("Input Ordinate (flux)")
                axs[1].set_xlabel("Linear Ordinate (linear flux)")
                axs[1].set_ylabel("Flux Difference: (input - full model fit)")
            elif stepname == 'solution':
                axs[0].set_xlabel("Input Abscissa (time or mondiode)")
                axs[0].set_ylabel("Linear Ordinate (linear flux)")
                axs[1].set_xlabel("Model flux (linear flux)")
                axs[1].set_ylabel("Flux Difference: (linear - model)")

            axs[0].set_yscale('log')
            axs[0].set_xscale('log')
            axs[0].scatter(xVector, yVector)
            axs[0].scatter(xVector[~mask], yVector[~mask], c='red', marker='x')
            axs[1].set_xscale('log')

            axs[1].scatter(yModel, yVector[mask] - yModel)
            fig.show()

            prompt = "Press Enter or c to continue [chpx]..."
            while True:
                ans = input(prompt).lower()
                if ans in ("", " ", "c",):
                    break
                elif ans in ("p", ):
                    import pdb
                    pdb.set_trace()
                elif ans in ("h", ):
                    print("[h]elp [c]ontinue [p]db")
                elif ans in ('x', ):
                    exit()
            plt.close()
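
# Illustrative standalone-use sketch (hypothetical; this task normally runs
# inside a calibration pipeline and receives its inputs from the Butler):
#
#     config = LinearitySolveTask.ConfigClass()
#     config.linearityType = "Polynomial"
#     task = LinearitySolveTask(config=config)
#     results = task.run(inputPtc=ptc, dummy=[exp], camera=camera,
#                        inputDims={"instrument": "LSSTCam", "detector": 42})
#     linearizer = results.outputLinearizer
#
# Here ``ptc``, ``exp``, and ``camera`` are assumed to be a
# PhotonTransferCurveDataset, an exposure, and a Camera loaded elsewhere.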