Coverage for python / lsst / cp / pipe / cpLinearitySolve.py: 10%

856 statements  

« prev     ^ index     » next       coverage.py v7.13.5, created at 2026-04-22 09:07 +0000

1# This file is part of cp_pipe. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (https://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# This program is free software: you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation, either version 3 of the License, or 

12# (at your option) any later version. 

13# 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18# 

19# You should have received a copy of the GNU General Public License 

20# along with this program. If not, see <https://www.gnu.org/licenses/>. 

21# 

22 

23__all__ = [ 

24 "LinearitySolveTask", 

25 "LinearitySolveConfig", 

26 "LinearityDoubleSplineSolveTask", 

27 "LinearityDoubleSplineSolveConfig", 

28] 

29 

30import copy 

31import logging 

32import numpy as np 

33import esutil 

34from scipy.stats import median_abs_deviation 

35from scipy.interpolate import Akima1DInterpolator 

36 

37import lsst.afw.image as afwImage 

38import lsst.pipe.base as pipeBase 

39import lsst.pipe.base.connectionTypes as cT 

40import lsst.pex.config as pexConfig 

41 

42from lsstDebug import getDebugFrame 

43from lsst.ip.isr import (Linearizer, IsrProvenance) 

44 

45from .utils import (funcPolynomial, irlsFit, AstierSplineLinearityFitter, 

46 extractCalibDate) 

47 

48 

def ptcLookup(datasetType, registry, quantumDataId, collections):
    """Butler lookup function to allow PTC to be found.

    Parameters
    ----------
    datasetType : `lsst.daf.butler.DatasetType`
        Dataset type to look up.
    registry : `lsst.daf.butler.Registry`
        Registry for the data repository being searched.
    quantumDataId : `lsst.daf.butler.DataCoordinate`
        Data ID for the quantum of the task this dataset will be passed to.
        This must include an "instrument" key, and should also include any
        keys that are present in ``datasetType.dimensions``. If it has an
        ``exposure`` or ``visit`` key, that's a sign that this function is
        not actually needed, as those come with the temporal information that
        would allow a real validity-range lookup.
    collections : `lsst.daf.butler.registry.CollectionSearch`
        Collections passed by the user when generating a QuantumGraph. Ignored
        by this function (see notes below).

    Returns
    -------
    refs : `list` [ `DatasetRef` ]
        A zero- or single-element list containing the matching
        dataset, if one was found.

    Raises
    ------
    RuntimeError
        Raised if more than one PTC reference is found.
    """
    refs = list(registry.queryDatasets(datasetType, dataId=quantumDataId, collections=collections,
                                       findFirst=False))
    if len(refs) >= 2:
        # BUG FIX: previously the exception was constructed but never
        # raised, so ambiguous PTC lookups were silently allowed through.
        raise RuntimeError("Too many PTC connections found. Incorrect collections supplied?")

    return refs

86 

87 

88class LinearitySolveConnections(pipeBase.PipelineTaskConnections, 

89 dimensions=("instrument", "detector")): 

90 dummy = cT.Input( 

91 name="raw", 

92 doc="Dummy exposure.", 

93 storageClass='Exposure', 

94 dimensions=("instrument", "exposure", "detector"), 

95 multiple=True, 

96 deferLoad=True, 

97 ) 

98 

99 camera = cT.PrerequisiteInput( 

100 name="camera", 

101 doc="Camera Geometry definition.", 

102 storageClass="Camera", 

103 dimensions=("instrument", ), 

104 isCalibration=True, 

105 ) 

106 

107 inputPtc = cT.PrerequisiteInput( 

108 name="ptc", 

109 doc="Input PTC dataset.", 

110 storageClass="PhotonTransferCurveDataset", 

111 dimensions=("instrument", "detector"), 

112 isCalibration=True, 

113 lookupFunction=ptcLookup, 

114 ) 

115 

116 inputLinearizerPtc = cT.Input( 

117 name="linearizerPtc", 

118 doc="Input linearizer PTC dataset.", 

119 storageClass="PhotonTransferCurveDataset", 

120 dimensions=("instrument", "detector"), 

121 isCalibration=True, 

122 ) 

123 

124 inputPhotodiodeCorrection = cT.Input( 

125 name="pdCorrection", 

126 doc="Input photodiode correction.", 

127 storageClass="IsrCalib", 

128 dimensions=("instrument", ), 

129 isCalibration=True, 

130 ) 

131 

132 inputNormalization = cT.Input( 

133 name="cpLinearizerPtcNormalization", 

134 doc="Focal-plane normalization table.", 

135 storageClass="ArrowAstropy", 

136 dimensions=("instrument",), 

137 isCalibration=True, 

138 ) 

139 

140 outputLinearizer = cT.Output( 

141 name="linearity", 

142 doc="Output linearity measurements.", 

143 storageClass="Linearizer", 

144 dimensions=("instrument", "detector"), 

145 isCalibration=True, 

146 ) 

147 

148 def __init__(self, *, config=None): 

149 super().__init__(config=config) 

150 

151 if not config.applyPhotodiodeCorrection: 

152 del self.inputPhotodiodeCorrection 

153 

154 if config.useLinearizerPtc: 

155 del self.inputPtc 

156 else: 

157 del self.inputLinearizerPtc 

158 

159 if not config.useFocalPlaneNormalization: 

160 del self.inputNormalization 

161 

162 

163class LinearitySolveConfig(pipeBase.PipelineTaskConfig, 

164 pipelineConnections=LinearitySolveConnections): 

165 """Configuration for solving the linearity from PTC dataset. 

166 """ 

167 linearityType = pexConfig.ChoiceField( 

168 dtype=str, 

169 doc="Type of linearizer to construct.", 

170 default="Squared", 

171 allowed={ 

172 "LookupTable": "Create a lookup table solution.", 

173 "Polynomial": "Create an arbitrary polynomial solution.", 

174 "Squared": "Create a single order squared solution.", 

175 "Spline": "Create a spline based solution.", 

176 "None": "Create a dummy solution.", 

177 } 

178 ) 

179 polynomialOrder = pexConfig.RangeField( 

180 dtype=int, 

181 doc="Degree of polynomial to fit. Must be at least 2.", 

182 default=3, 

183 min=2, 

184 ) 

185 splineKnots = pexConfig.Field( 

186 dtype=int, 

187 doc="Number of spline knots to use in fit.", 

188 default=10, 

189 ) 

190 

191 trimmedState = pexConfig.Field( 

192 dtype=bool, 

193 doc="Will this linearizer be used on trimmed data?", 

194 default=True, 

195 ) 

196 

197 maxLookupTableAdu = pexConfig.Field( 

198 dtype=int, 

199 doc="Maximum DN value for a LookupTable linearizer.", 

200 default=2**18, 

201 ) 

202 maxLinearAdu = pexConfig.Field( 

203 dtype=float, 

204 doc="Maximum adu value to use to estimate linear term; not used with spline fits.", 

205 default=20000.0, 

206 ) 

207 minLinearAdu = pexConfig.Field( 

208 dtype=float, 

209 doc="Minimum adu value to use to estimate linear term.", 

210 default=30.0, 

211 ) 

212 nSigmaClipLinear = pexConfig.Field( 

213 dtype=float, 

214 doc="Maximum deviation from linear solution for Poissonian noise.", 

215 default=5.0, 

216 ) 

217 ignorePtcMask = pexConfig.Field( 

218 dtype=bool, 

219 doc="Ignore the expIdMask set by the PTC solver?", 

220 default=False, 

221 deprecated="This field is no longer used. Will be removed after v28.", 

222 ) 

223 maxFracLinearityDeviation = pexConfig.Field( 

224 dtype=float, 

225 doc="Maximum fraction deviation from raw linearity to compute " 

226 "linearityTurnoff and linearityMaxSignal.", 

227 # TODO: DM-46811 investigate if this can be raised to 0.05. 

228 default=0.01, 

229 ) 

230 minSignalFitLinearityTurnoff = pexConfig.Field( 

231 dtype=float, 

232 doc="Minimum signal to compute raw linearity slope for linearityTurnoff.", 

233 default=1000.0, 

234 ) 

235 usePhotodiode = pexConfig.Field( 

236 dtype=bool, 

237 doc="Use the photodiode info instead of the raw expTimes?", 

238 default=False, 

239 ) 

240 applyPhotodiodeCorrection = pexConfig.Field( 

241 dtype=bool, 

242 doc="Calculate and apply a correction to the photodiode readings?", 

243 default=False, 

244 ) 

245 minPhotodiodeCurrent = pexConfig.Field( 

246 dtype=float, 

247 doc="Minimum value to trust photodiode signals.", 

248 default=0.0, 

249 ) 

250 doAutoGrouping = pexConfig.Field( 

251 dtype=bool, 

252 doc="Do automatic group detection? Cannot be True if splineGroupingColumn is also set. " 

253 "The automatic group detection will use the ratio of signal to exposure time (if " 

254 "autoGroupingUseExptime is True) or photodiode (if False) to determine which " 

255 "flat pairs were taken with different illumination settings.", 

256 default=False, 

257 ) 

258 autoGroupingUseExptime = pexConfig.Field( 

259 dtype=bool, 

260 doc="Use exposure time to determine automatic grouping. Used if doAutoGrouping=True.", 

261 default=True, 

262 ) 

263 autoGroupingThreshold = pexConfig.Field( 

264 dtype=float, 

265 doc="Minimum relative jump from sorted conversion values to determine a group.", 

266 default=0.1, 

267 ) 

268 autoGroupingMaxSignalFraction = pexConfig.Field( 

269 dtype=float, 

270 doc="Only do auto-grouping when the signal is this fraction of the maximum signal. " 

271 "All exposures with signal higher than this threshold will be put into the " 

272 "largest signal group. This config is needed if the input PTC goes beyond " 

273 "the linearity turnoff.", 

274 default=0.9, 

275 ) 

276 splineGroupingColumn = pexConfig.Field( 

277 dtype=str, 

278 doc="Column to use for grouping together points for Spline mode, to allow " 

279 "for different proportionality constants. If None, then grouping will " 

280 "only be done if doAutoGrouping is True.", 

281 default=None, 

282 optional=True, 

283 ) 

284 splineGroupingMinPoints = pexConfig.Field( 

285 dtype=int, 

286 doc="Minimum number of linearity points to allow grouping together points " 

287 "for Spline mode with splineGroupingColumn. This configuration is here " 

288 "to prevent misuse of the Spline code to avoid over-fitting.", 

289 default=100, 

290 ) 

291 splineFitMinIter = pexConfig.Field( 

292 dtype=int, 

293 doc="Minimum number of iterations for spline fit.", 

294 default=3, 

295 ) 

296 splineFitMaxIter = pexConfig.Field( 

297 dtype=int, 

298 doc="Maximum number of iterations for spline fit.", 

299 default=20, 

300 ) 

301 splineFitMaxRejectionPerIteration = pexConfig.Field( 

302 dtype=int, 

303 doc="Maximum number of rejections per iteration for spline fit.", 

304 default=5, 

305 ) 

306 doSplineFitOffset = pexConfig.Field( 

307 dtype=bool, 

308 doc="Fit a scattered light offset in the spline fit.", 

309 default=True, 

310 ) 

311 doSplineFitWeights = pexConfig.Field( 

312 dtype=bool, 

313 doc="Fit linearity weight parameters in the spline fit.", 

314 default=False, 

315 ) 

316 splineFitWeightParsStart = pexConfig.ListField( 

317 dtype=float, 

318 doc="Starting parameters for weight fit, if doSplineFitWeights=True. " 

319 "Parameters are such that sigma = sqrt(par[0]**2. + par[1]**2./mu)." 

320 "If doSplineFitWeights=False then these are used as-is; otherwise " 

321 "they are used as the initial values for fitting these parameters.", 

322 length=2, 

323 default=[1.0, 0.0], 

324 ) 

325 doSplineFitTemperature = pexConfig.Field( 

326 dtype=bool, 

327 doc="Fit temperature coefficient in spline fit?", 

328 default=False, 

329 ) 

330 splineFitTemperatureColumn = pexConfig.Field( 

331 dtype=str, 

332 doc="Name of the temperature column to use when fitting temperature " 

333 "coefficients in spline fit; this must not be None if " 

334 "doSplineFitTemperature is True.", 

335 default=None, 

336 optional=True, 

337 ) 

338 doSplineFitTemporal = pexConfig.Field( 

339 dtype=bool, 

340 doc="Fit a linear temporal parameter coefficient in spline fit?", 

341 default=False, 

342 ) 

343 useLinearizerPtc = pexConfig.Field( 

344 dtype=bool, 

345 doc="Use a linearizer ptc in a single pipeline?", 

346 default=False, 

347 ) 

348 useFocalPlaneNormalization = pexConfig.Field( 

349 dtype=bool, 

350 doc="Use focal-plane normalization in addition to/instead of photodiode? " 

351 "(Only used with spline fitting).", 

352 default=False, 

353 ) 

354 

355 def validate(self): 

356 super().validate() 

357 

358 if self.doSplineFitTemperature and self.splineFitTemperatureColumn is None: 

359 raise ValueError("Must set splineFitTemperatureColumn if doSplineFitTemperature is True.") 

360 

361 if self.doAutoGrouping and self.splineGroupingColumn is not None: 

362 raise ValueError("Must not set doAutoGrouping and also splineGroupingColumn") 

363 if self.doAutoGrouping: 

364 if not self.autoGroupingUseExptime and not self.usePhotodiode: 

365 raise ValueError("If doAutoGrouping is True and autoGroupingUseExptime is False, then " 

366 "usePhotodiode must be True.") 

367 

368 

369class LinearitySolveTask(pipeBase.PipelineTask): 

370 """Fit the linearity from the PTC dataset. 

371 """ 

372 

373 ConfigClass = LinearitySolveConfig 

374 _DefaultName = 'cpLinearitySolve' 

375 

376 def runQuantum(self, butlerQC, inputRefs, outputRefs): 

377 """Ensure that the input and output dimensions are passed along. 

378 

379 Parameters 

380 ---------- 

381 butlerQC : `lsst.daf.butler.QuantumContext` 

382 Butler to operate on. 

383 inputRefs : `lsst.pipe.base.InputQuantizedConnection` 

384 Input data refs to load. 

385 ouptutRefs : `lsst.pipe.base.OutputQuantizedConnection` 

386 Output data refs to persist. 

387 """ 

388 inputs = butlerQC.get(inputRefs) 

389 

390 # Use the dimensions to set calib/provenance information. 

391 

392 if self.config.useLinearizerPtc: 

393 inputs["inputDims"] = dict(inputRefs.inputLinearizerPtc.dataId.required) 

394 inputs["inputPtc"] = inputs["inputLinearizerPtc"] 

395 del inputs["inputLinearizerPtc"] 

396 else: 

397 inputs["inputDims"] = dict(inputRefs.inputPtc.dataId.required) 

398 

399 # Add calibration provenance info to header. 

400 kwargs = dict() 

401 reference = getattr(inputRefs, "inputPtc", None) 

402 

403 if reference is not None and hasattr(reference, "run"): 

404 runKey = "PTC_RUN" 

405 runValue = reference.run 

406 idKey = "PTC_UUID" 

407 idValue = str(reference.id) 

408 dateKey = "PTC_DATE" 

409 calib = inputs.get("inputPtc", None) 

410 dateValue = extractCalibDate(calib) 

411 

412 kwargs[runKey] = runValue 

413 kwargs[idKey] = idValue 

414 kwargs[dateKey] = dateValue 

415 

416 self.log.info("Using " + str(reference.run)) 

417 

418 outputs = self.run(**inputs) 

419 outputs.outputLinearizer.updateMetadata(setDate=False, **kwargs) 

420 

421 butlerQC.put(outputs, outputRefs) 

422 

423 def run(self, inputPtc, dummy, camera, inputDims, 

424 inputPhotodiodeCorrection=None, inputNormalization=None): 

425 """Fit non-linearity to PTC data, returning the correct Linearizer 

426 object. 

427 

428 Parameters 

429 ---------- 

430 inputPtc : `lsst.ip.isr.PtcDataset` 

431 Pre-measured PTC dataset. 

432 dummy : `lsst.afw.image.Exposure` 

433 The exposure used to select the appropriate PTC dataset. 

434 In almost all circumstances, one of the input exposures 

435 used to generate the PTC dataset is the best option. 

436 camera : `lsst.afw.cameraGeom.Camera` 

437 Camera geometry. 

438 inputDims : `lsst.daf.butler.DataCoordinate` or `dict` 

439 DataIds to use to populate the output calibration. 

440 inputPhotodiodeCorrection : 

441 `lsst.ip.isr.PhotodiodeCorrection`, optional 

442 Pre-measured photodiode correction used in the case when 

443 applyPhotodiodeCorrection=True. 

444 inputNormalization : `astropy.table.Table`, optional 

445 Focal plane normalization table to use if 

446 useFocalPlaneNormalization is True. 

447 

448 Returns 

449 ------- 

450 results : `lsst.pipe.base.Struct` 

451 The results struct containing: 

452 

453 ``outputLinearizer`` 

454 Final linearizer calibration (`lsst.ip.isr.Linearizer`). 

455 ``outputProvenance`` 

456 Provenance data for the new calibration 

457 (`lsst.ip.isr.IsrProvenance`). 

458 

459 Notes 

460 ----- 

461 This task currently fits only polynomial-defined corrections, 

462 where the correction coefficients are defined such that: 

463 :math:`corrImage = uncorrImage + \\sum_i c_i uncorrImage^(2 + i)` 

464 These :math:`c_i` are defined in terms of the direct polynomial fit: 

465 :math:`meanVector ~ P(x=timeVector) = \\sum_j k_j x^j` 

466 such that :math:`c_(j-2) = -k_j/(k_1^j)` in units of DN^(1-j) (c.f., 

467 Eq. 37 of 2003.05978). The `config.polynomialOrder` or 

468 `config.splineKnots` define the maximum order of :math:`x^j` to fit. 

469 As :math:`k_0` and :math:`k_1` are degenerate with bias level and gain, 

470 they are not included in the non-linearity correction. 

471 """ 

472 if len(dummy) == 0: 

473 self.log.warning("No dummy exposure found.") 

474 

475 detector = camera[inputDims['detector']] 

476 if self.config.linearityType == 'LookupTable': 

477 table = np.zeros((len(detector), self.config.maxLookupTableAdu), dtype=np.float32) 

478 tableIndex = 0 

479 else: 

480 table = None 

481 tableIndex = None # This will fail if we increment it. 

482 

483 # Initialize the linearizer. 

484 linearizer = Linearizer(detector=detector, table=table, log=self.log) 

485 linearizer.updateMetadataFromExposures([inputPtc]) 

486 if self.config.usePhotodiode and self.config.applyPhotodiodeCorrection: 

487 abscissaCorrections = inputPhotodiodeCorrection.abscissaCorrections 

488 

489 groupingValues = _determineInputGroups( 

490 inputPtc, 

491 self.config.doAutoGrouping, 

492 self.config.autoGroupingUseExptime, 

493 self.config.autoGroupingMaxSignalFraction, 

494 self.config.autoGroupingThreshold, 

495 self.config.splineGroupingColumn, 

496 self.config.minPhotodiodeCurrent, 

497 ) 

498 

499 if self.config.linearityType == 'Spline': 

500 if self.config.doSplineFitTemperature: 

501 if self.config.splineFitTemperatureColumn not in inputPtc.auxValues: 

502 raise ValueError("Config requests fitting temperature coefficient for " 

503 f"{self.config.splineFitTemperatureColumn} but this column " 

504 "is not available in inputPtc.auxValues.") 

505 temperatureValues = inputPtc.auxValues[self.config.splineFitTemperatureColumn] 

506 else: 

507 temperatureValues = None 

508 

509 # We set this to have a value to fill the bad amps. 

510 fitOrder = self.config.splineKnots 

511 else: 

512 fitOrder = self.config.polynomialOrder 

513 

514 for i, amp in enumerate(detector): 

515 ampName = amp.getName() 

516 

517 # Save the input gains 

518 linearizer.inputGain[ampName] = inputPtc.gain[ampName] 

519 

520 if ampName in inputPtc.badAmps: 

521 linearizer = self.fillBadAmp(linearizer, fitOrder, inputPtc, amp) 

522 self.log.warning("Amp %s in detector %s has no usable PTC information. Skipping!", 

523 ampName, detector.getName()) 

524 continue 

525 

526 # Check for too few points. 

527 if self.config.linearityType == "Spline" \ 

528 and self.config.splineGroupingColumn is not None \ 

529 and len(inputPtc.inputExpIdPairs[ampName]) < self.config.splineGroupingMinPoints: 

530 raise RuntimeError( 

531 "The input PTC has too few points to reliably run with PD grouping. " 

532 "The recommended course of action is to set splineGroupingColumn to None. " 

533 "If you really know what you are doing, you may reduce " 

534 "config.splineGroupingMinPoints.") 

535 

536 # We start with all finite values. 

537 mask = np.isfinite(inputPtc.rawMeans[ampName]) 

538 

539 if self.config.linearityType == "Spline" and temperatureValues is not None: 

540 mask &= np.isfinite(temperatureValues) 

541 

542 if self.config.usePhotodiode: 

543 modExpTimes = inputPtc.photoCharges[ampName].copy() 

544 # Make sure any exposure pairs that do not have photodiode data 

545 # are masked. 

546 mask[~np.isfinite(modExpTimes)] = False 

547 

548 # Make sure any photodiode measurements below the configured 

549 # minimum are masked. 

550 mask[modExpTimes < self.config.minPhotodiodeCurrent] = False 

551 

552 # Get the photodiode correction. 

553 if self.config.applyPhotodiodeCorrection: 

554 for j, pair in enumerate(inputPtc.inputExpIdPairs[ampName]): 

555 try: 

556 correction = abscissaCorrections[str(pair)] 

557 except KeyError: 

558 correction = 0.0 

559 modExpTimes[j] += correction 

560 

561 inputAbscissa = modExpTimes 

562 else: 

563 inputAbscissa = inputPtc.rawExpTimes[ampName].copy() 

564 

565 # Normalize if configured. 

566 inputNorm = np.ones_like(inputAbscissa) 

567 if self.config.useFocalPlaneNormalization: 

568 exposures = np.asarray(inputPtc.inputExpIdPairs[ampName])[:, 0] 

569 a, b = esutil.numpy_util.match(exposures, inputNormalization["exposure"]) 

570 inputNorm[a] = inputNormalization["normalization"][b] 

571 inputAbscissa *= inputNorm 

572 

573 # Compute linearityTurnoff and linearitySignalMax. 

574 turnoffMask = inputPtc.expIdMask[ampName].copy() 

575 turnoffMask &= mask 

576 

577 turnoffIndex, turnoff, maxSignal, _ = _computeTurnoffAndMax( 

578 inputAbscissa, 

579 inputPtc.rawMeans[ampName], 

580 turnoffMask, 

581 groupingValues, 

582 ampName=ampName, 

583 minSignalFitLinearityTurnoff=self.config.minSignalFitLinearityTurnoff, 

584 maxFracLinearityDeviation=self.config.maxFracLinearityDeviation, 

585 log=self.log, 

586 ) 

587 

588 if np.isnan(turnoff): 

589 # This is a bad amp, with no linearizer. 

590 linearizer = self.fillBadAmp(linearizer, fitOrder, inputPtc, amp) 

591 self.log.warning("Amp %s in detector %s has no usable linearizer information. Skipping!", 

592 ampName, detector.getName()) 

593 continue 

594 

595 linearizer.linearityTurnoff[ampName] = turnoff 

596 linearizer.linearityMaxSignal[ampName] = maxSignal 

597 

598 inputOrdinate = inputPtc.rawMeans[ampName].copy() 

599 

600 linearizer.inputAbscissa[ampName] = inputAbscissa.copy() 

601 linearizer.inputOrdinate[ampName] = inputOrdinate.copy() 

602 linearizer.inputGroupingIndex[ampName] = groupingValues.copy() 

603 linearizer.inputNormalization[ampName] = inputNorm.copy() 

604 

605 if self.config.linearityType != 'Spline': 

606 mask &= (inputOrdinate < self.config.maxLinearAdu) 

607 else: 

608 # For spline fits, cut above the turnoff. 

609 self.log.info("Using linearityTurnoff of %.4f adu for amplifier %s", turnoff, ampName) 

610 extraMask = np.ones(len(inputOrdinate), dtype=bool) 

611 extraMask[turnoffIndex + 1:] = False 

612 mask &= extraMask 

613 

614 mask &= (inputOrdinate > self.config.minLinearAdu) 

615 

616 # Initial value for the mask. 

617 linearizer.inputMask[ampName] = mask.copy() 

618 

619 if mask.sum() < 2: 

620 linearizer = self.fillBadAmp(linearizer, fitOrder, inputPtc, amp) 

621 self.log.warning("Amp %s in detector %s has not enough points for fit. Skipping!", 

622 ampName, detector.getName()) 

623 continue 

624 

625 if self.config.linearityType != 'Spline': 

626 linearFit, linearFitErr, chiSq, weights = irlsFit([0.0, 100.0], inputAbscissa[mask], 

627 inputOrdinate[mask], funcPolynomial) 

628 

629 # Convert this proxy-to-flux fit into an expected linear flux 

630 linearOrdinate = linearFit[0] + linearFit[1] * inputAbscissa 

631 # Exclude low end outliers. 

632 # This is compared to the original values. 

633 threshold = self.config.nSigmaClipLinear * np.sqrt(abs(inputOrdinate)) 

634 

635 mask[np.abs(inputOrdinate - linearOrdinate) >= threshold] = False 

636 

637 linearizer.inputMask[ampName] = mask.copy() 

638 

639 if mask.sum() < 2: 

640 linearizer = self.fillBadAmp(linearizer, fitOrder, inputPtc, amp) 

641 self.log.warning("Amp %s in detector %s has not enough points in linear ordinate. " 

642 "Skipping!", ampName, detector.getName()) 

643 continue 

644 

645 self.debugFit('linearFit', inputAbscissa, inputOrdinate, linearOrdinate, mask, ampName) 

646 

647 # Do fits 

648 if self.config.linearityType in ['Polynomial', 'Squared', 'LookupTable']: 

649 polyFit = np.zeros(fitOrder + 1) 

650 polyFit[1] = 1.0 

651 polyFit, polyFitErr, chiSq, weights = irlsFit(polyFit, linearOrdinate[mask], 

652 inputOrdinate[mask], funcPolynomial) 

653 

654 # Truncate the polynomial fit to the squared term. 

655 k1 = polyFit[1] 

656 linearityCoeffs = np.array( 

657 [-coeff/(k1**order) for order, coeff in enumerate(polyFit)] 

658 )[2:] 

659 significant = np.where(np.abs(linearityCoeffs) > 1e-10) 

660 self.log.info("Significant polynomial fits: %s", significant) 

661 

662 modelOrdinate = funcPolynomial(polyFit, linearOrdinate) 

663 

664 self.debugFit( 

665 'polyFit', 

666 inputAbscissa[mask], 

667 inputOrdinate[mask], 

668 modelOrdinate[mask], 

669 None, 

670 ampName, 

671 ) 

672 

673 if self.config.linearityType == 'Squared': 

674 # The first term is the squared term. 

675 linearityCoeffs = linearityCoeffs[0: 1] 

676 elif self.config.linearityType == 'LookupTable': 

677 # Use linear part to get time at which signal is 

678 # maxAduForLookupTableLinearizer DN 

679 tMax = (self.config.maxLookupTableAdu - polyFit[0])/polyFit[1] 

680 timeRange = np.linspace(0, tMax, self.config.maxLookupTableAdu) 

681 signalIdeal = polyFit[0] + polyFit[1]*timeRange 

682 signalUncorrected = funcPolynomial(polyFit, timeRange) 

683 lookupTableRow = signalIdeal - signalUncorrected # LinearizerLookupTable has correction 

684 

685 linearizer.tableData[tableIndex, :] = lookupTableRow 

686 linearityCoeffs = np.array([tableIndex, 0]) 

687 tableIndex += 1 

688 elif self.config.linearityType in ['Spline']: 

689 # This is a spline fit with photodiode data based on a model 

690 # from Pierre Astier. 

691 # This model fits a spline with (optional) nuisance parameters 

692 # to allow for different linearity coefficients with different 

693 # photodiode settings. The minimization is a least-squares 

694 # fit with the residual of 

695 # Sum[(S(mu_i) + mu_i - O)/(k_j * D_i) - 1]**2, where S(mu_i) 

696 # is an Akima Spline function of mu_i, the observed flat-pair 

697 # mean; D_j is the photo-diode measurement corresponding to 

698 # that flat-pair; and k_j is a constant of proportionality 

699 # which is over index j as it is allowed to 

700 # be different based on different photodiode settings (e.g. 

701 # CCOBCURR); and O is a constant offset to allow for light 

702 # leaks (and is only fit if doSplineFitOffset=True). In 

703 # addition, if config.doSplineFitTemperature is True then 

704 # the fit will adjust mu such that 

705 # mu = mu_input*(1 + alpha*(T - T_ref)) 

706 # and T_ref is taken as the median temperature of the run. 

707 # Finally, if config.doSplineFitTemporal is True then the 

708 # fit will further adjust mu such that 

709 # mu = mu_input*(1 + beta*(mjd - mjd_ref)) 

710 # and mjd_ref is taken as the median mjd of the run. 

711 # Note that this fit is only valid if the input data 

712 # was taken with a randomly shuffled order of exposure 

713 # levels. 

714 

715 # The fit has additional constraints to ensure that the spline 

716 # goes through the (0, 0) point, as well as a normalization 

717 # condition so that the average of the spline over the full 

718 # range is 0. The normalization ensures that the spline only 

719 # fits deviations from linearity, rather than the linear 

720 # function itself which is degenerate with the gain. 

721 

722 # We want to make sure the top node is above the top value 

723 # to avoid edge issues with the top point. 

724 nodes = np.linspace(0.0, np.max(inputOrdinate[mask]) + 1.0, self.config.splineKnots) 

725 

726 if temperatureValues is not None: 

727 temperatureValuesScaled = temperatureValues - np.median(temperatureValues[mask]) 

728 else: 

729 temperatureValuesScaled = None 

730 

731 if self.config.doSplineFitTemporal: 

732 inputMjdScaled = inputPtc.inputExpPairMjdStartList[ampName].copy() 

733 inputMjdScaled -= np.nanmedian(inputMjdScaled) 

734 else: 

735 inputMjdScaled = None 

736 

737 fitter = AstierSplineLinearityFitter( 

738 nodes, 

739 groupingValues, 

740 inputAbscissa, 

741 inputOrdinate, 

742 mask=mask, 

743 log=self.log, 

744 fit_offset=self.config.doSplineFitOffset, 

745 fit_weights=self.config.doSplineFitWeights, 

746 weight_pars_start=self.config.splineFitWeightParsStart, 

747 fit_temperature=self.config.doSplineFitTemperature, 

748 temperature_scaled=temperatureValuesScaled, 

749 max_signal_nearly_linear=inputPtc.ptcTurnoff[ampName], 

750 fit_temporal=self.config.doSplineFitTemporal, 

751 mjd_scaled=inputMjdScaled, 

752 ) 

753 p0 = fitter.estimate_p0() 

754 pars = fitter.fit( 

755 p0, 

756 min_iter=self.config.splineFitMinIter, 

757 max_iter=self.config.splineFitMaxIter, 

758 max_rejection_per_iteration=self.config.splineFitMaxRejectionPerIteration, 

759 n_sigma_clip=self.config.nSigmaClipLinear, 

760 ) 

761 

762 # Confirm that the first parameter is 0, and set it to 

763 # exactly zero. 

764 if not np.isclose(pars[0], 0): 

765 raise RuntimeError("Programmer error! First spline parameter must " 

766 "be consistent with zero.") 

767 pars[0] = 0.0 

768 

769 linearityChisq = fitter.compute_chisq_dof(pars) 

770 

771 linearityCoeffs = np.concatenate([nodes, pars[fitter.par_indices["values"]]]) 

772 linearFit = np.array([0.0, np.mean(pars[fitter.par_indices["groups"]])]) 

773 

774 # We must modify the inputOrdinate according to the 

775 # nuisance terms in the linearity fit for the residual 

776 # computation code to work properly. 

777 # The true mu (inputOrdinate) is given by 

778 # mu = mu_in * (1 + alpha*t_scale) * (1 + beta*mjd_scale) 

779 if self.config.doSplineFitTemperature: 

780 inputOrdinate *= (1.0 

781 + pars[fitter.par_indices["temperature_coeff"]]*temperatureValuesScaled) 

782 if self.config.doSplineFitTemporal: 

783 inputOrdinate *= (1.0 

784 + pars[fitter.par_indices["temporal_coeff"]]*inputMjdScaled) 

785 # We have to adjust the abscissa for the different groups. 

786 # This is because we need a corrected abscissa to get a 

787 # reasonable linear fit to look for residuals, particularly in 

788 # the case of significantly different signal-vs-photodiode or 

789 # signal-vs-exptime scalings for different groups. This then 

790 # becomes a multiplication by the relative scaling of the 

791 # different groups. 

792 for j, group_index in enumerate(fitter.group_indices): 

793 inputAbscissa[group_index] *= (pars[fitter.par_indices["groups"][j]] / linearFit[1]) 

794 # And remove the offset term. 

795 if self.config.doSplineFitOffset: 

796 inputOrdinate -= pars[fitter.par_indices["offset"]] 

797 

798 linearOrdinate = linearFit[1] * inputOrdinate 

799 # For the spline fit, reuse the "polyFit -> fitParams" 

800 # field to record the linear coefficients for the groups. 

801 # We additionally append the offset and weight_pars; 

802 # however these will be zero-length arrays if these were 

803 # not configured to be fit. 

804 polyFit = np.concatenate(( 

805 pars[fitter.par_indices["groups"]], 

806 pars[fitter.par_indices["offset"]], 

807 pars[fitter.par_indices["weight_pars"]], 

808 pars[fitter.par_indices["temperature_coeff"]], 

809 pars[fitter.par_indices["temporal_coeff"]], 

810 )) 

811 polyFitErr = np.zeros_like(polyFit) 

812 chiSq = linearityChisq 

813 

814 # Update mask based on what the fitter rejected. 

815 mask = fitter.mask 

816 

817 linearizer.inputMask[ampName] = mask.copy() 

818 else: 

819 polyFit = np.zeros(1) 

820 polyFitErr = np.zeros(1) 

821 chiSq = np.nan 

822 linearityCoeffs = np.zeros(1) 

823 

824 linearizer.linearityType[ampName] = self.config.linearityType 

825 linearizer.linearityCoeffs[ampName] = linearityCoeffs 

826 if self.config.trimmedState: 

827 linearizer.linearityBBox[ampName] = amp.getBBox() 

828 else: 

829 linearizer.linearityBBox[ampName] = amp.getRawBBox() 

830 linearizer.fitParams[ampName] = polyFit 

831 linearizer.fitParamsErr[ampName] = polyFitErr 

832 linearizer.fitChiSq[ampName] = chiSq 

833 linearizer.linearFit[ampName] = linearFit 

834 

835 image = afwImage.ImageF(len(inputOrdinate), 1) 

836 image.array[:, :] = inputOrdinate 

837 linearizeFunction = linearizer.getLinearityTypeByName(linearizer.linearityType[ampName]) 

838 linearizeFunction()( 

839 image, 

840 **{'coeffs': linearizer.linearityCoeffs[ampName], 

841 'table': linearizer.tableData, 

842 'log': linearizer.log} 

843 ) 

844 linearizeModel = image.array[0, :] 

845 

846 # The residuals that we record are the final residuals compared to 

847 # a linear model, after everything has been (properly?) linearized. 

848 if mask.sum() < 2: 

849 self.log.warning("Amp %s in detector %s has not enough points in linear ordinate " 

850 "for residuals. Skipping!", ampName, detector.getName()) 

851 residuals = np.full_like(linearizeModel, np.nan) 

852 residualsUnmasked = residuals.copy() 

853 else: 

854 postLinearFit, _, _, _ = irlsFit( 

855 linearFit, 

856 inputAbscissa[mask], 

857 linearizeModel[mask], 

858 funcPolynomial, 

859 ) 

860 # When computing residuals, we only care about the slope of 

861 # the postLinearFit and not the intercept. The intercept 

862 # itself depends on a possibly unknown zero in the abscissa 

863 # (often photodiode) which may have an arbitrary value. 

864 residuals = linearizeModel - (postLinearFit[1] * inputAbscissa) 

865 residualsUnmasked = residuals.copy() 

866 # We set masked residuals to nan. 

867 residuals[~mask] = np.nan 

868 

869 linearizer.fitResidualsUnmasked[ampName] = residualsUnmasked 

870 linearizer.fitResiduals[ampName] = residuals 

871 linearizer.fitResidualsModel[ampName] = linearizeModel.copy() 

872 

873 finite = np.isfinite(residuals) 

874 if finite.sum() == 0: 

875 sigmad = np.nan 

876 else: 

877 sigmad = median_abs_deviation(residuals[finite]/inputOrdinate[finite], scale="normal") 

878 linearizer.fitResidualsSigmaMad[ampName] = sigmad 

879 

880 self.debugFit( 

881 'solution', 

882 inputOrdinate[mask], 

883 linearOrdinate[mask], 

884 linearizeModel[mask], 

885 None, 

886 ampName, 

887 ) 

888 

889 self.fixupBadAmps(linearizer) 

890 

891 linearizer.hasLinearity = True 

892 linearizer.validate() 

893 linearizer.updateMetadata(camera=camera, detector=detector, filterName='NONE') 

894 linearizer.updateMetadata(setDate=True, setCalibId=True) 

895 linearizer.updateMetadataFromExposures([inputPtc]) 

896 provenance = IsrProvenance(calibType='linearizer') 

897 

898 return pipeBase.Struct( 

899 outputLinearizer=linearizer, 

900 outputProvenance=provenance, 

901 ) 

902 

def fillBadAmp(self, linearizer, fitOrder, inputPtc, amp):
    """Populate the linearizer with placeholder values for a bad amp.

    Parameters
    ----------
    linearizer : `lsst.ip.isr.Linearizer`
        Linearizer to fill in place.
    fitOrder : `int`
        Configured fit order; sets the placeholder array lengths.
    inputPtc : `lsst.ip.isr.PhotonTransferCurveDataset`
        Input PTC dataset; used only for the number of exposure pairs.
    amp : `lsst.afw.cameraGeom.Amplifier`
        Amplifier to fill.

    Returns
    -------
    linearizer : `lsst.ip.isr.Linearizer`
        The same linearizer, with placeholder entries for this amp.
    """
    ampName = amp.getName()

    # Placeholder array lengths depend on the configured linearity type.
    linearityType = self.config.linearityType
    nEntries, pEntries = 1, 1
    if linearityType == 'Polynomial':
        # We discard the first 2 entries in the polynomial.
        nEntries = pEntries = fitOrder - 1
    elif linearityType == 'Spline':
        nEntries = 2 * fitOrder
    elif linearityType in ('Squared', 'None'):
        nEntries, pEntries = 1, fitOrder + 1
    elif linearityType == 'LookupTable':
        nEntries, pEntries = 2, fitOrder + 1

    nPair = len(inputPtc.inputExpIdPairs[ampName])

    # Trimmed data uses the trimmed bounding box; otherwise the raw one.
    bbox = amp.getBBox() if self.config.trimmedState else amp.getRawBBox()

    linearizer.linearityType[ampName] = "None"
    linearizer.linearityCoeffs[ampName] = np.zeros(nEntries)
    linearizer.linearityBBox[ampName] = bbox
    linearizer.fitParams[ampName] = np.zeros(pEntries)
    linearizer.fitParamsErr[ampName] = np.zeros(pEntries)
    linearizer.fitChiSq[ampName] = np.nan
    linearizer.fitResiduals[ampName] = np.zeros(nPair)
    linearizer.fitResidualsSigmaMad[ampName] = np.nan
    linearizer.fitResidualsUnmasked[ampName] = np.zeros(nPair)
    linearizer.fitResidualsModel[ampName] = np.zeros(nPair)
    linearizer.linearFit[ampName] = np.zeros(2)
    linearizer.linearityTurnoff[ampName] = np.nan
    linearizer.linearityMaxSignal[ampName] = np.nan
    linearizer.inputMask[ampName] = np.zeros(nPair, dtype=np.bool_)
    linearizer.inputAbscissa[ampName] = np.zeros(nPair)
    linearizer.inputOrdinate[ampName] = np.zeros(nPair)
    linearizer.inputGroupingIndex[ampName] = np.zeros(nPair, dtype=np.int64)
    linearizer.inputNormalization[ampName] = np.ones(nPair)

    return linearizer

947 

def fixupBadAmps(self, linearizer):
    """Fix nan padding in bad amplifiers.

    Every amplifier must share a common ``fitParams`` length: bad amps
    (linearity type "None") are padded with zeros to match, and any
    other amp with a mismatched length is an error.

    Parameters
    ----------
    linearizer : `lsst.ip.isr.Linearizer`
    """
    # The longest fitParams array over all amps sets the common length.
    fitParamsMaxLen = max(
        (len(linearizer.fitParams[name]) for name in linearizer.ampNames),
        default=0,
    )

    for name in linearizer.ampNames:
        if linearizer.linearityType[name] == "None":
            # Bad amplifier: pad with zeros to the common length.
            linearizer.fitParams[name] = np.zeros(fitParamsMaxLen)
            linearizer.fitParamsErr[name] = np.zeros(fitParamsMaxLen)
        elif len(linearizer.fitParams[name]) != fitParamsMaxLen:
            raise RuntimeError("Linearity has mismatched fitParams; check code/data.")

967 

def debugFit(self, stepname, xVector, yVector, yModel, mask, ampName):
    """Debug method for linearity fitting.

    Parameters
    ----------
    stepname : `str`
        A label to use to check if we care to debug at a given
        line of code.
    xVector : `numpy.array`, (N,)
        The values to use as the independent variable in the
        linearity fit.
    yVector : `numpy.array`, (N,)
        The values to use as the dependent variable in the
        linearity fit.
    yModel : `numpy.array`, (N,)
        The values to use as the linearized result.
    mask : `numpy.array` [`bool`], (N,) , optional
        A mask to indicate which entries of ``xVector`` and
        ``yVector`` to keep.
    ampName : `str`
        Amplifier name to lookup linearity correction values.

    Notes
    -----
    Interactive: when a debug frame is enabled for ``stepname`` this
    displays matplotlib figures and blocks on terminal input until the
    user continues, drops into pdb, or exits the process.
    """
    # Only plot if a debug frame was requested for this step.
    frame = getDebugFrame(self._display, stepname)
    if frame:
        # Imported lazily so matplotlib is only needed when debugging.
        import matplotlib.pyplot as plt
        fig, axs = plt.subplots(2)

        # With no mask supplied, treat every point as kept.
        if mask is None:
            mask = np.ones_like(xVector, dtype=bool)

        fig.suptitle(f"{stepname} {ampName} {self.config.linearityType}")
        # Axis labels depend on which fitting stage is being inspected.
        if stepname == 'linearFit':
            axs[0].set_xlabel("Input Abscissa (time or mondiode)")
            axs[0].set_ylabel("Input Ordinate (flux)")
            axs[1].set_xlabel("Linear Ordinate (linear flux)")
            axs[1].set_ylabel("Flux Difference: (input - linear)")
        elif stepname in ('polyFit', 'splineFit'):
            axs[0].set_xlabel("Linear Abscissa (linear flux)")
            axs[0].set_ylabel("Input Ordinate (flux)")
            axs[1].set_xlabel("Linear Ordinate (linear flux)")
            axs[1].set_ylabel("Flux Difference: (input - full model fit)")
        elif stepname == 'solution':
            axs[0].set_xlabel("Input Abscissa (time or mondiode)")
            axs[0].set_ylabel("Linear Ordinate (linear flux)")
            axs[1].set_xlabel("Model flux (linear flux)")
            axs[1].set_ylabel("Flux Difference: (linear - model)")

        # Top panel: data on log-log axes, with rejected points marked.
        axs[0].set_yscale('log')
        axs[0].set_xscale('log')
        axs[0].scatter(xVector, yVector)
        axs[0].scatter(xVector[~mask], yVector[~mask], c='red', marker='x')
        # Bottom panel: residuals of the kept points against the model.
        axs[1].set_xscale('log')

        axs[1].scatter(yModel, yVector[mask] - yModel)
        fig.tight_layout()
        fig.show()

        # Block until the user dismisses the plot; 'p' opens pdb,
        # 'x' exits the process entirely.
        prompt = "Press Enter or c to continue [chpx]..."
        while True:
            ans = input(prompt).lower()
            if ans in ("", " ", "c",):
                break
            elif ans in ("p", ):
                import pdb
                pdb.set_trace()
            elif ans in ("h", ):
                print("[h]elp [c]ontinue [p]db")
            elif ans in ('x', ):
                exit()
        plt.close()

1038 

1039 

class LinearityDoubleSplineSolveConnections(
    pipeBase.PipelineTaskConnections,
    dimensions=("instrument", "detector"),
):
    """Connections for the double-spline linearity solve task.

    Inputs are the linearizer PTC, optional focal-plane normalization
    table, and tabulated binned exposure pairs; the output is the
    linearizer calibration.
    """

    # Deferred-load raw exposures; only used to satisfy quantum graph
    # generation, not read directly for data.
    dummy = cT.Input(
        name="raw",
        doc="Dummy exposure.",
        storageClass='Exposure',
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
        deferLoad=True,
    )
    camera = cT.PrerequisiteInput(
        name="camera",
        doc="Camera Geometry definition.",
        storageClass="Camera",
        dimensions=("instrument", ),
        isCalibration=True,
    )
    inputLinearizerPtc = cT.Input(
        name="linearizerPtc",
        doc="Input linearizer PTC dataset.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "detector"),
        isCalibration=True,
    )
    inputNormalization = cT.Input(
        name="cpLinearizerPtcNormalization",
        doc="Focal-plane normalization table.",
        storageClass="ArrowAstropy",
        dimensions=("instrument",),
        isCalibration=True,
    )
    inputBinnedImagesHandles = cT.Input(
        name="cpPtcPairBinned",
        doc="Tabulated binned exposure pairs.",
        storageClass="ArrowAstropy",
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
        deferLoad=True,
    )
    outputLinearizer = cT.Output(
        name="linearity",
        doc="Output linearity measurements.",
        storageClass="Linearizer",
        dimensions=("instrument", "detector"),
        isCalibration=True,
    )

    def __init__(self, *, config=None):
        super().__init__(config=config)

        # The normalization table is only an input when the config
        # requests focal-plane normalization.
        if not config.useFocalPlaneNormalization:
            del self.inputNormalization

1094 

1095 

class LinearityDoubleSplineSolveConfig(
    pipeBase.PipelineTaskConfig,
    pipelineConnections=LinearityDoubleSplineSolveConnections,
):
    """Configuration for the double-spline (relative + absolute)
    linearity solve task.
    """

    maxFracLinearityDeviation = pexConfig.Field(
        dtype=float,
        doc="Maximum fraction deviation from raw linearity to compute "
            "linearityTurnoff and linearityMaxSignal.",
        # TODO: DM-46811 investigate if this can be raised to 0.05.
        default=0.01,
    )
    minSignalFitLinearityTurnoff = pexConfig.Field(
        dtype=float,
        doc="Minimum signal to compute raw linearity slope for linearityTurnoff.",
        default=1000.0,
    )
    maxNoiseReference = pexConfig.Field(
        dtype=float,
        doc="Maximum read noise (e-) in the PTC for an amp to be considered as a reference.",
        default=12.0,
    )
    usePhotodiode = pexConfig.Field(
        dtype=bool,
        doc="Use the photodiode info instead of the raw expTimes?",
        default=False,
    )
    minPhotodiodeCurrent = pexConfig.Field(
        dtype=float,
        doc="Minimum value to trust photodiode signals.",
        default=0.0,
    )
    doAutoGrouping = pexConfig.Field(
        dtype=bool,
        doc="Do automatic group detection? Cannot be True if splineGroupingColumn is also set. "
            "The automatic group detection will use the ratio of signal to exposure time (if "
            "autoGroupingUseExptime is True) or photodiode (if False) to determine which "
            "flat pairs were taken with different illumination settings.",
        default=False,
    )
    autoGroupingUseExptime = pexConfig.Field(
        dtype=bool,
        doc="Use exposure time to determine automatic grouping. Used if doAutoGrouping=True.",
        default=True,
    )
    autoGroupingThreshold = pexConfig.Field(
        dtype=float,
        doc="Minimum relative jump from sorted conversion values to determine a group.",
        default=0.1,
    )
    autoGroupingMaxSignalFraction = pexConfig.Field(
        dtype=float,
        doc="Only do auto-grouping when the signal is this fraction of the maximum signal. "
            "All exposures with signal higher than this threshold will be put into the "
            "largest signal group. This config is needed if the input PTC goes beyond "
            "the linearity turnoff.",
        default=0.9,
    )
    groupingColumn = pexConfig.Field(
        dtype=str,
        doc="Column to use for grouping together points, to allow "
            "for different proportionality constants. If None, then grouping will "
            "only be done if doAutoGrouping is True.",
        default=None,
        optional=True,
    )
    absoluteSplineMinimumSignalNode = pexConfig.Field(
        dtype=float,
        doc="Smallest node (above 0) for absolute spline (adu).",
        default=0.0,
    )
    absoluteSplineLowThreshold = pexConfig.Field(
        dtype=float,
        doc="Threshold for the low-level linearity nodes for absolute spline (adu). "
            "If this is below ``absoluteSplineMinimumSignalNode`` then the low "
            "level checks will be skipped.",
        default=0.0,
    )
    absoluteSplineLowNodeSize = pexConfig.Field(
        dtype=float,
        doc="Minimum size for low-level linearity nodes for absolute spline (adu).",
        default=2000.0,
    )
    absoluteSplineNodeSize = pexConfig.Field(
        dtype=float,
        # Fixed garbled "absoluteSplineLowThreshold e(adu)" wording.
        doc="Minimum size for linearity nodes for absolute spline above absoluteSplineLowThreshold "
            "(adu); note that there will always be a node at the reference PTC turnoff.",
        default=3000.0,
    )
    absoluteSplineFitMinIter = pexConfig.Field(
        dtype=int,
        doc="Minimum number of iterations for absolute spline fit.",
        default=3,
    )
    absoluteSplineFitMaxIter = pexConfig.Field(
        dtype=int,
        doc="Maximum number of iterations for absolute spline fit.",
        default=20,
    )
    absoluteSplineFitMaxRejectionPerIteration = pexConfig.Field(
        dtype=int,
        doc="Maximum number of rejections per iteration for absolute spline fit.",
        default=5,
    )
    absoluteNSigmaClipLinear = pexConfig.Field(
        dtype=float,
        doc="Sigma-clipping for absolute spline solution.",
        default=5.0,
    )
    doAbsoluteSplineFitOffset = pexConfig.Field(
        dtype=bool,
        doc="Fit a scattered light offset in the spline fit.",
        default=True,
    )
    doAbsoluteSplineFitWeights = pexConfig.Field(
        dtype=bool,
        doc="Fit linearity weight parameters in the spline fit.",
        default=False,
    )
    absoluteSplineFitWeightParsStart = pexConfig.ListField(
        dtype=float,
        # Fixed cross-references: this config's flag is
        # doAbsoluteSplineFitWeights, not doSplineFitWeights.
        doc="Starting parameters for weight fit, if doAbsoluteSplineFitWeights=True. "
            "Parameters are such that sigma = sqrt(par[0]**2. + par[1]**2./mu)."
            "If doAbsoluteSplineFitWeights=False then these are used as-is; otherwise "
            "they are used as the initial values for fitting these parameters.",
        length=2,
        default=[1.0, 0.0],
    )
    doAbsoluteSplineFitTemperature = pexConfig.Field(
        dtype=bool,
        doc="Fit temperature coefficient in spline fit?",
        default=False,
    )
    absoluteSplineFitTemperatureColumn = pexConfig.Field(
        dtype=str,
        # Fixed cross-reference: the flag on this config is
        # doAbsoluteSplineFitTemperature.
        doc="Name of the temperature column to use when fitting temperature "
            "coefficients in spline fit; this must not be None if "
            "doAbsoluteSplineFitTemperature is True.",
        default=None,
        optional=True,
    )
    doAbsoluteSplineFitTemporal = pexConfig.Field(
        dtype=bool,
        doc="Fit a linear temporal parameter coefficient in spline fit?",
        default=False,
    )
    useFocalPlaneNormalization = pexConfig.Field(
        dtype=bool,
        doc="Use focal-plane normalization in addition to/instead of photodiode? "
            "(Only used for absolute spline fitting.)",
        default=False,
    )
    relativeSplineReferenceCounts = pexConfig.Field(
        dtype=float,
        doc="Number of target counts (adu) to select a reference image for "
            "relative spline solution.",
        default=10000.0,
    )
    relativeSplineMinimumSignalNode = pexConfig.Field(
        dtype=float,
        doc="Smallest node (above 0) for relative spline (adu).",
        default=100.0,
    )
    relativeSplineLowThreshold = pexConfig.Field(
        dtype=float,
        doc="Threshold for the low-level linearity nodes for relative spline (adu)."
            "If this is below ``relativeSplineMinimumSignalNode`` then the low "
            "level checks will be skipped.",
        default=5000.0,
    )
    relativeSplineLowNodeSize = pexConfig.Field(
        dtype=float,
        doc="Minimum size for low-level linearity nodes for relative spline (adu).",
        default=750.0,
    )
    relativeSplineMidNodeSize = pexConfig.Field(
        dtype=float,
        doc="Minimum size for mid-level linearity nodes for relative spline (adu); "
            "this applies to counts between relativeSplineLowThreshold and the "
            "PTC turnoff.",
        default=5000.0,
    )
    relativeSplineHighNodeSize = pexConfig.Field(
        dtype=float,
        doc="Minimum size for high-level linearity nodes for relative spline (adu); "
            "this applies to counts between the PTC and linearity turnoffs.",
        default=2000.0,
    )
    relativeSplineFitMinIter = pexConfig.Field(
        dtype=int,
        doc="Minimum number of iterations for relative spline fit.",
        default=3,
    )
    relativeSplineFitMaxIter = pexConfig.Field(
        dtype=int,
        doc="Maximum number of iterations for relative spline fit.",
        default=20,
    )
    relativeSplineFitMaxRejectionPerIteration = pexConfig.Field(
        dtype=int,
        doc="Maximum number of rejections per iteration for relative spline fit.",
        default=5,
    )
    relativeNSigmaClipLinear = pexConfig.Field(
        dtype=float,
        doc="Sigma-clipping for relative spline solution.",
        default=5.0,
    )

    def validate(self):
        """Check config consistency.

        Raises
        ------
        ValueError
            Raised if the temperature column is missing while temperature
            fitting is enabled, or if the grouping options conflict.
        """
        super().validate()

        if self.doAbsoluteSplineFitTemperature and self.absoluteSplineFitTemperatureColumn is None:
            raise ValueError(
                "Must set absoluteSplineFitTemperatureColumn if doAbsoluteSplineFitTemperature is True.",
            )

        if self.doAutoGrouping and self.groupingColumn is not None:
            raise ValueError("Must not set doAutoGrouping and also groupingColumn")
        if self.doAutoGrouping:
            # Auto-grouping needs some conversion signal: either exposure
            # time or (if that is disabled) the photodiode.
            if not self.autoGroupingUseExptime and not self.usePhotodiode:
                raise ValueError("If doAutoGrouping is True and autoGroupingUseExptime is False, then "
                                 "usePhotodiode must be True.")

1318 

1319 

1320class LinearityDoubleSplineSolveTask(pipeBase.PipelineTask): 

1321 ConfigClass = LinearityDoubleSplineSolveConfig 

1322 _DefaultName = "cpLinearityDoubleSplineSolve" 

1323 

def runQuantum(self, butlerQC, inputRefs, outputRefs):
    # docstring inherited
    inputs = butlerQC.get(inputRefs)

    if self.config.useFocalPlaneNormalization:
        inputNormalization = inputs["inputNormalization"]
    else:
        inputNormalization = None

    # Add calibration provenance info to header.
    kwargs = dict()
    reference = getattr(inputRefs, "inputLinearizerPtc", None)

    if reference is not None and hasattr(reference, "run"):
        runKey = "PTC_RUN"
        runValue = reference.run
        idKey = "PTC_UUID"
        idValue = str(reference.id)
        dateKey = "PTC_DATE"
        # Bug fix: the PTC is stored under this task's connection name
        # "inputLinearizerPtc"; the old key "inputPtc" is never present
        # in inputs, so the calibration date was always computed from
        # None.
        calib = inputs.get("inputLinearizerPtc", None)
        dateValue = extractCalibDate(calib)

        kwargs[runKey] = runValue
        kwargs[idKey] = idValue
        kwargs[dateKey] = dateValue

        self.log.info("Using " + str(reference.run))

    outputs = self.run(
        inputPtc=inputs["inputLinearizerPtc"],
        camera=inputs["camera"],
        inputBinnedImagesHandles=inputs["inputBinnedImagesHandles"],
        inputNormalization=inputNormalization,
    )
    # setDate=False: preserve the date set by run() while adding the
    # PTC provenance keys gathered above.
    outputs.outputLinearizer.updateMetadata(setDate=False, **kwargs)

    butlerQC.put(outputs, outputRefs)

1361 

1362 def run( 

1363 self, 

1364 *, 

1365 inputPtc, 

1366 camera, 

1367 inputBinnedImagesHandles, 

1368 inputNormalization, 

1369 ): 

1370 """Fit the double-spline relative/absolute linearity correction. 

1371 

1372 Parameters 

1373 ---------- 

1374 inputPtc : `lsst.ip.isr.PtcDataset` 

1375 Pre-measured PTC dataset. 

1376 camera : `lsst.afw.cameraGeom.Camera` 

1377 Camera geometry. 

1378 inputBinnedImagesHandles : `list` [`DeferredDatasetHandle`] 

1379 Handles for input binned image pairs. 

1380 inputNormalization : `astropy.table.Table`, optional 

1381 Focal plane normalization table to use if 

1382 useFocalPlaneNormalization is True. 

1383 

1384 Returns 

1385 ------- 

1386 results : `lsst.pipe.base.Struct` 

1387 The results struct containing: 

1388 

1389 ``outputLinearizer`` 

1390 Final linearizer calibration (`lsst.ip.isr.Linearizer`). 

1391 ``outputProvenance`` 

1392 Provenance data for the new calibration 

1393 (`lsst.ip.isr.IsrProvenance`). 

1394 """ 

1395 detector = camera[inputPtc.metadata["DETECTOR"]] 

1396 

1397 binnedImagesHandleDict = { 

1398 handle.dataId["exposure"]: handle for handle in inputBinnedImagesHandles 

1399 } 

1400 

1401 linearizer = Linearizer(detector=detector, log=self.log) 

1402 linearizer.updateMetadataFromExposures([inputPtc]) 

1403 

1404 groupingValues = _determineInputGroups( 

1405 inputPtc, 

1406 self.config.doAutoGrouping, 

1407 self.config.autoGroupingUseExptime, 

1408 self.config.autoGroupingMaxSignalFraction, 

1409 self.config.autoGroupingThreshold, 

1410 self.config.groupingColumn, 

1411 self.config.minPhotodiodeCurrent, 

1412 ) 

1413 

1414 if self.config.doAbsoluteSplineFitTemperature: 

1415 if self.config.absoluteSplineFitTemperatureColumn not in inputPtc.auxValues: 

1416 raise ValueError( 

1417 "Config requests fitting temperature coefficient for " 

1418 f"{self.config.splineFitTemperatureColumn} but this column " 

1419 "is not available in inputPtc.auxValues.", 

1420 ) 

1421 temperatureValues = inputPtc.auxValues[self.config.splineFitTemperatureColumn] 

1422 else: 

1423 temperatureValues = None 

1424 

1425 # Fill the linearizer with empty values. 

1426 firstAmp = None 

1427 for ampName in inputPtc.ampNames: 

1428 if ampName not in inputPtc.badAmps: 

1429 firstAmp = ampName 

1430 break 

1431 if firstAmp is None: 

1432 raise pipeBase.NoWorkFound("No valid amps in input PTC.") 

1433 nExp = len(inputPtc.inputExpIdPairs[firstAmp]) * 2 

1434 nAmp = len(inputPtc.ampNames) 

1435 

1436 for amp in detector: 

1437 ampName = amp.getName() 

1438 

1439 linearizer.inputGain[ampName] = inputPtc.gain[ampName] 

1440 linearizer.linearityType[ampName] = "None" 

1441 linearizer.linearityCoeffs[ampName] = np.zeros(1) 

1442 # This is not used; kept for compatibility. 

1443 linearizer.linearityBBox[ampName] = amp.getBBox() 

1444 linearizer.fitParams[ampName] = np.zeros(1) 

1445 linearizer.fitParamsErr[ampName] = np.zeros(1) 

1446 linearizer.fitChiSq[ampName] = np.nan 

1447 linearizer.fitResiduals[ampName] = np.zeros(nExp) 

1448 linearizer.fitResidualsSigmaMad[ampName] = np.nan 

1449 linearizer.fitResidualsUnmasked[ampName] = np.zeros(nExp) 

1450 linearizer.fitResidualsModel[ampName] = np.zeros(nExp) 

1451 linearizer.linearFit[ampName] = np.zeros(2) 

1452 linearizer.linearityTurnoff[ampName] = np.nan 

1453 linearizer.linearityMaxSignal[ampName] = np.nan 

1454 linearizer.inputMask[ampName] = np.zeros(nExp, dtype=np.bool_) 

1455 linearizer.inputAbscissa[ampName] = np.zeros(nExp) 

1456 linearizer.inputOrdinate[ampName] = np.zeros(nExp) 

1457 linearizer.inputGroupingIndex[ampName] = np.zeros(nExp, dtype=np.int64) 

1458 linearizer.inputNormalization[ampName] = np.ones(nExp) 

1459 

1460 linearizer.absoluteReferenceAmplifier = "" 

1461 

1462 # Extract values in common, and per-amp. 

1463 data = np.zeros( 

1464 nExp, 

1465 dtype=[ 

1466 ("exp_id", "i8"), 

1467 ("exptime", "f8"), 

1468 ("photocharge", "f8"), 

1469 ("mjd", "f8"), 

1470 ("raw_mean", ("f8", nAmp)), 

1471 ("abscissa", "f8"), 

1472 ("grouping", "i4"), 

1473 # The following are computed in the relative scaling 

1474 # measurements. 

1475 ("ref_counts", "f8"), 

1476 ("gain_ratio", ("f8", nAmp)), 

1477 ], 

1478 ) 

1479 

1480 data["exp_id"] = np.asarray(inputPtc.inputExpIdPairs[firstAmp]).ravel() 

1481 data["exptime"] = np.repeat(inputPtc.rawExpTimes[firstAmp], 2) 

1482 data["mjd"] = np.repeat(inputPtc.inputExpPairMjdStartList[firstAmp], 2) 

1483 data["photocharge"] = np.repeat(inputPtc.photoCharges[firstAmp], 2) 

1484 data["photocharge"][::2] -= inputPtc.photoChargeDeltas[firstAmp] / 2. 

1485 data["photocharge"][1::2] += inputPtc.photoChargeDeltas[firstAmp] / 2. 

1486 data["grouping"] = np.repeat(groupingValues, 2) 

1487 

1488 for i, amp in enumerate(detector): 

1489 ampName = amp.getName() 

1490 

1491 data["raw_mean"][:, i] = np.repeat(inputPtc.rawMeans[ampName], 2) 

1492 data["raw_mean"][::2, i] -= inputPtc.rawDeltas[ampName] / 2. 

1493 data["raw_mean"][1::2, i] += inputPtc.rawDeltas[ampName] / 2. 

1494 

1495 if self.config.usePhotodiode: 

1496 data["abscissa"][:] = data["photocharge"] 

1497 

1498 data["abscissa"][data["photocharge"] < self.config.minPhotodiodeCurrent] = np.nan 

1499 else: 

1500 data["abscissa"][:] = data["exptime"] 

1501 

1502 # Normalize if configured. 

1503 inputNorm = np.ones(nExp, dtype=np.float64) 

1504 if self.config.useFocalPlaneNormalization: 

1505 a, b = esutil.numpy_util.match(data["exp_id"], inputNormalization["exposure"]) 

1506 inputNorm[a] = inputNormalization["normalization"][b] 

1507 data["abscissa"] *= inputNorm 

1508 

1509 postTurnoffMasks = {} 

1510 

1511 # Compute linearity turnoff for each amp. 

1512 for i, amp in enumerate(detector): 

1513 ampName = amp.getName() 

1514 

1515 if ampName in inputPtc.badAmps: 

1516 self.log.warning( 

1517 "Amp %s in detector %s has no usable PTC information. Skipping!", 

1518 ampName, 

1519 detector.getName(), 

1520 ) 

1521 continue 

1522 

1523 mask = np.isfinite(data["raw_mean"][:, i]) 

1524 

1525 turnoffMask = np.repeat(inputPtc.expIdMask[ampName], 2) 

1526 turnoffMask &= mask 

1527 

1528 _, turnoff, maxSignal, goodPoints = _computeTurnoffAndMax( 

1529 data["abscissa"], 

1530 data["raw_mean"][:, i], 

1531 turnoffMask, 

1532 data["grouping"], 

1533 ampName=ampName, 

1534 minSignalFitLinearityTurnoff=self.config.minSignalFitLinearityTurnoff, 

1535 maxFracLinearityDeviation=self.config.maxFracLinearityDeviation, 

1536 log=self.log, 

1537 ) 

1538 

1539 # Use the goodPoints as an initial estimate of the mask 

1540 # above the turnoff. But we only want to maintain the 

1541 # "high end" outliers. 

1542 postTurnoffMask = goodPoints 

1543 postTurnoffMask[data["raw_mean"][:, i] < np.median(data["raw_mean"][:, i])] = True 

1544 postTurnoffMasks[ampName] = postTurnoffMask 

1545 

1546 if np.isnan(turnoff): 

1547 # This is a bad amp, with no linearizer. 

1548 self.log.warning( 

1549 "Amp %s in detector %s has no usable linearizer information. Skipping!", 

1550 ampName, 

1551 detector.getName(), 

1552 ) 

1553 continue 

1554 

1555 linearizer.linearityTurnoff[ampName] = turnoff 

1556 linearizer.linearityMaxSignal[ampName] = maxSignal 

1557 

1558 self.log.info("Amplifier %s has a linearity turnoff of %.2f adu.", ampName, turnoff) 

1559 

1560 # Choose the reference amplifier as the one with the largest 

1561 # turnoff. This ensures that the absolute fit covers the full 

1562 # range. We additionally confirm that the ptc turnoff is 

1563 # finite for this amplifier. 

1564 turnoffArray = np.asarray([linearizer.linearityTurnoff[ampName] for ampName in inputPtc.ampNames]) 

1565 # This is a possibly redundant check to make sure that a bad amp is 

1566 # not chosen as a reference amp. We also check that a high noise 

1567 # amp is not chosen as the reference amp. 

1568 for i, ampName in enumerate(inputPtc.ampNames): 

1569 if ampName in inputPtc.badAmps \ 

1570 or not np.isfinite(inputPtc.ptcTurnoff[ampName]) \ 

1571 or inputPtc.noise[ampName] > self.config.maxNoiseReference: 

1572 turnoffArray[i] = np.nan 

1573 

1574 if np.all(~np.isfinite(turnoffArray)): 

1575 # Return the default blank linearizer. 

1576 linearizer.hasLinearity = True 

1577 linearizer.validate() 

1578 linearizer.updateMetadata(camera=camera, detector=detector, filterName='NONE') 

1579 linearizer.updateMetadata(setDate=True, setCalibId=True) 

1580 linearizer.updateMetadataFromExposures([inputPtc]) 

1581 provenance = IsrProvenance(calibType='linearizer') 

1582 

1583 return pipeBase.Struct( 

1584 outputLinearizer=linearizer, 

1585 outputProvenance=provenance, 

1586 ) 

1587 

1588 refAmpIndex = np.argmax(np.nan_to_num(turnoffArray)) 

1589 refAmpName = inputPtc.ampNames[refAmpIndex] 

1590 linearizer.absoluteReferenceAmplifier = refAmpName 

1591 

1592 # Choose a reference image. 

1593 refExpIndex = np.argmin( 

1594 np.abs( 

1595 np.nan_to_num(data["raw_mean"][:, refAmpIndex]) - self.config.relativeSplineReferenceCounts 

1596 ) 

1597 ) 

1598 refExpId = data["exp_id"][refExpIndex] 

1599 

1600 self.log.info( 

1601 "Using exposure %d (%.2f adu in amp %s) as reference.", 

1602 refExpId, 

1603 data["raw_mean"][refExpIndex, refAmpIndex], 

1604 refAmpName, 

1605 ) 

1606 

1607 # We need to know if the reference exposure is the first or second 

1608 # in the pair, because the binned are pairs. 

1609 offset = refExpIndex % 2 

1610 

1611 refBinned = binnedImagesHandleDict[data["exp_id"][refExpIndex - offset]].get() 

1612 refBinned = copy.copy(refBinned) 

1613 if offset == 0: 

1614 refBinned["value"] = refBinned["value1"] 

1615 else: 

1616 refBinned["value"] = refBinned["value2"] 

1617 

1618 # Scale reference according to reference amplifier. 

1619 refScaling = np.median(refBinned["value"][refBinned["amp_index"] == refAmpIndex]) 

1620 refBinned["value"] /= refScaling 

1621 

1622 # Get the invidual amp scalings. 

1623 # These are the relative gains for the reference image. 

1624 ampScalings = np.asarray( 

1625 [ 

1626 np.median(refBinned["value"][refBinned["amp_index"] == ampIndex]) 

1627 for ampIndex in range(nAmp) 

1628 ], 

1629 ) 

1630 

1631 # Compute the gain ratios for every exposure. 

1632 # The binned images are stored as pairs. 

1633 self.log.info("Computing gain ratios for %d exposures.", len(data)) 

1634 for i in range(len(data)): 

1635 expId = data["exp_id"][i] 

1636 if (i % 2) == 0: 

1637 binned = binnedImagesHandleDict[expId].get() 

1638 binned["value"] = binned["value1"] 

1639 else: 

1640 binned["value"] = binned["value2"] 

1641 

1642 binned["value"] /= refBinned["value"] 

1643 

1644 gainRatios = np.asarray( 

1645 [ 

1646 np.median(binned["value"][binned["amp_index"] == ampIndex]) 

1647 for ampIndex in range(nAmp) 

1648 ] 

1649 ) 

1650 ref_counts = gainRatios[refAmpIndex] 

1651 gainRatios /= ref_counts 

1652 

1653 data["ref_counts"][i] = ref_counts 

1654 data["gain_ratio"][i, :] = gainRatios 

1655 

1656 # We need to know which group has the largest size. 

1657 groupAmplitudes = np.zeros(len(np.unique(data["grouping"]))) 

1658 for g in range(len(groupAmplitudes)): 

1659 use = (data["grouping"] == g) 

1660 groupAmplitudes[g] = np.nanmax(data["ref_counts"][use]) - np.nanmin(data["ref_counts"][use]) 

1661 maxAmplitudeGroup = np.argmax(groupAmplitudes) 

1662 

1663 self.log.info("Illumination group %d has the largest signal amplitude.", maxAmplitudeGroup) 

1664 

1665 # Compute relative linearization first. 

1666 maxRelNodes = 0 

1667 

1668 for i, amp in enumerate(detector): 

1669 if i == refAmpIndex: 

1670 continue 

1671 

1672 ampName = amp.getName() 

1673 

1674 ptcTurnoff = inputPtc.ptcTurnoff[ampName] 

1675 linearityTurnoff = linearizer.linearityTurnoff[ampName] 

1676 

1677 if not np.isfinite(ptcTurnoff) or not np.isfinite(linearityTurnoff): 

1678 # This is a bad amp; skip it. 

1679 continue 

1680 

1681 if ptcTurnoff < self.config.relativeSplineLowThreshold: 

1682 lowThreshold = 0.0 

1683 else: 

1684 lowThreshold = self.config.relativeSplineLowThreshold 

1685 

1686 relAbscissa = data["ref_counts"] * ampScalings[i] 

1687 relOrdinate = data["ref_counts"] * data["gain_ratio"][:, i] * ampScalings[i] 

1688 

1689 # The mask here must exclude everything beyond the turnoff. 

1690 # Note that we need to do this before we use the actual 

1691 # turnoff to compute the nodes to avoid nodes going past the 

1692 # data domain. 

1693 relMask = postTurnoffMasks[ampName] & np.isfinite(relAbscissa) & np.isfinite(relOrdinate) 

1694 

1695 # Make sure that the linearity turnoff used here does not 

1696 # go beyond the max value of the relOrdinate 

1697 linearityTurnoff = min(linearityTurnoff, np.max(relOrdinate[relMask])) 

1698 

1699 relNodes = _noderator( 

1700 lowThreshold, 

1701 ptcTurnoff, 

1702 linearityTurnoff, 

1703 self.config.relativeSplineMinimumSignalNode, 

1704 self.config.relativeSplineLowNodeSize, 

1705 self.config.relativeSplineMidNodeSize, 

1706 self.config.relativeSplineHighNodeSize, 

1707 ) 

1708 

1709 self.log.info("Relative linearity for amplifier %s using %d nodes.", ampName, len(relNodes)) 

1710 

1711 # Update the number of relative nodes to concatenation. 

1712 if len(relNodes) > maxRelNodes: 

1713 maxRelNodes = len(relNodes) 

1714 

1715 linearizer.inputMask[ampName] = relMask.copy() 

1716 linearizer.inputAbscissa[ampName] = relAbscissa.copy() 

1717 linearizer.inputOrdinate[ampName] = relOrdinate.copy() 

1718 linearizer.inputGroupingIndex[ampName] = data["grouping"].copy() 

1719 linearizer.inputNormalization[ampName] = np.ones_like(relAbscissa) 

1720 

1721 fitter = AstierSplineLinearityFitter( 

1722 relNodes, 

1723 data["grouping"], 

1724 relAbscissa, 

1725 relOrdinate, 

1726 mask=relMask, 

1727 fit_offset=False, 

1728 fit_weights=False, 

1729 fit_temperature=False, 

1730 max_signal_nearly_linear=ptcTurnoff, 

1731 fit_temporal=False, 

1732 # Put a cap on the maximum correction in absolute value. 

1733 max_frac_correction=np.inf, 

1734 max_correction=10_000.0, 

1735 ) 

1736 p0 = fitter.estimate_p0() 

1737 pars = fitter.fit( 

1738 p0, 

1739 min_iter=self.config.relativeSplineFitMinIter, 

1740 max_iter=self.config.relativeSplineFitMaxIter, 

1741 max_rejection_per_iteration=self.config.relativeSplineFitMaxRejectionPerIteration, 

1742 n_sigma_clip=self.config.relativeNSigmaClipLinear, 

1743 ) 

1744 

1745 # Confirm that the first parameter is 0, and set it to 

1746 # exactly zero. 

1747 relValues = pars[fitter.par_indices["values"]] 

1748 if not np.isclose(relValues[0], 0): 

1749 raise RuntimeError("Programmer error! First spline parameter must " 

1750 "be consistent with zero.") 

1751 relValues[0] = 0.0 

1752 

1753 if np.any(np.abs(pars[fitter.par_indices["values"]]) > 10_000.0): 

1754 self.log.warning("Unconstrained nodes escaped containment; clipping.") 

1755 lo = (pars[fitter.par_indices["values"]] < -10_000.0) 

1756 if np.sum(lo) > 0: 

1757 pars[fitter.par_indices["values"][lo]] = -10_000.0 

1758 hi = (pars[fitter.par_indices["values"]] > 10_000.0) 

1759 if np.sum(hi) > 0: 

1760 pars[fitter.par_indices["values"][hi]] = 10_000.0 

1761 

1762 # We adjust the node values according to the slope of the 

1763 # group with the largest amplitude. This removes a degeneracy 

1764 # in the normalization and ensures that the overall linearized 

1765 # correction is as close to the reference as possible. 

1766 relValues -= (1.0 - pars[fitter.par_indices["groups"][maxAmplitudeGroup]]) * relNodes 

1767 

1768 relChisq = fitter.compute_chisq_dof(pars) 

1769 

1770 # Our reference fit is always 1.0 slope. 

1771 relLinearFit = np.array([0.0, 1.0]) 

1772 

1773 # Adjust the abscissa for different groups for residuals. 

1774 for j, groupIndex in enumerate(fitter.group_indices): 

1775 relAbscissa[groupIndex] *= (pars[fitter.par_indices["groups"][j]] / relLinearFit[1]) 

1776 

1777 relMask = fitter.mask 

1778 

1779 # Record values in the linearizer. 

1780 linearizer.linearityType[ampName] = "DoubleSpline" 

1781 # Note that we have a placeholder for the number of nodes in 

1782 # the absolute spline. 

1783 linearizer.linearityCoeffs[ampName] = np.concatenate([[len(relNodes), 0], relNodes, relValues]) 

1784 linearizer.fitChiSq[ampName] = relChisq 

1785 linearizer.linearFit[ampName] = relLinearFit 

1786 

1787 # Compute residuals. 

1788 spl = Akima1DInterpolator(relNodes, relValues, method="akima") 

1789 relOffset = spl(np.clip(relOrdinate, relNodes[0], relNodes[-1])) 

1790 relModel = relOrdinate - relOffset 

1791 

1792 if relMask.sum() < 2: 

1793 self.log.warning("Amp %s in detector %s has not enough points in linear ordinate " 

1794 "for residuals. Skipping!", ampName, detector.getName()) 

1795 relResiduals = np.full_like(relModel, np.nan) 

1796 relResidualsUnmasked = relResiduals.copy() 

1797 else: 

1798 postLinearFit, _, _, _ = irlsFit( 

1799 relLinearFit, 

1800 relAbscissa[relMask], 

1801 relModel[relMask], 

1802 funcPolynomial, 

1803 ) 

1804 # When computing residuals, we only care about the slope of 

1805 # the postLinearFit and not the intercept. The intercept 

1806 # itself depends on a possibly unknown zero in the abscissa 

1807 # (often photodiode) which may have an arbitrary value. 

1808 relResiduals = relModel - (postLinearFit[1] * relAbscissa) 

1809 relResidualsUnmasked = relResiduals.copy() 

1810 # We set masked residuals to nan. 

1811 relResiduals[~relMask] = np.nan 

1812 

1813 linearizer.fitResidualsUnmasked[ampName] = relResidualsUnmasked 

1814 linearizer.fitResiduals[ampName] = relResiduals 

1815 linearizer.fitResidualsModel[ampName] = relModel.copy() 

1816 

1817 finite = np.isfinite(relResiduals) 

1818 if finite.sum() == 0: 

1819 sigmad = np.nan 

1820 else: 

1821 sigmad = median_abs_deviation(relResiduals[finite]/relOrdinate[finite], scale="normal") 

1822 linearizer.fitResidualsSigmaMad[ampName] = sigmad 

1823 

1824 # Now compute absolute linearization. 

1825 

1826 if temperatureValues is not None: 

1827 temperatureValuesScaled = temperatureValues - np.median(temperatureValues) 

1828 else: 

1829 temperatureValuesScaled = None 

1830 

1831 if self.config.doAbsoluteSplineFitTemporal: 

1832 inputMjdScaled = data["mjd"].copy() 

1833 inputMjdScaled -= np.nanmedian(inputMjdScaled) 

1834 else: 

1835 inputMjdScaled = None 

1836 

1837 absAbscissa = data["abscissa"].copy() 

1838 absOrdinate = data["ref_counts"].copy() 

1839 

1840 # These are guaranteed to be finite (as checked previously). 

1841 absPtcTurnoff = inputPtc.ptcTurnoff[refAmpName] 

1842 absLinearityTurnoff = linearizer.linearityTurnoff[refAmpName] 

1843 

1844 if absPtcTurnoff < self.config.absoluteSplineLowThreshold: 

1845 lowThreshold = 0.0 

1846 else: 

1847 lowThreshold = self.config.absoluteSplineLowThreshold 

1848 

1849 # The mask here must exclude everything beyond the turnoff. 

1850 # Note that we need to do this before we use the actual 

1851 # turnoff to compute the nodes to avoid nodes going past the 

1852 # data domain. 

1853 absMask = postTurnoffMasks[refAmpName] & np.isfinite(absAbscissa) & np.isfinite(absOrdinate) 

1854 

1855 absLinearityTurnoff = min(absLinearityTurnoff, np.max(absOrdinate[absMask])) 

1856 

1857 absNodes = _noderator( 

1858 lowThreshold, 

1859 absPtcTurnoff, 

1860 absLinearityTurnoff, 

1861 self.config.absoluteSplineMinimumSignalNode, 

1862 self.config.absoluteSplineLowNodeSize, 

1863 # The medium and high are matched for absolute spline. 

1864 self.config.absoluteSplineNodeSize, 

1865 self.config.absoluteSplineNodeSize, 

1866 ) 

1867 

1868 self.log.info("Absolute linearity for using %d nodes.", len(absNodes)) 

1869 

1870 # We store the absolute residuals with the reference amplifier. 

1871 linearizer.linearityType[refAmpName] = "DoubleSpline" 

1872 linearizer.inputMask[refAmpName] = absMask.copy() 

1873 linearizer.inputAbscissa[refAmpName] = absAbscissa.copy() 

1874 linearizer.inputOrdinate[refAmpName] = absOrdinate.copy() 

1875 linearizer.inputGroupingIndex[refAmpName] = data["grouping"].copy() 

1876 linearizer.inputNormalization[refAmpName] = inputNorm.copy() 

1877 

1878 fitter = AstierSplineLinearityFitter( 

1879 absNodes, 

1880 data["grouping"].copy(), 

1881 absAbscissa, 

1882 absOrdinate, 

1883 mask=absMask, 

1884 log=self.log, 

1885 fit_offset=self.config.doAbsoluteSplineFitOffset, 

1886 fit_weights=self.config.doAbsoluteSplineFitWeights, 

1887 weight_pars_start=self.config.absoluteSplineFitWeightParsStart, 

1888 fit_temperature=self.config.doAbsoluteSplineFitTemperature, 

1889 temperature_scaled=temperatureValuesScaled, 

1890 max_signal_nearly_linear=absPtcTurnoff, 

1891 fit_temporal=self.config.doAbsoluteSplineFitTemporal, 

1892 mjd_scaled=inputMjdScaled, 

1893 ) 

1894 p0 = fitter.estimate_p0() 

1895 pars = fitter.fit( 

1896 p0, 

1897 min_iter=self.config.absoluteSplineFitMinIter, 

1898 max_iter=self.config.absoluteSplineFitMaxIter, 

1899 max_rejection_per_iteration=self.config.absoluteSplineFitMaxRejectionPerIteration, 

1900 n_sigma_clip=self.config.absoluteNSigmaClipLinear, 

1901 ) 

1902 

1903 # Confirm that the first parameter is 0, and set it to 

1904 # exactly zero. 

1905 absValues = pars[fitter.par_indices["values"]] 

1906 if not np.isclose(absValues[0], 0): 

1907 raise RuntimeError("Programmer error! First spline parameter must " 

1908 "be consistent with zero.") 

1909 absValues[0] = 0.0 

1910 

1911 # We need a place to store this. 

1912 linearizer.fitChiSq[refAmpName] = fitter.compute_chisq_dof(pars) 

1913 

1914 absLinearFit = np.array([0.0, np.mean(pars[fitter.par_indices["groups"]])]) 

1915 

1916 # We must modify the inputOrdinate according to the 

1917 # nuisance terms in the linearity fit for the residual 

1918 # computation code to work properly. 

1919 # The true mu (inputOrdinate) is given by 

1920 # mu = mu_in * (1 + alpha*t_scale) * (1 + beta*mjd_scale) 

1921 if self.config.doAbsoluteSplineFitTemperature: 

1922 absOrdinate *= (1.0 

1923 + pars[fitter.par_indices["temperature_coeff"]]*temperatureValuesScaled) 

1924 if self.config.doAbsoluteSplineFitTemporal: 

1925 absOrdinate *= (1.0 

1926 + pars[fitter.par_indices["temporal_coeff"]]*inputMjdScaled) 

1927 

1928 # Adjust the abscissa for different groups for residuals. 

1929 for j, groupIndex in enumerate(fitter.group_indices): 

1930 absAbscissa[groupIndex] *= (pars[fitter.par_indices["groups"][j]] / absLinearFit[1]) 

1931 # And remove the offset term. 

1932 if self.config.doAbsoluteSplineFitOffset: 

1933 absOrdinate -= pars[fitter.par_indices["offset"]] 

1934 

1935 absMask = fitter.mask 

1936 

1937 # Compute residuals. 

1938 spl = Akima1DInterpolator(absNodes, absValues, method="akima") 

1939 absOffset = spl(np.clip(absOrdinate, absNodes[0], absNodes[-1])) 

1940 absModel = absOrdinate - absOffset 

1941 

1942 if absMask.sum() < 2: 

1943 self.log.warning("Detector %s has not enough points in linear ordinate " 

1944 "for residuals. Skipping!", detector.getName()) 

1945 # We have to KICK OUT HERE something is VERY wrong. 

1946 absResiduals = np.full_like(absModel, np.nan) 

1947 absResidualsUnmasked = relResiduals.copy() 

1948 else: 

1949 postLinearFit, _, _, _ = irlsFit( 

1950 absLinearFit, 

1951 absAbscissa[absMask], 

1952 absModel[absMask], 

1953 funcPolynomial, 

1954 ) 

1955 # When computing residuals, we only care about the slope of 

1956 # the postLinearFit and not the intercept. The intercept 

1957 # itself depends on a possibly unknown zero in the abscissa 

1958 # (often photodiode) which may have an arbitrary value. 

1959 absResiduals = absModel - (postLinearFit[1] * absAbscissa) 

1960 absResidualsUnmasked = absResiduals.copy() 

1961 # We set masked residuals to nan. 

1962 absResiduals[~absMask] = np.nan 

1963 

1964 linearizer.fitResidualsUnmasked[refAmpName] = absResidualsUnmasked 

1965 linearizer.fitResiduals[refAmpName] = absResiduals 

1966 linearizer.fitResidualsModel[refAmpName] = absModel.copy() 

1967 

1968 finite = np.isfinite(absResiduals) 

1969 if finite.sum() == 0: 

1970 sigmad = np.nan 

1971 else: 

1972 sigmad = median_abs_deviation(absResiduals[finite]/absOrdinate[finite], scale="normal") 

1973 linearizer.fitResidualsSigmaMad[refAmpName] = sigmad 

1974 

1975 # Record the absolute nodes and values in each individual amplifier, 

1976 # along with extra padding for alignment. 

1977 nAbsNodes = len(absNodes) 

1978 for i, amp in enumerate(detector): 

1979 ampName = amp.getName() 

1980 

1981 coeffs = np.zeros(2 * nAbsNodes + 2 * maxRelNodes + 2) 

1982 if ampName == refAmpName: 

1983 # The reference amplifier only has the absolute spline. 

1984 coeffs[1] = nAbsNodes 

1985 coeffs[2: 2 + 2 * nAbsNodes] = np.concatenate([absNodes, absValues]) 

1986 else: 

1987 nRelNodes = int(linearizer.linearityCoeffs[ampName][0]) 

1988 

1989 coeffs = np.zeros(2 * nAbsNodes + 2 * maxRelNodes + 2) 

1990 coeffs[0] = nRelNodes 

1991 coeffs[1] = nAbsNodes 

1992 relStart = 2 

1993 relEnd = relStart + 2 * nRelNodes 

1994 coeffs[relStart: relEnd] = linearizer.linearityCoeffs[ampName][relStart: relEnd] 

1995 absStart = relEnd 

1996 absEnd = absStart + 2 * nAbsNodes 

1997 coeffs[absStart: absEnd] = np.concatenate([absNodes, absValues]) 

1998 

1999 linearizer.linearityCoeffs[ampName] = coeffs 

2000 

2001 linearizer.hasLinearity = True 

2002 linearizer.validate() 

2003 linearizer.updateMetadata(camera=camera, detector=detector, filterName='NONE') 

2004 linearizer.updateMetadata(setDate=True, setCalibId=True) 

2005 linearizer.updateMetadataFromExposures([inputPtc]) 

2006 provenance = IsrProvenance(calibType='linearizer') 

2007 

2008 return pipeBase.Struct( 

2009 outputLinearizer=linearizer, 

2010 outputProvenance=provenance, 

2011 ) 

2012 

2013 

2014def _determineInputGroups( 

2015 ptc, 

2016 doAutoGrouping, 

2017 autoGroupingUseExptime, 

2018 autoGroupingMaxSignalFraction, 

2019 autoGroupingThreshold, 

2020 groupingColumn, 

2021 minPhotodiodeCurrent, 

2022): 

2023 """Determine input groups for linearity fit. 

2024 

2025 If ``splineGroupingColumn`` is set, then grouping will be done 

2026 based on this. Otherwise, if ``doAutoGrouping`` is False, then 

2027 no grouping will be done. Finally, grouping will be done by measuring 

2028 the ratio of signal to exposure time (if 

2029 ``autoGroupingUseExptime`` is set; recommended) or photocharge. 

2030 These are then clustered with a simple algorithm to split into groups. 

2031 If the data was taking by varying exposure time at different 

2032 illumination levels, this grouping is very robust as the clusters are 

2033 very well separated. 

2034 

2035 Parameters 

2036 ---------- 

2037 ptc : `lsst.ip.isr.PhotonTransferCurveDataset` 

2038 Input PTC to do grouping. 

2039 doAutoGrouping : `bool` 

2040 Do automatic grouping of pairs? 

2041 autoGroupingUseExptime : `bool` 

2042 Use exposure time for automatic grouping of pairs? 

2043 autoGroupingMaxSignalFraction : `float` 

2044 All exposures with signal higher than this threshold will 

2045 be put into the largest signal group. 

2046 autoGroupingThreshold : `float` 

2047 Minimum relative jump from sorted values to determine a group. 

2048 minPhotodiodeCurrent : `float` 

2049 Minimum photodiode current if auto-grouping is used and 

2050 autoGroupingUseExptime is False. 

2051 splineGroupingColumn : `str` or `None` 

2052 Column to be used for spline grouping (if doAutoGrouping is False). 

2053 

2054 Returns 

2055 ------- 

2056 groupingValues : `np.ndarray` 

2057 Array of values that are unique for a given group. 

2058 """ 

2059 nPair = np.asarray(ptc.inputExpIdPairs[ptc.ampNames[0]]).shape[0] 

2060 groupingValues = np.zeros(nPair, dtype=np.int64) 

2061 

2062 if not doAutoGrouping: 

2063 if groupingColumn is not None: 

2064 if groupingColumn not in ptc.auxValues: 

2065 raise ValueError(f"Config requests grouping by {groupingColumn}, " 

2066 "but this column is not available in ptc.auxValues.") 

2067 

2068 uGroupValues = np.unique(ptc.auxValues[groupingColumn]) 

2069 for i, uGroupValue in enumerate(uGroupValues): 

2070 groupingValues[ptc.auxValues[groupingColumn] == uGroupValue] = i 

2071 else: 

2072 means = np.zeros((nPair, len(ptc.ampNames))) 

2073 exptimes = np.zeros_like(means) 

2074 for i, ampName in enumerate(ptc.ampNames): 

2075 means[:, i] = ptc.rawMeans[ampName] * ptc.gain[ampName] 

2076 exptimes[:, i] = ptc.rawExpTimes[ampName] 

2077 detMeans = np.nanmean(means, axis=1) 

2078 detExptimes = np.nanmean(exptimes, axis=1) 

2079 

2080 if autoGroupingUseExptime: 

2081 abscissa = detExptimes 

2082 else: 

2083 abscissa = ptc.photoCharges[ptc.ampNames[0]].copy() 

2084 # Set illegal photocharges to NaN. 

2085 abscissa[abscissa < minPhotodiodeCurrent] = np.nan 

2086 

2087 ratio = detMeans / abscissa 

2088 ratio /= np.nanmedian(ratio) 

2089 

2090 # Adjust those that are above threshold so they fall into the 

2091 # largest group. 

2092 above = (detMeans > autoGroupingMaxSignalFraction*np.nanmax(detMeans)) 

2093 maxIndex = np.argmax(detMeans[~above]) 

2094 ratio[above] = ratio[maxIndex] 

2095 

2096 # The clustering of ratios into groups is performed with a simple 

2097 # algorithm based on sorting and looking for the largest gaps. 

2098 # See https://stackoverflow.com/a/18385795 

2099 st = np.argsort(ratio) 

2100 stratio = ratio[st] 

2101 delta = stratio[1:] - stratio[0: -1] 

2102 

2103 transitions, = np.where(delta > autoGroupingThreshold) 

2104 # If there are no transitions then everything ends up in group 0. 

2105 if len(transitions) > 0: 

2106 ratioCuts = stratio[transitions] + autoGroupingThreshold/2. 

2107 

2108 for i in range(len(transitions) + 1): 

2109 if i == 0: 

2110 inGroup, = np.where(ratio < ratioCuts[i]) 

2111 elif i == len(transitions): 

2112 inGroup, = np.where(ratio > ratioCuts[i - 1]) 

2113 else: 

2114 inGroup, = np.where((ratio > ratioCuts[i - 1]) & (ratio < ratioCuts[i])) 

2115 groupingValues[inGroup] = i 

2116 

2117 # Ensure out-of-range photoCharges/exptimes are in their own group. 

2118 # These are masked later on. 

2119 groupingValues[~np.isfinite(abscissa)] = -1 

2120 # And put the high ones in the max group. 

2121 groupingValues[above] = groupingValues[maxIndex] 

2122 

2123 return groupingValues 

2124 

2125 

def _computeTurnoffAndMax(
    abscissa,
    ordinate,
    initialMask,
    groupingValues,
    ampName="UNKNOWN",
    minSignalFitLinearityTurnoff=1000.0,
    maxFracLinearityDeviation=0.01,
    log=None,
):
    """Compute the turnoff and max signal.

    Parameters
    ----------
    abscissa : `np.ndarray`
        Input x values, either photoCharges or exposure times.
        These should be cleaned of any non-finite values.
    ordinate : `np.ndarray`
        Input y values, the raw mean values for the amp.
        These should be cleaned of any non-finite values.
    initialMask : `np.ndarray`
        Mask to use for initial fit (usually from PTC).
    groupingValues : `np.ndarray`
        Array of values that are used to group different fits.
    ampName : `str`, optional
        Amplifier name (used for logging).
    minSignalFitLinearityTurnoff : `float`, optional
        Minimum signal to compute raw linearity slope for linearityTurnoff.
    maxFracLinearityDeviation : `float`, optional
        Maximum fraction deviation from raw linearity to compute turnoff.
    log : `logging.Logger`, optional
        Log object.

    Returns
    -------
    turnoffIndex : `int`
        Fit turnoff index (keyed to raw input).
    turnoff : `float`
        Fit turnoff value.
    maxSignal : `float`
        Fit maximum signal value.
    goodPoints : `np.ndarray`
        Mask of good points used in turnoff fit.
    """
    if log is None:
        log = logging.getLogger(__name__)

    # Follow eo_pipe:
    # https://github.com/lsst-camera-dh/eo_pipe/blob/6afa546569f622b8d604921e248200481c445730/python/lsst/eo/pipe/linearityPlotsTask.py#L50
    # Replacing flux with abscissa, Ne with ordinate.

    # Fit a line with the y-intercept fixed to zero, using the
    # signal counts Ne as the variance in the chi-square, i.e.,
    # chi2 = sum( (ordinate - aa*abscissa)**2/ordinate )
    # Minimizing chi2 wrt aa, gives
    # aa = sum(abscissa) / sum(abscissa**2/ordinate)

    # Low-signal points are excluded from the slope fit entirely.
    fitMask = initialMask.copy()
    fitMask[ordinate < minSignalFitLinearityTurnoff] = False
    goodPoints = fitMask.copy()

    # Pre-compute per-group index arrays; each illumination group gets
    # its own zero-intercept slope fit below.
    gValues = np.unique(groupingValues)
    groupIndicesList = []
    for gValue in gValues:
        use, = np.where(groupingValues == gValue)
        groupIndicesList.append(use)

    # Iteratively reject the single worst outlier until the per-group
    # linear fits describe at least 5 points to within the allowed
    # fractional deviation (or we run out of points).
    found = False
    while (fitMask.sum() >= 4) and not found:
        residuals = np.zeros_like(ordinate)

        # NaN-mask rejected points so nansum skips them while keeping
        # the arrays aligned with the full input.
        abscissaMasked = abscissa.copy()
        abscissaMasked[~fitMask] = np.nan
        ordinateMasked = ordinate.copy()
        ordinateMasked[~fitMask] = np.nan

        for groupIndices in groupIndicesList:
            # Closed-form solution for the zero-intercept slope aa.
            num = np.nansum(abscissaMasked[groupIndices])
            denom = np.nansum(abscissaMasked[groupIndices]**2./ordinateMasked[groupIndices])
            aa = num / denom

            # Fractional residuals are computed for ALL points in the
            # group (even masked ones) so the turnoff search below can
            # consider the full signal range.
            residuals[groupIndices] = (ordinate[groupIndices] - aa*abscissa[groupIndices]) / \
                ordinate[groupIndices]

        # Use the residuals to compute the turnoff.
        residuals -= np.nanmedian(residuals)

        goodPoints = np.abs(residuals) < maxFracLinearityDeviation

        if goodPoints.sum() > 4:
            # This was an adequate fit.
            found = True
            # The turnoff is the largest signal still consistent with
            # the linear fit; recover its index in the raw input.
            turnoff = np.max(ordinate[goodPoints])
            turnoffIndex = np.where(np.isclose(ordinate, turnoff))[0][0]
        else:
            # This was a bad fit; remove the largest outlier.
            # argmax runs over the compressed fitMask subset, so map the
            # result back to a full-array index via np.nonzero.
            badIndex = np.argmax(np.abs(residuals)[fitMask])
            fitIndices, = np.nonzero(fitMask)
            fitMask[fitIndices[badIndex]] = False

    if not found:
        # Could not find any reasonable value.
        log.warning(
            "Could not find a reasonable initial linear fit to compute linearity turnoff for "
            "amplifier %s; may need finer sampling of input data?",
            ampName,
        )
        if np.all(~fitMask):
            # Nothing left to fit at all; signal failure with index -1.
            return -1, np.nan, np.nan, goodPoints

        # Fall back to the largest surviving signal as the turnoff.
        turnoff = np.max(ordinate[fitMask])
        turnoffIndex = np.where(np.isclose(ordinate, turnoff))[0][0]

        # residuals may be undefined here if the while loop never ran;
        # only its length is used below.
        residuals = np.zeros(len(ordinate))

    # Fit the maximum signal.
    if turnoffIndex == (len(residuals) - 1):
        # This is the last point; we can't do a fit.
        # This is not a warning because we do not actually need this
        # value in practice.
        log.info(
            "No linearity turnoff detected for amplifier %s; try to increase the signal range.",
            ampName,
        )
        maxSignal = ordinate[turnoffIndex]
    else:
        # The maximum signal is the median of all points within
        # maxFracLinearityDeviation of the largest observed signal.
        maxSignalInitial = np.nanmax(ordinate)

        highFluxPoints = (np.nan_to_num(ordinate)
                          > (1.0 - maxFracLinearityDeviation)*maxSignalInitial)
        maxSignal = np.median(ordinate[highFluxPoints])

    return turnoffIndex, turnoff, maxSignal, goodPoints

2259 

2260 

2261def _noderator(turnoff0, turnoff1, turnoff2, minNode, lowNodeSize, midNodeSize, highNodeSize): 

2262 """The "noderator" node-finder. 

2263 

2264 Parameters 

2265 ---------- 

2266 turnoff0 : `float` 

2267 Zeroth turnoff value (e.g. expectation of low-level 

2268 non-linearity threshold) (adu). 

2269 turnoff1 : `float` 

2270 First turnoff value (e.g. ptc turnoff) (adu). 

2271 turnoff2 : `float` 

2272 Second turnoff value (e.g. linearity turnoff) (adu). 

2273 minNode : `float` 

2274 Location to place the first node after 0.0 (if this is <= 0.0 

2275 it will be ignored) (adu). 

2276 lowNodeSize : `float` 

2277 Minimum node size in the low-level non-linearity regime 

2278 (below turnoff0) (adu). 

2279 midNodeSize : `float` 

2280 Minimum node size in the mid-level non-linearity regime 

2281 (between turnoff0 and turnoff1) (adu). 

2282 highNodeSize : `float` 

2283 Minimum node size in the high-level non-linearity regime 

2284 (between turnoff1 and turnoff2) (adu). 

2285 

2286 Returns 

2287 ------- 

2288 nodes : `np.ndarray` 

2289 Array of node values (adu). 

2290 """ 

2291 if turnoff0 > minNode: 

2292 # At least 2 nodes (edges) in the low signal regime. 

2293 nNodesLow = np.clip(int(np.ceil((turnoff0 - minNode) / lowNodeSize)), 2, None) 

2294 midStart = turnoff0 

2295 else: 

2296 nNodesLow = 0 

2297 midStart = 0.0 

2298 # At least 5 nodes (akima minimum) in the mid signal regime. 

2299 nNodesMid = np.clip(int(np.ceil((turnoff1 - midStart) / midNodeSize)), 5, None) 

2300 if turnoff2 > turnoff1: 

2301 # At least 2 nodes (edges) in the high signal regime. 

2302 nNodesHigh = np.clip(int(np.ceil((turnoff2 - turnoff1) / highNodeSize)), 2, None) 

2303 else: 

2304 nNodesHigh = 0 

2305 nodesLow = np.linspace(minNode, turnoff0, nNodesLow) 

2306 nodesMid = np.linspace(midStart, turnoff1, nNodesMid) 

2307 nodesHigh = np.linspace(turnoff1, turnoff2, nNodesHigh) 

2308 

2309 # Make sure we do not duplicate nodes when concatenating. 

2310 nodeList = [] 

2311 if nNodesLow > 1: 

2312 nodeList.append([0.0]) 

2313 nodeList.append(nodesLow[:-1]) 

2314 if nNodesMid > 1: 

2315 nodeList.append(nodesMid) 

2316 if nNodesHigh > 1: 

2317 nodeList.append(nodesHigh[1:]) 

2318 return np.concatenate(nodeList)