Coverage for python / lsst / summit / utils / utils.py: 17%
621 statements
« prev ^ index » next coverage.py v7.13.5, created at 2026-04-18 09:28 +0000
1# This file is part of summit_utils.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <https://www.gnu.org/licenses/>.
21from __future__ import annotations
23import datetime
24import logging
25import os
26from collections.abc import Iterable, Sequence
27from dataclasses import dataclass
28from typing import TYPE_CHECKING, Any
30import astropy.units as u
31import matplotlib
32import numpy as np
33import numpy.typing as npt
34import statsmodels.api as sm # type: ignore[import-untyped]
35from astro_metadata_translator import ObservationInfo
36from astropy.coordinates import AltAz, EarthLocation, SkyCoord, get_sun
37from astropy.stats import mad_std
38from astropy.time import Time
39from deprecated.sphinx import deprecated
40from matplotlib.patches import Rectangle
41from scipy.ndimage import gaussian_filter
42from sklearn.linear_model import LinearRegression, RANSACRegressor # type: ignore[import-untyped]
44import lsst.afw.detection as afwDetect
45import lsst.afw.detection as afwDetection
46import lsst.afw.image as afwImage
47import lsst.afw.math as afwMath
48import lsst.daf.base as dafBase
49import lsst.daf.butler as dafButler
50import lsst.geom as geom
51import lsst.obs.lsst.translators
52import lsst.pipe.base as pipeBase
53import lsst.utils.packages as packageUtils
54from lsst.afw.coord import Weather
55from lsst.afw.detection import Footprint, FootprintSet
56from lsst.daf.butler.cli.cliLog import CliLog
57from lsst.obs.lsst import Latiss, LsstCam, LsstComCam, LsstComCamSim
58from lsst.obs.lsst.translators.latiss import AUXTEL_LOCATION
59from lsst.obs.lsst.translators.lsst import FILTER_DELIMITER, SIMONYI_LOCATION
61from . import dateTime as newLocation
62from .astrometry.utils import genericCameraHeaderToWcs
64if TYPE_CHECKING:
65 from lsst.afw.cameraGeom import Camera
# Public API of this module. Entries marked "deprecated" are retained only
# for backwards compatibility and emit FutureWarnings when used.
__all__ = [
    "SIGMATOFWHM",
    "FWHMTOSIGMA",
    "EFD_CLIENT_MISSING_MSG",
    "GOOGLE_CLOUD_MISSING_MSG",
    "AUXTEL_LOCATION",
    "countPixels",
    "quickSmooth",
    "argMax2d",
    "dayObsIntToString",  # deprecated
    "dayObsSeqNumToVisitId",
    "getImageStats",
    "detectObjectsInExp",
    "fluxesFromFootprints",
    "fluxFromFootprint",
    "humanNameForCelestialObject",
    "getFocusFromHeader",
    "checkStackSetup",
    "setupLogging",
    "getCurrentDayObs_datetime",  # deprecated
    "getCurrentDayObs_int",  # deprecated
    "getCurrentDayObs_humanStr",  # deprecated
    "getExpRecordAge",  # deprecated
    "getSite",
    "getAltAzFromSkyPosition",
    "getExpPositionOffset",
    "starTrackerFileToExposure",
    "obsInfoToDict",
    "getFieldNameAndTileNumber",
    "getAirmassSeeingCorrection",
    "getFilterSeeingCorrection",  # deprecated
    "getBandpassSeeingCorrection",
    "getCdf",
    "getQuantiles",
    "digitizeData",
    "getBboxAround",
    "bboxToMatplotlibRectanle",
    "computeExposureId",
    "computeCcdExposureId",
    "getDetectorIds",
    "getImageArray",
    "getSunAngle",
    "RobustFitter",
]
# Conversion factors between a Gaussian sigma and its FWHM:
# FWHM = sigma * SIGMATOFWHM; sigma = FWHM * FWHMTOSIGMA
SIGMATOFWHM = 2.0 * np.sqrt(2.0 * np.log(2.0))
FWHMTOSIGMA = 1 / SIGMATOFWHM

# Message displayed when the optional lsst-efd-client package is missing
EFD_CLIENT_MISSING_MSG = (
    "ImportError: lsst_efd_client not found. Please install with:\n" " pip install lsst-efd-client"
)

# Message displayed when the optional google-cloud-storage package is missing
GOOGLE_CLOUD_MISSING_MSG = (
    "ImportError: Google cloud storage not found. Please install with:\n"
    " pip install google-cloud-storage"
)
def countPixels(maskedImage: afwImage.MaskedImage, maskPlane: str) -> int:
    """Count how many pixels in an image have a given mask bit set.

    Parameters
    ----------
    maskedImage : `lsst.afw.image.MaskedImage`
        The masked image to inspect.
    maskPlane : `str`
        The name of the bitmask plane.

    Returns
    -------
    count : `int`
        The number of pixels with the selected mask bit set.
    """
    bitMask = maskedImage.mask.getPlaneBitMask(maskPlane)
    return int(np.count_nonzero(maskedImage.mask.array & bitMask))
def quickSmooth(data: npt.NDArray[np.float64], sigma: float = 2) -> npt.NDArray[np.float64]:
    """Apply a quick Gaussian smoothing to image data.

    Not to be used for scientific purposes: it simply improves the stretch
    and visual rendering of low SNR features against the sky background in
    cutouts.

    Parameters
    ----------
    data : `np.array`
        The image data to smooth.
    sigma : `float`, optional
        The size of the smoothing kernel.

    Returns
    -------
    smoothData : `np.array`
        The smoothed data.
    """
    # same sigma in both image dimensions; "constant" mode pads edges with 0
    return gaussian_filter(data, (sigma, sigma), mode="constant")
def argMax2d(array: npt.NDArray[np.float64]) -> tuple[tuple[float, float], bool, list[tuple[float, float]]]:
    """Get the index of the max value of an array and whether it's unique.

    If it's not unique, a list of the other locations containing the maximum
    value is also returned, e.g.

    (12, 34), False, [(56,78), (910, 1112)]

    Parameters
    ----------
    array : `np.array`
        The data.

    Returns
    -------
    maxLocation : `tuple`
        The coords of the first instance of the max value.
    unique : `bool`
        Whether it's the only location.
    otherLocations : `list` of `tuple`
        List of the other max values' locations, empty if unique.
    """
    rows, cols = np.where(array == np.max(array))
    coords = [(float(r), float(c)) for r, c in zip(rows, cols)]
    isUnique = len(coords) == 1  # single unambiguous maximum
    return coords[0], isUnique, coords[1:]
@deprecated(
    reason="dayObsIntToString() has moved to lsst.summit.utils.dateTime. The function is unchanged, "
    "but you should change to import from there. This function will be removed after w_2026_01.",
    version="w_2026_01",
    category=FutureWarning,
)
def dayObsIntToString(dayObs: int) -> str:
    """Convert an integer dayObs to a dash-delimited string.

    e.g. convert the hard to read 20210101 to 2021-01-01

    Deprecated: import this from ``lsst.summit.utils.dateTime`` instead.

    Parameters
    ----------
    dayObs : `int`
        The dayObs.

    Returns
    -------
    dayObs : `str`
        The dayObs as a string.
    """
    # pure delegation to the new home of this function
    return newLocation.dayObsIntToString(dayObs)
def dayObsSeqNumToVisitId(dayObs: int, seqNum: int) -> int:
    """Get the visit id for a given dayObs/seqNum.

    Parameters
    ----------
    dayObs : `int`
        The dayObs.
    seqNum : `int`
        The seqNum.

    Returns
    -------
    visitId : `int`
        The visitId.

    Notes
    -----
    TODO: Remove this horrible hack once DM-30948 makes this possible
    programatically/via the butler.
    """
    if not (19700101 <= dayObs <= 35000101):
        raise ValueError(f"dayObs value {dayObs} outside plausible range")
    # visit id is the dayObs with the zero-padded 5-digit seqNum appended
    return int(str(dayObs) + format(seqNum, "05"))
def getImageStats(exp: afwImage.Exposure) -> pipeBase.Struct:
    """Calculate a grab-bag of stats for an image. Must remain fast.

    Parameters
    ----------
    exp : `lsst.afw.image.Exposure`
        The input exposure.

    Returns
    -------
    stats : `lsst.pipe.base.Struct`
        A container with attributes containing measurements and statistics
        for the image.
    """
    result = pipeBase.Struct()

    vi = exp.visitInfo
    md = exp.getMetadata()

    result.object = vi.object
    result.mjd = vi.getDate().get()

    # the physical filter is "<filter><FILTER_DELIMITER><grating>"; tolerate a
    # label with no delimiter rather than raising IndexError
    fullFilterString = exp.filter.physicalLabel
    filterParts = fullFilterString.split(FILTER_DELIMITER)
    result.filter = filterParts[0]
    result.grating = filterParts[1] if len(filterParts) > 1 else ""

    result.expTime = vi.exposureTime
    result.airmass = vi.getBoresightAirmass()
    result.rotangle = vi.getBoresightRotAngle().asDegrees()

    azAlt = vi.getBoresightAzAlt()
    result.az = azAlt[0].asDegrees()
    result.el = azAlt[1].asDegrees()
    result.focus = md.get("FOCUSZ")

    data = exp.image.array
    result.maxValue = np.max(data)

    peak, uniquePeak, otherPeaks = argMax2d(data)
    result.maxPixelLocation = peak
    # BUGFIX: this previously stored uniquePeak directly, i.e. the *opposite*
    # of what the attribute name says; store whether multiple maxima exist
    result.multipleMaxPixels = not uniquePeak

    result.nBadPixels = countPixels(exp.maskedImage, "BAD")
    result.nSatPixels = countPixels(exp.maskedImage, "SAT")
    result.percentile99 = np.percentile(data, 99)
    result.percentile9999 = np.percentile(data, 99.99)

    # 5-sigma clipped mean/stddev with 2 clipping iterations
    sctrl = afwMath.StatisticsControl()
    sctrl.setNumSigmaClip(5)
    sctrl.setNumIter(2)
    statTypes = afwMath.MEANCLIP | afwMath.STDEVCLIP
    stats = afwMath.makeStatistics(exp.maskedImage, statTypes, sctrl)
    std, _ = stats.getResult(afwMath.STDEVCLIP)
    mean, _ = stats.getResult(afwMath.MEANCLIP)

    result.clippedMean = mean
    result.clippedStddev = std

    return result
def detectObjectsInExp(
    exp: afwImage.Exposure, nSigma: float = 10, nPixMin: int = 10, grow: int = 0
) -> afwDetect.FootprintSet:
    """Quick and dirty object detection for an exposure.

    Return the footPrintSet for the objects in a preferably-postISR exposure.

    Parameters
    ----------
    exp : `lsst.afw.image.Exposure`
        The exposure to detect objects in.
    nSigma : `float`
        The number of sigma for detection.
    nPixMin : `int`
        The minimum number of pixels in an object for detection.
    grow : `int`
        The number of pixels to grow the footprint by after detection.

    Returns
    -------
    footPrintSet : `lsst.afw.detection.FootprintSet`
        The set of footprints in the image.
    """
    # temporarily remove the background so thresholding is relative to zero
    backgroundLevel = np.nanmedian(exp.image.array)
    exp.image -= backgroundLevel

    detectionThreshold = afwDetect.Threshold(nSigma, afwDetect.Threshold.STDEV)
    footPrintSet = afwDetect.FootprintSet(exp.getMaskedImage(), detectionThreshold, "DETECTED", nPixMin)
    if grow > 0:
        footPrintSet = afwDetect.FootprintSet(footPrintSet, grow, True)  # grow isotropically

    exp.image += backgroundLevel  # restore, leaving the input unchanged
    return footPrintSet
def fluxesFromFootprints(
    footprints: afwDetect.FootprintSet | afwDetect.Footprint | Iterable[afwDetect.Footprint],
    parentImage: afwImage.Image,
    subtractImageMedian: bool = False,
) -> npt.NDArray[np.float64]:
    """Calculate the flux from a set of footprints, given the parent image,
    optionally subtracting the whole-image median from each pixel as a very
    rough background subtraction.

    Parameters
    ----------
    footprints : `lsst.afw.detection.FootprintSet` or
                 `lsst.afw.detection.Footprint` or
                 `iterable` of `lsst.afw.detection.Footprint`
        The footprints to measure.
    parentImage : `lsst.afw.image.Image`
        The parent image.
    subtractImageMedian : `bool`, optional
        Subtract a whole-image median from each pixel in the footprint when
        summing as a very crude background subtraction. Does not change the
        original image.

    Returns
    -------
    fluxes : `np.ndarray` of `float`
        The fluxes for each footprint.

    Raises
    ------
    TypeError : raised for unsupported types.
    """
    median = 0
    if subtractImageMedian:
        median = np.nanmedian(parentImage.array)

    # poor person's single dispatch
    badTypeMsg = (
        "This function works with FootprintSets, single Footprints, and iterables of Footprints. "
        f"Got {type(footprints)}: {footprints}"
    )
    if isinstance(footprints, FootprintSet):
        footprints = footprints.getFootprints()
    elif isinstance(footprints, Sequence):
        # BUGFIX: an empty sequence previously raised IndexError here; now it
        # falls through and returns an empty flux array
        if len(footprints) > 0 and not isinstance(footprints[0], Footprint):
            raise TypeError(badTypeMsg)
    elif isinstance(footprints, Footprint):
        footprints = [footprints]
    else:
        raise TypeError(badTypeMsg)

    return np.array([fluxFromFootprint(fp, parentImage, backgroundValue=median) for fp in footprints])
def fluxFromFootprint(
    footprint: afwDetection.Footprint, parentImage: afwImage.Image, backgroundValue: float = 0
) -> float:
    """Calculate the flux from a footprint, given the parent image, optionally
    subtracting a single value from each pixel as a very rough background
    subtraction, e.g. the image median.

    Parameters
    ----------
    footprint : `lsst.afw.detection.Footprint`
        The footprint to measure.
    parentImage : `lsst.afw.image.Image`
        Image containing the footprint.
    backgroundValue : `bool`, optional
        The value to subtract from each pixel in the footprint when summing
        as a very crude background subtraction. Does not change the original
        image.

    Returns
    -------
    flux : `float`
        The flux in the footprint.
    """
    if not backgroundValue:
        # fast path: no subtraction needed, sum directly from the image
        return footprint.computeFluxFromImage(parentImage)

    # subtract from a temporary array copy so the original image is untouched
    xy0 = parentImage.getBBox().getMin()
    return footprint.computeFluxFromArray(parentImage.array - backgroundValue, xy0)
def humanNameForCelestialObject(objName: str) -> list[str]:
    """Returns a list of all human names for obj, or [] if none are found.

    Parameters
    ----------
    objName : `str`
        The/a name of the object.

    Returns
    -------
    names : `list` of `str`
        The names found for the object.
    """
    from astroquery.simbad import Simbad

    try:
        simbadResult = Simbad.query_objectids(objName)
        # human-readable aliases are the IDs prefixed with "NAME"
        return [row["ID"].replace("NAME ", "") for row in simbadResult if row["ID"].startswith("NAME")]
    except Exception:
        return []  # same behavior as for found but un-named objects
def _getAltAzZenithsFromSeqNum(
    butler: dafButler.Butler, dayObs: int, seqNumList: list[int]
) -> tuple[list[float], list[float], list[float]]:
    """Get the alt, az and zenith angle for the seqNums of a given dayObs.

    Parameters
    ----------
    butler : `lsst.daf.butler.Butler`
        The butler to query.
    dayObs : `int`
        The dayObs.
    seqNumList : `list` of `int`
        The seqNums for which to return the alt, az and zenith.

    Returns
    -------
    azimuths : `list` of `float`
        List of the azimuths for each seqNum.
    elevations : `list` of `float`
        List of the elevations for each seqNum.
    zeniths : `list` of `float`
        List of the zenith angles for each seqNum.
    """
    azimuths: list[float] = []
    elevations: list[float] = []
    zeniths: list[float] = []
    for seqNum in seqNumList:
        md = butler.get("raw.metadata", day_obs=dayObs, seq_num=seqNum, detector=0)
        obsInfo = ObservationInfo(md)
        assert obsInfo.altaz_begin is not None, f"altaz_begin is None for seqNum={seqNum}"
        altitude = obsInfo.altaz_begin.alt.value
        azimuths.append(obsInfo.altaz_begin.az.value)
        elevations.append(altitude)
        zeniths.append(90 - altitude)  # zenith angle is the complement of altitude
    return azimuths, elevations, zeniths
def getFocusFromHeader(exp: afwImage.Exposure) -> float | None:
    """Get the raw focus value from the header.

    Parameters
    ----------
    exp : `lsst.afw.image.exposure`
        The exposure.

    Returns
    -------
    focus : `float` or `None`
        The focus value if found, else ``None``.
    """
    metadata = exp.getMetadata()
    return metadata["FOCUSZ"] if "FOCUSZ" in metadata else None
def checkStackSetup() -> None:
    """Check which weekly tag is being used and which local packages are setup.

    Designed primarily for use in notbooks/observing, this prints the weekly
    tag(s) are setup for lsst_distrib, and lists any locally setup packages and
    the path to each.

    Notes
    -----
    Uses print() instead of logger messages as this should simply print them
    without being vulnerable to any log messages potentially being diverted.
    """
    packages = packageUtils.getEnvironmentPackages(include_all=True)

    # value looks something like 'g4eae7cb9+1418867f (w_2022_13)'
    lsstDistribHashAndTags = packages["lsst_distrib"]
    lsstDistribTags = lsstDistribHashAndTags.split()[1]
    if len(lsstDistribTags.split()) == 1:
        tag = lsstDistribTags.replace("(", "").replace(")", "")
        print(f"You are running {tag} of lsst_distrib")
    else:  # multiple weekly tags found for lsst_distrib!
        print(f"The version of lsst_distrib you have is compatible with: {lsstDistribTags}")

    # collect (package, path) pairs for everything setup from a local checkout;
    # the path is everything before the "@<git SHA>" suffix
    localSetups = [
        (package, tags.split("LOCAL:")[1].split("@")[0])
        for package, tags in packages.items()
        if tags.startswith("LOCAL:")
    ]

    if localSetups:
        print("\nLocally setup packages:")
        print("-----------------------")
        maxLen = max(len(package) for package, _ in localSetups)
        for package, path in localSetups:
            print(f"{package:<{maxLen}s} at {path}")
    else:
        print("\nNo locally setup packages (using a vanilla stack)")
def setupLogging(longlog: bool = False) -> None:
    """Setup logging in the same way as one would get from pipetask run.

    Code that isn't run through the butler CLI defaults to WARNING level
    messages and no logger names. This sets the behaviour to follow whatever
    the pipeline default is, currently
    <logger_name> <level>: <message> e.g.
    lsst.isr INFO: Masking defects.

    Parameters
    ----------
    longlog : `bool`, optional
        Use the long-form log formatter?
    """
    CliLog.initLog(longlog=longlog)
@deprecated(
    reason="getCurrentDayObs_datetime() has moved to lsst.summit.utils.dateTime. The function is unchanged,"
    " but renamed to getCurrentDayObsDatetime and you should change to import from there. This function will"
    " be removed after w_2026_01.",
    version="w_2026_01",
    category=FutureWarning,
)
def getCurrentDayObs_datetime() -> datetime.date:
    """Get the current day_obs - the observatory rolls the date over at UTC-12

    Deprecated: use ``lsst.summit.utils.dateTime.getCurrentDayObsDatetime``.

    Returns
    -------
    dayObs : `datetime.date`
        The current dayObs, e.g. datetime.date(2022, 4, 28).
    """
    # pure delegation to the renamed function in its new home
    return newLocation.getCurrentDayObsDatetime()
@deprecated(
    reason="getCurrentDayObs_int() has moved to lsst.summit.utils.dateTime. The function is unchanged,"
    " but renamed to getCurrentDayObsInt and you should change to import from there. This function will"
    " be removed after w_2026_01.",
    version="w_2026_01",
    category=FutureWarning,
)
def getCurrentDayObs_int() -> int:
    """Return the current dayObs as an int in the form 20220428.

    Deprecated: use ``lsst.summit.utils.dateTime.getCurrentDayObsInt``.
    """
    # pure delegation to the renamed function in its new home
    return newLocation.getCurrentDayObsInt()
@deprecated(
    reason="getCurrentDayObs_humanStr() has moved to lsst.summit.utils.dateTime. The function is unchanged,"
    " but renamed to getCurrentDayObsHumanStr and you should change to import from there. This function will"
    " be removed after w_2026_01.",
    version="w_2026_01",
    category=FutureWarning,
)
def getCurrentDayObs_humanStr() -> str:
    """Return the current dayObs as a string in the form '2022-04-28'.

    Deprecated: use ``lsst.summit.utils.dateTime.getCurrentDayObsHumanStr``.
    """
    # pure delegation to the renamed function in its new home
    return newLocation.getCurrentDayObsHumanStr()
@deprecated(
    reason="getExpRecordAge() has moved to lsst.summit.utils.dateTime. The function is unchanged, "
    "but you should change to import from there. This function will be removed after w_2026_01.",
    version="w_2026_01",
    category=FutureWarning,
)
def getExpRecordAge(expRecord: dafButler.DimensionRecord) -> float:
    """Get the time, in seconds, since the end of exposure.

    Deprecated: import this from ``lsst.summit.utils.dateTime`` instead.

    Parameters
    ----------
    expRecord : `lsst.daf.butler.DimensionRecord`
        The exposure record.

    Returns
    -------
    age : `float`
        The age of the exposure, in seconds. Positive for exposures which
        ended in the past.
    """
    # (end - now) is negative for past exposures, so negate to get a
    # positive age for completed exposures
    return -1 * (expRecord.timespan.end - Time.now()).sec
def getSite() -> str:
    """Returns where the code is running.

    Returns
    -------
    location : `str`
        One of:
        ['tucson', 'summit', 'base', 'staff-rsp', 'rubin-devl', 'jenkins',
        'usdf-k8s']
        If the location cannot be determined "UNKOWN" is returned.
    """
    # All nublado instances guarantee that EXTERNAL_URL is set and uniquely
    # identifies it.
    urlToSite = {
        "https://tucson-teststand.lsst.codes": "tucson",
        "https://summit-lsp.lsst.codes": "summit",
        "https://base-lsp.lsst.codes": "base",
        "https://usdf-rsp.slac.stanford.edu": "staff-rsp",
        # we don't care this is the dev RSP, it's basically the same env
        # wrt paths etc
        "https://usdf-rsp-dev.slac.stanford.edu": "staff-rsp",
    }
    externalUrl = os.getenv("EXTERNAL_INSTANCE_URL", "")
    if externalUrl in urlToSite:
        return urlToSite[externalUrl]

    # if no EXTERNAL_URL, try HOSTNAME to see if we're on the dev nodes
    # it is expected that this will be extensible to SLAC
    hostname = os.getenv("HOSTNAME", "")
    if hostname.startswith(("sdfrome", "sdfiana")):  # the interactive nodes
        return "rubin-devl"
    hostnameToSite = {
        "htcondor.ls.lsst.org": "base",
        "htcondor.cp.lsst.org": "summit",
    }
    if hostname in hostnameToSite:
        return hostnameToSite[hostname]

    if os.getenv("JENKINS_HOME", "") != "":
        return "jenkins"

    # we're probably inside a k8s pod doing rapid analysis work at this point
    rapidAnalysisToSite = {
        "TTS": "tucson",
        "BTS": "base",
        "SUMMIT": "summit",
        "USDF": "usdf-k8s",
    }
    rapidAnalysisLocation = os.getenv("RAPID_ANALYSIS_LOCATION", "")
    if rapidAnalysisLocation in rapidAnalysisToSite:
        return rapidAnalysisToSite[rapidAnalysisLocation]

    # we have failed (NB: the "UNKOWN" typo is long-standing API; callers
    # compare against it, so it must not be corrected here)
    return "UNKOWN"
def getAltAzFromSkyPosition(
    skyPos: geom.SpherePoint,
    visitInfo: afwImage.VisitInfo,
    doCorrectRefraction: bool = False,
    wavelength: float = 500.0,
    pressureOverride: float | None = None,
    temperatureOverride: float | None = None,
    relativeHumidityOverride: float | None = None,
) -> tuple[geom.Angle, geom.Angle]:
    """Get the alt/az from the position on the sky and the time and location
    of the observation.

    The temperature, pressure and relative humidity are taken from the
    visitInfo by default, but can be individually overridden as needed. It
    should be noted that the visitInfo never contains a nominal wavelength, and
    so this takes a default value of 500nm.

    Parameters
    ----------
    skyPos : `lsst.geom.SpherePoint`
        The position on the sky.
    visitInfo : `lsst.afw.image.VisitInfo`
        The visit info containing the time of the observation.
    doCorrectRefraction : `bool`, optional
        Correct for the atmospheric refraction?
    wavelength : `float`, optional
        The nominal wavelength in nanometers (e.g. 500.0), as a float.
    pressureOverride : `float`, optional
        The pressure, in bars (e.g. 0.770), to override the value supplied in
        the visitInfo, as a float.
    temperatureOverride : `float`, optional
        The temperature, in Celsius (e.g. 10.0), to override the value supplied
        in the visitInfo, as a float.
    relativeHumidityOverride : `float`, optional
        The relativeHumidity in the range 0..1 (i.e. not as a percentage), to
        override the value supplied in the visitInfo, as a float.

    Returns
    -------
    alt : `lsst.geom.Angle`
        The altitude.
    az : `lsst.geom.Angle`
        The azimuth.
    """
    skyLocation = SkyCoord(skyPos.getRa().asRadians(), skyPos.getDec().asRadians(), unit=u.rad)
    observatory = visitInfo.observatory
    earthLocation = EarthLocation.from_geodetic(
        observatory.getLongitude().asDegrees(),
        observatory.getLatitude().asDegrees(),
        observatory.getElevation(),
    )

    refractionKwargs = {}
    if doCorrectRefraction:
        # wavelength is never supplied in the visitInfo so always take this
        obswl = wavelength * u.nm

        # BUGFIX: these checks were previously truthiness-based, which
        # silently ignored legitimate zero-valued overrides (e.g. a
        # temperature override of exactly 0 C); test against None instead
        if pressureOverride is not None:
            pressure = pressureOverride
        else:
            # ObservationInfos (which are the "source of truth") use pascals,
            # so convert from pascals to bars
            pressure = visitInfo.weather.getAirPressure() / 100000.0

        if temperatureOverride is not None:
            temperature = temperatureOverride
        else:
            temperature = visitInfo.weather.getAirTemperature()

        if relativeHumidityOverride is not None:
            relativeHumidity = relativeHumidityOverride
        else:
            relativeHumidity = visitInfo.weather.getHumidity() / 100.0  # this is in percent

        refractionKwargs = dict(
            pressure=pressure * u.bar,
            temperature=temperature * u.deg_C,
            relative_humidity=relativeHumidity,
            obswl=obswl,
        )

    obsTime = visitInfo.date.toAstropy()
    altAz = AltAz(obstime=obsTime, location=earthLocation, **refractionKwargs)

    obsAltAz = skyLocation.transform_to(altAz)
    alt = geom.Angle(obsAltAz.alt.degree, geom.degrees)
    az = geom.Angle(obsAltAz.az.degree, geom.degrees)

    return alt, az
def getExpPositionOffset(
    exp1: afwImage.Exposure,
    exp2: afwImage.Exposure,
    useWcs: bool = True,
    allowDifferentPlateScales: bool = False,
) -> pipeBase.Struct:
    """Get the change in sky position between two exposures.

    Given two exposures, calculate the offset on the sky between the images.
    If useWcs then use the (fitted or unfitted) skyOrigin from their WCSs, and
    calculate the alt/az from the observation times, otherwise use the nominal
    values in the exposures' visitInfos. Note that if using the visitInfo
    values that for a given pointing the ra/dec will be ~identical, regardless
    of whether astrometric fitting has been performed.

    Values are given as exp1-exp2.

    Parameters
    ----------
    exp1 : `lsst.afw.image.Exposure`
        The first exposure.
    exp2 : `lsst.afw.image.Exposure`
        The second exposure.
    useWcs : `bool`
        Use the WCS for the ra/dec and alt/az if True, else use the nominal/
        boresight values from the exposures' visitInfos.
    allowDifferentPlateScales : `bool`, optional
        Use to disable checking that plate scales are the same. Generally,
        differing plate scales would indicate an error, but where blind-solving
        has been undertaken during commissioning plate scales can be different
        enough to warrant setting this to ``True``.

    Returns
    -------
    offsets : `lsst.pipe.base.Struct`
        A struct containing the offsets:
        ``deltaRa``
            The difference in ra (`lsst.geom.Angle`)
        ``deltaDec``
            The difference in dec (`lsst.geom.Angle`)
        ``deltaAlt``
            The difference in alt (`lsst.geom.Angle`)
        ``deltaAz``
            The difference in az (`lsst.geom.Angle`)
        ``deltaPixels``
            The difference in pixels (`float`)
    """

    wcs1 = exp1.getWcs()
    wcs2 = exp2.getWcs()
    # exp1's plate scale is used for the pixel-offset conversion below
    pixScaleArcSec = wcs1.getPixelScale().asArcseconds()
    if not allowDifferentPlateScales:
        assert np.isclose(
            pixScaleArcSec, wcs2.getPixelScale().asArcseconds()
        ), "Pixel scales in the exposures differ."

    if useWcs:
        # sky origins reflect any astrometric fitting that has been done
        p1 = wcs1.getSkyOrigin()
        p2 = wcs2.getSkyOrigin()
        alt1, az1 = getAltAzFromSkyPosition(p1, exp1.getInfo().getVisitInfo())
        alt2, az2 = getAltAzFromSkyPosition(p2, exp2.getInfo().getVisitInfo())
        ra1 = p1[0]
        ra2 = p2[0]
        dec1 = p1[1]
        dec2 = p2[1]
    else:
        # nominal boresight values only - identical for a given pointing
        # regardless of astrometric fitting
        az1 = exp1.visitInfo.boresightAzAlt[0]
        az2 = exp2.visitInfo.boresightAzAlt[0]
        alt1 = exp1.visitInfo.boresightAzAlt[1]
        alt2 = exp2.visitInfo.boresightAzAlt[1]

        ra1 = exp1.visitInfo.boresightRaDec[0]
        ra2 = exp2.visitInfo.boresightRaDec[0]
        dec1 = exp1.visitInfo.boresightRaDec[1]
        dec2 = exp2.visitInfo.boresightRaDec[1]

        # p1/p2 needed below for the on-sky separation
        p1 = exp1.visitInfo.boresightRaDec
        p2 = exp2.visitInfo.boresightRaDec

    angular_offset = p1.separation(p2).asArcseconds()
    deltaPixels = angular_offset / pixScaleArcSec

    ret = pipeBase.Struct(
        # wrapNear keeps small offsets near zero rather than wrapping to ~360
        deltaRa=(ra1 - ra2).wrapNear(geom.Angle(0.0)),
        deltaDec=dec1 - dec2,
        deltaAlt=alt1 - alt2,
        deltaAz=(az1 - az2).wrapNear(geom.Angle(0.0)),
        deltaPixels=deltaPixels,
    )

    return ret
def starTrackerFileToExposure(filename: str, logger: logging.Logger | None = None) -> afwImage.Exposure:
    """Read the exposure from the file and set the wcs from the header.

    Both the WCS fit and the visitInfo patching below are best-effort: any
    failure is logged as a warning and the exposure is returned anyway.

    Parameters
    ----------
    filename : `str`
        The full path to the file.
    logger : `logging.Logger`, optional
        The logger to use for errors, created if not supplied.

    Returns
    -------
    exp : `lsst.afw.image.Exposure`
        The exposure.
    """
    if not logger:
        logger = logging.getLogger(__name__)
    exp = afwImage.ExposureF(filename)
    try:
        wcs = genericCameraHeaderToWcs(exp)
        exp.setWcs(wcs)
    except Exception as e:
        # deliberately broad: an exposure without a WCS is still returned
        logger.warning(f"Failed to set wcs from header: {e}")

    # for some reason the date isn't being set correctly
    # DATE-OBS is present in the original header, but it's being
    # stripped out and somehow not set (plus it doesn't give the midpoint
    # of the exposure), so set it manually from the midpoint here
    try:
        newArgs = {}  # dict to unpack into visitInfo.copyWith - fill it with whatever needs to be replaced
        md = exp.getMetadata()

        # compute the exposure midpoint from the begin/end header cards
        # NOTE(review): DATE-BEG/DATE-END are treated as TAI here - confirm
        begin = datetime.datetime.fromisoformat(md["DATE-BEG"])
        end = datetime.datetime.fromisoformat(md["DATE-END"])
        duration = end - begin
        mid = begin + duration / 2
        newTime = dafBase.DateTime(mid.isoformat(), dafBase.DateTime.Timescale.TAI)
        newArgs["date"] = newTime

        # AIRPRESS is being set as PRESSURE so afw doesn't pick it up
        # once we're using the butler for data we will just set it to take
        # PRESSURE in the translator instead of this
        weather = exp.visitInfo.getWeather()
        oldPressure = weather.getAirPressure()
        if not np.isfinite(oldPressure):
            pressure = md.get("PRESSURE")
            if pressure is not None:
                logger.info("Patching the weather info using the PRESSURE header keyword")
                newWeather = Weather(weather.getAirTemperature(), pressure, weather.getHumidity())
                newArgs["weather"] = newWeather

        if newArgs:
            # copyWith produces a new visitInfo; attach it to the exposure
            newVi = exp.visitInfo.copyWith(**newArgs)
            exp.info.setVisitInfo(newVi)
    except Exception as e:
        # deliberately broad: leave the visitInfo as read from disk on failure
        logger.warning(f"Failed to set date from header: {e}")

    return exp
def obsInfoToDict(obsInfo: ObservationInfo) -> dict:
    """Convert an ObservationInfo to a dict.

    Parameters
    ----------
    obsInfo : `astro_metadata_translator.ObservationInfo`
        The ObservationInfo to convert.

    Returns
    -------
    obsInfoDict : `dict`
        The ObservationInfo as a dict.
    """
    # all_properties lists every property name; pull each off the object
    return dict((propertyName, getattr(obsInfo, propertyName)) for propertyName in obsInfo.all_properties)
def getFieldNameAndTileNumber(
    field: str, warn: bool = True, logger: logging.Logger | None = None
) -> tuple[str, int | None]:
    """Split an observed field name into its base name and tile number.

    The tile number, when present, is an integer appended to the rest of the
    field name with an underscore. If no valid tile number is found the field
    name is returned unchanged, with ``None`` for the tile number.

    Parameters
    ----------
    field : `str`
        The name of the field.
    warn : `bool`, optional
        Log a warning when the tile number cannot be determined?
    logger : `logging.Logger`, optional
        The logger to warn with; one is created if needed.

    Returns
    -------
    fieldName : `str`
        The name of the field without the trailing tile number, if present.
    tileNum : `int` or `None`
        The number of the tile, as an integer, or ``None`` if not found.
    """
    if warn and not logger:
        logger = logging.getLogger("lsst.summit.utils.utils.getFieldNameAndTileNumber")

    if "_" not in field:
        if warn and logger is not None:
            logger.warning(
                f"Field {field} does not contain an underscore," " so cannot determine the tile number."
            )
        return field, None

    # everything after the last underscore is the candidate tile number
    baseName, _, suffix = field.rpartition("_")
    try:
        tileNumber = int(suffix)
    except ValueError:
        if warn and logger is not None:
            logger.warning(
                f"Field {field} does not contain only an integer after the final underscore"
                " so cannot determine the tile number."
            )
        return field, None

    return baseName, tileNumber
def getAirmassSeeingCorrection(airmass: float) -> float:
    """Return the multiplicative seeing correction for a given airmass.

    Parameters
    ----------
    airmass : `float`
        The airmass; must be greater than or equal to 1.

    Returns
    -------
    correctionFactor : `float`
        The factor to multiply the measured seeing by.

    Raises
    ------
    ValueError
        Raised for unphysical airmasses (less than 1).
    """
    if airmass >= 1:
        # standard Kolmogorov-turbulence airmass scaling
        return airmass ** (-0.6)
    raise ValueError(f"Invalid airmass: {airmass}")
@deprecated(
    reason=". Will be removed after v28.0.",
    version="v27.0",
    category=FutureWarning,
)
def getFilterSeeingCorrection(filterName: str) -> float:
    """Get the correction factor for seeing due to a filter.

    Deprecated alias: this simply delegates to
    `getBandpassSeeingCorrection`, which callers should use directly.

    Parameters
    ----------
    filterName : `str`
        The name of the filter, e.g. 'SDSSg_65mm'.

    Returns
    -------
    correctionFactor : `float`
        The correction factor to apply to the seeing.

    Raises
    ------
    ValueError
        Raised for unknown filters.
    """
    return getBandpassSeeingCorrection(filterName)
def getBandpassSeeingCorrection(filterName: str) -> float:
    """Return the seeing correction factor for the given filter.

    The factor scales the seeing from the filter's effective wavelength to
    the 500 nm reference wavelength via the standard lambda^0.2 law.

    Parameters
    ----------
    filterName : `str`
        The name of the filter, e.g. 'SDSSg_65mm'.

    Returns
    -------
    correctionFactor : `float`
        The correction factor to apply to the seeing.

    Raises
    ------
    ValueError
        Raised for unknown filters.
    """
    # Effective wavelengths (nm) of each physical filter, per instrument.
    effectiveWavelengths = {
        # LATISS
        "SDSSg_65mm": 474.41,
        "SDSSr_65mm": 628.47,
        "SDSSi_65mm": 769.51,
        "SDSSz_65mm": 871.45,
        "SDSSy_65mm": 986.8,
        # ComCam
        "u_02": 370.697,
        "g_01": 476.359,
        "r_03": 619.383,
        "i_06": 754.502,
        "z_03": 866.976,
        "y_04": 972.713,
        # LSSTCam
        "u_24": 365.1397,
        "g_6": 475.6104,
        "r_57": 618.6055,
        "i_39": 753.5490,
        "z_20": 868.0935,
        "y_10": 972.5979,
    }
    if filterName not in effectiveWavelengths:
        raise ValueError(f"Unknown filter name: {filterName}")
    return (effectiveWavelengths[filterName] / 500.0) ** 0.2
def getCdf(data: np.ndarray, scale: int, nBinsMax: int = 300_000) -> tuple[np.ndarray | float, float, float]:
    """Compute an approximate cumulative distribution function of the data,
    scaled to the [0, scale] range.

    If every element of the input is nan then nan is returned for the cdf
    and for both range values.

    Parameters
    ----------
    data : `np.array`
        The input data.
    scale : `int`
        The scaling range of the output.
    nBinsMax : `int`, optional
        Maximum number of histogram bins to use.

    Returns
    -------
    cdf : `np.array` of `int`
        A monotonically increasing sequence representing the scaled
        cumulative distribution function, starting with the value at
        minVal, then at (minVal + 1), and so on.
    minVal : `float`
        An integer-valued float at or below the minimum of the input data.
    maxVal : `float`
        An integer-valued float above the maximum of the input data.
    """
    values = data.ravel()
    # count of non-nan samples, used to normalise the cumulative sum
    nValid = values.size - np.count_nonzero(np.isnan(values))

    minVal = np.floor(np.nanmin(values))
    maxVal = np.ceil(np.nanmax(values)) + 1.0

    # nanmin/nanmax only yield nan when every element is nan, in which case
    # no cdf can be computed, so signal that with nans throughout
    if np.isnan(minVal) or np.isnan(maxVal):
        return np.nan, np.nan, np.nan

    nBins = np.clip(int(maxVal) - int(minVal), 1, nBinsMax)
    counts, _ = np.histogram(values, bins=nBins, range=(int(minVal), int(maxVal)))

    cdf = (scale * np.cumsum(counts) / nValid).astype(np.int64)
    return cdf, minVal, maxVal
def getQuantiles(data: npt.NDArray[np.float64], nColors: int) -> npt.NDArray[np.floating[Any]]:
    """Get boundaries that distribute the data equally into nColors
    intervals, e.g. for building an nColors-color colormap.

    This approximates:
    np.nanquantile(data, np.linspace(0, 1, nColors + 1))
    at coarser precision but much higher speed, via the scaled cdf from
    `getCdf`. For very large data ranges np.nanquantile is used instead,
    as it is more memory efficient.

    If all elements of ``data`` are nan then the output ``boundaries`` are
    all nan too, to keep the interface consistent.

    Parameters
    ----------
    data : `np.array`
        The input image data.
    nColors : `int`
        The number of intervals to distribute data into.

    Returns
    -------
    boundaries : `np.array` of `float`
        A monotonically increasing sequence of size (nColors + 1); the edges
        of the nColors intervals.
    """
    dataRange = np.nanmax(data) - np.nanmin(data)
    if dataRange > 300_000:
        # histogram-based cdf would need too many bins; fall back to the
        # slower but memory-efficient exact quantiles
        logger = logging.getLogger(__name__)
        logger.warning(f"Data range is very large ({dataRange}); using slower quantile code.")
        return np.nanquantile(data, np.linspace(0, 1, nColors + 1))

    cdf, minVal, maxVal = getCdf(data, nColors)
    if np.isnan(minVal):  # cdf calculation failed because all data is nan
        # NOTE(review): this returns nColors values, one fewer than the
        # success path's nColors + 1 — confirm callers rely on this length.
        return np.full(nColors, np.nan)
    assert isinstance(cdf, np.ndarray), "cdf is not an np.ndarray"

    # each cdf step spans this much of the data range
    scale = (maxVal - minVal) / len(cdf)
    edges = [np.argmax(cdf >= level) * scale + minVal for level in range(nColors)]
    edges.append(maxVal)
    return np.asarray(edges)
def digitizeData(data: npt.NDArray[np.float64], nColors: int = 256) -> npt.NDArray[np.integer]:
    """
    Scale data into nColors using its cumulative distribution function.

    Parameters
    ----------
    data : `np.array`
        The input image data.
    nColors : `int`
        The number of intervals to distribute data into.

    Returns
    -------
    data : `np.array` of `int`
        Scaled data in the [0, nColors - 1] range.
    """
    cdf, minVal, maxVal = getCdf(data, nColors - 1)
    assert isinstance(cdf, np.ndarray), "cdf is not an np.ndarray"
    # each histogram bin underlying the cdf spans `scale` data units
    scale = (maxVal - minVal) / len(cdf)
    # Map each value to its histogram bin index: (value - minVal) / scale.
    # The previous expression, np.floor(data * scale - minVal), only agreed
    # with this when scale == 1 (the common, unclipped case); with a clipped
    # bin count (scale != 1) it produced wrong or out-of-range indices.
    bins = np.floor((data - minVal) / scale).astype(np.int64)
    return cdf[bins]
def getBboxAround(centroid: geom.Point, boxSize: int, exp: afwImage.Exposure) -> geom.Box2I:
    """Get a bbox centered on a point, clipped to the exposure.

    If the bbox would extend beyond the bounds of the exposure it is clipped
    to the exposure, resulting in a non-square bbox.

    Parameters
    ----------
    centroid : `lsst.geom.Point`
        The source centroid.
    boxSize : `int`
        The size of the box to centre around the centroid.
    exp : `lsst.afw.image.Exposure`
        The exposure, so the bbox can be clipped to not overrun the bounds.

    Returns
    -------
    bbox : `lsst.geom.Box2I`
        The bounding box, centered on the centroid unless clipping to the
        exposure causes it to be non-square.
    """
    fullBox = geom.BoxI().makeCenteredBox(centroid, geom.Extent2I(boxSize, boxSize))
    return fullBox.clippedTo(exp.getBBox())
def bboxToMatplotlibRectanle(bbox: geom.Box2I | geom.Box2D) -> matplotlib.patches.Rectangle:
    """Convert a bbox to a matplotlib Rectangle for plotting.

    Parameters
    ----------
    bbox : `lsst.geom.Box2I` or `lsst.geom.Box2D`
        The bbox to convert.

    Returns
    -------
    rectangle : `matplotlib.patches.Rectangle`
        The rectangle, anchored at the bbox's lower-left corner.
    """
    lowerLeft = (bbox.minX, bbox.minY)
    width, height = bbox.getDimensions()
    return Rectangle(lowerLeft, width, height)
def computeExposureId(instrument: str, controller: str, dayObs: int, seqNum: int) -> int:
    """Compute the integer exposure ID for an exposure.

    Parameters
    ----------
    instrument : `str`
        The instrument name, e.g. "LATISS" (case insensitive).
    controller : `str`
        The controller code, e.g. "O".
    dayObs : `int`
        The day_obs as an integer, e.g. 20240101.
    seqNum : `int`
        The sequence number.

    Returns
    -------
    exposureId : `int`
        The computed exposure ID.

    Raises
    ------
    ValueError
        Raised for unknown instruments.
    """
    instrument = instrument.lower()
    if instrument == "latiss":
        return lsst.obs.lsst.translators.LatissTranslator.compute_exposure_id(dayObs, seqNum, controller)
    elif instrument == "lsstcomcam":
        return lsst.obs.lsst.translators.LsstComCamTranslator.compute_exposure_id(dayObs, seqNum, controller)
    elif instrument == "lsstcomcamsim":
        return lsst.obs.lsst.translators.LsstComCamSimTranslator.compute_exposure_id(
            dayObs, seqNum, controller
        )
    elif instrument == "lsstcam":
        return lsst.obs.lsst.translators.LsstCamTranslator.compute_exposure_id(dayObs, seqNum, controller)
    else:
        # was missing the f-prefix, so the message never showed which
        # instrument was actually passed
        raise ValueError(f"Unknown instrument {instrument}")
def computeCcdExposureId(instrument: str, exposureId: int, detector: int) -> int:
    """Compute the integer detector-exposure ID for an exposure and detector.

    Parameters
    ----------
    instrument : `str`
        The instrument name, e.g. "LATISS" (case insensitive).
    exposureId : `int`
        The exposure ID.
    detector : `int`
        The detector number; must be valid for the instrument.

    Returns
    -------
    ccdExposureId : `int`
        The computed detector-exposure ID.

    Raises
    ------
    ValueError
        Raised for unknown instruments or out-of-range detector numbers.
    """
    # All the error messages below were missing their f-prefixes, so they
    # never interpolated the offending values; now fixed.
    instrument = instrument.lower()
    if instrument == "latiss":
        if detector != 0:
            raise ValueError(f"Invalid detector {detector} for LATISS")
        return lsst.obs.lsst.translators.LatissTranslator.compute_detector_exposure_id(exposureId, detector)
    elif instrument == "lsstcomcam":
        if detector < 0 or detector >= 9:
            raise ValueError(f"Invalid detector {detector} for LSSTComCam")
        return lsst.obs.lsst.translators.LsstComCamTranslator.compute_detector_exposure_id(
            exposureId, detector
        )
    elif instrument == "lsstcomcamsim":
        if detector < 0 or detector >= 9:
            raise ValueError(f"Invalid detector {detector} for LSSTComCamSim")
        return lsst.obs.lsst.translators.LsstComCamSimTranslator.compute_detector_exposure_id(
            exposureId, detector
        )
    elif instrument == "lsstcam":
        if detector < 0 or detector >= 205:
            raise ValueError(f"Invalid detector {detector} for LSSTCam")
        return lsst.obs.lsst.translators.LsstCamTranslator.compute_detector_exposure_id(exposureId, detector)
    else:
        raise ValueError(f"Unknown instrument {instrument}")
def getDetectorIds(instrumentName: str) -> list[int]:
    """Get a list of detector IDs for a given instrument.

    Parameters
    ----------
    instrumentName : `str`
        The name of the instrument.

    Returns
    -------
    detectorIds : `list` of `int`
        The list of detector IDs, in camera iteration order.
    """
    return [detector.getId() for detector in getCameraFromInstrumentName(instrumentName)]
def getCameraFromInstrumentName(instrumentName: str) -> Camera:
    """Get the camera object given the instrument name (case insenstive).

    Parameters
    ----------
    instrumentName : `str`
        The name of the instrument, e.g. "LATISS" or "LSSTCam". Case
        insenstive.

    Returns
    -------
    camera : `lsst.afw.cameraGeom.Camera`
        The camera object corresponding to the instrument name.

    Raises
    ------
    ValueError
        If the instrument name is not supported.
    """
    name = instrumentName.lower()
    if name == "lsstcam":
        return LsstCam.getCamera()
    elif name == "lsstcomcam":
        return LsstComCam.getCamera()
    elif name == "lsstcomcamsim":
        return LsstComCamSim.getCamera()
    elif name == "latiss":
        return Latiss.getCamera()
    raise ValueError(f"Unsupported instrument: {instrumentName}")
def calcEclipticCoords(ra: float, dec: float) -> tuple[float, float]:
    """Transform J2000 (ra, dec), both in degrees, to IAU1976 Ecliptic
    coordinates (also returned in degrees).

    Matches the results of:

        from astropy.coordinates import SkyCoord, HeliocentricEclipticIAU76
        import astropy.units as u

        p = SkyCoord(ra=ra0*u.deg, dec=dec0*u.deg, distance=1*u.au,
                     frame='hcrs')
        ecl = p.transform_to(HeliocentricEclipticIAU76)
        print(ecl.lon.value, ecl.lat.value)

    except that it's fast.

    Parameters
    ----------
    ra : `float`
        The right ascension, in degrees.
    dec : `float`
        The declination, in degrees.

    Returns
    -------
    lambda : `float`
        The ecliptic longitude in degrees, in [0, 360).
    beta : `float`
        The ecliptic latitude in degrees.
    """
    raRad, decRad = np.deg2rad(ra), np.deg2rad(dec)

    # IAU 1976 obliquity of the ecliptic
    obliquity = np.deg2rad(23.43929111111111)
    cosObl, sinObl = np.cos(obliquity), np.sin(obliquity)

    sinRa, cosRa = np.sin(raRad), np.cos(raRad)
    sinDec, cosDec = np.sin(decRad), np.cos(decRad)

    # standard equatorial -> ecliptic spherical rotation
    eclLon = np.arctan2(sinRa * cosObl + (sinDec / cosDec) * sinObl, cosRa)
    eclLat = np.arcsin(sinDec * cosObl - cosDec * sinObl * sinRa)

    # wrap longitude into [0, 2*pi)
    if eclLon < 0:
        eclLon += np.pi * 2

    return (np.rad2deg(eclLon), np.rad2deg(eclLat))
def getImageArray(
    inputData: np.ndarray | afwImage.Exposure | afwImage.Image | afwImage.MaskedImage,
) -> np.ndarray:
    """Get the image data from anything image-like.

    Parameters
    ----------
    inputData : `numpy.ndarray`, `lsst.afw.image.Exposure`,
        `lsst.afw.image.Image`, or `lsst.afw.image.MaskedImage`
        The input data.

    Returns
    -------
    imageData : `numpy.ndarray`
        The image data.

    Raises
    ------
    TypeError
        If the input data is not a numpy array, lsst.afw.image.Exposure,
        lsst.afw.image.Image, or lsst.afw.image.MaskedImage.
    """
    # checks are ordered as in the original match statement
    if isinstance(inputData, np.ndarray):
        return inputData
    if isinstance(inputData, afwImage.MaskedImage):
        return inputData.image.array
    if isinstance(inputData, afwImage.Image):
        return inputData.array
    if isinstance(inputData, afwImage.Exposure):
        return inputData.image.array
    raise TypeError(
        "This function accepts numpy array, lsst.afw.image.Exposure components."
        f" Got {type(inputData)}"
    )
def getSunAngle(time: Time | None = None) -> float:
    """Return the sun's altitude above the horizon, in degrees.

    If no time is specified, the current time is used. Positive values mean
    the sun is above the horizon, negative that it is below.

    Parameters
    ----------
    time : `astropy.time.Time`, optional
        The time at which to evaluate the sun's altitude; now if omitted.

    Returns
    -------
    sunAlt : `float`
        The angle of the sun to the horizon, in degrees.
    """
    when = Time.now() if time is None else time
    frame = AltAz(obstime=when, location=SIMONYI_LOCATION)
    return get_sun(when).transform_to(frame).alt.deg
@dataclass(slots=True)
class RobustFitResult:
    """Container for the results of a `RobustFitter` robust linear fit.

    Holds the best-fit line parameters from the OLS fit to the RANSAC
    inliers, together with their significance statistics.
    """

    # best-fit line parameters: y = slope * x + intercept
    slope: float
    intercept: float
    # standard deviation of the OLS fit residuals
    scatter: float
    # significance statistics of the fitted slope
    slopePValue: float
    slopeStdErr: float
    slopeTValue: float
    # significance statistics of the fitted intercept
    interceptPValue: float
    interceptStdErr: float
    interceptTValue: float
    # boolean array over the input points, True where flagged as an outlier
    outlierMask: np.ndarray
class RobustFitter:
    """
    Robust linear fit using RANSAC + OLS, reusable across datasets.

    This class fits a robust linear trend to x and y using scikit-learn's
    RANSAC for inlier detection, followed by an OLS fit on inliers to compute
    slope, intercept, scatter, standard errors, t-values, and p-values.
    Configuration (e.g., minSamples) is set at init. Call `fit()` per dataset.
    Results are returned as a `RobustFitResult` and stored internally for later
    access (e.g., plotting).

    Parameters
    ----------
    minSamples : `float`, optional
        Minimum fraction of samples chosen randomly from the original data.
    """

    def __init__(self, *, minSamples: float = 0.2) -> None:
        self.minSamples = minSamples
        self._clearState()

    def _clearState(self) -> None:
        """Reset stored fit state."""
        self.model: Any = None  # the fitted RANSACRegressor
        self.olsModel: Any = None  # statsmodels OLS result on the inliers
        self.outlierMask: np.ndarray | None = None  # True where flagged as outlier
        self.x: np.ndarray | None = None
        self.y: np.ndarray | None = None
        self.slope: float = np.nan
        self.intercept: float = np.nan
        # 68% (~1 sigma) confidence intervals from the OLS fit
        self.ciSlope: tuple[float, float] = (np.nan, np.nan)
        self.ciIntercept: tuple[float, float] = (np.nan, np.nan)
        self.slopePValue = np.nan
        self.slopeStdErr = np.nan
        self.slopeTValue = np.nan
        self.scatter = np.nan

    @staticmethod
    def _defaultResidualThreshold(y: np.ndarray) -> float:
        """Compute the default residual threshold: 1.5x the MAD-based
        standard deviation of the finite y values.

        Raises
        ------
        ValueError
            Raised if there are no finite y values.
        """
        yFinite = np.asarray(y)[np.isfinite(y)]
        if yFinite.size == 0:
            raise ValueError("Cannot compute residual threshold: no finite y.")
        return 1.5 * float(mad_std(yFinite))

    def fit(
        self,
        x: np.ndarray,
        y: np.ndarray,
        *,
        residualThreshold: float | None = None,
        randomState: int = 42,
    ) -> RobustFitResult:
        """
        Fit robust line to (x, y) using RANSAC + OLS on inliers.

        Recomputes residual threshold each call if not provided.

        Parameters
        ----------
        x : array-like
            Independent variable values.
        y : array-like
            Dependent variable values.
        residualThreshold : float, optional
            Residual threshold for inlier detection. If None, computed
            as 1.5 × MAD of y.
        randomState : int, optional
            Random seed for RANSAC.

        Returns
        -------
        result : `RobustFitResult`
            Best-fit parameters and statistics from the OLS inlier fit.

        Raises
        ------
        ValueError
            If no finite x/y values are available for fitting.
        """
        self._clearState()

        x = np.asarray(x)
        y = np.asarray(y)

        # RANSAC/OLS cannot handle nans/infs, so fit only the finite points
        finiteMask = np.isfinite(x) & np.isfinite(y)
        if not finiteMask.any():
            raise ValueError("No finite x/y values to fit.")

        X = x[finiteMask].reshape(-1, 1)
        yFit = y[finiteMask]

        if residualThreshold is None:
            residualThreshold = self._defaultResidualThreshold(yFit)

        ransac = RANSACRegressor(
            estimator=LinearRegression(),
            min_samples=self.minSamples,
            residual_threshold=residualThreshold,
            random_state=randomState,
        )
        ransac.fit(X, yFit)

        slope = float(ransac.estimator_.coef_[0])
        intercept = float(ransac.estimator_.intercept_)

        self.x = x
        self.y = y

        # build a full-length mask: non-finite points are always outliers
        outlierMask = np.ones_like(x, dtype=bool)
        outlierMask[finiteMask] = ~ransac.inlier_mask_
        self.outlierMask = outlierMask

        # refit the inliers with OLS to get proper uncertainty estimates
        xIn = x[~outlierMask]
        yIn = y[~outlierMask]
        XDesign = sm.add_constant(xIn)
        ols = sm.OLS(yIn, XDesign).fit()
        # alpha=0.32 gives a 68% (~1 sigma) confidence interval
        ci = ols.conf_int(alpha=0.32)
        ciIntercept = (float(ci[0][0]), float(ci[0][1]))
        ciSlope = (float(ci[1][0]), float(ci[1][1]))
        scatter = float(np.nanstd(ols.resid, ddof=1)) if ols.resid.size > 1 else np.nan

        self.model = ransac
        self.olsModel = ols
        self.slope = slope
        self.intercept = intercept
        self.ciSlope = ciSlope
        self.ciIntercept = ciIntercept
        self.slopePValue = float(ols.pvalues[1]) if np.isfinite(ols.pvalues[1]) else np.nan
        self.slopeStdErr = float(ols.bse[1]) if np.isfinite(ols.bse[1]) else np.nan
        self.slopeTValue = float(ols.tvalues[1]) if np.isfinite(ols.tvalues[1]) else np.nan
        self.scatter = scatter

        return RobustFitResult(
            slope=slope,
            intercept=intercept,
            scatter=scatter,
            slopePValue=self.slopePValue,
            slopeStdErr=self.slopeStdErr,
            slopeTValue=self.slopeTValue,
            interceptPValue=float(ols.pvalues[0]) if np.isfinite(ols.pvalues[0]) else np.nan,
            interceptStdErr=float(ols.bse[0]) if np.isfinite(ols.bse[0]) else np.nan,
            interceptTValue=float(ols.tvalues[0]) if np.isfinite(ols.tvalues[0]) else np.nan,
            outlierMask=self.outlierMask,
        )

    def reset(self) -> None:
        """Clear any stored fit state."""
        self._clearState()

    def plotBestFit(self, ax: matplotlib.axes.Axes, label=None, color=None, alphaBand=0.2, lw=2, nBins=5):
        """Plot the best fit line, confidence interval,
        and optionally scatter/binned data.

        Parameters
        ----------
        ax : matplotlib.axes.Axes
            The axes to plot on.
        label : str, optional
            Label for the best fit line.
        color : str or None, optional
            Color for the fit line and confidence band.
        alphaBand : float, optional
            Alpha transparency for the confidence interval band.
        lw : int, optional
            Line width for the best fit line.
        nBins : int, optional
            Number of bins for binned statistics.

        Returns
        -------
        ax : matplotlib.axes.Axes
            The axes with the plot.

        Raises
        ------
        RuntimeError
            Raised if called before `fit()`.
        ValueError
            Raised if all x values are NaN.
        """
        if self.x is None or self.y is None or self.outlierMask is None or self.olsModel is None:
            raise RuntimeError("Fit must be called before plotting.")

        # Handle case where self.x contains only NaNs
        if np.isnan(self.x).all():
            raise ValueError("All x values are NaN; cannot plot best fit.")

        # evaluate the fitted line and its confidence band on a fine grid
        xx = np.linspace(np.nanmin(self.x), np.nanmax(self.x), 200)
        X_design = sm.add_constant(xx)

        pred = self.olsModel.get_prediction(X_design)
        summary_frame = pred.summary_frame(alpha=0.05)  # 95% intervals

        mean = summary_frame["mean"]
        ci_lo = summary_frame["mean_ci_lower"]
        ci_hi = summary_frame["mean_ci_upper"]

        # Plot best fit
        ax.plot(xx, mean, color=color, label=label, lw=lw)
        # Plot confidence interval band
        ax.fill_between(xx, ci_lo, ci_hi, color=color, alpha=alphaBand)

        mask = self.outlierMask
        xin, yin = self.x[~mask], self.y[~mask]  # inliers
        xout, _ = self.x[mask], self.y[mask]  # outliers

        # NOTE(review): this gates the binned overlay on the *outlier* x
        # values; np.all of an empty array is True, so with zero outliers
        # the binned points are skipped — confirm whether xin was intended.
        if np.all(np.isnan(xout)):
            # Skip binning if all values are NaN
            pass
        else:
            bin_centers, means, stds, bin_counts = self.getBinnedData(xin, yin, nBins)

            # Plot binned means and stds
            ax.errorbar(
                bin_centers,
                means,
                color=color or "black",
                yerr=stds,
                fmt="o",
                capsize=4,
                markersize=6,
                alpha=0.7,
                zorder=10,
            )

        return ax

    @staticmethod
    def getBinnedData(
        x: np.ndarray, y: np.ndarray, nbins: int
    ) -> tuple[np.ndarray, list[float], list[float], list[int]]:
        """Get binned statistics of y in nbins equal-width bins of x.

        Parameters
        ----------
        x, y : `np.ndarray`
            The data to bin; binning is done on x.
        nbins : `int`
            The number of bins.

        Returns
        -------
        bin_centers : `np.ndarray`
            The centers of the bins.
        means : `list` of `float`
            The median of y in each bin (nan for empty bins).
        stds : `list` of `float`
            The standard error of y in each bin (nan for bins with < 2
            points).
        bin_counts : `list` of `int`
            The number of points in each bin.
        """
        bins = np.linspace(np.nanmin(x), np.nanmax(x), nbins + 1)
        bin_centers = 0.5 * (bins[:-1] + bins[1:])
        digitized = np.digitize(x, bins) - 1
        # np.digitize assigns a value exactly equal to the right-most edge to
        # the overflow bin, which silently dropped the maximum x point from
        # every statistic; fold it back into the last real bin.
        digitized[x == bins[-1]] = nbins - 1
        means = []
        stds = []
        bin_counts = []
        for i in range(nbins):
            bin_y = y[digitized == i]
            if len(bin_y) > 0:
                means.append(np.nanmedian(bin_y))
                if len(bin_y) > 1:
                    stds.append(np.nanstd(bin_y, ddof=1) / np.sqrt(len(bin_y)))
                else:
                    stds.append(np.nan)
                bin_counts.append(len(bin_y))
            else:
                means.append(np.nan)
                stds.append(np.nan)
                bin_counts.append(0)
        return bin_centers, means, stds, bin_counts