Coverage for python / lsst / summit / utils / utils.py: 17%
620 statements
« prev ^ index » next coverage.py v7.13.5, created at 2026-04-22 09:17 +0000
« prev ^ index » next coverage.py v7.13.5, created at 2026-04-22 09:17 +0000
1# This file is part of summit_utils.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <https://www.gnu.org/licenses/>.
21from __future__ import annotations
23import datetime
24import logging
25import os
26from collections.abc import Iterable, Sequence
27from dataclasses import dataclass
28from typing import TYPE_CHECKING, Any
30import astropy.units as u
31import matplotlib
32import numpy as np
33import numpy.typing as npt
34import statsmodels.api as sm # type: ignore[import-untyped]
35from astro_metadata_translator import ObservationInfo
36from astropy.coordinates import AltAz, EarthLocation, SkyCoord, get_sun
37from astropy.stats import mad_std
38from astropy.time import Time
39from deprecated.sphinx import deprecated
40from matplotlib.patches import Rectangle
41from scipy.ndimage import gaussian_filter
42from sklearn.linear_model import LinearRegression, RANSACRegressor # type: ignore[import-untyped]
44import lsst.afw.detection as afwDetect
45import lsst.afw.detection as afwDetection
46import lsst.afw.image as afwImage
47import lsst.afw.math as afwMath
48import lsst.daf.base as dafBase
49import lsst.daf.butler as dafButler
50import lsst.geom as geom
51import lsst.obs.lsst.translators
52import lsst.pipe.base as pipeBase
53import lsst.utils.packages as packageUtils
54from lsst.afw.coord import Weather
55from lsst.afw.detection import Footprint, FootprintSet
56from lsst.daf.butler.cli.cliLog import CliLog
57from lsst.obs.lsst import Latiss, LsstCam, LsstComCam, LsstComCamSim
58from lsst.obs.lsst.translators.latiss import AUXTEL_LOCATION
59from lsst.obs.lsst.translators.lsst import FILTER_DELIMITER, SIMONYI_LOCATION
61from . import dateTime as newLocation
62from .astrometry.utils import genericCameraHeaderToWcs
64if TYPE_CHECKING:
65 from lsst.afw.cameraGeom import Camera
# Public API of this module; functions marked "deprecated" are retained as
# thin wrappers and will be removed after the noted weekly tag.
__all__ = [
    "SIGMATOFWHM",
    "FWHMTOSIGMA",
    "EFD_CLIENT_MISSING_MSG",
    "GOOGLE_CLOUD_MISSING_MSG",
    "AUXTEL_LOCATION",
    "countPixels",
    "quickSmooth",
    "argMax2d",
    "dayObsIntToString",  # deprecated
    "dayObsSeqNumToVisitId",
    "getImageStats",
    "detectObjectsInExp",
    "fluxesFromFootprints",
    "fluxFromFootprint",
    "humanNameForCelestialObject",
    "getFocusFromHeader",
    "checkStackSetup",
    "setupLogging",
    "getCurrentDayObs_datetime",  # deprecated
    "getCurrentDayObs_int",  # deprecated
    "getCurrentDayObs_humanStr",  # deprecated
    "getExpRecordAge",  # deprecated
    "getSite",
    "getAltAzFromSkyPosition",
    "getExpPositionOffset",
    "starTrackerFileToExposure",
    "obsInfoToDict",
    "getFieldNameAndTileNumber",
    "getAirmassSeeingCorrection",
    "getFilterSeeingCorrection",  # deprecated
    "getBandpassSeeingCorrection",
    "getCdf",
    "getQuantiles",
    "digitizeData",
    "getBboxAround",
    "bboxToMatplotlibRectanle",
    "computeExposureId",
    "computeCcdExposureId",
    "getDetectorIds",
    "getImageArray",
    "getSunAngle",
    "RobustFitter",
]

# Conversion factors between a Gaussian sigma and its FWHM:
# FWHM = sigma * 2*sqrt(2*ln(2))
SIGMATOFWHM = 2.0 * np.sqrt(2.0 * np.log(2.0))
FWHMTOSIGMA = 1 / SIGMATOFWHM

# Canned error messages for optional dependencies that are not installed.
EFD_CLIENT_MISSING_MSG = (
    "ImportError: lsst_efd_client not found. Please install with:\n" " pip install lsst-efd-client"
)

GOOGLE_CLOUD_MISSING_MSG = (
    "ImportError: Google cloud storage not found. Please install with:\n"
    " pip install google-cloud-storage"
)
def countPixels(maskedImage: afwImage.MaskedImage, maskPlane: str) -> int:
    """Count the number of pixels in an image with a given mask bit set.

    Parameters
    ----------
    maskedImage : `lsst.afw.image.MaskedImage`
        The masked image.
    maskPlane : `str`
        The name of the bitmask plane.

    Returns
    -------
    count : `int`
        The number of pixels with the selected mask bit set.
    """
    bit = maskedImage.mask.getPlaneBitMask(maskPlane)
    # count_nonzero counts directly rather than materializing the index
    # arrays that len(np.where(...)[0]) would build
    return int(np.count_nonzero(np.bitwise_and(maskedImage.mask.array, bit)))
def quickSmooth(data: npt.NDArray[np.float64], sigma: float = 2) -> npt.NDArray[np.float64]:
    """Apply a quick Gaussian blur to image data.

    This is purely cosmetic — it improves the stretch and the visual
    rendering of low-SNR features against the sky background in cutouts —
    and is not to be used for scientific purposes.

    Parameters
    ----------
    data : `np.array`
        The image data to smooth.
    sigma : `float`, optional
        The size of the smoothing kernel, applied in both dimensions.

    Returns
    -------
    smoothData : `np.array`
        The smoothed data.
    """
    return gaussian_filter(data, (sigma, sigma), mode="constant")
def argMax2d(array: npt.NDArray[np.float64]) -> tuple[tuple[float, float], bool, list[tuple[float, float]]]:
    """Locate the maximum of a 2d array and report whether it is unique.

    If the maximum is not unique, the remaining locations holding the
    maximum value are also returned, e.g.

    (12, 34), False, [(56,78), (910, 1112)]

    Parameters
    ----------
    array : `np.array`
        The data.

    Returns
    -------
    maxLocation : `tuple`
        The coords of the first instance of the max value.
    unique : `bool`
        Whether it's the only location.
    otherLocations : `list` of `tuple`
        List of the other max values' locations, empty if unique.
    """
    rows, cols = np.where(array == np.max(array))
    coords = [(float(r), float(c)) for r, c in zip(rows, cols)]
    isUnique = len(coords) == 1  # single unambiguous value
    return coords[0], isUnique, coords[1:]
@deprecated(
    reason="dayObsIntToString() has moved to lsst.summit.utils.dateTime. The function is unchanged, "
    "but you should change to import from there. This function will be removed after w_2026_01.",
    version="w_2026_01",
    category=FutureWarning,
)
def dayObsIntToString(dayObs: int) -> str:
    """Convert an integer dayObs to a dash-delimited string.

    For example, converts the hard-to-read 20210101 to 2021-01-01.

    Parameters
    ----------
    dayObs : `int`
        The dayObs.

    Returns
    -------
    dayObs : `str`
        The dayObs as a string.
    """
    # thin forwarding shim kept only for backward compatibility
    return newLocation.dayObsIntToString(dayObs)
def dayObsSeqNumToVisitId(dayObs: int, seqNum: int) -> int:
    """Get the visit id for a given dayObs/seqNum.

    Parameters
    ----------
    dayObs : `int`
        The dayObs.
    seqNum : `int`
        The seqNum.

    Returns
    -------
    visitId : `int`
        The visitId.

    Notes
    -----
    TODO: Remove this horrible hack once DM-30948 makes this possible
    programatically/via the butler.
    """
    if not (19700101 <= dayObs <= 35000101):
        raise ValueError(f"dayObs value {dayObs} outside plausible range")
    # visit id is the dayObs with the zero-padded 5-digit seqNum appended
    return int(f"{dayObs}{seqNum:05}")
def getImageStats(exp: afwImage.Exposure) -> pipeBase.Struct:
    """Calculate a grab-bag of stats for an image. Must remain fast.

    Parameters
    ----------
    exp : `lsst.afw.image.Exposure`
        The input exposure.

    Returns
    -------
    stats : `lsst.pipe.base.Struct`
        A container with attributes containing measurements and statistics
        for the image.
    """
    result = pipeBase.Struct()

    vi = exp.visitInfo
    expTime = vi.exposureTime
    md = exp.getMetadata()

    obj = vi.object
    mjd = vi.getDate().get()
    result.object = obj
    result.mjd = mjd

    # the physical filter label is split on FILTER_DELIMITER into the filter
    # and grating components
    fullFilterString = exp.filter.physicalLabel
    filt = fullFilterString.split(FILTER_DELIMITER)[0]
    grating = fullFilterString.split(FILTER_DELIMITER)[1]

    airmass = vi.getBoresightAirmass()
    rotangle = vi.getBoresightRotAngle().asDegrees()

    azAlt = vi.getBoresightAzAlt()
    az = azAlt[0].asDegrees()
    el = azAlt[1].asDegrees()

    result.expTime = expTime
    result.filter = filt
    result.grating = grating
    result.airmass = airmass
    result.rotangle = rotangle
    result.az = az
    result.el = el
    result.focus = md.get("FOCUSZ")

    data = exp.image.array
    result.maxValue = np.max(data)

    peak, uniquePeak, otherPeaks = argMax2d(data)
    result.maxPixelLocation = peak
    # NOTE(review): this stores the *uniqueness* flag (True when the maximum
    # is unique) under the name "multipleMaxPixels", which reads as the
    # inverse. Callers may rely on the current behavior, so it is left
    # unchanged — confirm intent before fixing.
    result.multipleMaxPixels = uniquePeak

    result.nBadPixels = countPixels(exp.maskedImage, "BAD")
    result.nSatPixels = countPixels(exp.maskedImage, "SAT")
    result.percentile99 = np.percentile(data, 99)
    result.percentile9999 = np.percentile(data, 99.99)

    # 5-sigma, 2-iteration clipped mean and stddev
    sctrl = afwMath.StatisticsControl()
    sctrl.setNumSigmaClip(5)
    sctrl.setNumIter(2)
    statTypes = afwMath.MEANCLIP | afwMath.STDEVCLIP
    stats = afwMath.makeStatistics(exp.maskedImage, statTypes, sctrl)
    std, stderr = stats.getResult(afwMath.STDEVCLIP)
    mean, meanerr = stats.getResult(afwMath.MEANCLIP)

    result.clippedMean = mean
    result.clippedStddev = std

    return result
def detectObjectsInExp(
    exp: afwImage.Exposure, nSigma: float = 10, nPixMin: int = 10, grow: int = 0
) -> afwDetect.FootprintSet:
    """Quick and dirty object detection for an exposure.

    Returns the footprint set for the objects in a preferably-postISR
    exposure. No background model is fitted: the image median is subtracted
    before detection and added back afterwards, leaving the exposure's
    pixels unchanged on exit.

    Parameters
    ----------
    exp : `lsst.afw.image.Exposure`
        The exposure to detect objects in.
    nSigma : `float`
        The number of sigma for detection.
    nPixMin : `int`
        The minimum number of pixels in an object for detection.
    grow : `int`
        The number of pixels to grow the footprint by after detection.

    Returns
    -------
    footPrintSet : `lsst.afw.detection.FootprintSet`
        The set of footprints in the image.
    """
    backgroundLevel = np.nanmedian(exp.image.array)
    exp.image -= backgroundLevel

    threshold = afwDetect.Threshold(nSigma, afwDetect.Threshold.STDEV)
    footPrintSet = afwDetect.FootprintSet(exp.getMaskedImage(), threshold, "DETECTED", nPixMin)
    if grow > 0:
        footPrintSet = afwDetect.FootprintSet(footPrintSet, grow, True)  # isotropic growth

    exp.image += backgroundLevel  # add back in to leave background unchanged
    return footPrintSet
def fluxesFromFootprints(
    footprints: afwDetect.FootprintSet | afwDetect.Footprint | Iterable[afwDetect.Footprint],
    parentImage: afwImage.Image,
    subtractImageMedian: bool = False,
) -> npt.NDArray[np.float64]:
    """Measure the flux of each footprint against the parent image.

    Optionally subtracts the whole-image median from each pixel as a very
    rough background subtraction; the original image is never modified.

    Parameters
    ----------
    footprints : `lsst.afw.detection.FootprintSet` or
                 `lsst.afw.detection.Footprint` or
                 `iterable` of `lsst.afw.detection.Footprint`
        The footprints to measure.
    parentImage : `lsst.afw.image.Image`
        The parent image.
    subtractImageMedian : `bool`, optional
        Subtract a whole-image median from each pixel in the footprint when
        summing as a very crude background subtraction.

    Returns
    -------
    fluxes : `np.array` of `float`
        The fluxes for each footprint.

    Raises
    ------
    TypeError
        Raised for unsupported footprint container types.
    """
    backgroundLevel = np.nanmedian(parentImage.array) if subtractImageMedian else 0

    # poor person's single dispatch: normalize the input to a plain list of
    # Footprints, checking FootprintSet first since it is also not a Sequence
    badTypeMsg = (
        "This function works with FootprintSets, single Footprints, and iterables of Footprints. "
        f"Got {type(footprints)}: {footprints}"
    )
    if isinstance(footprints, FootprintSet):
        footprintList = footprints.getFootprints()
    elif isinstance(footprints, Sequence):
        if not isinstance(footprints[0], Footprint):
            raise TypeError(badTypeMsg)
        footprintList = footprints
    elif isinstance(footprints, Footprint):
        footprintList = [footprints]
    else:
        raise TypeError(badTypeMsg)

    return np.array(
        [fluxFromFootprint(fp, parentImage, backgroundValue=backgroundLevel) for fp in footprintList]
    )
def fluxFromFootprint(
    footprint: afwDetection.Footprint, parentImage: afwImage.Image, backgroundValue: float = 0
) -> float:
    """Measure the flux in a single footprint on the parent image.

    Optionally subtracts a single value (e.g. the image median) from each
    pixel as a very rough background subtraction. The original image is
    never modified.

    Parameters
    ----------
    footprint : `lsst.afw.detection.Footprint`
        The footprint to measure.
    parentImage : `lsst.afw.image.Image`
        Image containing the footprint.
    backgroundValue : `float`, optional
        The value to subtract from each pixel in the footprint when summing.

    Returns
    -------
    flux : `float`
        The flux in the footprint.
    """
    if not backgroundValue:
        # fast path: nothing to subtract, sum directly from the image
        return footprint.computeFluxFromImage(parentImage)
    # sum over a background-subtracted copy of the pixel array
    xy0 = parentImage.getBBox().getMin()
    return footprint.computeFluxFromArray(parentImage.array - backgroundValue, xy0)
def humanNameForCelestialObject(objName: str) -> list[str]:
    """Returns a list of all human names for obj, or [] if none are found.

    Parameters
    ----------
    objName : `str`
        The/a name of the object.

    Returns
    -------
    names : `list` of `str`
        The names found for the object.
    """
    # imported here so the module does not hard-depend on astroquery
    from astroquery.simbad import Simbad

    try:
        simbadResult = Simbad.query_objectids(objName)
        return [row["ID"].replace("NAME ", "") for row in simbadResult if row["ID"].startswith("NAME")]
    except Exception:
        # same behavior as for found but un-named objects
        return []
def _getAltAzZenithsFromSeqNum(
    butler: dafButler.Butler, dayObs: int, seqNumList: list[int]
) -> tuple[list[float], list[float], list[float]]:
    """Get the alt, az and zenith angle for the seqNums of a given dayObs.

    Parameters
    ----------
    butler : `lsst.daf.butler.Butler`
        The butler to query.
    dayObs : `int`
        The dayObs.
    seqNumList : `list` of `int`
        The seqNums for which to return the alt, az and zenith.

    Returns
    -------
    azimuths : `list` of `float`
        List of the azimuths for each seqNum.
    elevations : `list` of `float`
        List of the elevations for each seqNum.
    zeniths : `list` of `float`
        List of the zenith angles for each seqNum.
    """
    azimuths: list[float] = []
    elevations: list[float] = []
    zeniths: list[float] = []
    for seqNum in seqNumList:
        md = butler.get("raw.metadata", day_obs=dayObs, seq_num=seqNum, detector=0)
        altAzBegin = ObservationInfo(md).altaz_begin
        altitude = altAzBegin.alt.value
        elevations.append(altitude)
        zeniths.append(90 - altitude)  # zenith angle is the complement of the altitude
        azimuths.append(altAzBegin.az.value)
    return azimuths, elevations, zeniths
def getFocusFromHeader(exp: afwImage.Exposure) -> float | None:
    """Get the raw focus value from the exposure's header metadata.

    Parameters
    ----------
    exp : `lsst.afw.image.exposure`
        The exposure.

    Returns
    -------
    focus : `float` or `None`
        The FOCUSZ value if present in the metadata, else ``None``.
    """
    md = exp.getMetadata()
    return md["FOCUSZ"] if "FOCUSZ" in md else None
def checkStackSetup() -> None:
    """Check which weekly tag is being used and which local packages are setup.

    Designed primarily for use in notebooks/observing, this prints the weekly
    tag(s) setup for lsst_distrib, and lists any locally setup packages and
    the path to each.

    Notes
    -----
    Uses print() instead of logger messages as this should simply print them
    without being vulnerable to any log messages potentially being diverted.
    """
    packages = packageUtils.getEnvironmentPackages(include_all=True)

    lsstDistribHashAndTags = packages["lsst_distrib"]  # looks something like 'g4eae7cb9+1418867f (w_2022_13)'
    # Split only the leading hash off so that multiple space-separated tags,
    # e.g. '(w_2022_13 w_2022_14)', stay together. The previous plain
    # .split()[1] kept only the first token, which made the multi-tag branch
    # below unreachable.
    lsstDistribTags = lsstDistribHashAndTags.split(maxsplit=1)[1]
    if len(lsstDistribTags.split()) == 1:
        tag = lsstDistribTags.replace("(", "")
        tag = tag.replace(")", "")
        print(f"You are running {tag} of lsst_distrib")
    else:  # multiple weekly tags found for lsst_distrib!
        print(f"The version of lsst_distrib you have is compatible with: {lsstDistribTags}")

    localPackages = []
    localPaths = []
    for package, tags in packages.items():
        if tags.startswith("LOCAL:"):
            path = tags.split("LOCAL:")[1]
            path = path.split("@")[0]  # don't need the git SHA etc
            localPaths.append(path)
            localPackages.append(package)

    if localPackages:
        print("\nLocally setup packages:")
        print("-----------------------")
        maxLen = max(len(package) for package in localPackages)
        for package, path in zip(localPackages, localPaths):
            print(f"{package:<{maxLen}s} at {path}")
    else:
        print("\nNo locally setup packages (using a vanilla stack)")
def setupLogging(longlog: bool = False) -> None:
    """Configure logging the same way ``pipetask run`` would.

    Code that isn't run through the butler CLI defaults to WARNING level
    messages and no logger names. This switches to the pipeline default,
    currently ``<logger_name> <level>: <message>``, e.g.
    ``lsst.isr INFO: Masking defects.``

    Parameters
    ----------
    longlog : `bool`, optional
        Passed through to ``CliLog.initLog``.
    """
    CliLog.initLog(longlog=longlog)
@deprecated(
    reason="getCurrentDayObs_datetime() has moved to lsst.summit.utils.dateTime. The function is unchanged,"
    " but renamed to getCurrentDayObsDatetime and you should change to import from there. This function will"
    " be removed after w_2026_01.",
    version="w_2026_01",
    category=FutureWarning,
)
def getCurrentDayObs_datetime() -> datetime.date:
    """Get the current day_obs - the observatory rolls the date over at UTC-12.

    Returns
    -------
    dayObs : `datetime.date`
        The current dayObs, e.g. ``datetime.date(2022, 4, 28)``.
    """
    # thin forwarding shim kept only for backward compatibility
    return newLocation.getCurrentDayObsDatetime()
@deprecated(
    reason="getCurrentDayObs_int() has moved to lsst.summit.utils.dateTime. The function is unchanged,"
    " but renamed to getCurrentDayObsInt and you should change to import from there. This function will"
    " be removed after w_2026_01.",
    version="w_2026_01",
    category=FutureWarning,
)
def getCurrentDayObs_int() -> int:
    """Return the current dayObs as an int in the form 20220428.

    Returns
    -------
    dayObs : `int`
        The current dayObs as an integer.
    """
    # thin forwarding shim kept only for backward compatibility
    return newLocation.getCurrentDayObsInt()
@deprecated(
    reason="getCurrentDayObs_humanStr() has moved to lsst.summit.utils.dateTime. The function is unchanged,"
    " but renamed to getCurrentDayObsHumanStr and you should change to import from there. This function will"
    " be removed after w_2026_01.",
    version="w_2026_01",
    category=FutureWarning,
)
def getCurrentDayObs_humanStr() -> str:
    """Return the current dayObs as a string in the form '2022-04-28'.

    Returns
    -------
    dayObs : `str`
        The current dayObs as a dash-delimited string.
    """
    # thin forwarding shim kept only for backward compatibility
    return newLocation.getCurrentDayObsHumanStr()
@deprecated(
    reason="getExpRecordAge() has moved to lsst.summit.utils.dateTime. The function is unchanged, "
    "but you should change to import from there. This function will be removed after w_2026_01.",
    version="w_2026_01",
    category=FutureWarning,
)
def getExpRecordAge(expRecord: dafButler.DimensionRecord) -> float:
    """Get the time, in seconds, since the end of exposure.

    Parameters
    ----------
    expRecord : `lsst.daf.butler.DimensionRecord`
        The exposure record.

    Returns
    -------
    age : `float`
        The age of the exposure, in seconds.
    """
    # (now - end) is exactly the negation of (end - now)
    return (Time.now() - expRecord.timespan.end).sec
def getSite() -> str:
    """Returns where the code is running.

    Returns
    -------
    location : `str`
        One of:
        ['tucson', 'summit', 'base', 'staff-rsp', 'rubin-devl', 'jenkins',
        'usdf-k8s']
        If the location cannot be determined "UNKOWN" is returned.
    """
    # All nublado instances guarantee that EXTERNAL_URL is set and uniquely
    # identifies it.
    knownInstanceUrls = {
        "https://tucson-teststand.lsst.codes": "tucson",
        "https://summit-lsp.lsst.codes": "summit",
        "https://base-lsp.lsst.codes": "base",
        "https://usdf-rsp.slac.stanford.edu": "staff-rsp",
        # we don't care this is the dev RSP, it's basically the same env wrt paths etc
        "https://usdf-rsp-dev.slac.stanford.edu": "staff-rsp",
    }
    location = os.getenv("EXTERNAL_INSTANCE_URL", "")
    if location in knownInstanceUrls:
        return knownInstanceUrls[location]

    # if no EXTERNAL_URL, try HOSTNAME to see if we're on the dev nodes
    # it is expected that this will be extensible to SLAC
    hostname = os.getenv("HOSTNAME", "")
    if hostname.startswith(("sdfrome", "sdfiana")):
        return "rubin-devl"
    knownHostnames = {
        "htcondor.ls.lsst.org": "base",
        "htcondor.cp.lsst.org": "summit",
    }
    if hostname in knownHostnames:
        return knownHostnames[hostname]

    if os.getenv("JENKINS_HOME", ""):
        return "jenkins"

    # we're probably inside a k8s pod doing rapid analysis work at this point
    rapidAnalysisSites = {"TTS": "tucson", "BTS": "base", "SUMMIT": "summit", "USDF": "usdf-k8s"}
    location = os.getenv("RAPID_ANALYSIS_LOCATION", "")
    if location in rapidAnalysisSites:
        return rapidAnalysisSites[location]

    # we have failed. NB the "UNKOWN" spelling is pre-existing and may be
    # relied upon by callers, so it is preserved deliberately.
    return "UNKOWN"
def getAltAzFromSkyPosition(
    skyPos: geom.SpherePoint,
    visitInfo: afwImage.VisitInfo,
    doCorrectRefraction: bool = False,
    wavelength: float = 500.0,
    pressureOverride: float | None = None,
    temperatureOverride: float | None = None,
    relativeHumidityOverride: float | None = None,
) -> tuple[geom.Angle, geom.Angle]:
    """Get the alt/az from the position on the sky and the time and location
    of the observation.

    The temperature, pressure and relative humidity are taken from the
    visitInfo by default, but can be individually overridden as needed. It
    should be noted that the visitInfo never contains a nominal wavelength,
    and so this takes a default value of 500nm.

    Parameters
    ----------
    skyPos : `lsst.geom.SpherePoint`
        The position on the sky.
    visitInfo : `lsst.afw.image.VisitInfo`
        The visit info containing the time of the observation.
    doCorrectRefraction : `bool`, optional
        Correct for the atmospheric refraction?
    wavelength : `float`, optional
        The nominal wavelength in nanometers (e.g. 500.0), as a float.
    pressureOverride : `float`, optional
        The pressure, in bars (e.g. 0.770), to override the value supplied in
        the visitInfo, as a float.
    temperatureOverride : `float`, optional
        The temperature, in Celsius (e.g. 10.0), to override the value
        supplied in the visitInfo, as a float.
    relativeHumidityOverride : `float`, optional
        The relativeHumidity in the range 0..1 (i.e. not as a percentage), to
        override the value supplied in the visitInfo, as a float.

    Returns
    -------
    alt : `lsst.geom.Angle`
        The altitude.
    az : `lsst.geom.Angle`
        The azimuth.
    """
    skyLocation = SkyCoord(skyPos.getRa().asRadians(), skyPos.getDec().asRadians(), unit=u.rad)
    long = visitInfo.observatory.getLongitude()
    lat = visitInfo.observatory.getLatitude()
    ele = visitInfo.observatory.getElevation()
    earthLocation = EarthLocation.from_geodetic(long.asDegrees(), lat.asDegrees(), ele)

    refractionKwargs = {}
    if doCorrectRefraction:
        # wavelength is never supplied in the visitInfo so always take this
        wavelength = wavelength * u.nm

        # Overrides are compared against None explicitly so that legitimate
        # falsy values (e.g. a temperature of 0.0 C) are not silently ignored
        # (previously these used bare truthiness checks).
        if pressureOverride is not None:
            pressure = pressureOverride
        else:
            pressure = visitInfo.weather.getAirPressure()
            # ObservationInfos (which are the "source of truth") use pascals,
            # so convert from pascals to bars
            pressure /= 100000.0
        pressure = pressure * u.bar

        if temperatureOverride is not None:
            temperature = temperatureOverride
        else:
            temperature = visitInfo.weather.getAirTemperature()
        temperature = temperature * u.deg_C

        if relativeHumidityOverride is not None:
            relativeHumidity = relativeHumidityOverride
        else:
            relativeHumidity = visitInfo.weather.getHumidity() / 100.0  # this is in percent

        refractionKwargs = dict(
            pressure=pressure, temperature=temperature, relative_humidity=relativeHumidity, obswl=wavelength
        )

    obsTime = visitInfo.date.toAstropy()
    altAz = AltAz(obstime=obsTime, location=earthLocation, **refractionKwargs)

    obsAltAz = skyLocation.transform_to(altAz)
    alt = geom.Angle(obsAltAz.alt.degree, geom.degrees)
    az = geom.Angle(obsAltAz.az.degree, geom.degrees)

    return alt, az
def getExpPositionOffset(
    exp1: afwImage.Exposure,
    exp2: afwImage.Exposure,
    useWcs: bool = True,
    allowDifferentPlateScales: bool = False,
) -> pipeBase.Struct:
    """Get the change in sky position between two exposures.

    Given two exposures, calculate the offset on the sky between the images.
    If useWcs then use the (fitted or unfitted) skyOrigin from their WCSs, and
    calculate the alt/az from the observation times, otherwise use the nominal
    values in the exposures' visitInfos. Note that if using the visitInfo
    values that for a given pointing the ra/dec will be ~identical, regardless
    of whether astrometric fitting has been performed.

    Values are given as exp1-exp2.

    Parameters
    ----------
    exp1 : `lsst.afw.image.Exposure`
        The first exposure.
    exp2 : `lsst.afw.image.Exposure`
        The second exposure.
    useWcs : `bool`
        Use the WCS for the ra/dec and alt/az if True, else use the nominal/
        boresight values from the exposures' visitInfos.
    allowDifferentPlateScales : `bool`, optional
        Use to disable checking that plate scales are the same. Generally,
        differing plate scales would indicate an error, but where blind-solving
        has been undertaken during commissioning plate scales can be different
        enough to warrant setting this to ``True``.

    Returns
    -------
    offsets : `lsst.pipe.base.Struct`
        A struct containing the offsets:
        ``deltaRa``
            The difference in ra (`lsst.geom.Angle`)
        ``deltaDec``
            The difference in dec (`lsst.geom.Angle`)
        ``deltaAlt``
            The difference in alt (`lsst.geom.Angle`)
        ``deltaAz``
            The difference in az (`lsst.geom.Angle`)
        ``deltaPixels``
            The difference in pixels (`float`)
    """

    wcs1 = exp1.getWcs()
    wcs2 = exp2.getWcs()
    # deltaPixels is always computed with exp1's plate scale
    pixScaleArcSec = wcs1.getPixelScale().asArcseconds()
    if not allowDifferentPlateScales:
        # NOTE(review): assert is stripped under ``python -O``; raising
        # ValueError would be more robust if this check must always run.
        assert np.isclose(
            pixScaleArcSec, wcs2.getPixelScale().asArcseconds()
        ), "Pixel scales in the exposures differ."

    if useWcs:
        p1 = wcs1.getSkyOrigin()
        p2 = wcs2.getSkyOrigin()
        alt1, az1 = getAltAzFromSkyPosition(p1, exp1.getInfo().getVisitInfo())
        alt2, az2 = getAltAzFromSkyPosition(p2, exp2.getInfo().getVisitInfo())
        ra1 = p1[0]
        ra2 = p2[0]
        dec1 = p1[1]
        dec2 = p2[1]
    else:
        az1 = exp1.visitInfo.boresightAzAlt[0]
        az2 = exp2.visitInfo.boresightAzAlt[0]
        alt1 = exp1.visitInfo.boresightAzAlt[1]
        alt2 = exp2.visitInfo.boresightAzAlt[1]

        ra1 = exp1.visitInfo.boresightRaDec[0]
        ra2 = exp2.visitInfo.boresightRaDec[0]
        dec1 = exp1.visitInfo.boresightRaDec[1]
        dec2 = exp2.visitInfo.boresightRaDec[1]

        # in this branch the on-sky separation below is measured between
        # the two boresights rather than the WCS sky origins
        p1 = exp1.visitInfo.boresightRaDec
        p2 = exp2.visitInfo.boresightRaDec

    angular_offset = p1.separation(p2).asArcseconds()
    deltaPixels = angular_offset / pixScaleArcSec

    ret = pipeBase.Struct(
        deltaRa=(ra1 - ra2).wrapNear(geom.Angle(0.0)),
        deltaDec=dec1 - dec2,
        deltaAlt=alt1 - alt2,
        deltaAz=(az1 - az2).wrapNear(geom.Angle(0.0)),
        deltaPixels=deltaPixels,
    )

    return ret
def starTrackerFileToExposure(filename: str, logger: logging.Logger | None = None) -> afwImage.Exposure:
    """Read the exposure from the file and set the wcs from the header.

    All patching of the exposure (WCS, date, weather) is best-effort:
    failures are logged as warnings and the exposure is returned regardless.

    Parameters
    ----------
    filename : `str`
        The full path to the file.
    logger : `logging.Logger`, optional
        The logger to use for errors, created if not supplied.

    Returns
    -------
    exp : `lsst.afw.image.Exposure`
        The exposure.
    """
    if not logger:
        logger = logging.getLogger(__name__)
    exp = afwImage.ExposureF(filename)
    try:
        wcs = genericCameraHeaderToWcs(exp)
        exp.setWcs(wcs)
    except Exception as e:
        # best-effort: an exposure without a WCS is still usable
        logger.warning(f"Failed to set wcs from header: {e}")

    # for some reason the date isn't being set correctly
    # DATE-OBS is present in the original header, but it's being
    # stripped out and somehow not set (plus it doesn't give the midpoint
    # of the exposure), so set it manually from the midpoint here
    try:
        newArgs = {}  # dict to unpack into visitInfo.copyWith - fill it with whatever needs to be replaced
        md = exp.getMetadata()

        # midpoint of the exposure, computed from the begin/end header cards
        begin = datetime.datetime.fromisoformat(md["DATE-BEG"])
        end = datetime.datetime.fromisoformat(md["DATE-END"])
        duration = end - begin
        mid = begin + duration / 2
        newTime = dafBase.DateTime(mid.isoformat(), dafBase.DateTime.Timescale.TAI)
        newArgs["date"] = newTime

        # AIRPRESS is being set as PRESSURE so afw doesn't pick it up
        # once we're using the butler for data we will just set it to take
        # PRESSURE in the translator instead of this
        weather = exp.visitInfo.getWeather()
        oldPressure = weather.getAirPressure()
        if not np.isfinite(oldPressure):
            pressure = md.get("PRESSURE")
            if pressure is not None:
                logger.info("Patching the weather info using the PRESSURE header keyword")
                newWeather = Weather(weather.getAirTemperature(), pressure, weather.getHumidity())
                newArgs["weather"] = newWeather

        if newArgs:
            newVi = exp.visitInfo.copyWith(**newArgs)
            exp.info.setVisitInfo(newVi)
    except Exception as e:
        logger.warning(f"Failed to set date from header: {e}")

    return exp
def obsInfoToDict(obsInfo: ObservationInfo) -> dict:
    """Convert an ObservationInfo to a dict.

    Parameters
    ----------
    obsInfo : `astro_metadata_translator.ObservationInfo`
        The ObservationInfo to convert.

    Returns
    -------
    obsInfoDict : `dict`
        A mapping of each property name in ``all_properties`` to its value
        on ``obsInfo``.
    """
    result = {}
    for propertyName in obsInfo.all_properties:
        result[propertyName] = getattr(obsInfo, propertyName)
    return result
def getFieldNameAndTileNumber(
    field: str, warn: bool = True, logger: logging.Logger | None = None
) -> tuple[str, int | None]:
    """Split an observed field name into its base name and trailing tile
    number.

    The tile number is assumed to always be appended, with an underscore, to
    the rest of the field name. Returns the name and number as a tuple, or
    the name unchanged if no tile number is found.

    Parameters
    ----------
    field : `str`
        The name of the field.
    warn : `bool`, optional
        Log a warning when the tile number cannot be determined?
    logger : `logging.Logger`, optional
        The logger to warn with, created if warning is enabled and none is
        supplied.

    Returns
    -------
    fieldName : `str`
        The name of the field without the trailing tile number, if present.
    tileNum : `int` or `None`
        The number of the tile, as an integer, or ``None`` if not found.
    """
    if warn and not logger:
        logger = logging.getLogger("lsst.summit.utils.utils.getFieldNameAndTileNumber")

    if "_" not in field:
        if warn and logger is not None:
            logger.warning(
                f"Field {field} does not contain an underscore," " so cannot determine the tile number."
            )
        return field, None

    baseName, _, tileString = field.rpartition("_")
    try:
        tileNumber = int(tileString)
    except ValueError:
        if warn and logger is not None:
            logger.warning(
                f"Field {field} does not contain only an integer after the final underscore"
                " so cannot determine the tile number."
            )
        return field, None

    return baseName, tileNumber
def getAirmassSeeingCorrection(airmass: float) -> float:
    """Get the correction factor for seeing due to airmass.

    The correction follows the standard ``airmass**-0.6`` scaling.

    Parameters
    ----------
    airmass : `float`
        The airmass, greater than or equal to 1.

    Returns
    -------
    correctionFactor : `float`
        The correction factor to apply to the seeing.

    Raises
    ------
    ValueError raised for unphysical airmasses.
    """
    if airmass < 1:
        # airmass < 1 is unphysical: reject rather than return a bogus value
        raise ValueError(f"Invalid airmass: {airmass}")
    correction = airmass**-0.6
    return correction
@deprecated(
    reason=". Will be removed after v28.0.",
    version="v27.0",
    category=FutureWarning,
)
def getFilterSeeingCorrection(filterName: str) -> float:
    """Get the correction factor for seeing due to a filter.

    Deprecated alias: delegates directly to `getBandpassSeeingCorrection`.

    Parameters
    ----------
    filterName : `str`
        The name of the filter, e.g. 'SDSSg_65mm'.

    Returns
    -------
    correctionFactor : `float`
        The correction factor to apply to the seeing.

    Raises
    ------
    ValueError raised for unknown filters.
    """
    correction = getBandpassSeeingCorrection(filterName)
    return correction
def getBandpassSeeingCorrection(filterName: str) -> float:
    """Get the correction factor for seeing due to a filter.

    The correction is ``(wavelength / 500.0) ** 0.2`` for the filter's
    reference wavelength (the values below, presumably effective
    wavelengths in nm).

    Parameters
    ----------
    filterName : `str`
        The name of the filter, e.g. 'SDSSg_65mm'.

    Returns
    -------
    correctionFactor : `float`
        The correction factor to apply to the seeing.

    Raises
    ------
    ValueError raised for unknown filters.
    """
    # Reference wavelength per physical filter, grouped by instrument
    referenceWavelengths = {
        # LATISS
        "SDSSg_65mm": 474.41,
        "SDSSr_65mm": 628.47,
        "SDSSi_65mm": 769.51,
        "SDSSz_65mm": 871.45,
        "SDSSy_65mm": 986.8,
        # ComCam
        "u_02": 370.697,
        "g_01": 476.359,
        "r_03": 619.383,
        "i_06": 754.502,
        "z_03": 866.976,
        "y_04": 972.713,
        # LSSTCam
        "u_24": 365.1397,
        "g_6": 475.6104,
        "r_57": 618.6055,
        "i_39": 753.5490,
        "z_20": 868.0935,
        "y_10": 972.5979,
    }
    wavelength = referenceWavelengths.get(filterName)
    if wavelength is None:
        raise ValueError(f"Unknown filter name: {filterName}")
    return (wavelength / 500.0) ** 0.2
def getCdf(data: np.ndarray, scale: int, nBinsMax: int = 300_000) -> tuple[np.ndarray | float, float, float]:
    """Return an approximate cumulative distribution function scaled to
    the [0, scale] range.

    If the input data is all nan, then the output cdf will be nan as well as
    the min and max values.

    Parameters
    ----------
    data : `np.array`
        The input data.
    scale : `int`
        The scaling range of the output.
    nBinsMax : `int`, optional
        Maximum number of bins to use.

    Returns
    -------
    cdf : `np.array` of `int`
        A monotonically increasing sequence that represents a scaled
        cumulative distribution function, starting with the value at
        minVal, then at (minVal + 1), and so on.
    minVal : `float`
        The floor of the minimum value in the input data.
    maxVal : `float`
        An integer larger than the maximum value in the input data.
    """
    flattened = data.ravel()
    # number of non-nan samples; nans are excluded from the histogram too
    nValid = flattened.size - np.count_nonzero(np.isnan(flattened))

    lower = np.floor(np.nanmin(flattened))
    upper = np.ceil(np.nanmax(flattened)) + 1.0

    if np.isnan(lower) or np.isnan(upper):
        # nanmin/nanmax only return nan when every element is nan, in
        # which case no cdf can be computed
        return np.nan, np.nan, np.nan

    # one bin per integer step of the data range, capped at nBinsMax
    nBins = np.clip(int(upper) - int(lower), 1, nBinsMax)
    counts, _ = np.histogram(flattened, bins=nBins, range=(int(lower), int(upper)))

    cdf = (scale * np.cumsum(counts) / nValid).astype(np.int64)
    return cdf, lower, upper
def getQuantiles(data: npt.NDArray[np.float64], nColors: int) -> npt.NDArray[np.floating[Any]]:
    """Get a set of boundaries that equally distribute data into
    nColors intervals. The output can be used to make a colormap of nColors
    colors.

    This is equivalent to using the numpy function:
    np.nanquantile(data, np.linspace(0, 1, nColors + 1))
    but with a coarser precision, yet sufficient for our use case. This
    implementation gives a significant speed-up. In the case of large
    ranges, np.nanquantile is used because it is more memory efficient.

    If all elements of ``data`` are nan then the output ``boundaries`` will
    also all be ``nan`` to keep the interface consistent.

    Parameters
    ----------
    data : `np.array`
        The input image data.
    nColors : `int`
        The number of intervals to distribute data into.

    Returns
    -------
    boundaries : `np.array` of `float`
        A monotonically increasing sequence of size (nColors + 1). These are
        the edges of nColors intervals.
    """
    dataRange = np.nanmax(data) - np.nanmin(data)
    # NB: an all-nan input gives dataRange == nan, which fails this
    # comparison and falls through to the getCdf branch below
    if dataRange > 300_000:
        # Use slower but memory efficient nanquantile
        logger = logging.getLogger(__name__)
        logger.warning(f"Data range is very large ({dataRange}); using slower quantile code.")
        boundaries = np.nanquantile(data, np.linspace(0, 1, nColors + 1))
    else:
        cdf, minVal, maxVal = getCdf(data, nColors)
        if np.isnan(minVal):  # cdf calculation has failed because all data is nan
            # Bug fix: this used to return only nColors nans, breaking the
            # documented (nColors + 1)-element contract of the normal path.
            return np.full(nColors + 1, np.nan)
        assert isinstance(cdf, np.ndarray), "cdf is not an np.ndarray"
        # width of one cdf bin in data units
        scale = (maxVal - minVal) / len(cdf)

        # first bin index where the cdf reaches each level, mapped back to
        # data units; the top edge is maxVal itself
        boundaries = np.asarray([np.argmax(cdf >= i) * scale + minVal for i in range(nColors)] + [maxVal])

    return boundaries
def digitizeData(data: npt.NDArray[np.float64], nColors: int = 256) -> npt.NDArray[np.integer]:
    """
    Scale data into nColors using its cumulative distribution function.

    Parameters
    ----------
    data : `np.array`
        The input image data.
    nColors : `int`
        The number of intervals to distribute data into.

    Returns
    -------
    data : `np.array` of `int`
        Scaled data in the [0, nColors - 1] range.
    """
    cdf, minVal, maxVal = getCdf(data, nColors - 1)
    assert isinstance(cdf, np.ndarray), "cdf is not an np.ndarray"
    # width of one cdf bin in data units
    scale = (maxVal - minVal) / len(cdf)
    # Bug fix: map each value to its cdf bin with (value - minVal) / binWidth.
    # The previous expression, floor(data * scale - minVal), only agreed with
    # this when scale == 1 (integer data range below getCdf's nBinsMax cap)
    # and produced wrong indices otherwise.
    bins = np.floor((data - minVal) / scale).astype(np.int64)
    # Clip so nan values (which floor/astype turn into garbage indices) and
    # boundary values cannot index out of range.
    bins = np.clip(bins, 0, len(cdf) - 1)
    return cdf[bins]
def getBboxAround(centroid: geom.Point, boxSize: int, exp: afwImage.Exposure) -> geom.Box2I:
    """Get a bbox centered on a point, clipped to the exposure.

    If the bbox would extend beyond the bounds of the exposure it is clipped to
    the exposure, resulting in a non-square bbox.

    Parameters
    ----------
    centroid : `lsst.geom.Point`
        The source centroid.
    boxSize : `int`
        The size of the box to centre around the centroid.
    exp : `lsst.afw.image.Exposure`
        The exposure, so the bbox can be clipped to not overrun the bounds.

    Returns
    -------
    bbox : `lsst.geom.Box2I`
        The bounding box, centered on the centroid unless clipping to the
        exposure causes it to be non-square.
    """
    square = geom.BoxI().makeCenteredBox(centroid, geom.Extent2I(boxSize, boxSize))
    return square.clippedTo(exp.getBBox())
def bboxToMatplotlibRectanle(bbox: geom.Box2I | geom.Box2D) -> matplotlib.patches.Rectangle:
    """Convert a bbox to a matplotlib Rectangle for plotting.

    Parameters
    ----------
    bbox : `lsst.geom.Box2I` or `lsst.geom.Box2D`
        The bbox to convert.

    Returns
    -------
    rectangle : `matplotlib.patches.Rectangle`
        The rectangle, anchored at the bbox's lower-left corner.
    """
    width, height = bbox.getDimensions()
    lowerLeft = (bbox.minX, bbox.minY)
    return Rectangle(lowerLeft, width, height)
def computeExposureId(instrument: str, controller: str, dayObs: int, seqNum: int) -> int:
    """Compute the exposure ID from its constituent parts.

    Delegates to the appropriate obs_lsst translator class for the
    instrument.

    Parameters
    ----------
    instrument : `str`
        The instrument name, e.g. "LATISS" (case insensitive).
    controller : `str`
        The controller code.
    dayObs : `int`
        The day of observation.
    seqNum : `int`
        The sequence number.

    Returns
    -------
    exposureId : `int`
        The computed exposure ID.

    Raises
    ------
    ValueError
        Raised for unknown instruments.
    """
    instrument = instrument.lower()
    if instrument == "latiss":
        return lsst.obs.lsst.translators.LatissTranslator.compute_exposure_id(dayObs, seqNum, controller)
    elif instrument == "lsstcomcam":
        return lsst.obs.lsst.translators.LsstComCamTranslator.compute_exposure_id(dayObs, seqNum, controller)
    elif instrument == "lsstcomcamsim":
        return lsst.obs.lsst.translators.LsstComCamSimTranslator.compute_exposure_id(
            dayObs, seqNum, controller
        )
    elif instrument == "lsstcam":
        return lsst.obs.lsst.translators.LsstCamTranslator.compute_exposure_id(dayObs, seqNum, controller)
    else:
        # bug fix: the f-prefix was missing, so the message printed the
        # literal text "{instrument}" instead of the instrument name
        raise ValueError(f"Unknown instrument {instrument}")
def computeCcdExposureId(instrument: str, exposureId: int, detector: int) -> int:
    """Compute the detector-exposure ID for an exposure ID and detector.

    Validates the detector number against the instrument's detector count
    before delegating to the appropriate obs_lsst translator class.

    Parameters
    ----------
    instrument : `str`
        The instrument name, e.g. "LATISS" (case insensitive).
    exposureId : `int`
        The exposure ID.
    detector : `int`
        The detector number.

    Returns
    -------
    ccdExposureId : `int`
        The computed detector-exposure ID.

    Raises
    ------
    ValueError
        Raised for unknown instruments and out-of-range detectors.
    """
    # bug fix throughout: every error message below was missing its
    # f-prefix, so the literal "{detector}"/"{instrument}" was printed
    instrument = instrument.lower()
    if instrument == "latiss":
        if detector != 0:
            raise ValueError(f"Invalid detector {detector} for LATISS")
        return lsst.obs.lsst.translators.LatissTranslator.compute_detector_exposure_id(exposureId, detector)
    elif instrument == "lsstcomcam":
        if detector < 0 or detector >= 9:
            raise ValueError(f"Invalid detector {detector} for LSSTComCam")
        return lsst.obs.lsst.translators.LsstComCamTranslator.compute_detector_exposure_id(
            exposureId, detector
        )
    elif instrument == "lsstcomcamsim":
        if detector < 0 or detector >= 9:
            raise ValueError(f"Invalid detector {detector} for LSSTComCamSim")
        return lsst.obs.lsst.translators.LsstComCamSimTranslator.compute_detector_exposure_id(
            exposureId, detector
        )
    elif instrument == "lsstcam":
        if detector < 0 or detector >= 205:
            raise ValueError(f"Invalid detector {detector} for LSSTCam")
        return lsst.obs.lsst.translators.LsstCamTranslator.compute_detector_exposure_id(exposureId, detector)
    else:
        raise ValueError(f"Unknown instrument {instrument}")
def getDetectorIds(instrumentName: str) -> list[int]:
    """Get a list of detector IDs for a given instrument.

    Parameters
    ----------
    instrumentName : `str`
        The name of the instrument.

    Returns
    -------
    detectorIds : `list` of `int`
        The list of detector IDs.
    """
    # iterating a Camera yields its detectors
    return [detector.getId() for detector in getCameraFromInstrumentName(instrumentName)]
1314def getCameraFromInstrumentName(instrumentName: str) -> Camera:
1315 """Get the camera object given the instrument name (case insenstive).
1317 Parameters
1318 ----------
1319 instrumentName : `str`
1320 The name of the instrument, e.g. "LATISS" or "LSSTCam". Case
1321 insenstive.
1323 Returns
1324 -------
1325 camera: `lsst.afw.cameraGeom.Camera`
1326 The camera object corresponding to the instrument name.
1328 Raises
1329 ------
1330 ValueError
1331 If the instrument name is not supported.
1332 """
1334 _instrument = instrumentName.lower()
1336 match _instrument:
1337 case "lsstcam":
1338 camera = LsstCam.getCamera()
1339 case "lsstcomcam":
1340 camera = LsstComCam.getCamera()
1341 case "lsstcomcamsim":
1342 camera = LsstComCamSim.getCamera()
1343 case "latiss":
1344 camera = Latiss.getCamera()
1345 case _:
1346 raise ValueError(f"Unsupported instrument: {instrumentName}")
1347 return camera
def calcEclipticCoords(ra: float, dec: float) -> tuple[float, float]:
    """Get the ecliptic coordinates of the specified ra and dec.

    Transform J2000 (ra, dec), both in degrees, to
    IAU1976 Ecliptic coordinates (also returning degrees).

    Matches the results of:

        from astropy.coordinates import SkyCoord, HeliocentricEclipticIAU76
        import astropy.units as u

        p = SkyCoord(ra=ra0*u.deg, dec=dec0*u.deg, distance=1*u.au, frame='hcrs')
        ecl = p.transform_to(HeliocentricEclipticIAU76)
        print(ecl.lon.value, ecl.lat.value)

    except that it's fast.

    Parameters
    ----------
    ra : `float`
        The right ascension, in degrees.
    dec : `float`
        The declination, in degrees.

    Returns
    -------
    lambda : `float`
        The ecliptic longitude in degrees.
    beta : `float`
        The ecliptic latitude in degrees.
    """
    raRad = np.deg2rad(ra)
    decRad = np.deg2rad(dec)

    # IAU 1976 obliquity of the ecliptic
    obliquity = np.deg2rad(23.43929111111111)
    cosEps = np.cos(obliquity)
    sinEps = np.sin(obliquity)

    sinRa = np.sin(raRad)
    cosRa = np.cos(raRad)
    sinDec = np.sin(decRad)
    cosDec = np.cos(decRad)

    # standard equatorial -> ecliptic rotation about the x-axis
    eclipticLon = np.arctan2((sinRa * cosEps + sinDec / cosDec * sinEps), cosRa)
    eclipticLat = np.arcsin(sinDec * cosEps - cosDec * sinEps * sinRa)

    # normalize longitude to [0, 360) degrees
    if eclipticLon < 0:
        eclipticLon += np.pi * 2

    return (np.rad2deg(eclipticLon), np.rad2deg(eclipticLat))
def getImageArray(
    inputData: np.ndarray | afwImage.Exposure | afwImage.Image | afwImage.MaskedImage,
) -> np.ndarray:
    """Get the image data from anything image-like.

    Parameters
    ----------
    inputData : `numpy.ndarray`, `lsst.afw.image.Exposure`,
        `lsst.afw.image.Image`, or `lsst.afw.image.MaskedImage`
        The input data.

    Returns
    -------
    imageData : `numpy.ndarray`
        The image data.

    Raises
    ------
    TypeError
        If the input data is not a numpy array, lsst.afw.image.Exposure,
        lsst.afw.image.Image, or lsst.afw.image.MaskedImage.
    """
    # checked in the same order as the original match statement: plain
    # ndarray first, then MaskedImage before Image before Exposure
    if isinstance(inputData, np.ndarray):
        return inputData
    if isinstance(inputData, afwImage.MaskedImage):
        return inputData.image.array
    if isinstance(inputData, afwImage.Image):
        return inputData.array
    if isinstance(inputData, afwImage.Exposure):
        return inputData.image.array
    raise TypeError(
        "This function accepts numpy array, lsst.afw.image.Exposure components."
        f" Got {type(inputData)}"
    )
def getSunAngle(time: Time | None = None) -> float:
    """Get the angle of the sun to the horizon at the specified time.

    If no time is specified, the current time is used. Positive numbers means
    the sun is above the horizon, negative means it is below.

    Parameters
    ----------
    time : `astropy.time.Time`, optional
        The time at which to evaluate the sun's altitude; ``Time.now()``
        if not supplied.

    Returns
    -------
    sun_alt : `float`
        The angle of the sun to the horizon, in degrees.
    """
    obsTime = Time.now() if time is None else time
    # altitude/azimuth frame at the Simonyi telescope site
    altAzFrame = AltAz(obstime=obsTime, location=SIMONYI_LOCATION)
    return get_sun(obsTime).transform_to(altAzFrame).alt.deg
@dataclass(slots=True)
class RobustFitResult:
    """Result of a single `RobustFitter.fit` call.

    Holds the best-fit line parameters, the per-coefficient statistics from
    the inlier OLS fit, and the mask of points excluded from that fit.
    """

    # best-fit line parameters
    slope: float
    intercept: float
    # sample standard deviation of the OLS residuals (NaN with < 2 inliers)
    scatter: float
    # OLS statistics for the slope coefficient
    slopePValue: float
    slopeStdErr: float
    slopeTValue: float
    # OLS statistics for the intercept coefficient
    interceptPValue: float
    interceptStdErr: float
    interceptTValue: float
    # boolean array, same shape as the input x/y: True where the point was
    # not used in the fit (non-finite, or flagged as an outlier by RANSAC)
    outlierMask: np.ndarray
class RobustFitter:
    """
    Robust linear fit using RANSAC + OLS, reusable across datasets.

    This class fits a robust linear trend to x and y using scikit-learn's
    RANSAC for inlier detection, followed by an OLS fit on inliers to compute
    slope, intercept, scatter, standard errors, t-values, and p-values.
    Configuration (e.g., minSamples) is set at init. Call `fit()` per dataset.
    Results are returned as a `RobustFitResult` and stored internally for later
    access (e.g., plotting).

    Parameters
    ----------
    minSamples : `float`, optional
        Minimum fraction of samples chosen randomly from the original data.
    """

    def __init__(self, *, minSamples: float = 0.2) -> None:
        self.minSamples = minSamples
        self._clearState()

    def _clearState(self) -> None:
        """Reset stored fit state."""
        # fitted estimators: the RANSAC regressor and the statsmodels OLS
        # results object from the inlier refit
        self.model: Any = None
        self.olsModel: Any = None
        # True where a point was non-finite or flagged as an outlier
        self.outlierMask: np.ndarray | None = None
        # the full (unfiltered) data passed to the last fit() call
        self.x: np.ndarray | None = None
        self.y: np.ndarray | None = None
        self.slope: float = np.nan
        self.intercept: float = np.nan
        # 68% (alpha=0.32) confidence intervals from the OLS fit
        self.ciSlope: tuple[float, float] = (np.nan, np.nan)
        self.ciIntercept: tuple[float, float] = (np.nan, np.nan)
        self.slopePValue = np.nan
        self.slopeStdErr = np.nan
        self.slopeTValue = np.nan
        self.scatter = np.nan

    @staticmethod
    def _defaultResidualThreshold(y: np.ndarray) -> float:
        """Compute default residual threshold from finite y values.

        The threshold is 1.5 times the MAD-based standard deviation of the
        finite y values.

        Raises
        ------
        ValueError
            If ``y`` contains no finite values.
        """
        yFinite = np.asarray(y)[np.isfinite(y)]
        if yFinite.size == 0:
            raise ValueError("Cannot compute residual threshold: no finite y.")
        return 1.5 * float(mad_std(yFinite))

    def fit(
        self,
        x: np.ndarray,
        y: np.ndarray,
        *,
        residualThreshold: float | None = None,
        randomState: int = 42,
    ) -> RobustFitResult:
        """
        Fit robust line to (x, y) using RANSAC + OLS on inliers.

        Recomputes residual threshold each call if not provided.

        Parameters
        ----------
        x : array-like
            Independent variable values.
        y : array-like
            Dependent variable values.
        residualThreshold : float, optional
            Residual threshold for inlier detection. If None, computed
            as 1.5 × MAD of y.
        randomState : int, optional
            Random seed for RANSAC.

        Returns
        -------
        result : `RobustFitResult`
            Best-fit parameters and statistics from the OLS inlier fit.

        Raises
        ------
        ValueError
            If no finite x/y values are available for fitting.
        """
        self._clearState()

        x = np.asarray(x)
        y = np.asarray(y)

        # only points where both coordinates are finite take part in the fit
        finiteMask = np.isfinite(x) & np.isfinite(y)
        if not finiteMask.any():
            raise ValueError("No finite x/y values to fit.")

        X = x[finiteMask].reshape(-1, 1)  # sklearn expects a 2D design matrix
        yFit = y[finiteMask]

        if residualThreshold is None:
            residualThreshold = self._defaultResidualThreshold(yFit)

        ransac = RANSACRegressor(
            estimator=LinearRegression(),
            min_samples=self.minSamples,
            residual_threshold=residualThreshold,
            random_state=randomState,
        )
        ransac.fit(X, yFit)

        # the reported slope/intercept come from RANSAC's final estimator;
        # the statistics below come from the separate OLS refit on inliers
        slope = float(ransac.estimator_.coef_[0])
        intercept = float(ransac.estimator_.intercept_)

        self.x = x
        self.y = y

        # full-size mask: non-finite points stay True, finite points take
        # the inverse of RANSAC's inlier mask
        outlierMask = np.ones_like(x, dtype=bool)
        outlierMask[finiteMask] = ~ransac.inlier_mask_
        self.outlierMask = outlierMask

        # refit the inliers with OLS to get per-coefficient statistics
        xIn = x[~outlierMask]
        yIn = y[~outlierMask]
        XDesign = sm.add_constant(xIn)
        ols = sm.OLS(yIn, XDesign).fit()
        ci = ols.conf_int(alpha=0.32)  # 68% confidence intervals
        ciIntercept = (float(ci[0][0]), float(ci[0][1]))
        ciSlope = (float(ci[1][0]), float(ci[1][1]))
        # sample standard deviation of the residuals; NaN with < 2 inliers
        scatter = float(np.nanstd(ols.resid, ddof=1)) if ols.resid.size > 1 else np.nan

        self.model = ransac
        self.olsModel = ols
        self.slope = slope
        self.intercept = intercept
        self.ciSlope = ciSlope
        self.ciIntercept = ciIntercept
        self.slopePValue = float(ols.pvalues[1]) if np.isfinite(ols.pvalues[1]) else np.nan
        self.slopeStdErr = float(ols.bse[1]) if np.isfinite(ols.bse[1]) else np.nan
        self.slopeTValue = float(ols.tvalues[1]) if np.isfinite(ols.tvalues[1]) else np.nan
        self.scatter = scatter

        return RobustFitResult(
            slope=slope,
            intercept=intercept,
            scatter=scatter,
            slopePValue=self.slopePValue,
            slopeStdErr=self.slopeStdErr,
            slopeTValue=self.slopeTValue,
            interceptPValue=float(ols.pvalues[0]) if np.isfinite(ols.pvalues[0]) else np.nan,
            interceptStdErr=float(ols.bse[0]) if np.isfinite(ols.bse[0]) else np.nan,
            interceptTValue=float(ols.tvalues[0]) if np.isfinite(ols.tvalues[0]) else np.nan,
            outlierMask=self.outlierMask,
        )

    def reset(self) -> None:
        """Clear any stored fit state."""
        self._clearState()

    def plotBestFit(self, ax: matplotlib.axes.Axes, label=None, color=None, alphaBand=0.2, lw=2, nBins=5):
        """Plot the best fit line, confidence interval,
        and optionally scatter/binned data.

        Parameters
        ----------
        ax : matplotlib.axes.Axes, optional
            The axes to plot on. If None, uses current axes.
        label : str, optional
            Label for the best fit line.
        color : str or None, optional
            Color for the fit line and confidence band.
        alphaBand : float, optional
            Alpha transparency for the confidence interval band.
        lw : int, optional
            Line width for the best fit line.
        nBins : int, optional
            Number of bins for binned statistics.

        Returns
        -------
        ax : matplotlib.axes.Axes
            The axes with the plot.

        Raises
        ------
        RuntimeError
            If called before `fit()`.
        ValueError
            If all x values are NaN.
        """
        # Handle case where self.x contains only NaNs
        if self.x is None or self.y is None or self.outlierMask is None or self.olsModel is None:
            raise RuntimeError("Fit must be called before plotting.")

        if np.isnan(self.x).all():
            raise ValueError("All x values are NaN; cannot plot best fit.")

        # evaluate the OLS prediction on a dense grid spanning the data
        xx = np.linspace(np.nanmin(self.x), np.nanmax(self.x), 200)
        X_design = sm.add_constant(xx)

        pred = self.olsModel.get_prediction(X_design)
        summary_frame = pred.summary_frame(alpha=0.05)  # 95% intervals

        mean = summary_frame["mean"]
        ci_lo = summary_frame["mean_ci_lower"]
        ci_hi = summary_frame["mean_ci_upper"]

        # Plot best fit
        ax.plot(xx, mean, color=color, label=label, lw=lw)
        # Plot confidence interval band
        ax.fill_between(xx, ci_lo, ci_hi, color=color, alpha=alphaBand)

        mask = self.outlierMask
        xin, yin = self.x[~mask], self.y[~mask]  # inliers
        xout, _ = self.x[mask], self.y[mask]  # outliers

        # Bin data in nbins
        # NOTE(review): this guard tests the *outliers* (xout) while the
        # binning below uses the inliers (xin, yin); also np.all on an empty
        # array is True, so binning is skipped whenever there are no
        # outliers at all. Confirm whether the check was meant to use xin.
        if np.all(np.isnan(xout)):
            # Skip binning if all values are NaN
            pass
        else:
            bin_centers, means, stds, bin_counts = self.getBinnedData(xin, yin, nBins)

            # Plot binned means and stds
            ax.errorbar(
                bin_centers,
                means,
                color=color or "black",
                yerr=stds,
                fmt="o",
                capsize=4,
                markersize=6,
                alpha=0.7,
                zorder=10,
            )

        return ax

    @staticmethod
    def getBinnedData(
        x: np.ndarray, y: np.ndarray, nbins: int
    ) -> tuple[np.ndarray, list[float], list[float], list[int]]:
        """Get binned statistics for x and y.

        Bins x into ``nbins`` equal-width bins and, per bin, computes the
        median of y, the standard deviation of y over sqrt(count) (NaN for
        bins with fewer than two points), and the point count.

        Returns
        -------
        bin_centers : `np.ndarray`
            The midpoints of the bins.
        means : `list` of `float`
            Median of y per bin (NaN for empty bins).
        stds : `list` of `float`
            std(y, ddof=1) / sqrt(count) per bin (NaN if count < 2).
        bin_counts : `list` of `int`
            Number of points per bin.
        """
        bins = np.linspace(np.nanmin(x), np.nanmax(x), nbins + 1)
        bin_centers = 0.5 * (bins[:-1] + bins[1:])
        # map each x value to a 0-based bin index
        digitized = np.digitize(x, bins) - 1
        means = []
        stds = []
        bin_counts = []
        for i in range(nbins):
            bin_y = y[digitized == i]
            if len(bin_y) > 0:
                means.append(np.nanmedian(bin_y))
                if len(bin_y) > 1:
                    stds.append(np.nanstd(bin_y, ddof=1) / np.sqrt(len(bin_y)))
                else:
                    stds.append(np.nan)
                bin_counts.append(len(bin_y))
            else:
                means.append(np.nan)
                stds.append(np.nan)
                bin_counts.append(0)
        return bin_centers, means, stds, bin_counts