Coverage for python / lsst / summit / utils / utils.py: 18%
623 statements
« prev ^ index » next coverage.py v7.13.5, created at 2026-05-07 09:02 +0000
1# This file is part of summit_utils.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <https://www.gnu.org/licenses/>.
21from __future__ import annotations
23import datetime
24import logging
25import os
26from collections.abc import Iterable, Sequence
27from dataclasses import dataclass
28from typing import TYPE_CHECKING, Any
30import astropy.units as u
31import matplotlib
32import numpy as np
33import numpy.typing as npt
34import statsmodels.api as sm # type: ignore[import-untyped]
35from astro_metadata_translator import ObservationInfo
36from astropy.coordinates import AltAz, EarthLocation, SkyCoord, get_sun
37from astropy.stats import mad_std
38from astropy.time import Time
39from deprecated.sphinx import deprecated
40from matplotlib.patches import Rectangle
41from scipy.ndimage import gaussian_filter
42from sklearn.linear_model import LinearRegression, RANSACRegressor # type: ignore[import-untyped]
44import lsst.afw.detection as afwDetect
45import lsst.afw.detection as afwDetection
46import lsst.afw.image as afwImage
47import lsst.afw.math as afwMath
48import lsst.daf.base as dafBase
49import lsst.daf.butler as dafButler
50import lsst.geom as geom
51import lsst.obs.lsst.translators
52import lsst.pipe.base as pipeBase
53import lsst.utils.packages as packageUtils
54from lsst.afw.coord import Weather
55from lsst.afw.detection import Footprint, FootprintSet
56from lsst.daf.butler.cli.cliLog import CliLog
57from lsst.obs.lsst import Latiss, LsstCam, LsstComCam, LsstComCamSim
58from lsst.obs.lsst.translators.latiss import AUXTEL_LOCATION
59from lsst.obs.lsst.translators.lsst import FILTER_DELIMITER, SIMONYI_LOCATION
61from . import dateTime as newLocation
62from .astrometry.utils import genericCameraHeaderToWcs
64if TYPE_CHECKING:
65 from lsst.afw.cameraGeom import Camera
# Public API of this module. Names marked "deprecated" are kept for backwards
# compatibility and emit FutureWarnings; see each function for its removal date.
__all__ = [
    "SIGMATOFWHM",
    "FWHMTOSIGMA",
    "EFD_CLIENT_MISSING_MSG",
    "GOOGLE_CLOUD_MISSING_MSG",
    "AUXTEL_LOCATION",
    "countPixels",
    "quickSmooth",
    "argMax2d",
    "dayObsIntToString",  # deprecated
    "dayObsSeqNumToVisitId",
    "getImageStats",
    "detectObjectsInExp",
    "fluxesFromFootprints",
    "fluxFromFootprint",
    "humanNameForCelestialObject",
    "getFocusFromHeader",
    "checkStackSetup",
    "setupLogging",
    "getCurrentDayObs_datetime",  # deprecated
    "getCurrentDayObs_int",  # deprecated
    "getCurrentDayObs_humanStr",  # deprecated
    "getExpRecordAge",  # deprecated
    "getSite",
    "getAltAzFromSkyPosition",
    "getExpPositionOffset",
    "starTrackerFileToExposure",
    "obsInfoToDict",
    "getFieldNameAndTileNumber",
    "getAirmassSeeingCorrection",
    "getFilterSeeingCorrection",  # deprecated
    "getBandpassSeeingCorrection",
    "getCdf",
    "getQuantiles",
    "digitizeData",
    "getBboxAround",
    "bboxToMatplotlibRectanle",
    "computeExposureId",
    "computeCcdExposureId",
    "getDetectorIds",
    "getImageArray",
    "getSunAngle",
    "RobustFitter",
]
# Conversion factors between a Gaussian profile's sigma and its FWHM:
# FWHM = sigma * 2*sqrt(2*ln(2))
SIGMATOFWHM = 2.0 * np.sqrt(2.0 * np.log(2.0))
FWHMTOSIGMA = 1 / SIGMATOFWHM

# Canned error message for code paths that need the optional EFD client.
EFD_CLIENT_MISSING_MSG = (
    "ImportError: lsst_efd_client not found. Please install with:\n" " pip install lsst-efd-client"
)

# Canned error message for code paths that need the optional GCS client.
GOOGLE_CLOUD_MISSING_MSG = (
    "ImportError: Google cloud storage not found. Please install with:\n"
    " pip install google-cloud-storage"
)
def countPixels(maskedImage: afwImage.MaskedImage, maskPlane: str) -> int:
    """Count the number of pixels in an image with a given mask bit set.

    Parameters
    ----------
    maskedImage : `lsst.afw.image.MaskedImage`
        The masked image.
    maskPlane : `str`
        The name of the bitmask.

    Returns
    -------
    count : `int`
        The number of pixels with the selected mask bit set.
    """
    bit = maskedImage.mask.getPlaneBitMask(maskPlane)
    # count_nonzero counts directly without materialising the index arrays
    # that len(np.where(...)[0]) would build
    return int(np.count_nonzero(np.bitwise_and(maskedImage.mask.array, bit)))
def quickSmooth(data: npt.NDArray[np.float64], sigma: float = 2) -> npt.NDArray[np.float64]:
    """Lightly smooth an image for display purposes.

    Not for scientific use: this simply improves the stretch and the visual
    rendering of low-SNR features against the sky background in cutouts.

    Parameters
    ----------
    data : `np.array`
        The image data to smooth.
    sigma : `float`, optional
        The size of the smoothing kernel.

    Returns
    -------
    smoothData : `np.array`
        The smoothed data.
    """
    return gaussian_filter(data, [sigma, sigma], mode="constant")
def argMax2d(array: npt.NDArray[np.float64]) -> tuple[tuple[float, float], bool, list[tuple[float, float]]]:
    """Locate the maximum of a 2d array and report whether it is unique.

    When the maximum value appears more than once, the remaining locations
    are returned as a list, e.g.

    (12, 34), False, [(56,78), (910, 1112)]

    Parameters
    ----------
    array : `np.array`
        The data.

    Returns
    -------
    maxLocation : `tuple`
        The coords of the first instance of the max value.
    unique : `bool`
        Whether it's the only location.
    otherLocations : `list` of `tuple`
        List of the other max values' locations, empty if unique.
    """
    rows, cols = np.where(array == np.max(array))
    coords = [(float(r), float(c)) for r, c in zip(rows, cols)]
    isUnique = len(coords) == 1
    return coords[0], isUnique, coords[1:]
@deprecated(
    reason="dayObsIntToString() has moved to lsst.summit.utils.dateTime. The function is unchanged, "
    "but you should change to import from there. This function will be removed after w_2026_01.",
    version="w_2026_01",
    category=FutureWarning,
)
def dayObsIntToString(dayObs: int) -> str:
    """Render an integer dayObs as a dash-delimited string.

    For example, the hard-to-read 20210101 becomes 2021-01-01.

    Parameters
    ----------
    dayObs : `int`
        The dayObs.

    Returns
    -------
    dayObs : `str`
        The dayObs as a string.
    """
    return newLocation.dayObsIntToString(dayObs)
def dayObsSeqNumToVisitId(dayObs: int, seqNum: int) -> int:
    """Build the visit id for a given dayObs/seqNum pair.

    Parameters
    ----------
    dayObs : `int`
        The dayObs.
    seqNum : `int`
        The seqNum.

    Returns
    -------
    visitId : `int`
        The visitId.

    Notes
    -----
    TODO: Remove this horrible hack once DM-30948 makes this possible
    programatically/via the butler.
    """
    if not (19700101 <= dayObs <= 35000101):
        raise ValueError(f"dayObs value {dayObs} outside plausible range")
    # visit ids are the dayObs digits followed by the zero-padded seqNum
    return int(str(dayObs) + format(seqNum, "05d"))
def getImageStats(exp: afwImage.Exposure) -> pipeBase.Struct:
    """Calculate a grab-bag of stats for an image. Must remain fast.

    Parameters
    ----------
    exp : `lsst.afw.image.Exposure`
        The input exposure.

    Returns
    -------
    stats : `lsst.pipe.base.Struct`
        A container with attributes containing measurements and statistics
        for the image.
    """
    result = pipeBase.Struct()

    vi = exp.visitInfo
    expTime = vi.exposureTime
    md = exp.getMetadata()

    obj = vi.object
    mjd = vi.getDate().get()
    result.object = obj
    result.mjd = mjd

    # physical filter labels combine filter and grating around the delimiter
    fullFilterString = exp.filter.physicalLabel
    filt = fullFilterString.split(FILTER_DELIMITER)[0]
    grating = fullFilterString.split(FILTER_DELIMITER)[1]

    airmass = vi.getBoresightAirmass()
    rotangle = vi.getBoresightRotAngle().asDegrees()

    azAlt = vi.getBoresightAzAlt()
    az = azAlt[0].asDegrees()
    el = azAlt[1].asDegrees()

    result.expTime = expTime
    result.filter = filt
    result.grating = grating
    result.airmass = airmass
    result.rotangle = rotangle
    result.az = az
    result.el = el
    result.focus = md.get("FOCUSZ")

    data = exp.image.array
    result.maxValue = np.max(data)

    peak, uniquePeak, otherPeaks = argMax2d(data)
    result.maxPixelLocation = peak
    # NOTE(review): uniquePeak is True when the maximum is *unique*, yet it is
    # stored under the name "multipleMaxPixels" — this looks inverted. Confirm
    # what consumers expect before changing either the name or the value.
    result.multipleMaxPixels = uniquePeak

    result.nBadPixels = countPixels(exp.maskedImage, "BAD")
    result.nSatPixels = countPixels(exp.maskedImage, "SAT")
    result.percentile99 = np.percentile(data, 99)
    result.percentile9999 = np.percentile(data, 99.99)

    # 5-sigma clipped statistics with 2 clipping iterations
    sctrl = afwMath.StatisticsControl()
    sctrl.setNumSigmaClip(5)
    sctrl.setNumIter(2)
    statTypes = afwMath.MEANCLIP | afwMath.STDEVCLIP
    stats = afwMath.makeStatistics(exp.maskedImage, statTypes, sctrl)
    std, stderr = stats.getResult(afwMath.STDEVCLIP)
    mean, meanerr = stats.getResult(afwMath.MEANCLIP)

    result.clippedMean = mean
    result.clippedStddev = std

    return result
def detectObjectsInExp(
    exp: afwImage.Exposure, nSigma: float = 10, nPixMin: int = 10, grow: int = 0
) -> afwDetect.FootprintSet:
    """Quick-and-dirty source detection on an exposure.

    Returns the footprint set for the objects in a preferably-postISR
    exposure.

    Parameters
    ----------
    exp : `lsst.afw.image.Exposure`
        The exposure to detect objects in.
    nSigma : `float`
        The number of sigma for detection.
    nPixMin : `int`
        The minimum number of pixels in an object for detection.
    grow : `int`
        The number of pixels to grow the footprint by after detection.

    Returns
    -------
    footPrintSet : `lsst.afw.detection.FootprintSet`
        The set of footprints in the image.
    """
    # temporarily remove the sky level so the stdev-based threshold works
    skyLevel = np.nanmedian(exp.image.array)
    exp.image -= skyLevel

    threshold = afwDetect.Threshold(nSigma, afwDetect.Threshold.STDEV)
    detections = afwDetect.FootprintSet(exp.getMaskedImage(), threshold, "DETECTED", nPixMin)
    if grow > 0:
        detections = afwDetect.FootprintSet(detections, grow, True)  # True = isotropic growth

    exp.image += skyLevel  # restore so the input background is unchanged
    return detections
def fluxesFromFootprints(
    footprints: afwDetect.FootprintSet | afwDetect.Footprint | Iterable[afwDetect.Footprint],
    parentImage: afwImage.Image,
    subtractImageMedian: bool = False,
) -> npt.NDArray[np.float64]:
    """Calculate the flux from a set of footprints, given the parent image,
    optionally subtracting the whole-image median from each pixel as a very
    rough background subtraction.

    Parameters
    ----------
    footprints : `lsst.afw.detection.FootprintSet` or
                 `lsst.afw.detection.Footprint` or
                 `iterable` of `lsst.afw.detection.Footprint`
        The footprints to measure.
    parentImage : `lsst.afw.image.Image`
        The parent image.
    subtractImageMedian : `bool`, optional
        Subtract a whole-image median from each pixel in the footprint when
        summing as a very crude background subtraction. Does not change the
        original image.

    Returns
    -------
    fluxes : `np.array` of `float`
        The fluxes for each footprint.

    Raises
    ------
    TypeError : raised for unsupported types.
    """
    median = 0
    if subtractImageMedian:
        median = np.nanmedian(parentImage.array)

    # poor person's single dispatch
    badTypeMsg = (
        "This function works with FootprintSets, single Footprints, and iterables of Footprints. "
        f"Got {type(footprints)}: {footprints}"
    )
    if isinstance(footprints, FootprintSet):
        footprints = footprints.getFootprints()
    elif isinstance(footprints, Sequence):
        # an empty sequence is valid (nothing to measure); only type-check the
        # first element when one exists — previously this raised IndexError
        if footprints and not isinstance(footprints[0], Footprint):
            raise TypeError(badTypeMsg)
    elif isinstance(footprints, Footprint):
        footprints = [footprints]
    else:
        raise TypeError(badTypeMsg)

    return np.array([fluxFromFootprint(fp, parentImage, backgroundValue=median) for fp in footprints])
def fluxFromFootprint(
    footprint: afwDetection.Footprint, parentImage: afwImage.Image, backgroundValue: float = 0
) -> float:
    """Calculate the flux from a footprint, given the parent image, optionally
    subtracting a single value from each pixel as a very rough background
    subtraction, e.g. the image median.

    Parameters
    ----------
    footprint : `lsst.afw.detection.Footprint`
        The footprint to measure.
    parentImage : `lsst.afw.image.Image`
        Image containing the footprint.
    backgroundValue : `float`, optional
        The value to subtract from each pixel in the footprint when summing
        as a very crude background subtraction. Does not change the original
        image.

    Returns
    -------
    flux : `float`
        The flux in the footprint.
    """
    if backgroundValue:  # only do the subtraction if non-zero for speed
        xy0 = parentImage.getBBox().getMin()
        return footprint.computeFluxFromArray(parentImage.array - backgroundValue, xy0)
    return footprint.computeFluxFromImage(parentImage)
def humanNameForCelestialObject(objName: str) -> list[str]:
    """Returns a list of all human names for obj, or [] if none are found.

    Parameters
    ----------
    objName : `str`
        The/a name of the object.

    Returns
    -------
    names : `list` of `str`
        The names found for the object.
    """
    from astroquery.simbad import Simbad

    try:
        rows = Simbad.query_objectids(objName)
        # Simbad prefixes common names with "NAME "; strip that marker off
        return [row["ID"].replace("NAME ", "") for row in rows if row["ID"].startswith("NAME")]
    except Exception:
        return []  # same behavior as for found but un-named objects
def _getAltAzZenithsFromSeqNum(
    butler: dafButler.Butler, dayObs: int, seqNumList: list[int]
) -> tuple[list[float], list[float], list[float]]:
    """Get the alt, az and zenith angle for the seqNums of a given dayObs.

    Parameters
    ----------
    butler : `lsst.daf.butler.Butler`
        The butler to query.
    dayObs : `int`
        The dayObs.
    seqNumList : `list` of `int`
        The seqNums for which to return the alt, az and zenith.

    Returns
    -------
    azimuths : `list` of `float`
        List of the azimuths for each seqNum.
    elevations : `list` of `float`
        List of the elevations for each seqNum.
    zeniths : `list` of `float`
        List of the zenith angles for each seqNum.
    """
    azimuths: list[float] = []
    elevations: list[float] = []
    zeniths: list[float] = []
    for seqNum in seqNumList:
        md = butler.get("raw.metadata", day_obs=dayObs, seq_num=seqNum, detector=0)
        obsInfo = ObservationInfo(md)
        assert obsInfo.altaz_begin is not None, f"altaz_begin is None for seqNum={seqNum}"
        elevation = obsInfo.altaz_begin.alt.value
        azimuths.append(obsInfo.altaz_begin.az.value)
        elevations.append(elevation)
        zeniths.append(90 - elevation)  # zenith angle is the complement of the elevation
    return azimuths, elevations, zeniths
def getFocusFromHeader(exp: afwImage.Exposure) -> float | None:
    """Get the raw focus value from the header.

    Parameters
    ----------
    exp : `lsst.afw.image.exposure`
        The exposure.

    Returns
    -------
    focus : `float` or `None`
        The focus value if found, else ``None``.
    """
    md = exp.getMetadata()
    return md["FOCUSZ"] if "FOCUSZ" in md else None
def checkStackSetup() -> None:
    """Report the weekly tag in use and any locally setup packages.

    Designed primarily for use in notebooks/observing: prints the weekly
    tag(s) setup for lsst_distrib, then lists any locally setup packages with
    the path to each.

    Notes
    -----
    Uses print() instead of logger messages as this should simply print them
    without being vulnerable to any log messages potentially being diverted.
    """
    packages = packageUtils.getEnvironmentPackages(include_all=True)

    # value looks something like 'g4eae7cb9+1418867f (w_2022_13)'
    lsstDistribHashAndTags = packages["lsst_distrib"]
    lsstDistribTags = lsstDistribHashAndTags.split()[1]
    if len(lsstDistribTags.split()) == 1:
        tag = lsstDistribTags.replace("(", "").replace(")", "")
        print(f"You are running {tag} of lsst_distrib")
    else:  # multiple weekly tags found for lsst_distrib!
        print(f"The version of lsst_distrib you have is compatible with: {lsstDistribTags}")

    localPackages = []
    localPaths = []
    for packageName, packageTags in packages.items():
        if packageTags.startswith("LOCAL:"):
            # strip the "LOCAL:" prefix and the trailing git SHA etc
            localPaths.append(packageTags.split("LOCAL:")[1].split("@")[0])
            localPackages.append(packageName)

    if not localPackages:
        print("\nNo locally setup packages (using a vanilla stack)")
        return

    print("\nLocally setup packages:")
    print("-----------------------")
    maxLen = max(len(name) for name in localPackages)
    for name, path in zip(localPackages, localPaths):
        print(f"{name:<{maxLen}s} at {path}")
def setupLogging(longlog: bool = False) -> None:
    """Configure logging the same way `pipetask run` does.

    Code not run through the butler CLI defaults to WARNING-level messages
    with no logger names. This switches to the pipeline default format,
    currently <logger_name> <level>: <message>, e.g.
    lsst.isr INFO: Masking defects.
    """
    CliLog.initLog(longlog=longlog)
@deprecated(
    reason="getCurrentDayObs_datetime() has moved to lsst.summit.utils.dateTime. The function is unchanged,"
    " but renamed to getCurrentDayObsDatetime and you should change to import from there. This function will"
    " be removed after w_2026_01.",
    version="w_2026_01",
    category=FutureWarning,
)
def getCurrentDayObs_datetime() -> datetime.date:
    """Get the current day_obs - the observatory rolls the date over at UTC-12.

    Returned as e.g. datetime.date(2022, 4, 28).
    """
    return newLocation.getCurrentDayObsDatetime()
@deprecated(
    reason="getCurrentDayObs_int() has moved to lsst.summit.utils.dateTime. The function is unchanged,"
    " but renamed to getCurrentDayObsInt and you should change to import from there. This function will"
    " be removed after w_2026_01.",
    version="w_2026_01",
    category=FutureWarning,
)
def getCurrentDayObs_int() -> int:
    """Get the current dayObs as an integer, e.g. 20220428."""
    return newLocation.getCurrentDayObsInt()
@deprecated(
    reason="getCurrentDayObs_humanStr() has moved to lsst.summit.utils.dateTime. The function is unchanged,"
    " but renamed to getCurrentDayObsHumanStr and you should change to import from there. This function will"
    " be removed after w_2026_01.",
    version="w_2026_01",
    category=FutureWarning,
)
def getCurrentDayObs_humanStr() -> str:
    """Get the current dayObs as a dash-delimited string, e.g. '2022-04-28'."""
    return newLocation.getCurrentDayObsHumanStr()
@deprecated(
    reason="getExpRecordAge() has moved to lsst.summit.utils.dateTime. The function is unchanged, "
    "but you should change to import from there. This function will be removed after w_2026_01.",
    version="w_2026_01",
    category=FutureWarning,
)
def getExpRecordAge(expRecord: dafButler.DimensionRecord) -> float:
    """Get the time, in seconds, since the end of exposure.

    Parameters
    ----------
    expRecord : `lsst.daf.butler.DimensionRecord`
        The exposure record.

    Returns
    -------
    age : `float`
        The age of the exposure, in seconds.
    """
    # (now - end) is positive for exposures that have already finished
    return (Time.now() - expRecord.timespan.end).sec
def getSite() -> str:
    """Returns where the code is running.

    Returns
    -------
    location : `str`
        One of:
        ['tucson', 'summit', 'base', 'staff-rsp', 'rubin-devl', 'jenkins',
        'usdf-k8s', 'gha', 'local']

        ``gha`` is returned when running under GitHub Actions, detected via
        the ``GITHUB_ACTIONS=true`` env var that GitHub sets in every
        workflow run. This works whether summit_utils is being tested on its
        own or as a dep of a downstream repo. Like ``local``, it means "no
        functioning site available"; the dedicated value lets CI-only
        branching (skipping butler/uploader-bound tests) be expressed without
        dragging developer laptops into the same opt-out.

        ``local`` is returned when none of the known sites can be detected,
        which usually means the code is running on a developer's laptop or
        in some other environment without the standard env vars set. Callers
        that need a real site (e.g. to talk to S3, EFD or a Butler repo)
        should treat ``local`` (and ``gha``) as "no functioning site
        available" and either raise or skip; callers that only branch on
        ``site == "summit"`` style checks can treat them as a no-op
        fallthrough.
    """
    # All nublado instances guarantee that EXTERNAL_URL is set and uniquely
    # identifies it.
    urlToSite = {
        "https://tucson-teststand.lsst.codes": "tucson",
        "https://summit-lsp.lsst.codes": "summit",
        "https://base-lsp.lsst.codes": "base",
        "https://usdf-rsp.slac.stanford.edu": "staff-rsp",
        # the dev RSP is basically the same env wrt paths etc, so same answer
        "https://usdf-rsp-dev.slac.stanford.edu": "staff-rsp",
    }
    externalUrl = os.getenv("EXTERNAL_INSTANCE_URL", "")
    if externalUrl in urlToSite:
        return urlToSite[externalUrl]

    # if no EXTERNAL_URL, try HOSTNAME to see if we're on the dev nodes
    # it is expected that this will be extensible to SLAC
    hostname = os.getenv("HOSTNAME", "")
    if hostname.startswith(("sdfrome", "sdfiana")):
        return "rubin-devl"
    if hostname == "htcondor.ls.lsst.org":
        return "base"
    if hostname == "htcondor.cp.lsst.org":
        return "summit"

    if os.getenv("JENKINS_HOME", "") != "":
        return "jenkins"

    # GitHub Actions sets GITHUB_ACTIONS=true in every workflow run.
    # Detect it independently of any downstream-package env vars so that
    # summit_utils' own GHA jobs (and any dep that runs us under GHA)
    # both get the right answer.
    if os.getenv("GITHUB_ACTIONS") == "true":
        return "gha"

    # we're probably inside a k8s pod doing rapid analysis work at this point
    rapidAnalysisToSite = {"TTS": "tucson", "BTS": "base", "SUMMIT": "summit", "USDF": "usdf-k8s"}
    rapidAnalysisLocation = os.getenv("RAPID_ANALYSIS_LOCATION", "")
    if rapidAnalysisLocation in rapidAnalysisToSite:
        return rapidAnalysisToSite[rapidAnalysisLocation]

    # No known site detected — assume this is a developer machine.
    return "local"
def getAltAzFromSkyPosition(
    skyPos: geom.SpherePoint,
    visitInfo: afwImage.VisitInfo,
    doCorrectRefraction: bool = False,
    wavelength: float = 500.0,
    pressureOverride: float | None = None,
    temperatureOverride: float | None = None,
    relativeHumidityOverride: float | None = None,
) -> tuple[geom.Angle, geom.Angle]:
    """Get the alt/az from the position on the sky and the time and location
    of the observation.

    The temperature, pressure and relative humidity are taken from the
    visitInfo by default, but can be individually overridden as needed. It
    should be noted that the visitInfo never contains a nominal wavelength,
    and so this takes a default value of 500nm.

    Parameters
    ----------
    skyPos : `lsst.geom.SpherePoint`
        The position on the sky.
    visitInfo : `lsst.afw.image.VisitInfo`
        The visit info containing the time of the observation.
    doCorrectRefraction : `bool`, optional
        Correct for the atmospheric refraction?
    wavelength : `float`, optional
        The nominal wavelength in nanometers (e.g. 500.0), as a float.
    pressureOverride : `float`, optional
        The pressure, in bars (e.g. 0.770), to override the value supplied in
        the visitInfo, as a float.
    temperatureOverride : `float`, optional
        The temperature, in Celsius (e.g. 10.0), to override the value
        supplied in the visitInfo, as a float.
    relativeHumidityOverride : `float`, optional
        The relativeHumidity in the range 0..1 (i.e. not as a percentage), to
        override the value supplied in the visitInfo, as a float.

    Returns
    -------
    alt : `lsst.geom.Angle`
        The altitude.
    az : `lsst.geom.Angle`
        The azimuth.
    """
    skyLocation = SkyCoord(skyPos.getRa().asRadians(), skyPos.getDec().asRadians(), unit=u.rad)
    long = visitInfo.observatory.getLongitude()
    lat = visitInfo.observatory.getLatitude()
    ele = visitInfo.observatory.getElevation()
    earthLocation = EarthLocation.from_geodetic(long.asDegrees(), lat.asDegrees(), ele)

    refractionKwargs = {}
    if doCorrectRefraction:
        # wavelength is never supplied in the visitInfo so always take this
        wavelength = wavelength * u.nm

        # Use explicit None-checks for the overrides: a legitimate value of 0
        # (e.g. 0 deg C for temperature) is falsy and would previously have
        # been silently ignored in favour of the visitInfo value.
        if pressureOverride is not None:
            pressure = pressureOverride
        else:
            pressure = visitInfo.weather.getAirPressure()
            # ObservationInfos (which are the "source of truth" use pascals)
            # so convert from pascals to bars
            pressure /= 100000.0
        pressure = pressure * u.bar

        if temperatureOverride is not None:
            temperature = temperatureOverride
        else:
            temperature = visitInfo.weather.getAirTemperature()
        temperature = temperature * u.deg_C

        if relativeHumidityOverride is not None:
            relativeHumidity = relativeHumidityOverride
        else:
            relativeHumidity = visitInfo.weather.getHumidity() / 100.0  # this is in percent

        refractionKwargs = dict(
            pressure=pressure, temperature=temperature, relative_humidity=relativeHumidity, obswl=wavelength
        )

    obsTime = visitInfo.date.toAstropy()
    altAz = AltAz(obstime=obsTime, location=earthLocation, **refractionKwargs)

    obsAltAz = skyLocation.transform_to(altAz)
    alt = geom.Angle(obsAltAz.alt.degree, geom.degrees)
    az = geom.Angle(obsAltAz.az.degree, geom.degrees)

    return alt, az
def getExpPositionOffset(
    exp1: afwImage.Exposure,
    exp2: afwImage.Exposure,
    useWcs: bool = True,
    allowDifferentPlateScales: bool = False,
) -> pipeBase.Struct:
    """Get the change in sky position between two exposures.

    Given two exposures, calculate the offset on the sky between the images.
    If useWcs then use the (fitted or unfitted) skyOrigin from their WCSs, and
    calculate the alt/az from the observation times, otherwise use the nominal
    values in the exposures' visitInfos. Note that if using the visitInfo
    values that for a given pointing the ra/dec will be ~identical, regardless
    of whether astrometric fitting has been performed.

    Values are given as exp1-exp2.

    Parameters
    ----------
    exp1 : `lsst.afw.image.Exposure`
        The first exposure.
    exp2 : `lsst.afw.image.Exposure`
        The second exposure.
    useWcs : `bool`
        Use the WCS for the ra/dec and alt/az if True, else use the nominal/
        boresight values from the exposures' visitInfos.
    allowDifferentPlateScales : `bool`, optional
        Use to disable checking that plate scales are the same. Generally,
        differing plate scales would indicate an error, but where
        blind-solving has been undertaken during commissioning plate scales
        can be different enough to warrant setting this to ``True``.

    Returns
    -------
    offsets : `lsst.pipe.base.Struct`
        A struct containing the offsets:
        ``deltaRa``
            The difference in ra (`lsst.geom.Angle`)
        ``deltaDec``
            The difference in dec (`lsst.geom.Angle`)
        ``deltaAlt``
            The difference in alt (`lsst.geom.Angle`)
        ``deltaAz``
            The difference in az (`lsst.geom.Angle`)
        ``deltaPixels``
            The difference in pixels (`float`)
    """

    wcs1 = exp1.getWcs()
    wcs2 = exp2.getWcs()
    pixScaleArcSec = wcs1.getPixelScale().asArcseconds()
    if not allowDifferentPlateScales:
        # NOTE(review): assert is stripped under python -O; consider raising
        # ValueError instead — confirm no callers catch AssertionError first.
        assert np.isclose(
            pixScaleArcSec, wcs2.getPixelScale().asArcseconds()
        ), "Pixel scales in the exposures differ."

    if useWcs:
        # use the fitted (or unfitted) WCS origins, and derive alt/az from
        # the observation time and site via getAltAzFromSkyPosition
        p1 = wcs1.getSkyOrigin()
        p2 = wcs2.getSkyOrigin()
        alt1, az1 = getAltAzFromSkyPosition(p1, exp1.getInfo().getVisitInfo())
        alt2, az2 = getAltAzFromSkyPosition(p2, exp2.getInfo().getVisitInfo())
        ra1 = p1[0]
        ra2 = p2[0]
        dec1 = p1[1]
        dec2 = p2[1]
    else:
        # nominal boresight values straight from the visitInfos
        az1 = exp1.visitInfo.boresightAzAlt[0]
        az2 = exp2.visitInfo.boresightAzAlt[0]
        alt1 = exp1.visitInfo.boresightAzAlt[1]
        alt2 = exp2.visitInfo.boresightAzAlt[1]

        ra1 = exp1.visitInfo.boresightRaDec[0]
        ra2 = exp2.visitInfo.boresightRaDec[0]
        dec1 = exp1.visitInfo.boresightRaDec[1]
        dec2 = exp2.visitInfo.boresightRaDec[1]

        p1 = exp1.visitInfo.boresightRaDec
        p2 = exp2.visitInfo.boresightRaDec

    # on-sky separation converted to pixels via exp1's plate scale
    angular_offset = p1.separation(p2).asArcseconds()
    deltaPixels = angular_offset / pixScaleArcSec

    ret = pipeBase.Struct(
        deltaRa=(ra1 - ra2).wrapNear(geom.Angle(0.0)),
        deltaDec=dec1 - dec2,
        deltaAlt=alt1 - alt2,
        deltaAz=(az1 - az2).wrapNear(geom.Angle(0.0)),
        deltaPixels=deltaPixels,
    )

    return ret
def starTrackerFileToExposure(filename: str, logger: logging.Logger | None = None) -> afwImage.Exposure:
    """Read the exposure from the file and set the wcs from the header.

    Parameters
    ----------
    filename : `str`
        The full path to the file.
    logger : `logging.Logger`, optional
        The logger to use for errors, created if not supplied.

    Returns
    -------
    exp : `lsst.afw.image.Exposure`
        The exposure.
    """
    if not logger:
        logger = logging.getLogger(__name__)
    exp = afwImage.ExposureF(filename)
    try:
        wcs = genericCameraHeaderToWcs(exp)
        exp.setWcs(wcs)
    except Exception as e:
        # best-effort: a missing or malformed header leaves the exposure
        # without a wcs rather than failing the read
        logger.warning(f"Failed to set wcs from header: {e}")

    # for some reason the date isn't being set correctly
    # DATE-OBS is present in the original header, but it's being
    # stripped out and somehow not set (plus it doesn't give the midpoint
    # of the exposure), so set it manually from the midpoint here
    try:
        newArgs = {}  # dict to unpack into visitInfo.copyWith - fill it with whatever needs to be replaced
        md = exp.getMetadata()

        begin = datetime.datetime.fromisoformat(md["DATE-BEG"])
        end = datetime.datetime.fromisoformat(md["DATE-END"])
        duration = end - begin
        mid = begin + duration / 2
        newTime = dafBase.DateTime(mid.isoformat(), dafBase.DateTime.Timescale.TAI)
        newArgs["date"] = newTime

        # AIRPRESS is being set as PRESSURE so afw doesn't pick it up
        # once we're using the butler for data we will just set it to take
        # PRESSURE in the translator instead of this
        weather = exp.visitInfo.getWeather()
        oldPressure = weather.getAirPressure()
        if not np.isfinite(oldPressure):
            pressure = md.get("PRESSURE")
            if pressure is not None:
                logger.info("Patching the weather info using the PRESSURE header keyword")
                newWeather = Weather(weather.getAirTemperature(), pressure, weather.getHumidity())
                newArgs["weather"] = newWeather

        if newArgs:
            newVi = exp.visitInfo.copyWith(**newArgs)
            exp.info.setVisitInfo(newVi)
    except Exception as e:
        # NOTE(review): this message only mentions the date, but the try block
        # above also patches the weather — a weather failure logs the same text
        logger.warning(f"Failed to set date from header: {e}")

    return exp
def obsInfoToDict(obsInfo: ObservationInfo) -> dict:
    """Turn an ObservationInfo into a plain dictionary.

    Parameters
    ----------
    obsInfo : `astro_metadata_translator.ObservationInfo`
        The ObservationInfo to convert.

    Returns
    -------
    obsInfoDict : `dict`
        The ObservationInfo as a dict, keyed by property name.
    """
    result = {}
    for propertyName in obsInfo.all_properties:
        result[propertyName] = getattr(obsInfo, propertyName)
    return result
def getFieldNameAndTileNumber(
    field: str, warn: bool = True, logger: logging.Logger | None = None
) -> tuple[str, int | None]:
    """Get the field name and tile number of an observed field.

    The tile number is assumed to always be appended, with an underscore, to
    the rest of the field name. Returns the name and number as a tuple, or the
    name unchanged and ``None`` if no tile number is found.

    Parameters
    ----------
    field : `str`
        The name of the field.
    warn : `bool`, optional
        If ``True``, log a warning when the tile number cannot be determined.
    logger : `logging.Logger`, optional
        The logger to use for warnings; created if needed and not supplied.

    Returns
    -------
    fieldName : `str`
        The name of the field without the trailing tile number, if present.
    tileNum : `int` or `None`
        The number of the tile, as an integer, or ``None`` if not found.
    """
    if warn and not logger:
        logger = logging.getLogger("lsst.summit.utils.utils.getFieldNameAndTileNumber")

    # rpartition splits on the *final* underscore in a single pass.
    fieldName, separator, tail = field.rpartition("_")
    if not separator:  # no underscore anywhere in the field name
        if warn and logger is not None:
            logger.warning(
                f"Field {field} does not contain an underscore, so cannot determine the tile number."
            )
        return field, None

    try:
        tileNum = int(tail)
    except ValueError:
        if warn and logger is not None:
            logger.warning(
                f"Field {field} does not contain only an integer after the final underscore"
                " so cannot determine the tile number."
            )
        return field, None

    return fieldName, tileNum
def getAirmassSeeingCorrection(airmass: float) -> float:
    """Return the factor by which to correct seeing for the given airmass.

    Parameters
    ----------
    airmass : `float`
        The airmass, greater than or equal to 1.

    Returns
    -------
    correctionFactor : `float`
        The correction factor to apply to the seeing.

    Raises
    ------
    ValueError raised for unphysical airmasses.
    """
    if airmass >= 1:
        return airmass ** (-0.6)
    raise ValueError(f"Invalid airmass: {airmass}")
@deprecated(
    reason=". Will be removed after v28.0.",
    version="v27.0",
    category=FutureWarning,
)
def getFilterSeeingCorrection(filterName: str) -> float:
    """Get the correction factor for seeing due to a filter.

    Deprecated: this is a thin alias that delegates directly to
    `getBandpassSeeingCorrection`; call that function instead.

    Parameters
    ----------
    filterName : `str`
        The name of the filter, e.g. 'SDSSg_65mm'.

    Returns
    -------
    correctionFactor : `float`
        The correction factor to apply to the seeing.

    Raises
    ------
    ValueError raised for unknown filters.
    """
    return getBandpassSeeingCorrection(filterName)
def getBandpassSeeingCorrection(filterName: str) -> float:
    """Get the correction factor for seeing due to a filter.

    Parameters
    ----------
    filterName : `str`
        The name of the filter, e.g. 'SDSSg_65mm'.

    Returns
    -------
    correctionFactor : `float`
        The correction factor to apply to the seeing.

    Raises
    ------
    ValueError raised for unknown filters.
    """
    # Effective wavelength (nm) per filter; the correction applied is
    # (wavelength / 500 nm) ** 0.2.
    effectiveWavelengths = {
        # LATISS
        "SDSSg_65mm": 474.41,
        "SDSSr_65mm": 628.47,
        "SDSSi_65mm": 769.51,
        "SDSSz_65mm": 871.45,
        "SDSSy_65mm": 986.8,
        # ComCam
        "u_02": 370.697,
        "g_01": 476.359,
        "r_03": 619.383,
        "i_06": 754.502,
        "z_03": 866.976,
        "y_04": 972.713,
        # LSSTCam
        "u_24": 365.1397,
        "g_6": 475.6104,
        "r_57": 618.6055,
        "i_39": 753.5490,
        "z_20": 868.0935,
        "y_10": 972.5979,
    }
    wavelength = effectiveWavelengths.get(filterName)
    if wavelength is None:
        raise ValueError(f"Unknown filter name: {filterName}")
    return (wavelength / 500.0) ** 0.2
def getCdf(data: np.ndarray, scale: int, nBinsMax: int = 300_000) -> tuple[np.ndarray | float, float, float]:
    """Return an approximate cumulative distribution function scaled to
    the [0, scale] range.

    If the input data is all nan, then the output cdf will be nan as well as
    the min and max values.

    Parameters
    ----------
    data : `np.array`
        The input data.
    scale : `int`
        The scaling range of the output.
    nBinsMax : `int`, optional
        Maximum number of bins to use.

    Returns
    -------
    cdf : `np.array` of `int`
        A monotonically increasing sequence that represents a scaled
        cumulative distribution function, starting with the value at
        minVal, then at (minVal + 1), and so on.
    minVal : `float`
        An integer smaller than the minimum value in the input data.
    maxVal : `float`
        An integer larger than the maximum value in the input data.
    """
    values = data.ravel()
    # Count of non-nan samples; the cdf is normalized by this.
    nValid = values.size - np.count_nonzero(np.isnan(values))

    minVal = np.floor(np.nanmin(values))
    maxVal = np.ceil(np.nanmax(values)) + 1.0

    if np.isnan(minVal) or np.isnan(maxVal):
        # nanmin/nanmax only return nan when every element is nan, so no
        # cdf can be computed: return nans for all values.
        return np.nan, np.nan, np.nan

    nBins = np.clip(int(maxVal) - int(minVal), 1, nBinsMax)
    counts, _ = np.histogram(values, bins=nBins, range=(int(minVal), int(maxVal)))
    cdf = (scale * np.cumsum(counts) / nValid).astype(np.int64)
    return cdf, minVal, maxVal
def getQuantiles(data: npt.NDArray[np.float64], nColors: int) -> npt.NDArray[np.floating[Any]]:
    """Get a set of boundaries that equally distribute data into
    nColors intervals. The output can be used to make a colormap of nColors
    colors.

    This is equivalent to using the numpy function:
    np.nanquantile(data, np.linspace(0, 1, nColors + 1))
    but with a coarser precision, yet sufficient for our use case. This
    implementation gives a significant speed-up. In the case of large
    ranges, np.nanquantile is used because it is more memory efficient.

    If all elements of ``data`` are nan then the output ``boundaries`` will
    also all be ``nan`` to keep the interface consistent.

    Parameters
    ----------
    data : `np.array`
        The input image data.
    nColors : `int`
        The number of intervals to distribute data into.

    Returns
    -------
    boundaries: `list` of `float`
        A monotonically increasing sequence of size (nColors + 1). These are
        the edges of nColors intervals.
    """
    dataRange = np.nanmax(data) - np.nanmin(data)
    if dataRange > 300_000:
        # A histogram over such a range would be huge; fall back to the
        # slower but memory-efficient nanquantile.
        log = logging.getLogger(__name__)
        log.warning(f"Data range is very large ({dataRange}); using slower quantile code.")
        return np.nanquantile(data, np.linspace(0, 1, nColors + 1))

    cdf, minVal, maxVal = getCdf(data, nColors)
    if np.isnan(minVal):  # cdf calculation has failed because all data is nan
        return np.full(nColors, np.nan)
    assert isinstance(cdf, np.ndarray), "cdf is not an np.ndarray"
    binWidth = (maxVal - minVal) / len(cdf)

    # For each quantile level, find the first histogram bin whose cumulative
    # count reaches it, and convert that bin index back to a data value.
    edges = [np.argmax(cdf >= level) * binWidth + minVal for level in range(nColors)]
    edges.append(maxVal)
    return np.asarray(edges)
def digitizeData(data: npt.NDArray[np.float64], nColors: int = 256) -> npt.NDArray[np.integer]:
    """
    Scale data into nColors using its cumulative distribution function.

    Parameters
    ----------
    data : `np.array`
        The input image data.
    nColors : `int`
        The number of intervals to distribute data into.

    Returns
    -------
    data: `np.array` of `int`
        Scaled data in the [0, nColors - 1] range.
    """
    cdf, minVal, maxVal = getCdf(data, nColors - 1)
    assert isinstance(cdf, np.ndarray), "cdf is not an np.ndarray"
    binWidth = (maxVal - minVal) / len(cdf)
    # Map each value to its histogram-bin index: (value - minVal) / binWidth.
    # This is the inverse of the index -> value mapping used in getQuantiles.
    # The previous form, floor(data * scale - minVal), was only correct when
    # the bin width happened to be exactly 1; when getCdf clips the bin count
    # (data range > nBinsMax) it produced out-of-range indices.
    bins = np.floor((data - minVal) / binWidth).astype(np.int64)
    # Clamp so rounding artefacts (or nan casts) cannot index out of bounds.
    bins = np.clip(bins, 0, len(cdf) - 1)
    return cdf[bins]
def getBboxAround(centroid: geom.Point, boxSize: int, exp: afwImage.Exposure) -> geom.Box2I:
    """Get a bbox centered on a point, clipped to the exposure.

    If the bbox would extend beyond the bounds of the exposure it is clipped to
    the exposure, resulting in a non-square bbox.

    Parameters
    ----------
    centroid : `lsst.geom.Point`
        The source centroid.
    boxSize : `int`
        The size of the box to centre around the centroid.
    exp : `lsst.afw.image.Exposure`
        The exposure, so the bbox can be clipped to not overrun the bounds.

    Returns
    -------
    bbox : `lsst.geom.Box2I`
        The bounding box, centered on the centroid unless clipping to the
        exposure causes it to be non-square.
    """
    extent = geom.Extent2I(boxSize, boxSize)
    fullBox = geom.BoxI().makeCenteredBox(centroid, extent)
    return fullBox.clippedTo(exp.getBBox())
def bboxToMatplotlibRectanle(bbox: geom.Box2I | geom.Box2D) -> matplotlib.patches.Rectangle:
    """Convert a bbox to a matplotlib Rectangle for plotting.

    NOTE(review): the function name contains a typo ("Rectanle") but is
    public API, so it is kept unchanged here.

    Parameters
    ----------
    bbox : `lsst.geom.Box2I` or `lsst.geom.Box2D`
        The bbox to convert.

    Returns
    -------
    rectangle : `matplotlib.patches.Rectangle`
        The rectangle.
    """
    width, height = bbox.getDimensions()
    lowerLeft = (bbox.minX, bbox.minY)
    return Rectangle(lowerLeft, width, height)
def computeExposureId(instrument: str, controller: str, dayObs: int, seqNum: int) -> int:
    """Compute the exposure ID from its constituent parts.

    Parameters
    ----------
    instrument : `str`
        The instrument name, e.g. "LATISS" (case insensitive).
    controller : `str`
        The controller code, passed through to the instrument's translator.
    dayObs : `int`
        The day of observation.
    seqNum : `int`
        The sequence number.

    Returns
    -------
    exposureId : `int`
        The computed exposure ID.

    Raises
    ------
    ValueError
        Raised for unknown instruments.
    """
    instrument = instrument.lower()
    if instrument == "latiss":
        return lsst.obs.lsst.translators.LatissTranslator.compute_exposure_id(dayObs, seqNum, controller)
    elif instrument == "lsstcomcam":
        return lsst.obs.lsst.translators.LsstComCamTranslator.compute_exposure_id(dayObs, seqNum, controller)
    elif instrument == "lsstcomcamsim":
        return lsst.obs.lsst.translators.LsstComCamSimTranslator.compute_exposure_id(
            dayObs, seqNum, controller
        )
    elif instrument == "lsstcam":
        return lsst.obs.lsst.translators.LsstCamTranslator.compute_exposure_id(dayObs, seqNum, controller)
    else:
        # The f-prefix was missing, so the message never interpolated.
        raise ValueError(f"Unknown instrument {instrument}")
def computeCcdExposureId(instrument: str, exposureId: int, detector: int) -> int:
    """Compute the detector-exposure ID for a given exposure and detector.

    Parameters
    ----------
    instrument : `str`
        The instrument name, e.g. "LATISS" (case insensitive).
    exposureId : `int`
        The exposure ID.
    detector : `int`
        The detector number; its valid range depends on the instrument.

    Returns
    -------
    ccdExposureId : `int`
        The computed detector-exposure ID.

    Raises
    ------
    ValueError
        Raised for unknown instruments or out-of-range detectors.
    """
    # All the error messages below were missing their f-prefixes, so the
    # instrument/detector values never interpolated.
    instrument = instrument.lower()
    if instrument == "latiss":
        if detector != 0:
            raise ValueError(f"Invalid detector {detector} for LATISS")
        return lsst.obs.lsst.translators.LatissTranslator.compute_detector_exposure_id(exposureId, detector)
    elif instrument == "lsstcomcam":
        if detector < 0 or detector >= 9:
            raise ValueError(f"Invalid detector {detector} for LSSTComCam")
        return lsst.obs.lsst.translators.LsstComCamTranslator.compute_detector_exposure_id(
            exposureId, detector
        )
    elif instrument == "lsstcomcamsim":
        if detector < 0 or detector >= 9:
            raise ValueError(f"Invalid detector {detector} for LSSTComCamSim")
        return lsst.obs.lsst.translators.LsstComCamSimTranslator.compute_detector_exposure_id(
            exposureId, detector
        )
    elif instrument == "lsstcam":
        if detector < 0 or detector >= 205:
            raise ValueError(f"Invalid detector {detector} for LSSTCam")
        return lsst.obs.lsst.translators.LsstCamTranslator.compute_detector_exposure_id(exposureId, detector)
    else:
        raise ValueError(f"Unknown instrument {instrument}")
def getDetectorIds(instrumentName: str) -> list[int]:
    """Get a list of detector IDs for a given instrument.

    Parameters
    ----------
    instrumentName : `str`
        The name of the instrument.

    Returns
    -------
    detectorIds : `list` of `int`
        The list of detector IDs.
    """
    # Cameras iterate over their detectors directly.
    return [det.getId() for det in getCameraFromInstrumentName(instrumentName)]
def getCameraFromInstrumentName(instrumentName: str) -> Camera:
    """Get the camera object given the instrument name (case insenstive).

    Parameters
    ----------
    instrumentName : `str`
        The name of the instrument, e.g. "LATISS" or "LSSTCam". Case
        insenstive.

    Returns
    -------
    camera: `lsst.afw.cameraGeom.Camera`
        The camera object corresponding to the instrument name.

    Raises
    ------
    ValueError
        If the instrument name is not supported.
    """
    # Dispatch table from lower-cased instrument name to instrument class.
    cameraClasses = {
        "lsstcam": LsstCam,
        "lsstcomcam": LsstComCam,
        "lsstcomcamsim": LsstComCamSim,
        "latiss": Latiss,
    }
    cameraClass = cameraClasses.get(instrumentName.lower())
    if cameraClass is None:
        raise ValueError(f"Unsupported instrument: {instrumentName}")
    return cameraClass.getCamera()
def calcEclipticCoords(ra: float, dec: float) -> tuple[float, float]:
    """Get the ecliptic coordinates of the specified ra and dec.

    Transform J2000 (ra, dec), both in degrees, to
    IAU1976 Ecliptic coordinates (also returning degrees).

    Matches the results of:

        from astropy.coordinates import SkyCoord, HeliocentricEclipticIAU76
        import astropy.units as u

        p = SkyCoord(ra=ra0*u.deg, dec=dec0*u.deg, distance=1*u.au, frame='hcrs')
        ecl = p.transform_to(HeliocentricEclipticIAU76)
        print(ecl.lon.value, ecl.lat.value)

    except that it's fast.

    Parameters
    ----------
    ra : `float`
        The right ascension, in degrees.
    dec : `float`
        The declination, in degrees.

    Returns
    -------
    lambda : `float`
        The ecliptic longitude in degrees.
    beta : `float`
        The ecliptic latitude in degrees.
    """
    raRad, decRad = np.deg2rad(ra), np.deg2rad(dec)

    # IAU 1976 obliquity
    epsilon = np.deg2rad(23.43929111111111)
    cosEps, sinEps = np.cos(epsilon), np.sin(epsilon)

    sinRa, cosRa = np.sin(raRad), np.cos(raRad)
    sinDec, cosDec = np.sin(decRad), np.cos(decRad)

    # Multiply both arctan2 arguments by cos(dec) (which is >= 0 for valid
    # declinations, so the quadrant is unchanged) instead of dividing by it:
    # same angle everywhere, but no divide-by-zero at the celestial poles.
    lambda_ = np.arctan2(sinRa * cosDec * cosEps + sinDec * sinEps, cosRa * cosDec)
    beta = np.arcsin(sinDec * cosEps - cosDec * sinEps * sinRa)

    # normalize longitude to [0, 2*pi)
    if lambda_ < 0:
        lambda_ += np.pi * 2

    return (np.rad2deg(lambda_), np.rad2deg(beta))
def getImageArray(
    inputData: np.ndarray | afwImage.Exposure | afwImage.Image | afwImage.MaskedImage,
) -> np.ndarray:
    """Get the image data from anything image-like.

    Parameters
    ----------
    inputData : `numpy.ndarray`, `lsst.afw.image.Exposure`,
        `lsst.afw.image.Image`, or `lsst.afw.image.MaskedImage`
        The input data.

    Returns
    -------
    imageData : `numpy.ndarray`
        The image data.

    Raises
    ------
    TypeError
        If the input data is not a numpy array, lsst.afw.image.Exposure,
        lsst.afw.image.Image, or lsst.afw.image.MaskedImage.
    """
    # Check ndarray first so plain arrays never touch the afw types.
    if isinstance(inputData, np.ndarray):
        return inputData
    if isinstance(inputData, afwImage.MaskedImage):
        return inputData.image.array
    if isinstance(inputData, afwImage.Image):
        return inputData.array
    if isinstance(inputData, afwImage.Exposure):
        return inputData.image.array
    raise TypeError(
        "This function accepts numpy array, lsst.afw.image.Exposure components."
        f" Got {type(inputData)}"
    )
def getSunAngle(time: Time | None = None) -> float:
    """Get the angle of the sun to the horizon at the specified time.

    If no time is specified, the current time is used. Positive numbers means
    the sun is above the horizon, negative means it is below.

    Parameters
    ----------
    time : `astropy.time.Time`, optional
        The time at which to evaluate the sun's altitude; defaults to now.

    Returns
    -------
    sun_alt : `float`
        The angle of the sun to the horizon, in degrees.
    """
    when = Time.now() if time is None else time
    frame = AltAz(obstime=when, location=SIMONYI_LOCATION)
    return get_sun(when).transform_to(frame).alt.deg
@dataclass(slots=True)
class RobustFitResult:
    """Results of a robust (RANSAC + OLS) linear fit.

    Produced by `RobustFitter.fit`.
    """

    # Best-fit line parameters from the OLS fit to the RANSAC inliers.
    slope: float
    intercept: float
    # Standard deviation (ddof=1) of the OLS residuals; NaN with < 2 inliers.
    scatter: float
    # OLS statistics for the slope coefficient.
    slopePValue: float
    slopeStdErr: float
    slopeTValue: float
    # OLS statistics for the intercept coefficient.
    interceptPValue: float
    interceptStdErr: float
    interceptTValue: float
    # Boolean array aligned with the input x/y: True marks an outlier;
    # non-finite input points are also marked True.
    outlierMask: np.ndarray
class RobustFitter:
    """
    Robust linear fit using RANSAC + OLS, reusable across datasets.

    This class fits a robust linear trend to x and y using scikit-learn's
    RANSAC for inlier detection, followed by an OLS fit on inliers to compute
    slope, intercept, scatter, standard errors, t-values, and p-values.
    Configuration (e.g., minSamples) is set at init. Call `fit()` per dataset.
    Results are returned as a `RobustFitResult` and stored internally for later
    access (e.g., plotting).

    Parameters
    ----------
    minSamples : `float`, optional
        Minimum fraction of samples chosen randomly from the original data.
    """

    def __init__(self, *, minSamples: float = 0.2) -> None:
        self.minSamples = minSamples
        self._clearState()

    def _clearState(self) -> None:
        """Reset stored fit state."""
        # Everything is reset so a new fit() can never leak results from a
        # previous dataset.
        self.model: Any = None
        self.olsModel: Any = None
        self.outlierMask: np.ndarray | None = None
        self.x: np.ndarray | None = None
        self.y: np.ndarray | None = None
        self.slope: float = np.nan
        self.intercept: float = np.nan
        self.ciSlope: tuple[float, float] = (np.nan, np.nan)
        self.ciIntercept: tuple[float, float] = (np.nan, np.nan)
        self.slopePValue = np.nan
        self.slopeStdErr = np.nan
        self.slopeTValue = np.nan
        self.scatter = np.nan

    @staticmethod
    def _defaultResidualThreshold(y: np.ndarray) -> float:
        """Compute default residual threshold from finite y values.

        The threshold is 1.5 x the median-absolute-deviation-based standard
        deviation of the finite y values.
        """
        yFinite = np.asarray(y)[np.isfinite(y)]
        if yFinite.size == 0:
            raise ValueError("Cannot compute residual threshold: no finite y.")
        return 1.5 * float(mad_std(yFinite))

    def fit(
        self,
        x: np.ndarray,
        y: np.ndarray,
        *,
        residualThreshold: float | None = None,
        randomState: int = 42,
    ) -> RobustFitResult:
        """
        Fit robust line to (x, y) using RANSAC + OLS on inliers.

        Recomputes residual threshold each call if not provided.

        Parameters
        ----------
        x : array-like
            Independent variable values.
        y : array-like
            Dependent variable values.
        residualThreshold : float, optional
            Residual threshold for inlier detection. If None, computed
            as 1.5 × MAD of y.
        randomState : int, optional
            Random seed for RANSAC.

        Returns
        -------
        result : `RobustFitResult`
            Best-fit parameters and statistics from the OLS inlier fit.

        Raises
        ------
        ValueError
            If no finite x/y values are available for fitting.
        """
        self._clearState()

        x = np.asarray(x)
        y = np.asarray(y)

        finiteMask = np.isfinite(x) & np.isfinite(y)
        if not finiteMask.any():
            raise ValueError("No finite x/y values to fit.")

        # sklearn expects a 2D design matrix; fit only the finite points.
        X = x[finiteMask].reshape(-1, 1)
        yFit = y[finiteMask]

        if residualThreshold is None:
            residualThreshold = self._defaultResidualThreshold(yFit)

        ransac = RANSACRegressor(
            estimator=LinearRegression(),
            min_samples=self.minSamples,
            residual_threshold=residualThreshold,
            random_state=randomState,
        )
        ransac.fit(X, yFit)

        slope = float(ransac.estimator_.coef_[0])
        intercept = float(ransac.estimator_.intercept_)

        self.x = x
        self.y = y

        # Build a full-length mask aligned with the inputs: non-finite
        # points stay flagged as outliers; finite points take RANSAC's
        # inlier/outlier verdict.
        outlierMask = np.ones_like(x, dtype=bool)
        outlierMask[finiteMask] = ~ransac.inlier_mask_
        self.outlierMask = outlierMask

        # Refit the inliers with OLS to get parameter statistics
        # (std errors, t-values, p-values, confidence intervals).
        xIn = x[~outlierMask]
        yIn = y[~outlierMask]
        XDesign = sm.add_constant(xIn)
        ols = sm.OLS(yIn, XDesign).fit()
        # alpha=0.32 gives ~68% (1-sigma) confidence intervals.
        ci = ols.conf_int(alpha=0.32)
        ciIntercept = (float(ci[0][0]), float(ci[0][1]))
        ciSlope = (float(ci[1][0]), float(ci[1][1]))
        scatter = float(np.nanstd(ols.resid, ddof=1)) if ols.resid.size > 1 else np.nan

        self.model = ransac
        self.olsModel = ols
        self.slope = slope
        self.intercept = intercept
        self.ciSlope = ciSlope
        self.ciIntercept = ciIntercept
        self.slopePValue = float(ols.pvalues[1]) if np.isfinite(ols.pvalues[1]) else np.nan
        self.slopeStdErr = float(ols.bse[1]) if np.isfinite(ols.bse[1]) else np.nan
        self.slopeTValue = float(ols.tvalues[1]) if np.isfinite(ols.tvalues[1]) else np.nan
        self.scatter = scatter

        return RobustFitResult(
            slope=slope,
            intercept=intercept,
            scatter=scatter,
            slopePValue=self.slopePValue,
            slopeStdErr=self.slopeStdErr,
            slopeTValue=self.slopeTValue,
            interceptPValue=float(ols.pvalues[0]) if np.isfinite(ols.pvalues[0]) else np.nan,
            interceptStdErr=float(ols.bse[0]) if np.isfinite(ols.bse[0]) else np.nan,
            interceptTValue=float(ols.tvalues[0]) if np.isfinite(ols.tvalues[0]) else np.nan,
            outlierMask=self.outlierMask,
        )

    def reset(self) -> None:
        """Clear any stored fit state."""
        self._clearState()

    def plotBestFit(
        self,
        ax: matplotlib.axes.Axes,
        label: str | None = None,
        color: str | None = None,
        alphaBand: float = 0.2,
        lw: int = 2,
        nBins: int = 5,
    ) -> matplotlib.axes.Axes:
        """Plot the best fit line, confidence interval,
        and optionally scatter/binned data.

        Parameters
        ----------
        ax : matplotlib.axes.Axes, optional
            The axes to plot on. If None, uses current axes.
        label : str, optional
            Label for the best fit line.
        color : str or None, optional
            Color for the fit line and confidence band.
        alphaBand : float, optional
            Alpha transparency for the confidence interval band.
        lw : int, optional
            Line width for the best fit line.
        nBins : int, optional
            Number of bins for binned statistics.

        Returns
        -------
        ax : matplotlib.axes.Axes
            The axes with the plot.
        """
        # Handle case where self.x contains only NaNs
        if self.x is None or self.y is None or self.outlierMask is None or self.olsModel is None:
            raise RuntimeError("Fit must be called before plotting.")

        if np.isnan(self.x).all():
            raise ValueError("All x values are NaN; cannot plot best fit.")

        # Evaluate the stored OLS model on a dense grid spanning the data.
        xx = np.linspace(np.nanmin(self.x), np.nanmax(self.x), 200)
        X_design = sm.add_constant(xx)

        pred = self.olsModel.get_prediction(X_design)
        summary_frame = pred.summary_frame(alpha=0.05)  # 95% intervals

        mean = summary_frame["mean"]
        ci_lo = summary_frame["mean_ci_lower"]
        ci_hi = summary_frame["mean_ci_upper"]

        # Plot best fit
        ax.plot(xx, mean, color=color, label=label, lw=lw)
        # Plot confidence interval band
        ax.fill_between(xx, ci_lo, ci_hi, color=color, alpha=alphaBand)

        mask = self.outlierMask
        xin, yin = self.x[~mask], self.y[~mask]  # inliers
        xout, _ = self.x[mask], self.y[mask]  # outliers

        # Bin data in nbins
        # NOTE(review): this checks the *outliers* (xout) for all-NaN before
        # binning the *inliers* (xin, yin); np.all on an empty array is also
        # True, so binned points are skipped when there are no outliers at
        # all. Possibly xin was intended here — confirm before changing.
        if np.all(np.isnan(xout)):
            # Skip binning if all values are NaN
            pass
        else:
            bin_centers, means, stds, bin_counts = self.getBinnedData(xin, yin, nBins)

            # Plot binned means and stds
            ax.errorbar(
                bin_centers,
                means,
                color=color or "black",
                yerr=stds,
                fmt="o",
                capsize=4,
                markersize=6,
                alpha=0.7,
                zorder=10,
            )

        return ax

    @staticmethod
    def getBinnedData(
        x: np.ndarray, y: np.ndarray, nbins: int
    ) -> tuple[np.ndarray, list[float], list[float], list[int]]:
        """Get binned statistics for x and y.

        Bins x into ``nbins`` equal-width bins and returns, per bin, the
        center, the nanmedian of y, the standard error of y
        (nanstd / sqrt(n), NaN when a bin has fewer than 2 points), and the
        point count. Empty bins yield NaN statistics and a count of 0.
        """
        bins = np.linspace(np.nanmin(x), np.nanmax(x), nbins + 1)
        bin_centers = 0.5 * (bins[:-1] + bins[1:])
        digitized = np.digitize(x, bins) - 1
        means = []
        stds = []
        bin_counts = []
        for i in range(nbins):
            bin_y = y[digitized == i]
            if len(bin_y) > 0:
                means.append(np.nanmedian(bin_y))
                if len(bin_y) > 1:
                    stds.append(np.nanstd(bin_y, ddof=1) / np.sqrt(len(bin_y)))
                else:
                    stds.append(np.nan)
                bin_counts.append(len(bin_y))
            else:
                means.append(np.nan)
                stds.append(np.nan)
                bin_counts.append(0)
        return bin_centers, means, stds, bin_counts