Coverage for python / lsst / summit / utils / assessPeekExposure.py: 0%
160 statements
« prev ^ index » next coverage.py v7.13.5, created at 2026-04-26 09:44 +0000
1# This file is part of summit_utils.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <https://www.gnu.org/licenses/>.
23import argparse
24import atexit
25import logging
26import multiprocessing
27import os
28import shutil
29import time
30import warnings
31from collections import namedtuple
32from pathlib import Path
33from typing import Any
35import astropy
36import matplotlib.pyplot as plt
37import numpy as np
38from astropy.table import Table
39from astropy.utils.exceptions import AstropyUserWarning
40from tabulate import tabulate
41from tqdm import tqdm
43import lsst.afw.display as afwDisplay
44from lsst.afw.geom import ellipses
45from lsst.daf.butler.datastore.cache_manager import DatastoreCacheManager
46from lsst.pipe.tasks.peekExposure import PeekExposureTask
48from .bestEffort import BestEffortIsr
# Set the logger level higher than CRITICAL so PeekExposureTask emits no output.
silentLogger = logging.getLogger("silentLogger")
silentLogger.setLevel(logging.CRITICAL + 1)

# Global vars for passing items from the multiprocessing initializer to doWork:
# one bundle of per-process singletons (ISR provider, task, display, figure).
AssessPeekExposureGlobals = namedtuple(
    "AssessPeekExposureGlobals", ["bestEffort", "pet", "display", "fig", "ax"]
)

# Conversion from a Gaussian sigma to FWHM: 2*sqrt(2*ln(2)).
SIGMA_TO_FWHM = 2.0 * np.sqrt(2.0 * np.log(2.0))
AUXTEL_PIXEL_SCALE = 0.1  # arcsec/pixel

# Per-process holder populated by initializePoolProcess(); None until then.
apeGlobals: None | AssessPeekExposureGlobals = None
# Build exactly one BestEffortIsr + afwDisplay per subprocess.
def initializePoolProcess() -> None:
    """Initialize the multiprocessing pool process.

    Creates the objects we only want to build once per worker process — a
    matplotlib figure, an afwDisplay wrapping it, a PeekExposureTask, and a
    BestEffortIsr — and stores them in the module-level ``apeGlobals``
    namedtuple, which is the only practical channel for handing them to
    ``doWork`` inside pool workers.
    """
    global apeGlobals

    figure, axes = plt.subplots(nrows=1, ncols=1, figsize=(9, 9), facecolor="k")
    disp = afwDisplay.Display(figure, backend="matplotlib")
    disp.scale("asinh", -1, 31)

    task = PeekExposureTask(
        config=PeekExposureTask.ConfigClass(), log=silentLogger, display=disp
    )
    isr = BestEffortIsr(embargo=True)

    apeGlobals = AssessPeekExposureGlobals(isr, task, disp, figure, axes)
# Until DM-41335 is fixed, manually force the mode for these exposures.
# Each entry is an inclusive (low, high) exposureId range whose catalog
# observation-type label is wrong, with the mode (and donut diameter, where
# applicable) to use instead.  Ranges are disjoint.
_MODE_OVERRIDES: list[tuple[int, int, str, int | None]] = [
    (2022101200584, 2022101200588, "donut", 1700),  # incorrectly labeled as photo
    (2022101200589, 2022101200672, "photo", None),  # incorrectly labeled as giant donut
    (2022101200673, 2022101200676, "donut", 200),
    (2022101200677, 2022101200873, "photo", None),
    (2022101200874, 2022101200875, "donut", 200),
    (2022101200876, 2022101201060, "photo", None),
    (2022101201061, 2022101201062, "donut", 200),
    (2022101201063, 2022101201247, "photo", None),
]


# retrieve best-effort-isr and run PET on it
def doWork(idx: int, row: astropy.table.Row, doPlot: bool) -> tuple[str, str, float, int]:
    """Run PeekExposureTask on a single exposure.

    Parameters
    ----------
    idx : `int`
        Index of the exposure in the table.
    row : `astropy.table.Row`
        Row of the table containing the exposure metadata.
    doPlot : `bool`
        Whether to plot the results.

    Returns
    -------
    inTag : `str`
        Input tag of the exposure.
    outTag : `str`
        Output tag of the exposure.
    runtime : `float`
        Runtime of the exposure in seconds.
    exposureId : `int`
        Exposure ID of the exposure.
    """
    global apeGlobals  # noqa F824
    assert apeGlobals is not None, "initializePoolProcess must be called before doWork"
    bestEffort, pet, display, fig, ax = apeGlobals

    exposureId = row["exposureId"]
    dataId = {"instrument": "LATISS", "exposure": exposureId, "detector": 0}
    exp = bestEffort.getExposure(dataId)

    mode = "auto"
    binSize = None
    donutDiameter = None
    for low, high, forcedMode, forcedDiameter in _MODE_OVERRIDES:
        if low <= exposureId <= high:
            mode = forcedMode
            donutDiameter = forcedDiameter
            break

    t0 = time.time()
    result = pet.run(exp, mode=mode, doDisplay=doPlot, binSize=binSize, donutDiameter=donutDiameter)
    runtime = time.time() - t0

    if doPlot:
        # Only annotate when actually plotting; previously this ran
        # unconditionally, accumulating text artists on the shared
        # per-process figure even when nothing was ever saved.
        ax.text(
            0.01,
            0.97,
            f"Runtime: {runtime:.2f} s",
            c="w",
            transform=ax.transAxes,
            fontsize=12,
        )

    inTag = row["finalTag"]
    trueX = row["centroid_x"]
    trueY = row["centroid_y"]
    foundX = result.brightestCentroid.x
    foundY = result.brightestCentroid.y

    if np.isfinite(trueX):
        # Convert the truth centroid into binned-image pixel coordinates.
        nx = (trueX - (result.binSize - 1) / 2) / result.binSize
        ny = (trueY - (result.binSize - 1) / 2) / result.binSize
        if doPlot:
            display.dot("o", nx, ny, ctype=afwDisplay.MAGENTA, size=20)
        if np.isfinite(foundX):
            dist = np.sqrt((trueX - foundX) ** 2 + (trueY - foundY) ** 2)
            # Thresholds are in pixels: 20 px ~ 2 arcsec and 100 px ~ 10
            # arcsec at AUXTEL_PIXEL_SCALE (0.1 arcsec/pixel).
            if dist < 20:
                outTag = "<2"
            elif dist < 100:
                outTag = "<10"
            else:
                outTag = ">10"
        else:
            outTag = "noStar"  # catalog had a star but PET found none
    else:
        outTag = "noTruth" if np.isfinite(foundX) else "nothing"

    if doPlot:
        if np.isfinite(result.psfPixelShape.getIxx()):
            distortion = ellipses.SeparableDistortionDeterminantRadius(result.psfPixelShape)
            fwhm = SIGMA_TO_FWHM * distortion.getDeterminantRadius() * AUXTEL_PIXEL_SCALE
        else:
            fwhm = np.nan
        display.show_colorbar(False)
        ax.set_title(f"{exposureId=} {inTag=} {outTag=} {fwhm=:.2f} arcsec", color="w")
        # NOTE(review): relies on the module-global ``args`` created in the
        # __main__ block; with a "spawn" multiprocessing start method this
        # would raise NameError in workers — works only under "fork".
        path = Path(args.plotdir)
        path.mkdir(parents=True, exist_ok=True)
        fn = f"test_pet_{idx:04d}_{exposureId:d}_{inTag}_{outTag}.png"
        fig.savefig(path / fn)

    return inTag, outTag, runtime, exposureId
def main(args: argparse.Namespace) -> None:
    """Assess PeekExposureTask against the curated QFM baseline table.

    Reads the parquet table of curated exposures, runs ``doWork`` on each row
    (serially or in a multiprocessing pool), and prints a cross-tabulation of
    input tags vs. PET outcome tags plus median runtimes.

    Parameters
    ----------
    args : `argparse.Namespace`
        Parsed command line arguments: row range (``start``/``end``),
        ``nproc``, ``plot`` and ``plotdir``.
    """
    # Set up cache directory and register cleanup.
    defined, cacheDir = DatastoreCacheManager.set_fallback_cache_directory_if_unset()
    if defined:
        atexit.register(shutil.rmtree, cacheDir, ignore_errors=True)

    # Loop through Merlin's curated data.
    path = Path(os.environ["SUMMIT_EXTRAS_DIR"]) / "data" / "qfm_baseline_assessment.parq"
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=AstropyUserWarning)
        table = Table.read(path)
    table["exposureId"] = table["day_obs"] * 100_000 + table["sequence_number"]
    table = table[args.start : args.end]
    intags = np.unique(table["finalTag"])

    # Possible PET outcomes
    outtags = [
        "noStar",  # PET didn't find star, but catalog had one
        "noTruth",  # No catalog star to compare to, but PET found one
        "nothing",  # No catalog or PET star
        "<2",  # PET brightest star agrees with catalog to better than 2 arcsec
        "<10",  # agrees to 10 arcsec
        ">10",  # disagrees by more than 10 arcsec
    ]
    results: dict[str, Any] = {intag: {outtag: [] for outtag in outtags} for intag in intags}

    if args.nproc == 1:
        # Serial path, mainly useful for debugging.
        initializePoolProcess()
        for idx, row in enumerate(tqdm(table, smoothing=0.05)):
            inTag, outTag, runtime, exposureId = doWork(idx + args.start, row, args.plot)
            results[inTag][outTag].append((exposureId, runtime))
    else:
        # Bug fix: the pool previously ran unconditionally, so --nproc 1
        # processed every exposure twice (serially, then again in the pool).
        with multiprocessing.Pool(args.nproc, initializer=initializePoolProcess) as pool:
            futures = [
                pool.apply_async(doWork, (idx + args.start, row, args.plot))
                for idx, row in enumerate(table)
            ]
            for future in tqdm(futures, smoothing=0.05):
                inTag, outTag, runtime, exposureId = future.get()
                results[inTag][outTag].append((exposureId, runtime))

    # Counts table: one row per input tag, one column per outcome tag.
    countRows = []
    for inKey, outDict in results.items():
        countRows.append([inKey] + [f"{len(expIds)}" for expIds in outDict.values()])
    print(tabulate(countRows, headers=["input", *outtags]))

    # Median-runtime table with the same layout.
    runtimeRows = []
    for inKey, outDict in results.items():
        runtimeRow = [inKey]
        for expIds in outDict.values():
            # nanmedian of an empty sequence warns and returns nan; print
            # "nan" directly instead to keep the output clean.
            runtimeRow.append(f"{np.nanmedian([x[1] for x in expIds]):.2f}" if expIds else "nan")
        runtimeRows.append(runtimeRow)
    print(tabulate(runtimeRows, headers=["input", *outtags]))

    # Spot-check: the exposures where a good ("G") input badly disagreed.
    # Guarded with .get in case the selected row range contains no "G" rows.
    print("G >10")
    print(results.get("G", {}).get(">10", []))
    print()
if __name__ == "__main__":
    # Command-line entry point: pick the table row range, the degree of
    # parallelism, and whether/where to save diagnostic plots.
    cli = argparse.ArgumentParser()
    cli.add_argument("--start", type=int, default=0)
    cli.add_argument("--end", type=int, default=6105)
    cli.add_argument("--nproc", type=int, default=20)
    cli.add_argument("--plot", action="store_true")
    cli.add_argument("--plotdir", type=str, default="./")
    # Bound at module level on purpose: doWork reads the global ``args``.
    args = cli.parse_args()
    main(args)