# This file is part of summit_utils.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

import datetime
import enum
import itertools
import logging
import re
from dataclasses import dataclass, field

import humanize
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from astropy.time import Time
from matplotlib.ticker import FuncFormatter

from lsst.utils.iteration import ensure_iterable

from .blockUtils import BlockParser
from .efdUtils import (
    COMMAND_ALIASES,
    clipDataToEvent,
    efdTimestampToAstropy,
    getCommands,
    getDayObsEndTime,
    getDayObsForTime,
    getDayObsStartTime,
    getEfdData,
    makeEfdClient,
)
from .enums import AxisMotionState, PowerState
from .utils import dayObsIntToString, getCurrentDayObs_int

__all__ = (
    "TMAStateMachine",
    "TMAEvent",
    "TMAEventMaker",
    "TMAState",
    "AxisMotionState",
    "PowerState",
    "getSlewsFromEventList",
    "getTracksFromEventList",
    "getTorqueMaxima",
    "filterBadValues",
)

# we don't want to use `None` as a no-data sentinel because dict.get('key')
# returns None if the key isn't present, and also we need to mark that the data
# was queried for and no data was found, whereas the key not being present
# means that we've not yet looked for the data.
NO_DATA_SENTINEL = "NODATA"

# The known time difference between the TMA demand position and the TMA
# position when tracking: 20Hz data times three points = 150ms.
TRACKING_RESIDUAL_TAIL_CLIP = -0.15  # seconds


def getSlewsFromEventList(events):
    """Get the slew events from a list of TMAEvents.

    Parameters
    ----------
    events : `list` of `lsst.summit.utils.tmaUtils.TMAEvent`
        The list of events to filter.

    Returns
    -------
    events : `list` of `lsst.summit.utils.tmaUtils.TMAEvent`
        The filtered list of events.
    """
    return [e for e in events if e.type == TMAState.SLEWING]


def getTracksFromEventList(events):
    """Get the tracking events from a list of TMAEvents.

    Parameters
    ----------
    events : `list` of `lsst.summit.utils.tmaUtils.TMAEvent`
        The list of events to filter.

    Returns
    -------
    events : `list` of `lsst.summit.utils.tmaUtils.TMAEvent`
        The filtered list of events.
    """
    return [e for e in events if e.type == TMAState.TRACKING]
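
# Example usage (an illustrative sketch, not part of the original module):
# the event lists above are typically produced by TMAEventMaker.getEvents(),
# as in the TMAEventMaker docstring below, e.g.
#
#     eventMaker = TMAEventMaker()
#     events = eventMaker.getEvents(20230630)
#     slews = getSlewsFromEventList(events)
#     tracks = getTracksFromEventList(events)
#     print(f"Found {len(slews)} slews and {len(tracks)} tracks")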


def getTorqueMaxima(table):
    """Print the maximum positive and negative azimuth and elevation torques.

    Designed to be used with the table as downloaded from RubinTV.

    Parameters
    ----------
    table : `pd.DataFrame`
        The table of data to use, as generated by Rapid Analysis.
    """
    for axis in ["elevation", "azimuth"]:
        col = f"Largest {axis} torque"
        maxPos = np.argmax(table[col])
        maxVal = table[col].iloc[maxPos]
        print(f"Max positive {axis:9} torque during seqNum {maxPos:>4}: {maxVal/1000:>7.1f}kNm")
        minPos = np.argmin(table[col])
        minVal = table[col].iloc[minPos]
        print(f"Max negative {axis:9} torque during seqNum {minPos:>4}: {minVal/1000:>7.1f}kNm")
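
# Example usage (a sketch; the filename is hypothetical - use whatever table
# you downloaded from RubinTV, which must contain the "Largest azimuth torque"
# and "Largest elevation torque" columns):
#
#     import pandas as pd
#     table = pd.read_csv("tma_rubintv_table.csv")
#     getTorqueMaxima(table)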


def getAzimuthElevationDataForEvent(
    client,
    event,
    prePadding=0,
    postPadding=0,
):
    """Get the data for the az/el telemetry topics for a given TMAEvent.

    The error between the actual and demanded positions is calculated and
    added to the dataframes in the az/elError columns. For TRACKING type
    events, this error should be extremely close to zero, whereas for SLEWING
    type events, this error represents how far the TMA is from the demanded
    position, and is therefore arbitrarily large, and tends to zero as the TMA
    gets closer to tracking the sky.

    Parameters
    ----------
    client : `lsst_efd_client.efd_helper.EfdClient`
        The EFD client to use.
    event : `lsst.summit.utils.tmaUtils.TMAEvent`
        The event to get the data for.
    prePadding : `float`, optional
        The amount of time to pad the event with before the start time, in
        seconds.
    postPadding : `float`, optional
        The amount of time to pad the event with after the end time, in
        seconds.

    Returns
    -------
    azimuthData : `pd.DataFrame`
        The azimuth data for the specified event.
    elevationData : `pd.DataFrame`
        The elevation data for the specified event.
    """
    azimuthData = getEfdData(
        client, "lsst.sal.MTMount.azimuth", event=event, prePadding=prePadding, postPadding=postPadding
    )
    elevationData = getEfdData(
        client, "lsst.sal.MTMount.elevation", event=event, prePadding=prePadding, postPadding=postPadding
    )

    azValues = azimuthData["actualPosition"].values
    elValues = elevationData["actualPosition"].values
    azDemand = azimuthData["demandPosition"].values
    elDemand = elevationData["demandPosition"].values

    azError = (azValues - azDemand) * 3600
    elError = (elValues - elDemand) * 3600

    azimuthData["azError"] = azError
    elevationData["elError"] = elError

    return azimuthData, elevationData
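
# Example usage (a sketch, assuming a client and a TRACKING-type event are
# already in hand):
#
#     azData, elData = getAzimuthElevationDataForEvent(client, event)
#     azRms = np.sqrt(np.mean(azData["azError"] ** 2))
#     print(f"Azimuth tracking RMS: {azRms:.3f} arcsec")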


def filterBadValues(values, maxDelta=0.1, maxConsecutiveValues=3):
    """Filter out bad values from a dataset, replacing them in-place.

    This function replaces non-physical points in the dataset with an
    extrapolation of the preceding two values. No more than
    ``maxConsecutiveValues`` successive data points are allowed to be
    replaced. The minimum length of the input is 3 points.

    Parameters
    ----------
    values : `list` or `np.ndarray`
        The dataset containing the values to be filtered.
    maxDelta : `float`, optional
        The maximum allowed difference between consecutive values. Values with
        a difference greater than ``maxDelta`` will be considered as bad
        values and replaced with an extrapolation.
    maxConsecutiveValues : `int`, optional
        The maximum number of consecutive values to replace. Defaults to 3.

    Returns
    -------
    nBadPoints : `int`
        The number of bad values that were replaced.
    """
    # Find non-physical points and replace with extrapolation. No more than
    # maxConsecutiveValues successive data points can be replaced.
    badCounter = 0
    consecutiveCounter = 0

    log = logging.getLogger(__name__)

    median = np.nanmedian(values)
    # if either of the first two points is more than maxDelta away from
    # the median, replace it with the median
    for i in range(2):
        if abs(values[i] - median) > maxDelta:
            log.warning(f"Replacing bad value of {values[i]} at index {i} with {median=}")
            values[i] = median
            badCounter += 1

    # From the second element of the array, walk through and calculate the
    # difference between each element and the previous one. If the difference
    # is greater than maxDelta, replace the element with the average of the
    # previous two known good values, i.e. ones which have not themselves been
    # replaced.
    lastGoodValue1 = values[1]  # the most recent good value
    lastGoodValue2 = values[0]  # the second most recent good value
    replacementValue = (lastGoodValue1 + lastGoodValue2) / 2.0  # in case we have to replace the first value
    for i in range(2, len(values)):
        if abs(values[i] - lastGoodValue1) >= maxDelta:
            if consecutiveCounter < maxConsecutiveValues:
                consecutiveCounter += 1
                badCounter += 1
                log.warning(f"Replacing value at index {i} with {replacementValue}")
                values[i] = replacementValue
            else:
                log.warning(
                    f"More than {maxConsecutiveValues} consecutive replacements at index {i}."
                    " Stopping replacements until the next good value."
                )
        else:
            lastGoodValue2 = lastGoodValue1
            lastGoodValue1 = values[i]
            replacementValue = (lastGoodValue1 + lastGoodValue2) / 2.0
            consecutiveCounter = 0
    return badCounter
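
# Example usage (a minimal, self-contained sketch with a synthetic spike):
#
#     import numpy as np
#     values = np.array([0.0, 0.01, 5.0, 0.02, 0.03])  # 5.0 is non-physical
#     nReplaced = filterBadValues(values, maxDelta=0.1)
#     # values is modified in-place: the spike is replaced with the average of
#     # the two preceding good points, and nReplaced == 1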


def plotEvent(
    client,
    event,
    fig=None,
    prePadding=0,
    postPadding=0,
    commands={},
    azimuthData=None,
    elevationData=None,
    doFilterResiduals=False,
    maxDelta=0.1,
):
    """Plot the TMA axis positions over the course of a given TMAEvent.

    Plots the axis motion profiles for the given event, with optional padding
    at the start and end of the event. If the data is provided via the
    azimuthData and elevationData parameters, it will be used, otherwise it
    will be queried from the EFD.

    Optionally plots any commands issued during or around the event, if these
    are supplied. Commands are supplied as a dictionary keyed by the time at
    which each command was issued, with the command topic strings as the
    values.

    Due to a problem with the way the data is uploaded to the EFD, there are
    occasional points in the tracking error plots that are very much larger
    than the typical mount jitter. These points are unphysical, since it is
    not possible for the mount to move that fast. We don't want these points,
    which are not true mount problems, to distract from any real mount
    problems, and these can be filtered out via the ``doFilterResiduals``
    kwarg, which replaces these non-physical points with an extrapolation of
    the average of the preceding two known-good points. If the first two
    points are bad these are replaced with the median of the dataset. The
    maximum difference between the model and the actual data, in arcseconds,
    to allow before filtering a data point can be set with the ``maxDelta``
    kwarg.

    Parameters
    ----------
    client : `lsst_efd_client.efd_helper.EfdClient`
        The EFD client to use.
    event : `lsst.summit.utils.tmaUtils.TMAEvent`
        The event to plot.
    fig : `matplotlib.figure.Figure`, optional
        The figure to plot on. If not specified, a new figure will be created.
    prePadding : `float`, optional
        The amount of time to pad the event with before the start time, in
        seconds.
    postPadding : `float`, optional
        The amount of time to pad the event with after the end time, in
        seconds.
    commands : `dict` [`pd.Timestamp`, `str`] or
            `dict` [`datetime.datetime`, `str`], optional
        A dictionary of commands to plot on the figure. The keys are the times
        at which a command was issued, and the values are the command strings,
        as returned by efdUtils.getCommands().
    azimuthData : `pd.DataFrame`, optional
        The azimuth data to plot. If not specified, it will be queried from
        the EFD.
    elevationData : `pd.DataFrame`, optional
        The elevation data to plot. If not specified, it will be queried from
        the EFD.
    doFilterResiduals : `bool`, optional
        Enables filtering of unphysical data points in the tracking residuals.
    maxDelta : `float`, optional
        The maximum difference between the model and the actual data, in
        arcseconds, to allow before filtering the data point. Ignored if
        ``doFilterResiduals`` is `False`.

    Returns
    -------
    fig : `matplotlib.figure.Figure`
        The figure on which the plot was made.
    """

    def tickFormatter(value, tick_number):
        # Convert the value to a string without subtracting large numbers;
        # tick_number is unused.
        return f"{value:.2f}"

    def getPlotTime(time):
        """Get the right time to plot a point from the various time formats."""
        match time:
            case pd.Timestamp():
                return time.to_pydatetime()
            case Time():
                return time.utc.datetime
            case datetime.datetime():
                return time
            case _:
                raise ValueError(f"Unknown type for commandTime: {type(time)}")

    # plot any commands we might have
    if not isinstance(commands, dict):
        raise TypeError(
            "commands must be a dict keyed by the time each command was issued, with the"
            " command names as the values"
        )

    if fig is None:
        fig = plt.figure(figsize=(10, 8))
        log = logging.getLogger(__name__)
        log.warning(
            "Making new matplotlib figure - if this is in a loop you're going to have a bad time."
            " Pass in a figure with fig = plt.figure(figsize=(10, 8)) to avoid this warning."
        )

    fig.clear()
    ax1p5 = None  # need to always be defined
    if event.type.name == "TRACKING":
        ax1, ax1p5, ax2 = fig.subplots(
            3, sharex=True, gridspec_kw={"wspace": 0, "hspace": 0, "height_ratios": [2.5, 1, 1]}
        )
    else:
        ax1, ax2 = fig.subplots(
            2, sharex=True, gridspec_kw={"wspace": 0, "hspace": 0, "height_ratios": [2.5, 1]}
        )

    if azimuthData is None or elevationData is None:
        azimuthData, elevationData = getAzimuthElevationDataForEvent(
            client, event, prePadding=prePadding, postPadding=postPadding
        )

    # Use the native color cycle for the lines. Because they're on different
    # axes they don't cycle by themselves
    lineColors = [p["color"] for p in plt.rcParams["axes.prop_cycle"]]
    colorCounter = 0

    ax1.plot(azimuthData["actualPosition"], label="Azimuth position", c=lineColors[colorCounter])
    colorCounter += 1
    ax1.yaxis.set_major_formatter(FuncFormatter(tickFormatter))
    ax1.set_ylabel("Azimuth (degrees)")

    ax1_twin = ax1.twinx()
    ax1_twin.plot(elevationData["actualPosition"], label="Elevation position", c=lineColors[colorCounter])
    colorCounter += 1
    ax1_twin.yaxis.set_major_formatter(FuncFormatter(tickFormatter))
    ax1_twin.set_ylabel("Elevation (degrees)")
    ax1.set_xticks([])  # remove x tick labels on the hidden upper x-axis

    ax2_twin = ax2.twinx()
    ax2.plot(azimuthData["actualTorque"], label="Azimuth torque", c=lineColors[colorCounter])
    colorCounter += 1
    ax2_twin.plot(elevationData["actualTorque"], label="Elevation torque", c=lineColors[colorCounter])
    colorCounter += 1
    ax2.set_ylabel("Azimuth torque (Nm)")
    ax2_twin.set_ylabel("Elevation torque (Nm)")
    ax2.set_xlabel("Time (UTC)")  # yes, it really is UTC, matplotlib converts this automatically!

    # put the ticks at an angle, and right align with the tick marks
    ax2.set_xticks(ax2.get_xticks())  # needed to suppress a user warning
    xlabels = ax2.get_xticks()
    ax2.set_xticklabels(xlabels, rotation=40, ha="right")
    ax2.xaxis.set_major_locator(mdates.AutoDateLocator())
    ax2.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M:%S"))

    if event.type.name == "TRACKING":
        # returns a copy
        clippedAzimuthData = clipDataToEvent(azimuthData, event, postPadding=TRACKING_RESIDUAL_TAIL_CLIP)
        clippedElevationData = clipDataToEvent(elevationData, event, postPadding=TRACKING_RESIDUAL_TAIL_CLIP)

        azError = clippedAzimuthData["azError"].values
        elError = clippedElevationData["elError"].values
        elVals = clippedElevationData["actualPosition"].values
        if doFilterResiduals:
            # Filtering out bad values
            nReplacedAz = filterBadValues(azError, maxDelta)
            nReplacedEl = filterBadValues(elError, maxDelta)
            clippedAzimuthData["azError"] = azError
            clippedElevationData["elError"] = elError
        # Calculate RMS
        az_rms = np.sqrt(np.mean(azError * azError))
        el_rms = np.sqrt(np.mean(elError * elError))

        # Calculate image impact RMS; we are less sensitive to azimuth errors
        # near the zenith
        image_az_rms = az_rms * np.cos(elVals[0] * np.pi / 180.0)
        image_el_rms = el_rms
        image_impact_rms = np.sqrt(image_az_rms**2 + image_el_rms**2)
        ax1p5.plot(clippedAzimuthData["azError"], label="Azimuth tracking error", c=lineColors[colorCounter])
        colorCounter += 1
        ax1p5.plot(
            clippedElevationData["elError"], label="Elevation tracking error", c=lineColors[colorCounter]
        )
        colorCounter += 1
        ax1p5.axhline(0.01, ls="-.", color="black")
        ax1p5.axhline(-0.01, ls="-.", color="black")
        ax1p5.yaxis.set_major_formatter(FuncFormatter(tickFormatter))
        ax1p5.set_ylabel("Tracking error (arcsec)")
        ax1p5.set_xticks([])  # remove x tick labels on the hidden upper x-axis
        ax1p5.set_ylim(-0.05, 0.05)
        ax1p5.set_yticks([-0.04, -0.02, 0.0, 0.02, 0.04])
        ax1p5.legend()
        ax1p5.text(0.1, 0.9, f"Image impact RMS = {image_impact_rms:.3f} arcsec", transform=ax1p5.transAxes)
        if doFilterResiduals:
            ax1p5.text(
                0.1,
                0.8,
                f"{nReplacedAz} bad azimuth values and {nReplacedEl} bad elevation values were replaced",
                transform=ax1p5.transAxes,
            )

    if prePadding or postPadding:
        # note the conversion to utc because the x-axis from the dataframe
        # already got automagically converted when plotting before, so this is
        # necessary for things to line up
        ax1_twin.axvline(event.begin.utc.datetime, c="k", ls="--", alpha=0.5, label="Event begin/end")
        ax1_twin.axvline(event.end.utc.datetime, c="k", ls="--", alpha=0.5)
        # extend lines down across the lower plot, but do not re-add the label
        ax2_twin.axvline(event.begin.utc.datetime, c="k", ls="--", alpha=0.5)
        ax2_twin.axvline(event.end.utc.datetime, c="k", ls="--", alpha=0.5)
        if ax1p5:
            ax1p5.axvline(event.begin.utc.datetime, c="k", ls="--", alpha=0.5)
            ax1p5.axvline(event.end.utc.datetime, c="k", ls="--", alpha=0.5)

    for commandTime, command in commands.items():
        plotTime = getPlotTime(commandTime)
        ax1_twin.axvline(plotTime, c=lineColors[colorCounter], ls="--", alpha=0.75, label=f"{command}")
        # extend lines down across the lower plot, but do not re-add the label
        ax2_twin.axvline(plotTime, c=lineColors[colorCounter], ls="--", alpha=0.75)
        if ax1p5:
            ax1p5.axvline(plotTime, c=lineColors[colorCounter], ls="--", alpha=0.75)
        colorCounter += 1

    # combine the legends and put inside the plot
    handles1a, labels1a = ax1.get_legend_handles_labels()
    handles1b, labels1b = ax1_twin.get_legend_handles_labels()
    handles2a, labels2a = ax2.get_legend_handles_labels()
    handles2b, labels2b = ax2_twin.get_legend_handles_labels()

    handles = handles1a + handles1b + handles2a + handles2b
    labels = labels1a + labels1b + labels2a + labels2b
    # ax2 is "in front" of ax1 because it has the vlines plotted on it, and
    # vlines are on ax2 so that they appear at the bottom of the legend, so
    # make sure to plot the legend on ax2, otherwise the vlines will go on top
    # of the otherwise-opaque legend.
    ax1_twin.legend(handles, labels, facecolor="white", framealpha=1)

    # add a title with the event name, type etc.
    dayObsStr = dayObsIntToString(event.dayObs)
    title = (
        # top line is the event title, the details go on the line below
        f"{dayObsStr} - seqNum {event.seqNum} (version {event.version})"
        f"\nDuration = {event.duration:.2f}s"
        f" Event type: {event.type.name}"
        f" End reason: {event.endReason.name}"
    )
    ax1_twin.set_title(title)
    return fig
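
# Example usage (a sketch): reuse one figure when plotting many events, to
# avoid the warning (and the figure leak) noted in plotEvent above:
#
#     fig = plt.figure(figsize=(10, 8))
#     for event in events:
#         plotEvent(client, event, fig=fig)
#         fig.savefig(f"event_{event.dayObs}_{event.seqNum}.png")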


def getCommandsDuringEvent(
    client,
    event,
    commands=("raDecTarget",),
    prePadding=0,
    postPadding=0,
    timeFormat="python",
    log=None,
    doLog=True,
):
    """Get the commands issued during an event.

    Get the times at which the specified commands were issued during the
    event.

    Parameters
    ----------
    client : `lsst_efd_client.efd_helper.EfdClient`
        The EFD client to use.
    event : `lsst.summit.utils.tmaUtils.TMAEvent`
        The event to get the commands for.
    commands : `list` of `str`, optional
        The commands or command aliases to look for. Defaults to
        ['raDecTarget'].
    prePadding : `float`, optional
        The amount of time to pad the event with before the start time, in
        seconds.
    postPadding : `float`, optional
        The amount of time to pad the event with after the end time, in
        seconds.
    timeFormat : `str`, optional
        One of 'pandas', 'astropy' or 'python'. If 'pandas', the dictionary
        keys will be pandas timestamps, if 'astropy' they will be astropy
        times, and if 'python' they will be python datetimes.
    log : `logging.Logger`, optional
        The logger to use. If not specified, a new logger will be created if
        needed.
    doLog : `bool`, optional
        Whether to log messages. Defaults to True.

    Returns
    -------
    commandTimes : `dict` [`time`, `str`]
        A dictionary of the times at which the commands were issued. The type
        that `time` takes is determined by the timeFormat key, and defaults to
        python datetime.
    """
    commands = list(ensure_iterable(commands))
    fullCommands = [c if c not in COMMAND_ALIASES else COMMAND_ALIASES[c] for c in commands]
    del commands  # make sure we always use their full names

    commandTimes = getCommands(
        client,
        fullCommands,
        begin=event.begin,
        end=event.end,
        prePadding=prePadding,
        postPadding=postPadding,
        timeFormat=timeFormat,
    )

    if not commandTimes and doLog:
        log = logging.getLogger(__name__)
        log.info(f"Found no commands in {fullCommands} issued during event {event.seqNum}")

    return commandTimes
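
# Example usage (a sketch): fetch the commands issued around an event and
# overlay them on the event plot:
#
#     commands = getCommandsDuringEvent(client, event, prePadding=5)
#     plotEvent(client, event, fig=fig, prePadding=5, commands=commands)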


def _initializeTma(tma):
    """Helper function to turn a TMA into a valid state for testing.

    Do not call directly in normal usage or code, as this just arbitrarily
    sets values to make the TMA valid.

    Parameters
    ----------
    tma : `lsst.summit.utils.tmaUtils.TMAStateMachine`
        The TMA state machine model to initialize.
    """
    tma._parts["azimuthInPosition"] = False
    tma._parts["azimuthMotionState"] = AxisMotionState.STOPPED
    tma._parts["azimuthSystemState"] = PowerState.ON
    tma._parts["elevationInPosition"] = False
    tma._parts["elevationMotionState"] = AxisMotionState.STOPPED
    tma._parts["elevationSystemState"] = PowerState.ON


@dataclass(kw_only=True, frozen=True)
class TMAEvent:
    """A movement event for the TMA.

    Contains the dayObs on which the event occurred, using the standard
    observatory definition of the dayObs, and the sequence number of the
    event, which is unique for each event on a given dayObs.

    The event type can be either 'SLEWING' or 'TRACKING', defined as:
        - SLEWING: some part of the TMA is in motion
        - TRACKING: both axes are in position and tracking the sky

    The end reason can be 'STOPPED', 'TRACKING', 'FAULT', 'SLEWING', or 'OFF'.
        - SLEWING: the previous event was a TRACKING event, and one or more of
              the TMA components either stopped being in position, or stopped
              moving, or went into fault, or was turned off, and hence we are
              now only slewing and no longer tracking the sky.
        - TRACKING: the TMA started tracking the sky when it wasn't
              previously. Usually this would be preceded directly by a SLEWING
              event, but this is not strictly true, as the EUI seems to be
              able to make the TMA start tracking the sky without slewing
              first.
        - STOPPED: the components of the TMA transitioned to the STOPPED
              state.
        - FAULT: the TMA went into fault.
        - OFF: the TMA components were turned off.

    Note that this class is not intended to be instantiated directly, but
    rather to be returned by the ``TMAEventMaker.getEvents()`` function.

    Parameters
    ----------
    dayObs : `int`
        The dayObs on which the event occurred.
    seqNum : `int`
        The sequence number of the event.
    type : `lsst.summit.utils.tmaUtils.TMAState`
        The type of the event, either 'SLEWING' or 'TRACKING'.
    endReason : `lsst.summit.utils.tmaUtils.TMAState`
        The reason the event ended, either 'STOPPED', 'TRACKING', 'FAULT',
        'SLEWING', or 'OFF'.
    duration : `float`
        The duration of the event, in seconds.
    begin : `astropy.time.Time`
        The time the event began.
    end : `astropy.time.Time`
        The time the event ended.
    blockInfos : `list` of `lsst.summit.utils.tmaUtils.BlockInfo`, or `None`
        The block information, if any, relating to the event. Could be `None`,
        or contain one or more pieces of block information.
    version : `int`
        The version of the TMAEvent class. Equality between events is only
        valid for a given version of the class. If the class definition
        changes, the time ranges can change, and hence the equality between
        events is ``False``.
    _startRow : `int`
        The first row in the merged EFD data which is part of the event.
    _endRow : `int`
        The last row in the merged EFD data which is part of the event.
    """

    dayObs: int
    seqNum: int
    type: str  # can be 'SLEWING', 'TRACKING'
    endReason: str  # can be 'STOPPED', 'TRACKING', 'FAULT', 'SLEWING', 'OFF'
    duration: float  # seconds
    begin: Time
    end: Time
    blockInfos: list = field(default_factory=list)
    version: int = 0  # update this number any time a code change which could change event definitions is made
    _startRow: int
    _endRow: int

    def __lt__(self, other):
        if self.version != other.version:
            raise ValueError(
                f"Cannot compare TMAEvents with different versions: {self.version} != {other.version}"
            )
        if self.dayObs < other.dayObs:
            return True
        elif self.dayObs == other.dayObs:
            return self.seqNum < other.seqNum
        return False

    def __repr__(self):
        return (
            f"TMAEvent(dayObs={self.dayObs}, seqNum={self.seqNum}, type={self.type!r},"
            f" endReason={self.endReason!r}, duration={self.duration}, begin={self.begin!r},"
            f" end={self.end!r})"
        )

    def __hash__(self):
        # deliberately don't hash the blockInfos here, as they are not
        # a core part of the event itself, and are listy and cause problems
        return hash(
            (
                self.dayObs,
                self.seqNum,
                self.type,
                self.endReason,
                self.duration,
                self.begin,
                self.end,
                self.version,
                self._startRow,
                self._endRow,
            )
        )

    def _ipython_display_(self):
        print(self.__str__())

    def __str__(self):
        def indent(string):
            return "\n" + "\n".join(["    " + s for s in string.splitlines()])

        blockInfoStr = "None"
        if self.blockInfos is not None:
            blockInfoStr = "".join(indent(str(i)) for i in self.blockInfos)

        return (
            f"dayObs: {self.dayObs}\n"
            f"seqNum: {self.seqNum}\n"
            f"type: {self.type.name}\n"
            f"endReason: {self.endReason.name}\n"
            f"duration: {self.duration}\n"
            f"begin: {self.begin!r}\n"
            f"end: {self.end!r}\n"
            f"blockInfos: {blockInfoStr}"
        )

    def associatedWith(self, block=None, blockSeqNum=None, ticket=None, salIndex=None):
        """Check whether an event is associated with a set of parameters.

        Check if an event is associated with a specific block and/or ticket
        and/or salIndex. All specified parameters must match for the function
        to return True. If checking whether an event is in a block, the
        blockSeqNum can also be specified, to identify events which relate to
        a specific run of that block.

        Parameters
        ----------
        block : `int`, optional
            The block number to check for.
        blockSeqNum : `int`, optional
            The block sequence number to check for, if the block is specified.
        ticket : `str`, optional
            The ticket number to check for.
        salIndex : `int`, optional
            The salIndex to check for.

        Returns
        -------
        relates : `bool`
            Whether the event is associated with the specified block, ticket,
            and salIndex.
        """
        if all([block is None, ticket is None, salIndex is None]):
            raise ValueError("Must specify at least one of block, ticket, or salIndex")

        if blockSeqNum is not None and block is None:
            raise ValueError("block must be specified if blockSeqNum is specified")

        for blockInfo in self.blockInfos:
            # "X is None or" is used for each parameter to allow it to be None
            # in the kwargs
            blockMatches = False
            if block is not None:
                if blockSeqNum is None and blockInfo.blockNumber == block:
                    blockMatches = True
                elif (
                    blockSeqNum is not None
                    and blockInfo.blockNumber == block
                    and blockInfo.seqNum == blockSeqNum
                ):
                    blockMatches = True
            else:
                blockMatches = True  # no block specified at all, so it matches

            salIndexMatches = salIndex is None or salIndex in blockInfo.salIndices
            ticketMatches = ticket is None or ticket in blockInfo.tickets

            if blockMatches and salIndexMatches and ticketMatches:
                return True

        return False
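
# Example usage (a sketch; the block and ticket values are hypothetical):
#
#     blockEvents = [e for e in events if e.associatedWith(block=123)]
#     ticketEvents = [e for e in events if e.associatedWith(ticket="SITCOM-123")]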


class TMAState(enum.IntEnum):
    """Overall state of the TMA.

    States are defined as follows:

    UNINITIALIZED
        We have not yet got data for all relevant components, so the overall
        state is undefined.
    STOPPED
        All components are on, and none are moving.
    TRACKING
        We are tracking the sky.
    SLEWING
        One or more components are moving, and one or more are not tracking
        the sky. This should probably be called MOVING, as it includes:
        slewing, MOVING_POINT_TO_POINT, and JOGGING.
    FAULT
        All (if engineeringMode) or any (if not engineeringMode) components
        are in fault.
    OFF
        All components are off.
    """

    UNINITIALIZED = -1
    STOPPED = 0
    TRACKING = 1
    SLEWING = 2
    FAULT = 3
    OFF = 4

    def __repr__(self):
        return f"TMAState.{self.name}"


def getAxisAndType(rowFor):
    """Get the axis the data relates to, and the type of data it contains.

    Parameters
    ----------
    rowFor : `str`
        The column in the dataframe denoting what this row is for, e.g.
        "elevationMotionState" or "azimuthInPosition", etc.

    Returns
    -------
    axis : `str`
        The axis the row is for, e.g. "azimuth", "elevation".
    rowType : `str`
        The type of the row, e.g. "MotionState", "SystemState", "InPosition".
    """
    regex = r"(azimuth|elevation)(InPosition|MotionState|SystemState)$"  # matches the end of the line
    matches = re.search(regex, rowFor)
    if matches is None:
        raise ValueError(f"Could not parse axis and rowType from {rowFor=}")
    axis = matches.group(1)
    rowType = matches.group(2)

    assert rowFor.endswith(f"{axis}{rowType}")
    return axis, rowType
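
# Example (illustrative):
#
#     getAxisAndType("azimuthMotionState")   # -> ("azimuth", "MotionState")
#     getAxisAndType("elevationInPosition")  # -> ("elevation", "InPosition")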


class ListViewOfDict:
    """A class to allow making lists which contain references to an underlying
    dictionary.

    Normally, making a list of items from a dictionary would make a copy of
    the items, but this class allows making a list which contains references
    to the underlying dictionary items themselves. This is useful for making a
    list of components, such that they can be manipulated in their logical
    sets.
    """

    def __init__(self, underlyingDictionary, keysToLink):
        self.dictionary = underlyingDictionary
        self.keys = keysToLink

    def __getitem__(self, index):
        return self.dictionary[self.keys[index]]

    def __setitem__(self, index, value):
        self.dictionary[self.keys[index]] = value

    def __len__(self):
        return len(self.keys)
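
# Example (illustrative): reads and writes through the view hit the underlying
# dictionary, so grouped components stay in sync:
#
#     parts = {"azimuthSystemState": 0, "elevationSystemState": 0}
#     system = ListViewOfDict(parts, ["azimuthSystemState", "elevationSystemState"])
#     system[0] = 1
#     assert parts["azimuthSystemState"] == 1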


class TMAStateMachine:
    """A state machine model of the TMA.

    Note that this is currently only implemented for the azimuth and elevation
    axes, but will be extended to include the rotator in the future.

    Note that when used for event generation, changing ``engineeringMode`` to
    False might change the resulting list of events, and that if the TMA moves
    with some axis in fault, then these events will be missed. It is therefore
    thought that ``engineeringMode=True`` should always be used when
    generating events. The option, however, is there for completeness, as this
    will be useful for knowing if the CSC would consider the TMA to be in
    fault in the general case.

    Parameters
    ----------
    engineeringMode : `bool`, optional
        Whether the TMA is in engineering mode. Defaults to True. If False,
        then the TMA will be in fault if any component is in fault. If True,
        then the TMA will be in fault only if all components are in fault.
    debug : `bool`, optional
        Whether to log debug messages. Defaults to False.
    """

    _UNINITIALIZED_VALUE: int = -999

    def __init__(self, engineeringMode=True, debug=False):
        self.engineeringMode = engineeringMode
        self.log = logging.getLogger("lsst.summit.utils.tmaUtils.TMA")
        if debug:
            self.log.level = logging.DEBUG
        self._mostRecentRowTime = -1

        # the actual components of the TMA
        self._parts = {
            "azimuthInPosition": self._UNINITIALIZED_VALUE,
            "azimuthMotionState": self._UNINITIALIZED_VALUE,
            "azimuthSystemState": self._UNINITIALIZED_VALUE,
            "elevationInPosition": self._UNINITIALIZED_VALUE,
            "elevationMotionState": self._UNINITIALIZED_VALUE,
            "elevationSystemState": self._UNINITIALIZED_VALUE,
        }
        systemKeys = ["azimuthSystemState", "elevationSystemState"]
        positionKeys = ["azimuthInPosition", "elevationInPosition"]
        motionKeys = ["azimuthMotionState", "elevationMotionState"]

        # references to the _parts as conceptual groupings
        self.system = ListViewOfDict(self._parts, systemKeys)
        self.motion = ListViewOfDict(self._parts, motionKeys)
        self.inPosition = ListViewOfDict(self._parts, positionKeys)

        # tuples of states for state collapsing. Note that STOP_LIKE +
        # MOVING_LIKE must cover the full set of AxisMotionState enums
        self.STOP_LIKE = (AxisMotionState.STOPPING, AxisMotionState.STOPPED, AxisMotionState.TRACKING_PAUSED)
        self.MOVING_LIKE = (
            AxisMotionState.MOVING_POINT_TO_POINT,
            AxisMotionState.JOGGING,
            AxisMotionState.TRACKING,
        )
        # Likewise, ON_LIKE + OFF_LIKE must cover the full set of PowerState
        # enums
        self.OFF_LIKE = (PowerState.OFF, PowerState.TURNING_OFF)
        self.ON_LIKE = (PowerState.ON, PowerState.TURNING_ON)
        self.FAULT_LIKE = (PowerState.FAULT,)  # note the trailing comma - this must be an iterable

    def apply(self, row):
        """Apply a row of data to the TMA state.

        Checks that the row contains data for a later time than any data
        previously applied, and applies the relevant column entry to the
        relevant component.

        Parameters
        ----------
        row : `pd.Series`
            The row of data to apply to the state machine.
        """
        timestamp = row["private_efdStamp"]
        if timestamp < self._mostRecentRowTime:  # NB equal is OK, technically, though it never happens
            raise ValueError(
                "TMA evolution must be monotonic increasing in time, tried to apply a row which"
                " predates the most recent one"
            )
        self._mostRecentRowTime = timestamp

        rowFor = row["rowFor"]  # e.g. elevationMotionState
        axis, rowType = getAxisAndType(rowFor)  # e.g. elevation, MotionState
        value = self._getRowPayload(row, rowType, rowFor)
        self.log.debug(f"Setting {rowFor} to {repr(value)}")
        self._parts[rowFor] = value
        try:
            # touch the state property as this executes the sieving, to make
            # sure we don't fall through the sieve at any point in time
            _ = self.state
        except RuntimeError as e:
            # improve error reporting, but always reraise this, as this is a
            # full-blown failure
            raise RuntimeError(f"Failed to apply {value} to {axis}{rowType} with state {self._parts}") from e

    def _getRowPayload(self, row, rowType, rowFor):
        """Get the relevant value from the row.

        Given the row, and which component it relates to, get the relevant
        value, as a bool or cast to the appropriate enum class.

        Parameters
        ----------
        row : `pd.Series`
            The row of data from the dataframe.
        rowType : `str`
            The type of the row, e.g. "MotionState", "SystemState",
            "InPosition".
        rowFor : `str`
            The component the row is for, e.g. "azimuthMotionState",
            "elevationInPosition".

        Returns
        -------
        value : `bool` or `enum`
            The value of the row, as a bool or enum, depending on the
            component, cast to the appropriate enum class or bool.
        """
        match rowType:
            case "MotionState":
                value = row[f"state_{rowFor}"]
                return AxisMotionState(value)
            case "SystemState":
                value = row[f"powerState_{rowFor}"]
                return PowerState(value)
            case "InPosition":
                value = row[f"inPosition_{rowFor}"]
                return bool(value)
            case _:
                raise ValueError(f"Failed to get row payload with {rowType=} and {row=}")

    @property
    def _isValid(self):
        """Has the TMA had a value applied to all its components?

        If any component has not yet had a value applied, the TMA is not
        valid, as those components will be in an unknown state.

        Returns
        -------
        isValid : `bool`
            Whether the TMA is fully initialized.
        """
        return not any([v == self._UNINITIALIZED_VALUE for v in self._parts.values()])

    # state inspection properties - a high level way of inspecting the state
    # as an API
    @property
    def isMoving(self):
        return self.state in [TMAState.TRACKING, TMAState.SLEWING]

    @property
    def isNotMoving(self):
        return not self.isMoving

    @property
    def isTracking(self):
        return self.state == TMAState.TRACKING

    @property
    def isSlewing(self):
        return self.state == TMAState.SLEWING

    @property
    def canMove(self):
        badStates = [PowerState.OFF, PowerState.TURNING_OFF, PowerState.FAULT, PowerState.UNKNOWN]
        return bool(
            self._isValid
            and self._parts["azimuthSystemState"] not in badStates
            and self._parts["elevationSystemState"] not in badStates
        )

    # Axis inspection properties, designed for internal use. These return
    # iterables so that they can be used in any() and all() calls, which make
    # the logic much easier to read, e.g. to see if anything is moving, we can
    # write `if not any(_axesInMotion):`
    @property
    def _axesInFault(self):
        return [x in self.FAULT_LIKE for x in self.system]

    @property
    def _axesOff(self):
        return [x in self.OFF_LIKE for x in self.system]

    @property
    def _axesOn(self):
        # note: negate _axesOff here; negating _axesOn would recurse forever
        return [not x for x in self._axesOff]

    @property
    def _axesInMotion(self):
        return [x in self.MOVING_LIKE for x in self.motion]

    @property
    def _axesTRACKING(self):
        """Note this is deliberately named _axesTRACKING and not _axesTracking
        to make it clear that this is the AxisMotionState type of TRACKING and
        not the normal conceptual notion of tracking (the sky, i.e. as opposed
        to slewing).
        """
        return [x == AxisMotionState.TRACKING for x in self.motion]

    @property
    def _axesInPosition(self):
        return [x is True for x in self.inPosition]

    @property
    def state(self):
        """The overall state of the TMA.

        Note that this is both a property, and also the method which applies
        the logic sieve to determine the state at a given point in time.

        Returns
        -------
        state : `lsst.summit.utils.tmaUtils.TMAState`
            The overall state of the TMA.
        """
        # first, check we're valid, and if not, return UNINITIALIZED state, as
        # things are unknown
        if not self._isValid:
            return TMAState.UNINITIALIZED

        # if we're not in engineering mode, i.e. we're under normal CSC
        # control, then if anything is in fault, we're in fault. If we're
        # engineering then some axes will move when others are in fault
        if not self.engineeringMode:
            if any(self._axesInFault):
                return TMAState.FAULT
        else:
            # we're in engineering mode, so return fault state if ALL are in
            # fault
            if all(self._axesInFault):
                return TMAState.FAULT

        # if all axes are off, the TMA is OFF
        if all(self._axesOff):
            return TMAState.OFF

        # we know we're valid and at least some axes are not off, so see if
        # we're in motion: if no axes are moving, we're stopped
        if not any(self._axesInMotion):
            return TMAState.STOPPED

        # now we know we're initialized, and that at least one axis is moving,
        # so check axes for motion and in position. If all axes are tracking
        # and all are in position, we're tracking the sky
        if all(self._axesTRACKING) and all(self._axesInPosition):
            return TMAState.TRACKING

        # we now know explicitly that not everything is in position, so we no
        # longer need to check that. We do actually know that something is in
        # motion, but confirm that's the case and return SLEWING
        if any(self._axesInMotion):
            return TMAState.SLEWING

        # if we want to differentiate between MOVING_POINT_TO_POINT moves,
        # JOGGING moves and regular slews, the logic in the step above needs
        # to be changed and the new steps added here.

        raise RuntimeError("State error: fell through the state sieve - rewrite your logic!")
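
# Example usage (an illustrative sketch): the state machine is normally driven
# by TMAEventMaker with rows of merged EFD data, but it can be exercised
# manually, e.g. in tests:
#
#     tma = TMAStateMachine(engineeringMode=True)
#     assert tma.state == TMAState.UNINITIALIZED
#     _initializeTma(tma)  # test-only helper defined above
#     assert tma.state == TMAState.STOPPED and tma.canMove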


class TMAEventMaker:
    """A class to create per-dayObs TMAEvents for the TMA's movements.

    If this class is being used in tests, make sure to pass the EFD client in,
    and create it with `makeEfdClient(testing=True)`. This ensures that the
    USDF EFD is used, as this is the EFD which has the recorded data available
    in the test suite via `vcr`.

    Example usage:
    >>> dayObs = 20230630
    >>> eventMaker = TMAEventMaker()
    >>> events = eventMaker.getEvents(dayObs)
    >>> print(f'Found {len(events)} for {dayObs=}')

    Parameters
    ----------
    client : `lsst_efd_client.efd_helper.EfdClient`, optional
        The EFD client to use, created if not provided.
    """

    # the topics which need logical combination to determine the overall mount
    # state. Will need updating as new components are added to the system.

    # relevant column: 'state'
    _movingComponents = [
        "lsst.sal.MTMount.logevent_azimuthMotionState",
        "lsst.sal.MTMount.logevent_elevationMotionState",
    ]

    # relevant column: 'inPosition'
    _inPositionComponents = [
        "lsst.sal.MTMount.logevent_azimuthInPosition",
        "lsst.sal.MTMount.logevent_elevationInPosition",
    ]

    # the components which, if in fault, put the TMA into fault
    # relevant column: 'powerState'
    _stateComponents = [
        "lsst.sal.MTMount.logevent_azimuthSystemState",
        "lsst.sal.MTMount.logevent_elevationSystemState",
    ]

    def __init__(self, client=None):
        if client is not None:
            self.client = client
        else:
            self.client = makeEfdClient()
        self.log = logging.getLogger(__name__)
        self._data = {}

    @dataclass(frozen=True)
    class ParsedState:
        # both row fields are row numbers into the merged dataframe
        eventStart: int
        eventEnd: int
        previousState: TMAState
        state: TMAState

    @staticmethod
    def isToday(dayObs):
        """Find out if the specified dayObs is today, or in the past.

        If the day is today, the function returns ``True``, if it is in the
        past it returns ``False``. If the day is in the future, a
        ``ValueError`` is raised, as this indicates there is likely an
        off-by-one type error somewhere in the logic.

        Parameters
        ----------
        dayObs : `int`
            The dayObs to check, in the format YYYYMMDD.

        Returns
        -------
        isToday : `bool`
            ``True`` if the dayObs is today, ``False`` if it is in the past.

        Raises
        ------
        ValueError
            If the dayObs is in the future.
        """
        todayDayObs = getCurrentDayObs_int()
        if dayObs == todayDayObs:
            return True
        if dayObs > todayDayObs:
            raise ValueError("dayObs is in the future")
        return False

    @staticmethod
    def _shortName(topic):
        """Get the short name of a topic.

        Parameters
        ----------
        topic : `str`
            The topic to get the short name of.

        Returns
        -------
        shortName : `str`
            The short name of the topic, e.g. 'azimuthInPosition'.
        """
        # get, for example, 'azimuthInPosition' from
        # lsst.sal.MTMount.logevent_azimuthInPosition
        return topic.split("_")[-1]

    def _mergeData(self, data):
        """Merge a dict of dataframes based on private_efdStamp, recording
        where each row came from.

        Given a dict of dataframes, keyed by topic, merge them into a single
        dataframe, adding a column to record which topic each row came from.

        Parameters
        ----------
        data : `dict` of `str` : `pd.DataFrame`
            The dataframes to merge.

        Returns
        -------
        merged : `pd.DataFrame`
            The merged dataframe.
        """
        excludeColumns = ["private_efdStamp", "rowFor"]

        mergeArgs = {
            "how": "outer",
            "sort": True,
        }

        merged = None
        originalRowCounter = 0

        # Iterate over the keys and merge the corresponding DataFrames
        for key, df in data.items():
            if df.empty:
                # Must skip the df if it's empty, otherwise the merge will
                # fail due to lack of private_efdStamp. Other axes might still
                # be in motion, so we still want to merge what we have.
                continue

            originalRowCounter += len(df)
            component = self._shortName(key)  # add a suffix to column names to identify the source
            suffix = "_" + component

            df["rowFor"] = component

            columnsToSuffix = [col for col in df.columns if col not in excludeColumns]
            df_to_suffix = df[columnsToSuffix].add_suffix(suffix)
            df = pd.concat([df[excludeColumns], df_to_suffix], axis=1)

            if merged is None:
                merged = df.copy()
            else:
                merged = pd.merge(merged, df, **mergeArgs)

        merged = merged.loc[:, ~merged.columns.duplicated()]  # remove duplicate columns after merge

        if len(merged) != originalRowCounter:
            self.log.warning(
                "Merged data has a different number of rows to the original data, some"
                " timestamps (rows) will contain more than one piece of actual information."
            )

        # if the index is still a DatetimeIndex here then we didn't actually
        # merge any data, so there is only data from a single component.
        # This is likely to result in no events, but not necessarily, and for
        # generality, instead we convert to a range index to ensure
        # consistency in the returned data, and allow processing to continue.
        if isinstance(merged.index, pd.DatetimeIndex):
            self.log.warning("Data was only found for a single component in the EFD.")
            merged.reset_index(drop=True, inplace=True)

        return merged
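
    # Illustrative sketch of the merge behaviour (hypothetical minimal
    # frames, not real EFD data):
    #
    #     azDf = pd.DataFrame({"private_efdStamp": [1.0], "state": [2]})
    #     elDf = pd.DataFrame({"private_efdStamp": [2.0], "state": [1]})
    #     data = {"lsst.sal.MTMount.logevent_azimuthMotionState": azDf,
    #             "lsst.sal.MTMount.logevent_elevationMotionState": elDf}
    #     merged = eventMaker._mergeData(data)
    #     # merged has one row per input row, sorted by private_efdStamp,
    #     # with a "rowFor" column recording which topic each row came from,
    #     # and the payload columns suffixed, e.g. "state_azimuthMotionState"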

    def getEvent(self, dayObs, seqNum):
        """Get a specific event for a given dayObs and seqNum.

        Repeated calls for the same ``dayObs`` will use the cached data if the
        day is in the past, and so will be much quicker. If the ``dayObs`` is
        the current day then the EFD will be queried for new data for each
        call, so a call which returns ``None`` on the first try might return
        an event on the next, if the TMA is still moving and thus generating
        events.

        Parameters
        ----------
        dayObs : `int`
            The dayObs to get the event for.
        seqNum : `int`
            The sequence number of the event to get.

        Returns
        -------
        event : `lsst.summit.utils.tmaUtils.TMAEvent`
            The event for the specified dayObs and seqNum, or `None` if the
            event was not found.
        """
        events = self.getEvents(dayObs)
        if seqNum < len(events):  # seqNums are zero-indexed, so must be strictly less
            event = events[seqNum]
            if event.seqNum != seqNum:
                # it's zero-indexed and contiguous so this must be true but
                # a sanity check doesn't hurt.
                raise AssertionError(f"Event sequence number mismatch: {event.seqNum} != {seqNum}")
            return event
        else:
            self.log.warning(f"Event {seqNum} not found for {dayObs}")
            return None

    def getEvents(self, dayObs, addBlockInfo=True):
        """Get the TMA events for the specified dayObs.

        Gets the required mount data from the cache or the EFD as required,
        handling whether we're working with live vs historical data. The
        dataframes from the EFD are merged and applied to the TMAStateMachine,
        and that series of state changes is used to generate a list of
        TMAEvents for the day's data.

        If the data is for the current day, i.e. if new events can potentially
        land, then if the last event is "open" (meaning that the TMA appears
        to be in motion and thus the event is growing with time), then that
        event is excluded from the event list as it is expected to be changing
        with time, and will likely close eventually. However, if that
        situation occurs on a day in the past, then that event can never
        close, and the event is therefore included, but a warning about the
        open event is logged.

        Parameters
        ----------
        dayObs : `int`
            The dayObs for which to get the events.
        addBlockInfo : `bool`, optional
            Whether to add block information to the events. This allows
            skipping this step for speed when generating events for purposes
            which don't need block information.

        Returns
        -------
        events : `list` of `lsst.summit.utils.tmaUtils.TMAEvent`
            The events for the specified dayObs.
        """
        workingLive = self.isToday(dayObs)
        data = None

        if workingLive:
            # it's potentially updating data, so we must update the data
            # regardless of whether we have it already or not
            self.log.info(f"Updating mount data for {dayObs} from the EFD")
            self._getEfdDataForDayObs(dayObs)
            data = self._data[dayObs]
        elif dayObs in self._data:
            # data is in the cache and it's not being updated, so use it
            data = self._data[dayObs]
        elif dayObs not in self._data:
            # we don't have the data yet, but it's not growing, so put it in
            # the cache and use it from there
            self.log.info(f"Retrieving mount data for {dayObs} from the EFD")
            self._getEfdDataForDayObs(dayObs)
            data = self._data[dayObs]
        else:
            raise RuntimeError("This should never happen")

        # if we don't have something to work with, log a warning and return
        if not self.dataFound(data):
            self.log.warning(f"No EFD data found for {dayObs=}")
            return []

        # applies the data to the state machine, and generates events from the
        # series of states which results
        events = self._calculateEventsFromMergedData(
            data, dayObs, dataIsForCurrentDay=workingLive, addBlockInfo=addBlockInfo
        )
        if not events:
            self.log.warning(f"Failed to calculate any events for {dayObs=} despite EFD data existing!")
        return events
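
    # Example usage (a sketch): skip the block-info lookup when only the
    # event timespans are needed, which is faster:
    #
    #     eventMaker = TMAEventMaker()
    #     events = eventMaker.getEvents(20230630, addBlockInfo=False)
    #     slews = getSlewsFromEventList(events)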

    @staticmethod
    def dataFound(data):
        """Check if any data was found.

        Parameters
        ----------
        data : `pd.DataFrame`
            The merged dataframe to check.

        Returns
        -------
        dataFound : `bool`
            Whether data was found.
        """
        # You can't just compare with data == NO_DATA_SENTINEL because `data`
        # is usually a dataframe, and you can't compare a dataframe to a
        # string directly.
        return not (isinstance(data, str) and data == NO_DATA_SENTINEL)

    def _getEfdDataForDayObs(self, dayObs):
        """Get the EFD data for the specified dayObs and store it in the
        cache.

        Gets the EFD data for all components, as a dict of dataframes keyed by
        component name. These are then merged into a single dataframe in time
        order, based on each row's `private_efdStamp`. This is then stored in
        self._data[dayObs].

        If no data is found, the value is set to ``NO_DATA_SENTINEL`` to
        differentiate this from ``None``, as this is what you'd get if you
        queried the cache with `self._data.get(dayObs)`. It also marks that we
        have already queried this day.

        Parameters
        ----------
        dayObs : `int`
            The dayObs to query.
        """
        data = {}
        for component in itertools.chain(
            self._movingComponents, self._inPositionComponents, self._stateComponents
        ):
            data[component] = getEfdData(self.client, component, dayObs=dayObs, warn=False)
            self.log.debug(f"Found {len(data[component])} rows for {component}")

        if all(dataframe.empty for dataframe in data.values()):
            # if every single dataframe is empty, set the sentinel and don't
            # try to merge anything, otherwise merge all the data we found
            self.log.debug(f"No data found for {dayObs=}")
            # a sentinel value that's not None
            self._data[dayObs] = NO_DATA_SENTINEL
        else:
            merged = self._mergeData(data)
            self._data[dayObs] = merged

    def _calculateEventsFromMergedData(self, data, dayObs, dataIsForCurrentDay, addBlockInfo):
        """Calculate the list of events from the merged data.

        Runs the merged data, row by row, through the TMA state machine (with
        ``tma.apply``) to get the overall TMA state at each row, building a
        dict of these states, keyed by row number.

        This time-series of TMA states is then looped over (in
        `_statesToEventTuples`), building a list of tuples representing the
        start and end of each event, the type of the event, and the reason for
        the event ending.

        This list of tuples is then passed to ``_makeEventsFromStateTuples``,
        which actually creates the ``TMAEvent`` objects.

        Parameters
        ----------
        data : `pd.DataFrame`
            The merged dataframe to use.
        dayObs : `int`
            The dayObs for the data.
        dataIsForCurrentDay : `bool`
            Whether the data is for the current day. Determines whether to
            allow an open last event or not.
        addBlockInfo : `bool`
            Whether to add block information to the events. This allows
            skipping this step for speed when generating events for purposes
            which don't need block information.

        Returns
        -------
        events : `list` of `lsst.summit.utils.tmaUtils.TMAEvent`
            The events for the specified dayObs.
        """
        engineeringMode = True
        tma = TMAStateMachine(engineeringMode=engineeringMode)

        # For now, we assume that the TMA starts each day able to move, but
        # stationary. If this turns out to cause problems, we will need to
        # change to loading data from the previous day(s), and looking back
        # through it in time until a state change has been found for every
        # axis. For now though, Bruno et al. think this is acceptable and
        # preferable.
        _initializeTma(tma)

        tmaStates = {}
        for rowNum, row in data.iterrows():
            tma.apply(row)
            tmaStates[rowNum] = tma.state

        stateTuples = self._statesToEventTuples(tmaStates, dataIsForCurrentDay)
        events = self._makeEventsFromStateTuples(stateTuples, dayObs, data)
        if addBlockInfo:
            self.addBlockDataToEvents(dayObs, events)
        return events
1507 def _statesToEventTuples(self, states, dataIsForCurrentDay):
1508 """Get the event-tuples from the dictionary of TMAStates.
1510 Chunks the states into blocks of the same state, so that we can create
1511 an event for each block in `_makeEventsFromStateTuples`. Off-type
1512 states are skipped over, with each event starting when the telescope
1513 next resumes motion or changes to a different type of motion state,
1514 i.e. from non-tracking type movement (MOVE_POINT_TO_POINT, JOGGING,
1515 TRACKING-but-not-in-position, i.e. slewing) to a tracking type
1516 movement, or vice versa.
1518 Parameters
1519 ----------
1520 states : `dict` of `int` : `lsst.summit.utils.tmaUtils.TMAState`
1521 The states of the TMA, keyed by row number.
1522 dataIsForCurrentDay : `bool`
1523 Whether the data is for the current day. Determines whether to
1524 allow and open last event or not.
1526 Returns
1527 -------
1528 parsedStates : `list` of `tuple`
1529 The parsed states, as a list of tuples of the form:
1530 ``(eventStart, eventEnd, eventType, endReason)``
1531 """
1532 # Consider rewriting this with states as a list and using pop(0)?
1533 skipStates = (TMAState.STOPPED, TMAState.OFF, TMAState.FAULT)
1535 parsedStates = []
1536 eventStart = None
1537 rowNum = 0
1538 nRows = len(states)
1539 while rowNum < nRows:
1540 previousState = None
1541 state = states[rowNum]
1542 # if we're not in an event, fast forward through off-like rows
1543 # until a new event starts
1544 if eventStart is None and state in skipStates:
1545 rowNum += 1
1546 continue
1548 # we've started a new event, so walk through it and find the end
1549 eventStart = rowNum
1550 previousState = state
1551 rowNum += 1 # move to the next row before starting the while loop
1552 if rowNum == nRows:
1553 # we've reached the end of the data, and we're still in an
1554 # event, so don't return this presumably in-progress event
1555 self.log.warning("Reached the end of the data while starting a new event")
1556 break
1557 state = states[rowNum]
1558 while state == previousState:
1559 rowNum += 1
1560 if rowNum == nRows:
1561 break
1562 state = states[rowNum]
1563 parsedStates.append(
1564 self.ParsedState(
1565 eventStart=eventStart, eventEnd=rowNum, previousState=previousState, state=state
1566 )
1567 )
1568 if state in skipStates:
1569 eventStart = None
1571 # done parsing, just check the last event is valid
1572 if parsedStates: # ensure we have at least one event
1573 lastEvent = parsedStates[-1]
1574 if lastEvent.eventEnd == nRows:
1575 # Generally, you *want* the timespan for an event to end at the
1576 # first row of the next event, because you were in that state
1577 # right up until that state change. However, if that event is
1578 # a) the last one of the day and b) runs right up until the end
1579 # of the dataframe, then there isn't another row, so this will
1580 # overrun the array.
1581 #
1582 # If the data is for the current day then this isn't a worry,
1583 # as we're likely still taking data, and this event will likely
1584 # close soon, so we don't issue a warning, and simply drop the
1585 # event from the list.
1587 # However, if the data is for a past day then no new data will
1588 # come to close the event, so keep the event, clamping its end to
1589 # the last row of the day's data, and issue a warning.
1590 if dataIsForCurrentDay:
1591 self.log.info("Discarding open (likely in-progess) final event from current day's events")
1592 parsedStates = parsedStates[:-1]
1593 else:
1594 self.log.warning("Last event ends open, forcing it to end at end of the day's data")
1595 # it's a tuple, so (deliberately) awkward to modify
1596 parsedStates[-1] = self.ParsedState(
1597 eventStart=lastEvent.eventStart,
1598 eventEnd=lastEvent.eventEnd - 1,
1599 previousState=lastEvent.previousState,
1600 state=lastEvent.state,
1601 )
1603 return parsedStates
1605 def addBlockDataToEvents(self, dayObs, events):
1606 """Find all the block data in the EFD for the specified events.
1608 Finds all the block data in the EFD relating to the events, parses it
1609 from the rows of the dataframe, and adds it to the events in place.
1611 Parameters
1612 ----------
dayObs : `int`
The dayObs for which to find the block data.
1613 events : `lsst.summit.utils.tmaUtils.TMAEvent` or
1614 `list` of `lsst.summit.utils.tmaUtils.TMAEvent`
1615 One or more events to add the block data to.
1616 """
1617 try:
1618 blockParser = BlockParser(dayObs, client=self.client)
1619 except Exception as e:
1620 # adding the block data should never cause event generation to fail,
1621 # so if we can't get the block data, log and return. Such a failure
1622 # is never expected though, so use log.exception to get the full
1623 # traceback and scare users so that it gets reported.
1624 self.log.exception(f"Failed to parse block data for {dayObs=}, {e}")
1625 return
1626 blocks = blockParser.getBlockNums()
1627 blockDict = {}
1628 for block in blocks:
1629 blockDict[block] = blockParser.getSeqNums(block)
1631 for block, seqNums in blockDict.items():
1632 for seqNum in seqNums:
1633 blockInfo = blockParser.getBlockInfo(block=block, seqNum=seqNum)
1635 relatedEvents = blockParser.getEventsForBlock(events, block=block, seqNum=seqNum)
1636 for event in relatedEvents:
1637 toSet = [blockInfo]
1638 if event.blockInfos is not None:
1639 existingInfo = event.blockInfos
1640 existingInfo.append(blockInfo)
1641 toSet = existingInfo
1643 # Add the blockInfo to the TMAEvent. Because this is a
1644 # frozen dataclass, use object.__setattr__ to set the
1645 # attribute. This is the correct way to set a frozen
1646 # dataclass attribute after creation.
1647 object.__setattr__(event, "blockInfos", toSet)
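# For reference, a minimal sketch of the frozen-dataclass pattern used
# above (the `Point` class is hypothetical, for illustration only):
#
#     from dataclasses import dataclass
#
#     @dataclass(frozen=True)
#     class Point:
#         x: int
#
#     p = Point(x=1)
#     p.x = 2                        # raises FrozenInstanceError
#     object.__setattr__(p, "x", 2)  # bypasses the frozen __setattr__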
1649 def _makeEventsFromStateTuples(self, states, dayObs, data):
1650 """For the list of state-tuples, create a list of ``TMAEvent`` objects.
1652 Given the underlying data, and the start/stop points for each event,
1653 create the TMAEvent objects for the dayObs.
1655 Parameters
1656 ----------
1657 states : `list` of `tuple`
1658 The parsed states, as a list of named tuples of the form:
1659 ``(eventStart, eventEnd, previousState, state)``
1660 dayObs : `int`
1661 The dayObs for the data.
1662 data : `pd.DataFrame`
1663 The merged dataframe.
1665 Returns
1666 -------
1667 events : `list` of `lsst.summit.utils.tmaUtils.TMAEvent`
1668 The events for the specified dayObs.
1669 """
1670 seqNum = 0
1671 events = []
1672 for parsedState in states:
1673 begin = data.iloc[parsedState.eventStart]["private_efdStamp"]
1674 end = data.iloc[parsedState.eventEnd]["private_efdStamp"]
1675 beginAstropy = efdTimestampToAstropy(begin)
1676 endAstropy = efdTimestampToAstropy(end)
1677 duration = end - begin
1678 event = TMAEvent(
1679 dayObs=dayObs,
1680 seqNum=seqNum,
1681 type=parsedState.previousState,
1682 endReason=parsedState.state,
1683 duration=duration,
1684 begin=beginAstropy,
1685 end=endAstropy,
1686 blockInfos=[], # this is added later
1687 _startRow=parsedState.eventStart,
1688 _endRow=parsedState.eventEnd,
1689 )
1690 events.append(event)
1691 seqNum += 1
1692 return events
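# For reference, `private_efdStamp` is a plain seconds-based timestamp
# (UNIX-epoch style), so the duration above is a simple float
# difference; a minimal sketch with made-up values:
#
#     begin, end = 1672539300.0, 1672539330.5
#     duration = end - begin  # 30.5 seconds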
1694 @staticmethod
1695 def printTmaDetailedState(tma):
1696 """Print the full state of all the components of the TMA.
1698 Currently this is the azimuth and elevation axes' power and motion
1699 states, and their respective inPosition statuses.
1701 Parameters
1702 ----------
1703 tma : `lsst.summit.utils.tmaUtils.TMAStateMachine`
1704 The TMA state machine in the state we want to print.
1705 """
1706 axes = ["azimuth", "elevation"]
1707 p = tma._parts
1708 axisPad = len(max(axes, key=len)) # length of the longest axis string == 9 here, but this is general
1709 motionPad = max(len(s.name) for s in AxisMotionState)
1710 powerPad = max(len(s.name) for s in PowerState)
1712 # example output to show what's being done with the padding:
1713 # azimuth - Power: ON Motion: STOPPED InPosition: True # noqa: W505
1714 # elevation - Power: ON Motion: MOVING_POINT_TO_POINT InPosition: False # noqa: W505
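# for reference, the `:>{pad}` form used below right-justifies a field
# to a computed width, e.g.:
#     pad = max(len(s) for s in ("ON", "STOPPED"))  # == 7
#     f"{'ON':>{pad}}"  # -> '     ON'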
1715 for axis in axes:
1716 print(
1717 f"{axis:>{axisPad}} - "
1718 f"Power: {p[f'{axis}SystemState'].name:>{powerPad}} "
1719 f"Motion: {p[f'{axis}MotionState'].name:>{motionPad}} "
1720 f"InPosition: {p[f'{axis}InPosition']}"
1721 )
1722 print(f"Overall system state: {tma.state.name}")
1724 def printFullDayStateEvolution(self, dayObs, taiOrUtc="utc"):
1725 """Print the full TMA state evolution for the specified dayObs.
1727 Replays all the data from the EFD for the specified dayObs through
1728 the TMA state machine, and prints both the overall and detailed state
1729 of the TMA for each row.
1731 Parameters
1732 ----------
1733 dayObs : `int`
1734 The dayObs for which to print the state evolution.
1735 taiOrUtc : `str`, optional
1736 Whether to print the timestamps in TAI or UTC. Default is UTC.
1737 """
1738 # create a fake event which spans the whole day, and then use
1739 # printEventDetails code while skipping the header to print the
1740 # evolution.
1741 _ = self.getEvents(dayObs) # ensure the data has been retrieved from the EFD
1742 data = self._data[dayObs]
1743 lastRowNum = len(data) - 1
1745 fakeEvent = TMAEvent(
1746 dayObs=dayObs,
1747 seqNum=-1, # anything will do
1748 type=TMAState.OFF, # anything will do
1749 endReason=TMAState.OFF, # anything will do
1750 duration=-1, # anything will do
1751 begin=efdTimestampToAstropy(data.iloc[0]["private_efdStamp"]),
1752 end=efdTimestampToAstropy(data.iloc[-1]["private_efdStamp"]),
1753 _startRow=0,
1754 _endRow=lastRowNum,
1755 )
1756 self.printEventDetails(fakeEvent, taiOrUtc=taiOrUtc, printHeader=False)
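# Hedged usage sketch (requires EFD access, and assumes the default
# constructor creates its own EFD client; the dayObs is illustrative):
#
#     maker = TMAEventMaker()
#     maker.printFullDayStateEvolution(20230101, taiOrUtc="utc")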
1758 def printEventDetails(self, event, taiOrUtc="tai", printHeader=True):
1759 """Print a detailed breakdown of all state transitions during an event.
1761 Note: this is not the most efficient way to do this, but it is much the
1762 cleanest with respect to the actual state machine application and event
1763 generation code, and is easily fast enough for the cases it will be
1764 used for. It is not worth complicating the normal state machine logic
1765 to try to use this code.
1767 Parameters
1768 ----------
1769 event : `lsst.summit.utils.tmaUtils.TMAEvent`
1770 The event to display the details of.
1771 taiOrUtc : `str`, optional
1772 Whether to display time strings in TAI or UTC. Defaults to TAI.
1773 Case insensitive.
1774 printHeader : `bool`, optional
1775 Whether to print the event summary. Defaults to True. The primary
1776 reason for the existence of this option is so that this same
1777 printing function can be used to show the evolution of a whole day
1778 by supplying a fake event which spans the whole day, but this event
1779 necessarily has a meaningless summary, and so needs suppressing.
1780 """
1781 taiOrUtc = taiOrUtc.lower()
1782 if taiOrUtc not in ["tai", "utc"]:
1783 raise ValueError(f"Got unsupported value for {taiOrUtc=}")
1784 useUtc = taiOrUtc == "utc"
1786 if printHeader:
1787 print(
1788 f"Details for {event.duration:.2f}s {event.type.name} event dayObs={event.dayObs}"
1789 f" seqNum={event.seqNum}:"
1790 )
1791 print(f"- Event began at: {event.begin.utc.isot if useUtc else event.begin.isot}")
1792 print(f"- Event ended at: {event.end.utc.isot if useUtc else event.end.isot}")
1794 dayObs = event.dayObs
1795 data = self._data[dayObs]
1796 startRow = event._startRow
1797 endRow = event._endRow
1798 nRowsToApply = endRow - startRow + 1
1799 print(f"\nTotal number of rows in the merged dataframe: {len(data)}")
1800 if printHeader:
1801 print(f"of which rows {startRow} to {endRow} (inclusive) relate to this event.")
1803 # reconstruct all the states
1804 tma = TMAStateMachine(engineeringMode=True)
1805 _initializeTma(tma)
1807 tmaStates = {}
1808 firstAppliedRow = True # flag to print a header on the first row that's applied
1809 for rowNum, row in data.iterrows(): # must replay rows right from start to get full correct state
1810 if rowNum == startRow:
1811 # we've not yet applied this row, so this is the state just
1812 # before the event
1813 print(f"\nBefore the event the TMA was in state {tma.state.name}:")
1814 self.printTmaDetailedState(tma)
1816 if startRow <= rowNum <= endRow:
1817 if firstAppliedRow: # only print this intro on the first row we're applying
1818 print(
1819 f"\nThen, applying the {nRowsToApply} rows of data for this event, the state"
1820 " evolved as follows:\n"
1821 )
1822 firstAppliedRow = False
1824 # break the row down and print its details
1825 rowFor = row["rowFor"]
1826 axis, rowType = getAxisAndType(rowFor) # e.g. elevation, MotionState
1827 value = tma._getRowPayload(row, rowType, rowFor)
1828 valueStr = f"{str(value) if isinstance(value, bool) else value.name}"
1829 rowTime = efdTimestampToAstropy(row["private_efdStamp"])
1830 print(
1831 f"On row {rowNum} the {axis} axis had the {rowType} set to {valueStr} at"
1832 f" {rowTime.utc.isot if useUtc else rowTime.isot}"
1833 )
1835 # then apply it as usual, printing the state right afterwards
1836 tma.apply(row)
1837 tmaStates[rowNum] = tma.state
1838 self.printTmaDetailedState(tma)
1839 print()
1841 else:
1842 # if it's not in the range of interest then just apply it
1843 # silently as usual
1844 tma.apply(row)
1845 tmaStates[rowNum] = tma.state
1847 def findEvent(self, time):
1848 """Find the event which contains the specified time.
1850 If the specified time lies within an event, that event is returned. If
1851 it is at the exact start, that is logged, and if that start point is
1852 shared by the end of the previous event, that is logged too. If the
1853 time lies between events, the events on either side are logged, but
1854 ``None`` is returned. If the time lies before the first event of the
1855 day a warning is logged, as for times after the last event of the day.
1857 Parameters
1858 ----------
1859 time : `astropy.time.Time`
1860 The time.
1862 Returns
1863 -------
1864 event : `lsst.summit.utils.tmaUtils.TMAEvent` or `None`
1865 The event which contains the specified time, or ``None`` if the
1866 time doesn't fall during an event.
1867 """
1868 # there are five possible cases:
1869 # 1) the time lies before the first event of the day
1870 # 2) the time lies after the last event of the day
1871 # 3) the time lies within an event
1872 # 3a) the time is exactly at the start of an event
1873 # 3b) if so, time can be shared by the end of the previous event if
1874 # they are contiguous
1875 # 4) the time lies between two events
1876 # 5) the time is exactly at end of the last event of the day. This is
1877 # an issue because event end times are exclusive, so this time is
1878 # not technically in that event, it's the moment it closes (and if
1879 # there *was* an event which followed contiguously, it would be in
1880 # that event instead, which is what motivates this definition of
1881 # which event a time lies within)
1883 dayObs = getDayObsForTime(time)
1884 # we know this is on the right day, and definitely before the specified
1885 # time, but sanity check this before continuing as this needs to be
1886 # true for this to give the correct answer
1887 assert getDayObsStartTime(dayObs) <= time
1888 assert getDayObsEndTime(dayObs) > time
1890 # common start to many log messages, so define it once here
1891 logStart = f"Specified time {time.isot} falls on {dayObs=}"
1893 events = self.getEvents(dayObs)
1894 if len(events) == 0:
1895 self.log.warning(f"There are no events found for {dayObs}")
1896 return None
1898 # check case 1)
1899 if time < events[0].begin:
1900 self.log.warning(f"{logStart} and is before the first event of the day")
1901 return None
1903 # check case 2)
1904 if time > events[-1].end:
1905 self.log.warning(f"{logStart} and is after the last event of the day")
1906 return None
1908 # check case 5)
1909 if time == events[-1].end:
1910 self.log.warning(
1911 f"{logStart} and is exactly at the end of the last event of the day"
1912 f" (seqnum={events[-1].seqNum}). Because event intervals are half-open, this"
1913 " time does not technically lie in any event"
1914 )
1915 return None
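# Half-open interval semantics, sketched with made-up numbers:
#     t, begin, end = 20.0, 10.0, 20.0
#     inEvent = begin <= t < end  # False: t == end is *not* in the event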
1917 # we are now either in an event, or between events. Walk through the
1918 # events; the first event whose end is after the specified time either
1919 # contains the time, or the time falls before it, between events.
1920 for eventNum, event in enumerate(events):
1921 if event.end > time: # case 3) we are now into or past the right event
1922 # the event ends after the specified time - note the > and not >=:
1923 # this must be strictly greater, as we check the overlap case
1924 # later
1925 if time >= event.begin: # we're fully inside the event, so return it.
1926 # 3a) before returning, check if we're exactly at the start
1927 # of the event, and if so, log it. Then 3b) also check if
1928 # we're at the exact end of the previous event, and if so,
1929 # log that too.
1930 if time == event.begin:
1931 self.log.info(f"{logStart} and is exactly at the start of event" f" {eventNum}")
1932 if eventNum == 0: # I think this is actually impossible, but check anyway
1933 return event # can't check the previous event so return here
1934 previousEvent = events[eventNum - 1]
1935 if previousEvent.end == time:
1936 self.log.info(
1937 "Previous event is contiguous, so this time is also at the exact"
1938 f" end of {eventNum - 1}"
1939 )
1940 return event
1941 else: # case 4)
1942 # the event end is past the time, but it's not inside the
1943 # event, so we're between events. Log which we're between
1944 # and return None
1945 previousEvent = events[eventNum - 1]
1946 timeAfterPrev = (time - previousEvent.end).to_datetime()
1947 naturalTimeAfterPrev = humanize.naturaldelta(timeAfterPrev, minimum_unit="MICROSECONDS")
1948 timeBeforeCurrent = (event.begin - time).to_datetime()
1949 naturalTimeBeforeCurrent = humanize.naturaldelta(
1950 timeBeforeCurrent, minimum_unit="MICROSECONDS"
1951 )
1952 self.log.info(
1953 f"{logStart} and lies"
1954 f" {naturalTimeAfterPrev} after the end of event {previousEvent.seqNum}"
1955 f" and {naturalTimeBeforeCurrent} before the start of event {event.seqNum}."
1956 )
1957 return None
1959 raise RuntimeError(
1960 "Event finding logic fundamentally failed, which should never happen - the code needs fixing"
1961 )
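# Hedged usage sketch for findEvent (requires EFD access, and assumes
# the default constructor creates its own EFD client; the timestamp is
# illustrative):
#
#     from astropy.time import Time
#     maker = TMAEventMaker()
#     event = maker.findEvent(Time("2023-01-01T03:00:00", scale="utc"))
#     if event is not None:
#         print(event.seqNum, event.type, event.duration)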