# python/lsst/analysis/tools/interfaces/datastore/_dispatcher.py
# This file is part of analysis_tools.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations

__all__ = ("SasquatchDispatchPartialFailure", "SasquatchDispatchFailure", "SasquatchDispatcher")

"""Sasquatch datastore"""
import calendar
import datetime
import json
import logging
import math
import re
from collections.abc import Mapping, MutableMapping, Sequence
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, cast
from uuid import UUID, uuid4

import requests
from lsst.daf.butler import DatasetRef
from lsst.resources import ResourcePath
from lsst.utils.packages import getEnvironmentPackages

if TYPE_CHECKING:
    from .. import MetricMeasurementBundle


log = logging.getLogger(__name__)

# Constants associated with SasquatchDispatcher
PARTITIONS = 1
REPLICATION_FACTOR = 3

IDENTIFIER_KEYS = [
    "detector",
    "patch",
    "skymap",
    "visit",
    "tract",
    "physical_filter",
    "instrument",
    "band",
    "exposure",
]


class SasquatchDispatchPartialFailure(RuntimeError):
    """This indicates that a Sasquatch dispatch was partially successful."""

    pass


class SasquatchDispatchFailure(RuntimeError):
    """This indicates that dispatching a
    `~lsst.analysis.tools.interfaces.MetricMeasurementBundle` failed.
    """

    pass


def _tag2VersionTime(productStr: str) -> tuple[str, float]:
    """Determine versions and dates from the string returned from
    getEnvironmentPackages.

    The `~lsst.utils.packages.getEnvironmentPackages` function returns the
    setup version associated with a product, along with a list of tags that
    have been added to it.

    This method splits up that return string, and determines the earliest
    date associated with the setup package version.

    Parameters
    ----------
    productStr : `str`
        The product string returned from a lookup on the result of a call to
        `~lsst.utils.packages.getEnvironmentPackages`.

    Returns
    -------
    result : `tuple` of `str` and `float`
        The first element is the version of the package, and the second is
        the POSIX timestamp associated with that released version.

    Raises
    ------
    ValueError
        Raised if there are no tags which correspond to dates.
    """
    times: list[datetime.datetime] = []
    version = productStr.split()[0]
    tags: str = re.findall("[(](.*)[)]", productStr)[0]
    for tag in tags.split():
        numDots = tag.count(".")
        numUnder = tag.count("_")
        separator = "_"
        if numDots > numUnder:
            separator = "."
        match tag.split(separator):
            # Daily tag branch.
            case ("d", year, month, day):
                dt = datetime.datetime(year=int(year), month=int(month), day=int(day))
            # Weekly tag branch.
            case ("w", year, week):
                iyear = int(year)
                iweek = int(week)
                # Use 4 as the day because releases are available starting
                # on Thursday.
                dayOfWeek = 4

                # Find the first week of the year that contains a Thursday.
                cal = calendar.Calendar()
                cal.setfirstweekday(6)
                i = 0
                for i, iterWeek in enumerate(cal.monthdatescalendar(iyear, 1)):
                    if iterWeek[dayOfWeek].month == 1:
                        break
                # Handle fromisocalendar not being able to handle week 53
                # in the case where the date was going to subtract 7 days
                # anyway.
                if i and iweek == 53:
                    i = 0
                    iweek = 52
                delta = datetime.timedelta(days=7 * i)

                # Correct for a weekly being issued in the last week of the
                # previous year, as Thursdays don't always line up evenly in
                # a week / year split.
                dt = datetime.datetime.fromisocalendar(iyear, iweek, dayOfWeek) - delta
            # Skip tags that can't be understood.
            case _:
                continue
        times.append(dt)
    if len(times) == 0:
        raise ValueError("Could not find any tags corresponding to dates")
    minTime = min(times)
    # datetime.replace returns a new object, so re-assign the result for the
    # UTC timezone to take effect.
    minTime = minTime.replace(tzinfo=datetime.timezone.utc)
    return version, minTime.timestamp()
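
# A hedged illustration of the parsing above (the exact productStr format is
# an assumption based on getEnvironmentPackages output: a setup version
# followed by a parenthesized list of tags):
#
#     >>> _tag2VersionTime("g1234abcd+1 (w.2023.10 v25.0.0.rc1)")
#     ('g1234abcd+1', ...)  # the POSIX timestamp of the Thursday of ISO
#                           # week 10 of 2023
#
# Only daily ("d_YYYY_MM_DD" / "d.YYYY.MM.DD") and weekly ("w_YYYY_WW" /
# "w.YYYY.WW") tags contribute dates; other tags, like "v25.0.0.rc1" here,
# are skipped.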


@dataclass
class SasquatchDispatcher:
    """This class mediates the transfer of MetricMeasurementBundles to a
    Sasquatch HTTP Kafka proxy server.
    """

    url: str
    """URL of the Sasquatch proxy server."""

    token: str
    """Authentication token used in communicating with the proxy server."""

    namespace: str = "lsst.debug"
    """The namespace in Sasquatch in which to write the uploaded metrics."""

    def __post_init__(self) -> None:
        match ResourcePath(self.url).scheme:
            case "http" | "https":
                pass
            case _:
                raise ValueError("Proxy server must be locatable with either http or https")
        self._cluster_id: str | None = None

    @property
    def clusterId(self) -> str:
        """ClusterId of the Kafka proxy.

        Notes
        -----
        The cluster id will be fetched with a network call if it is not
        already cached.
        """
        if self._cluster_id is None:
            self._populateClusterId()
        return cast(str, self._cluster_id)

    def _populateClusterId(self) -> None:
        """Get the Sasquatch Kafka cluster id."""
        headers = {"content-type": "application/json"}
        r = requests.get(f"{self.url}/v3/clusters", headers=headers)

        if r.status_code == requests.codes.ok:
            cluster_id = r.json()["data"][0]["cluster_id"]
            self._cluster_id = str(cluster_id)
        else:
            log.error("Could not retrieve the cluster id for the specified url")
            raise SasquatchDispatchFailure("Could not retrieve the cluster id for the specified url")

    def _create_topic(self, topic_name: str) -> bool:
        """Create a Kafka topic in Sasquatch.

        Parameters
        ----------
        topic_name : `str`
            The name of the Kafka topic to create.

        Returns
        -------
        success : `bool`
            `True` if the topic was created or already exists, `False` if an
            error was encountered.
        """
        headers = {"content-type": "application/json"}

        topic_config = {
            "topic_name": f"{self.namespace}.{topic_name}",
            "partitions_count": PARTITIONS,
            "replication_factor": REPLICATION_FACTOR,
        }

        r = requests.post(
            f"{self.url}/v3/clusters/{self.clusterId}/topics", json=topic_config, headers=headers
        )

        if r.status_code == requests.codes.created:
            log.debug("Created topic %s.%s", self.namespace, topic_name)
            return True
        elif r.status_code == requests.codes.bad_request:
            log.debug("Topic %s.%s already exists.", self.namespace, topic_name)
            return True
        else:
            log.error("Unknown error occurred creating Kafka topic %s %s", r.status_code, r.json())
            return False
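
    # The call above appears to follow the Confluent REST Proxy v3 topics
    # API: e.g. with namespace "lsst.debug" and topic_name "someMetric" it
    # posts {"topic_name": "lsst.debug.someMetric", "partitions_count": 1,
    # "replication_factor": 3} to {url}/v3/clusters/{clusterId}/topics.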

    def _generateAvroSchema(self, metric: str, record: MutableMapping[str, Any]) -> tuple[str, bool]:
        """Infer the Avro schema from the record payload.

        Parameters
        ----------
        metric : `str`
            The name of the metric.
        record : `MutableMapping`
            The prepared record for which a schema is to be generated.

        Returns
        -------
        resultSchema : `str`
            A json encoded string of the resulting Avro schema.
        errorCode : `bool`
            A boolean indicating if any record fields had to be trimmed
            because a suitable schema could not be generated. `True` if
            fields were removed, `False` otherwise.
        """
        schema: dict[str, Any] = {"type": "record", "namespace": self.namespace, "name": metric}

        # Record whether any fields needed to be trimmed.
        resultsTrimmed = False

        fields = list()
        # If Avro schemas can't be generated for values, they should be
        # removed from the records.
        keysToRemove: list[str] = []
        for key in record:
            value = record[key]
            avroType: Mapping[str, Any]
            if "timestamp" in key:
                avroType = {"type": "double"}
            else:
                avroType = self._python2Avro(value)
            if len(avroType) == 0:
                continue
            if avroType.get("error_in_conversion"):
                keysToRemove.append(key)
                resultsTrimmed = True
                continue
            fields.append({"name": key, **avroType})

        # Remove any key for which a schema could not be generated.
        for key in keysToRemove:
            record.pop(key)

        schema["fields"] = fields

        return json.dumps(schema), resultsTrimmed
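
    # As an illustration (hedged: the field values are hypothetical), a
    # record such as
    #
    #     {"run_timestamp": 1683198540.0, "run": "u/someone/run", "count": 3}
    #
    # would yield a schema like
    #
    #     {"type": "record", "namespace": "lsst.debug", "name": "someMetric",
    #      "fields": [{"name": "run_timestamp", "type": "double"},
    #                 {"name": "run", "type": "string", "default": ""},
    #                 {"name": "count", "type": "int", "default": 0}]}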

    def _python2Avro(self, value: Any) -> Mapping:
        """Map a Python value to an Avro schema entry.

        Parameters
        ----------
        value : `Any`
            Any python parameter.

        Returns
        -------
        result : `Mapping`
            A mapping that represents an entry in an Avro schema.
        """
        match value:
            case float() | None:
                return {"type": "float", "default": 0.0}
            case str():
                return {"type": "string", "default": ""}
            case int():
                return {"type": "int", "default": 0}
            case Sequence():
                tmp = {self._python2Avro(item)["type"] for item in value}
                if len(tmp) == 0:
                    return {}
                if len(tmp) > 1:
                    log.error(
                        "Sequence contains mixed types: %s, must be homogeneous for avro conversion, "
                        "skipping record",
                        tmp,
                    )
                    return {"error_in_conversion": True}
                return {"type": "array", "items": tmp.pop()}
            case _:
                log.error("Unsupported type %s, skipping record", type(value))
                return {}
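
    # Hedged examples of the mapping above:
    #
    #     _python2Avro(1.5)        -> {"type": "float", "default": 0.0}
    #     _python2Avro("a")        -> {"type": "string", "default": ""}
    #     _python2Avro([1, 2, 3])  -> {"type": "array", "items": "int"}
    #     _python2Avro([1, "a"])   -> {"error_in_conversion": True}
    #     _python2Avro(object())   -> {} (unsupported; logged and skipped)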

    def _handleReferencePackage(self, meta: MutableMapping, bundle: MetricMeasurementBundle) -> None:
        """Check to see if there is a reference package.

        If there is a reference package, determine the datetime associated
        with this reference package. Save the package, the version, and the
        date to the common metric fields.

        Parameters
        ----------
        meta : `MutableMapping`
            A mapping which corresponds to fields which should be encoded in
            all records.
        bundle : `MetricMeasurementBundle`
            The bundled metrics.
        """
        package_version, package_timestamp = "", 0.0
        if ref_package := getattr(bundle, "reference_package", ""):
            packages = getEnvironmentPackages(True)
            if package_info := packages.get(ref_package):
                try:
                    package_version, package_timestamp = _tag2VersionTime(package_info)
                except ValueError:
                    # Could not extract a package timestamp; leave the
                    # fields empty.
                    pass
        meta["reference_package"] = ref_package
        meta["reference_package_version"] = package_version
        meta["reference_package_timestamp"] = package_timestamp
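
    # For instance (hedged: the package name and tag string are
    # hypothetical), a bundle with reference_package="lsst_distrib" whose
    # environment reports "g1234abcd+1 (w.2023.10)" would set
    # reference_package_version to "g1234abcd+1" and
    # reference_package_timestamp to that weekly's POSIX timestamp.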

    def _handleTimes(self, meta: MutableMapping, bundle: MetricMeasurementBundle, run: str) -> None:
        """Add times to the meta fields mapping.

        Add all appropriate timestamp fields to the meta field mapping. These
        will be added to all records.

        This method will also look at the bundle to see if it defines a
        preferred time. If so, it sets that time as the main timestamp to be
        used for this record.

        Parameters
        ----------
        meta : `MutableMapping`
            A mapping which corresponds to fields which should be encoded in
            all records.
        bundle : `MetricMeasurementBundle`
            The bundled metrics.
        run : `str`
            The `~lsst.daf.butler.Butler` collection where the
            `MetricMeasurementBundle` is stored.
        """
        # Determine the timestamp associated with the run; if the run
        # collection does not end in a timestamp, use the current time.
        if re.match(r"\d{8}T\d{6}Z", stamp := run.split("/")[-1]):
            run_timestamp = datetime.datetime.strptime(stamp, r"%Y%m%dT%H%M%S%z")
        else:
            run_timestamp = datetime.datetime.now()
        meta["run_timestamp"] = run_timestamp.timestamp()

        # If the bundle supports supplying timestamps, dispatch on the type
        # specified.
        if hasattr(bundle, "timestamp_version") and bundle.timestamp_version:
            match bundle.timestamp_version:
                case "reference_package_timestamp":
                    if not meta["reference_package_timestamp"]:
                        log.error("Reference package timestamp is empty, using run_timestamp")
                        meta["timestamp"] = meta["run_timestamp"]
                    else:
                        meta["timestamp"] = meta["reference_package_timestamp"]
                case "run_timestamp":
                    meta["timestamp"] = meta["run_timestamp"]
                case "current_timestamp":
                    timeStamp = datetime.datetime.now()
                    meta["timestamp"] = timeStamp.timestamp()
                case "dataset_timestamp":
                    log.error("dataset timestamps are not yet supported, run_timestamp will be used")
                    meta["timestamp"] = meta["run_timestamp"]
                case _:
                    log.error(
                        "Timestamp version %s is not supported, run_timestamp will be used",
                        bundle.timestamp_version,
                    )
                    meta["timestamp"] = meta["run_timestamp"]
        # Default to using the run_timestamp.
        else:
            meta["timestamp"] = meta["run_timestamp"]
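
    # For example (hedged: the collection names are hypothetical), a run of
    # "u/someone/DM-12345/20230504T110900Z" ends in a timestamp, so
    # run_timestamp becomes 2023-05-04T11:09:00Z, while a run of
    # "u/someone/my-collection" falls back to the current time.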

    def _handleIdentifier(
        self,
        meta: MutableMapping,
        identifierFields: Mapping[str, Any] | None,
        datasetIdentifier: str | None,
        bundle: MetricMeasurementBundle,
    ) -> None:
        """Add an identifier to the meta record mapping.

        If the bundle declares a dataset identifier to use, add that to the
        record, otherwise use 'Generic' as the identifier. If the
        datasetIdentifier parameter is specified, that is used instead of
        anything specified by the bundle.

        This will also add any identifier fields supplied to the meta record
        mapping.

        Together these values (in addition to the timestamp and topic) should
        uniquely identify an upload to the Sasquatch system.

        Parameters
        ----------
        meta : `MutableMapping`
            A mapping which corresponds to fields which should be encoded in
            all records.
        identifierFields : `Mapping` or `None`
            The keys and values in this mapping will both be added as fields
            in the record, and used in creating a unique tag for the uploaded
            dataset type. I.e. the timestamp and the tag will be unique, and
            each record will belong to one combination of such.
        datasetIdentifier : `str` or `None`
            A string which will be used in creating unique identifier tags.
        bundle : `MetricMeasurementBundle`
            The bundle containing metric values to upload.
        """
        identifier: str
        if datasetIdentifier is not None:
            identifier = datasetIdentifier
        elif hasattr(bundle, "datasetIdentifier") and bundle.datasetIdentifier is not None:
            identifier = bundle.datasetIdentifier
        else:
            identifier = "Generic"

        meta["dataset_tag"] = identifier

        if identifierFields is None:
            identifierFields = {}
        for key in IDENTIFIER_KEYS:
            value = identifierFields.get(key, "")
            meta[key] = f"{value}"
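
    # E.g. (hedged values): identifierFields={"visit": 123, "band": "r"}
    # yields meta entries visit="123" and band="r", empty strings for the
    # remaining IDENTIFIER_KEYS, and dataset_tag="Generic" unless an
    # identifier was supplied.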

    def _prepareBundle(
        self,
        bundle: MetricMeasurementBundle,
        run: str,
        datasetType: str,
        timestamp: datetime.datetime | None = None,
        id: UUID | None = None,
        identifierFields: Mapping | None = None,
        datasetIdentifier: str | None = None,
        extraFields: Mapping | None = None,
    ) -> tuple[Mapping[str, list[Any]], bool]:
        """Encode all of the inputs into a format that can be sent to the
        Kafka proxy server.

        Parameters
        ----------
        bundle : `MetricMeasurementBundle`
            The bundle containing metric values to upload.
        run : `str`
            The run name to associate with these metric values. If this
            bundle is also stored in the butler, this should be the butler
            run collection in which the bundle is stored.
        datasetType : `str`
            The dataset type name associated with this
            `MetricMeasurementBundle`.
        timestamp : `datetime.datetime` or `None`
            The timestamp to be associated with the measurements in the
            ingress database. If this value is `None`, the timestamp will be
            set from the run time or the current time.
        id : `UUID` or `None`
            The UUID of the `MetricMeasurementBundle` within the butler. If
            `None`, a new random UUID will be generated so that each record
            in Sasquatch will have a unique value.
        datasetIdentifier : `str` or `None`
            A string which will be used in creating unique identifier tags.
        identifierFields : `Mapping` or `None`
            The keys and values in this mapping will both be added as fields
            in the record, and used in creating a unique tag for the uploaded
            dataset type. I.e. the timestamp and the tag will be unique, and
            each record will belong to one combination of such.
        extraFields : `Mapping` or `None`
            Extra mapping keys and values that will be added as fields to the
            dispatched record.

        Returns
        -------
        result : `Mapping` of `str` to `list`
            A mapping of metric name to a list of metric measurement records.
        status : `bool`
            A status boolean indicating if some records had to be skipped due
            to a problem parsing the bundle.
        """
        if id is None:
            id = uuid4()
        sid = str(id)
        meta: dict[str, Any] = dict()

        # Add other associated common fields.
        meta["id"] = sid
        meta["run"] = run
        meta["dataset_type"] = datasetType

        # Check to see if the bundle declares a reference package.
        self._handleReferencePackage(meta, bundle)

        # Handle the various timestamps that could be associated with a
        # record.
        self._handleTimes(meta, bundle, run)

        # Always use the supplied timestamp if one was passed in.
        if timestamp is not None:
            meta["timestamp"] = timestamp.timestamp()

        self._handleIdentifier(meta, identifierFields, datasetIdentifier, bundle)

        # Add in any other fields that were supplied to the function call.
        if extraFields is not None:
            meta.update(extraFields)

        metricRecords: dict[str, list[Any]] = dict()

        # Record whether any measurements needed to be skipped.
        resultsTrimmed = False

        # Look at each of the metrics in the bundle (name, values).
        for metric, measurements in bundle.items():
            # Create a list which will contain the records for each
            # measurement associated with the metric.
            metricRecordList = metricRecords.setdefault(metric, list())

            record: dict[str, Any] = meta.copy()

            # Loop over each metric measurement within the metric.
            for measurement in measurements:
                # Extract any tags, package info, etc.
                note_key = f"{measurement.metric_name.metric}.metric_tags"
                record["tags"] = dict(measurement.notes.items()).get(note_key, list())

                # Missing values are replaced by 0 in Sasquatch, see RFC-763.
                name = ""
                value = 0.0
                match measurement.json:
                    case {"metric": name, "value": None}:
                        pass
                    case {"metric": name, "value": value}:
                        if math.isnan(value):
                            log.error(
                                "Measurement %s had a value that is a NaN, dispatch will be skipped",
                                measurement,
                            )
                            resultsTrimmed = True
                            continue
                    case {"value": _}:
                        log.error("Measurement %s does not contain the key 'metric'", measurement)
                        resultsTrimmed = True
                        continue
                    case {"metric": _}:
                        log.error("Measurement %s does not contain the key 'value'", measurement)
                        resultsTrimmed = True
                        continue
                record[name] = value
                metricRecordList.append({"value": record})
        return metricRecords, resultsTrimmed
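
    # A hedged sketch of the output shape (field values are hypothetical):
    # a metric "someMetric" with one measurement whose json is
    # {"metric": "medianSky", "value": 1.5} would produce something like
    #
    #     {"someMetric": [{"value": {"id": "...", "run": "...",
    #                                "dataset_type": "...", "timestamp": ...,
    #                                "dataset_tag": "Generic", "tags": [],
    #                                "medianSky": 1.5, ...}}]}
    #
    # where the elided keys are the reference-package, run_timestamp, and
    # identifier fields added above.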

    def dispatch(
        self,
        bundle: MetricMeasurementBundle,
        run: str,
        datasetType: str,
        timestamp: datetime.datetime | None = None,
        id: UUID | None = None,
        datasetIdentifier: str | None = None,
        identifierFields: Mapping | None = None,
        extraFields: Mapping | None = None,
    ) -> None:
        """Dispatch a `MetricMeasurementBundle` to Sasquatch.

        Parameters
        ----------
        bundle : `MetricMeasurementBundle`
            The bundle containing metric values to upload.
        run : `str`
            The run name to associate with these metric values. If this
            bundle is also stored in the butler, this should be the butler
            run collection in which the bundle is stored. This will be used
            in generating uniqueness constraints in Sasquatch.
        datasetType : `str`
            The dataset type name associated with this
            `MetricMeasurementBundle`.
        timestamp : `datetime.datetime` or `None`
            The timestamp to be associated with the measurements in the
            ingress database. If this value is `None`, the timestamp will be
            set from the run time or the current time.
        id : `UUID` or `None`
            The UUID of the `MetricMeasurementBundle` within the Butler. If
            `None`, a new random UUID will be generated so that each record
            in Sasquatch will have a unique value.
        datasetIdentifier : `str` or `None`
            A string which will be used in creating unique identifier tags.
            If `None`, a default value will be inserted.
        identifierFields : `Mapping` or `None`
            The keys and values in this mapping will both be added as fields
            in the record, and used in creating a unique tag for the uploaded
            dataset type. I.e. the timestamp and the tag will be unique, and
            each record will belong to one combination of such. Examples of
            entries would be things like visit or tract.
        extraFields : `Mapping` or `None`
            Extra mapping keys and values that will be added as fields to the
            dispatched record.

        Raises
        ------
        SasquatchDispatchPartialFailure
            Raised if there were any errors in dispatching a bundle.
        SasquatchDispatchFailure
            Raised if no records could be uploaded at all.
        """
        if id is None:
            id = uuid4()

        # Prepare the bundle by transforming it to a list of metric records.
        metricRecords, recordsTrimmed = self._prepareBundle(
            bundle=bundle,
            run=run,
            datasetType=datasetType,
            timestamp=timestamp,
            id=id,
            datasetIdentifier=datasetIdentifier,
            identifierFields=identifierFields,
            extraFields=extraFields,
        )

        headers = {"content-type": "application/vnd.kafka.avro.v2+json"}
        data: dict[str, Any] = dict()
        partialUpload = False
        uploadFailed = []

        for metric, record in metricRecords.items():
            # Create the Kafka topic if it does not already exist.
            if not self._create_topic(metric):
                log.error("Topic not created, skipping dispatch of %s", metric)
                continue
            recordValue = record[0]["value"]
            # Generate a schema for each record.
            data["value_schema"], schemaTrimmed = self._generateAvroSchema(metric, recordValue)
            data["records"] = record

            if schemaTrimmed:
                partialUpload = True

            r = requests.post(f"{self.url}/topics/{self.namespace}.{metric}", json=data, headers=headers)

            if r.status_code == requests.codes.ok:
                log.debug("Successfully sent data for metric %s", metric)
                uploadFailed.append(False)
            else:
                log.error(
                    "There was a problem submitting the metric %s: %s, %s", metric, r.status_code, r.json()
                )
                uploadFailed.append(True)
                partialUpload = True

        if all(uploadFailed):
            raise SasquatchDispatchFailure("All records were unable to be uploaded.")

        if partialUpload or recordsTrimmed:
            raise SasquatchDispatchPartialFailure("One or more records may not have been uploaded entirely")
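
    # A minimal usage sketch (hedged: the URL, token, run collection, and
    # dataset type name are placeholders, and ``bundle`` is assumed to be an
    # existing MetricMeasurementBundle):
    #
    #     dispatcher = SasquatchDispatcher(
    #         url="https://example.com/sasquatch-rest-proxy",
    #         token="<auth-token>",
    #         namespace="lsst.debug",
    #     )
    #     dispatcher.dispatch(
    #         bundle,
    #         run="u/someone/DM-12345/20230504T110900Z",
    #         datasetType="metricMeasurementBundle",
    #     )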

    def dispatchRef(
        self,
        bundle: MetricMeasurementBundle,
        ref: DatasetRef,
        timestamp: datetime.datetime | None = None,
        extraFields: Mapping | None = None,
        datasetIdentifier: str | None = None,
    ) -> None:
        """Dispatch a `MetricMeasurementBundle` to Sasquatch with a known
        `DatasetRef`.

        Parameters
        ----------
        bundle : `MetricMeasurementBundle`
            The bundle containing metric values to upload.
        ref : `DatasetRef`
            The butler dataset ref corresponding to the input
            `MetricMeasurementBundle`.
        timestamp : `datetime.datetime` or `None`
            The timestamp to be associated with the measurements in the
            ingress database. If this value is `None`, the timestamp will be
            set from the run time or the current time.
        extraFields : `Mapping` or `None`
            Extra mapping keys and values that will be added as fields to the
            dispatched record if not `None`.
        datasetIdentifier : `str` or `None`
            A string which will be used in creating unique identifier tags.
            If `None`, a default value will be inserted.

        Raises
        ------
        SasquatchDispatchPartialFailure
            Raised if there were any errors in dispatching a bundle.
        """
        # Parse the relevant info out of the dataset ref.
        serializedRef = ref.to_simple()
        id = serializedRef.id
        if serializedRef.run is None:
            run = "<unknown>"
        else:
            run = serializedRef.run
        dstype = serializedRef.datasetType
        datasetType = dstype.name if dstype is not None else ""
        dataRefMapping = serializedRef.dataId.dataId if serializedRef.dataId else None

        self.dispatch(
            bundle,
            run=run,
            timestamp=timestamp,
            datasetType=datasetType,
            id=id,
            identifierFields=dataRefMapping,
            extraFields=extraFields,
            datasetIdentifier=datasetIdentifier,
        )
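

# A hedged usage sketch with a butler-resolved dataset (``butler``, the
# dataset type, and the collection are hypothetical):
#
#     ref = butler.registry.findDataset(
#         "metricMeasurementBundle", collections="u/someone/run"
#     )
#     bundle = butler.get(ref)
#     dispatcher.dispatchRef(bundle, ref)
#
# dispatchRef reuses the butler's dataset UUID, run collection, dataset type
# name, and data ID fields when building the Sasquatch records.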