# This file is part of ctrl_bps_htcondor.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
22"""Interface between generic workflow to HTCondor workflow system.
23"""
25__all__ = ["HTCondorService", "HTCondorWorkflow"]
import logging
import os
import re
from collections import defaultdict
from enum import IntEnum, auto
from pathlib import Path

import htcondor
from lsst.ctrl.bps import (
    BaseWmsService,
    BaseWmsWorkflow,
    GenericWorkflow,
    GenericWorkflowJob,
    WmsJobReport,
    WmsRunReport,
    WmsStates,
)
from lsst.ctrl.bps.bps_utils import chdir, create_count_summary
from lsst.utils.timer import time_this
from packaging import version

from .lssthtc import (
    MISSING_ID,
    HTCDag,
    HTCJob,
    JobStatus,
    NodeStatus,
    condor_q,
    condor_search,
    condor_status,
    htc_backup_files,
    htc_check_dagman_output,
    htc_create_submit_from_cmd,
    htc_create_submit_from_dag,
    htc_create_submit_from_file,
    htc_escape,
    htc_submit_dag,
    htc_version,
    pegasus_name_to_label,
    read_dag_info,
    read_dag_log,
    read_dag_status,
    read_node_status,
    summary_from_dag,
    write_dag_info,
)

class WmsIdType(IntEnum):
    """Type of valid WMS ids."""

    UNKNOWN = auto()
    """The type of the id cannot be determined."""

    LOCAL = auto()
    """The id is an HTCondor job's ClusterId (with an optional '.ProcId')."""

    GLOBAL = auto()
    """The id is an HTCondor global job id."""

    PATH = auto()
    """The id is a submission path."""

DEFAULT_HTC_EXEC_PATT = ".*worker.*"
"""Default pattern for searching for execute machines in an HTCondor pool."""

_LOG = logging.getLogger(__name__)

class HTCondorService(BaseWmsService):
    """HTCondor version of WMS service."""

    def prepare(self, config, generic_workflow, out_prefix=None):
        """Convert generic workflow to an HTCondor DAG ready for submission.

        Parameters
        ----------
        config : `lsst.ctrl.bps.BpsConfig`
            BPS configuration that includes necessary submit/runtime
            information.
        generic_workflow : `lsst.ctrl.bps.GenericWorkflow`
            The generic workflow (e.g., has executable name and arguments).
        out_prefix : `str`
            The root directory into which all WMS-specific files are written.

        Returns
        -------
        workflow : `lsst.ctrl.bps.wms.htcondor.HTCondorWorkflow`
            HTCondor workflow ready to be run.
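
        Examples
        --------
        A minimal usage sketch (hypothetical ``config``, ``generic_workflow``,
        and submit directory; requires a working HTCondor pool, so the
        doctest is skipped):

        >>> service = HTCondorService(config)  # doctest: +SKIP
        >>> workflow = service.prepare(config, generic_workflow, "submit/run1")  # doctest: +SKIP
        >>> service.submit(workflow)  # doctest: +SKIP
        >>> workflow.run_id  # doctest: +SKIP
        '1234.0'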
123 """
124 _LOG.debug("out_prefix = '%s'", out_prefix)
125 with time_this(log=_LOG, level=logging.INFO, prefix=None, msg="Completed HTCondor workflow creation"):
126 workflow = HTCondorWorkflow.from_generic_workflow(
127 config,
128 generic_workflow,
129 out_prefix,
130 f"{self.__class__.__module__}." f"{self.__class__.__name__}",
131 )
133 with time_this(
134 log=_LOG, level=logging.INFO, prefix=None, msg="Completed writing out HTCondor workflow"
135 ):
136 workflow.write(out_prefix)
137 return workflow
    def submit(self, workflow):
        """Submit a single HTCondor workflow.

        Parameters
        ----------
        workflow : `lsst.ctrl.bps.BaseWorkflow`
            A single HTCondor workflow to submit. run_id is updated after
            successful submission to WMS.
        """
        dag = workflow.dag

        ver = version.parse(htc_version())
        if ver >= version.parse("8.9.3"):
            sub = htc_create_submit_from_dag(dag.graph["dag_filename"], {})
        else:
            sub = htc_create_submit_from_cmd(dag.graph["dag_filename"], {})

        # For workflow portability, internal paths are all relative. Hence
        # the DAG needs to be submitted to HTCondor from inside the submit
        # directory.
        with chdir(workflow.submit_path):
            _LOG.info("Submitting from directory: %s", os.getcwd())
            schedd_dag_info = htc_submit_dag(sub)
            if schedd_dag_info:
                write_dag_info(f"{dag.name}.info.json", schedd_dag_info)

                _, dag_info = schedd_dag_info.popitem()
                _, dag_ad = dag_info.popitem()

                dag.run_id = f"{dag_ad['ClusterId']}.{dag_ad['ProcId']}"
                workflow.run_id = dag.run_id
            else:
                raise RuntimeError("Submission failed: unable to retrieve DAGMan job information")

    def restart(self, wms_workflow_id):
        """Restart a failed DAGMan workflow.

        Parameters
        ----------
        wms_workflow_id : `str`
            The directory with HTCondor files.

        Returns
        -------
        run_id : `str`
            HTCondor id of the restarted DAGMan job. If restart failed, it
            will be set to None.
        run_name : `str`
            Name of the restarted workflow. If restart failed, it will be set
            to None.
        message : `str`
            A message describing any issues encountered during the restart.
            If there were no issues, an empty string is returned.
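
        Examples
        --------
        A usage sketch (hypothetical submit directory; needs a previous run
        and an HTCondor pool, so the doctest is skipped):

        >>> service = HTCondorService(config)  # doctest: +SKIP
        >>> run_id, run_name, message = service.restart("submit/run1")  # doctest: +SKIP
        >>> if run_id is None:  # doctest: +SKIP
        ...     print(message)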
192 """
193 wms_path = Path(wms_workflow_id)
194 if not wms_path.is_dir():
195 return None, None, f"Directory '{wms_path}' not found"
197 _LOG.info("Restarting workflow from directory '%s'", wms_path)
198 rescue_dags = list(wms_path.glob("*.dag.rescue*"))
199 if not rescue_dags:
200 return None, None, f"HTCondor rescue DAG(s) not found in '{wms_path}'"
202 _LOG.info("Verifying that the workflow is not already in the job queue")
203 schedd_dag_info = condor_q(constraint=f'regexp("dagman$", Cmd) && Iwd == "{wms_workflow_id}"')
204 if schedd_dag_info:
205 _, dag_info = schedd_dag_info.popitem()
206 _, dag_ad = dag_info.popitem()
207 id_ = dag_ad["GlobalJobId"]
208 return None, None, f"Workflow already in the job queue (global job id: '{id_}')"
210 _LOG.info("Checking execution status of the workflow")
211 warn = False
212 dag_ad = read_dag_status(str(wms_path))
213 if dag_ad:
214 nodes_total = dag_ad.get("NodesTotal", 0)
215 if nodes_total != 0:
216 nodes_done = dag_ad.get("NodesDone", 0)
217 if nodes_total == nodes_done:
218 return None, None, "All jobs in the workflow finished successfully"
219 else:
220 warn = True
221 else:
222 warn = True
223 if warn:
224 _LOG.warning(
225 "Cannot determine the execution status of the workflow, continuing with restart regardless"
226 )
228 _LOG.info("Backing up select HTCondor files from previous run attempt")
229 htc_backup_files(wms_path, subdir="backups")
231 # For workflow portability, internal paths are all relative. Hence
232 # the DAG needs to be resubmitted to HTCondor from inside the submit
233 # directory.
234 _LOG.info("Adding workflow to the job queue")
235 run_id, run_name, message = None, None, ""
236 with chdir(wms_path):
237 try:
238 dag_path = next(wms_path.glob("*.dag.condor.sub"))
239 except StopIteration:
240 message = f"DAGMan submit description file not found in '{wms_path}'"
241 else:
242 sub = htc_create_submit_from_file(dag_path.name)
243 schedd_dag_info = htc_submit_dag(sub)
245 # Save select information about the DAGMan job to a file. Use
246 # the run name (available in the ClassAd) as the filename.
247 if schedd_dag_info:
248 dag_info = next(iter(schedd_dag_info.values()))
249 dag_ad = next(iter(dag_info.values()))
250 write_dag_info(f"{dag_ad['bps_run']}.info.json", schedd_dag_info)
251 run_id = f"{dag_ad['ClusterId']}.{dag_ad['ProcId']}"
252 run_name = dag_ad["bps_run"]
253 else:
254 message = "DAGMan job information unavailable"
256 return run_id, run_name, message
    def list_submitted_jobs(self, wms_id=None, user=None, require_bps=True, pass_thru=None, is_global=False):
        """Query WMS for list of submitted WMS workflows/jobs.

        This should be a quick lookup function to create list of jobs for
        other functions.

        Parameters
        ----------
        wms_id : `int` or `str`, optional
            Id or path that can be used by WMS service to look up job.
        user : `str`, optional
            User whose submitted jobs should be listed.
        require_bps : `bool`, optional
            Whether to require jobs returned in list to be bps-submitted jobs.
        pass_thru : `str`, optional
            Information to pass through to WMS.
        is_global : `bool`, optional
            If set, all job queues (and their histories) will be queried for
            job information. Defaults to False which means that only the local
            job queue will be queried.

        Returns
        -------
        job_ids : `list` [`Any`]
            Only job ids to be used by cancel and other functions. Typically
            this means top-level jobs (i.e., not children jobs).
        """
        _LOG.debug(
            "list_submitted_jobs params: wms_id=%s, user=%s, require_bps=%s, pass_thru=%s, is_global=%s",
            wms_id,
            user,
            require_bps,
            pass_thru,
            is_global,
        )

        # Determine which Schedds will be queried for job information.
        coll = htcondor.Collector()

        schedd_ads = []
        if is_global:
            schedd_ads.extend(coll.locateAll(htcondor.DaemonTypes.Schedd))
        else:
            schedd_ads.append(coll.locate(htcondor.DaemonTypes.Schedd))

        # Construct appropriate constraint expression using provided arguments.
        constraint = "False"
        if wms_id is None:
            if user is not None:
                constraint = f'(Owner == "{user}")'
        else:
            schedd_ad, cluster_id, id_type = _wms_id_to_cluster(wms_id)
            if cluster_id is not None:
                constraint = f"(DAGManJobId == {cluster_id} || ClusterId == {cluster_id})"

                # If provided id is either a submission path or a global id,
                # make sure the right Schedd will be queried regardless of
                # 'is_global' value.
                if id_type in {WmsIdType.GLOBAL, WmsIdType.PATH}:
                    schedd_ads = [schedd_ad]
        if require_bps:
            constraint += ' && (bps_isjob == "True")'
        if pass_thru:
            if "-forcex" in pass_thru:
                pass_thru_2 = pass_thru.replace("-forcex", "")
                if pass_thru_2 and not pass_thru_2.isspace():
                    constraint += f" && ({pass_thru_2})"
            else:
                constraint += f" && ({pass_thru})"

        # Create a list of scheduler daemons which need to be queried.
        schedds = {ad["Name"]: htcondor.Schedd(ad) for ad in schedd_ads}

        _LOG.debug("constraint = %s, schedds = %s", constraint, ", ".join(schedds))
        results = condor_q(constraint=constraint, schedds=schedds)

        # Prune child jobs where DAG job is in queue (i.e., aren't orphans).
        job_ids = []
        for schedd_name, job_info in results.items():
            for job_id, job_ad in job_info.items():
                _LOG.debug("job_id=%s DAGManJobId=%s", job_id, job_ad.get("DAGManJobId", "None"))
                if "DAGManJobId" not in job_ad:
                    job_ids.append(job_ad.get("GlobalJobId", job_id))
                else:
                    _LOG.debug("Looking for %s", f"{job_ad['DAGManJobId']}.0")
                    _LOG.debug("\tin jobs.keys() = %s", job_info.keys())
                    if f"{job_ad['DAGManJobId']}.0" not in job_info:  # orphaned job
                        job_ids.append(job_ad.get("GlobalJobId", job_id))

        _LOG.debug("job_ids = %s", job_ids)
        return job_ids
    def report(self, wms_workflow_id=None, user=None, hist=0, pass_thru=None, is_global=False):
        """Return run information based upon given constraints.

        Parameters
        ----------
        wms_workflow_id : `str`, optional
            Limit to specific run based on id.
        user : `str`, optional
            Limit results to runs for this user.
        hist : `float`, optional
            Limit history search to this many days. Defaults to 0.
        pass_thru : `str`, optional
            Constraints to pass through to HTCondor.
        is_global : `bool`, optional
            If set, all job queues (and their histories) will be queried for
            job information. Defaults to False which means that only the local
            job queue will be queried.

        Returns
        -------
        runs : `list` [`lsst.ctrl.bps.WmsRunReport`]
            Information about runs from given job information.
        message : `str`
            Extra message for report command to print. This could be pointers
            to documentation or to WMS specific commands.
        """
        if wms_workflow_id:
            id_type = _wms_id_type(wms_workflow_id)
            if id_type == WmsIdType.LOCAL:
                schedulers = _locate_schedds(locate_all=is_global)
                run_reports, message = _report_from_id(wms_workflow_id, hist, schedds=schedulers)
            elif id_type == WmsIdType.GLOBAL:
                schedulers = _locate_schedds(locate_all=True)
                run_reports, message = _report_from_id(wms_workflow_id, hist, schedds=schedulers)
            elif id_type == WmsIdType.PATH:
                run_reports, message = _report_from_path(wms_workflow_id)
            else:
                run_reports, message = {}, "Invalid job id"
        else:
            schedulers = _locate_schedds(locate_all=is_global)
            run_reports, message = _summary_report(user, hist, pass_thru, schedds=schedulers)
        _LOG.debug("report: %s, %s", run_reports, message)

        return list(run_reports.values()), message
    def cancel(self, wms_id, pass_thru=None):
        """Cancel submitted workflows/jobs.

        Parameters
        ----------
        wms_id : `str`
            Id or path of job that should be canceled.
        pass_thru : `str`, optional
            Information to pass through to WMS.

        Returns
        -------
        deleted : `bool`
            Whether the deletion was successful. Currently, if there is any
            doubt or any individual job was not deleted, False is returned.
        message : `str`
            Any message from WMS (e.g., error details).
        """
        _LOG.debug("Canceling wms_id = %s", wms_id)

        schedd_ad, cluster_id, _ = _wms_id_to_cluster(wms_id)

        if cluster_id is None:
            deleted = False
            message = "invalid id"
        else:
            _LOG.debug(
                "Canceling job managed by schedd_name = %s with cluster_id = %s",
                schedd_ad["Name"],
                cluster_id,
            )
            schedd = htcondor.Schedd(schedd_ad)

            constraint = f"ClusterId == {cluster_id}"
            if pass_thru is not None and "-forcex" in pass_thru:
                pass_thru_2 = pass_thru.replace("-forcex", "")
                if pass_thru_2 and not pass_thru_2.isspace():
                    constraint += f" && ({pass_thru_2})"
                _LOG.debug("JobAction.RemoveX constraint = %s", constraint)
                results = schedd.act(htcondor.JobAction.RemoveX, constraint)
            else:
                if pass_thru:
                    constraint += f" && ({pass_thru})"
                _LOG.debug("JobAction.Remove constraint = %s", constraint)
                results = schedd.act(htcondor.JobAction.Remove, constraint)
            _LOG.debug("Remove results: %s", results)

            if results["TotalSuccess"] > 0 and results["TotalError"] == 0:
                deleted = True
                message = ""
            else:
                deleted = False
                if results["TotalSuccess"] == 0 and results["TotalError"] == 0:
                    message = "no such bps job in batch queue"
                else:
                    message = f"unknown problems deleting: {results}"

        _LOG.debug("deleted: %s; message = %s", deleted, message)
        return deleted, message
class HTCondorWorkflow(BaseWmsWorkflow):
    """Single HTCondor workflow.

    Parameters
    ----------
    name : `str`
        Unique name for Workflow used when naming files.
    config : `lsst.ctrl.bps.BpsConfig`
        BPS configuration that includes necessary submit/runtime information.
    """

    def __init__(self, name, config=None):
        super().__init__(name, config)
        self.dag = None

    @classmethod
    def from_generic_workflow(cls, config, generic_workflow, out_prefix, service_class):
        # Docstring inherited
        htc_workflow = cls(generic_workflow.name, config)
        htc_workflow.dag = HTCDag(name=generic_workflow.name)

        _LOG.debug("htcondor dag attribs %s", generic_workflow.run_attrs)
        htc_workflow.dag.add_attribs(generic_workflow.run_attrs)
        htc_workflow.dag.add_attribs(
            {
                "bps_wms_service": service_class,
                "bps_wms_workflow": f"{cls.__module__}.{cls.__name__}",
                "bps_run_quanta": create_count_summary(generic_workflow.quanta_counts),
                "bps_job_summary": create_count_summary(generic_workflow.job_counts),
            }
        )

        _, tmp_template = config.search("subDirTemplate", opt={"replaceVars": False, "default": ""})
        if isinstance(tmp_template, str):
            subdir_template = defaultdict(lambda: tmp_template)
        else:
            subdir_template = tmp_template

        # Create all DAG jobs.
        site_values = {}  # Cache compute site specific values to reduce config lookups.
        for job_name in generic_workflow:
            gwjob = generic_workflow.get_job(job_name)
            if gwjob.compute_site not in site_values:
                site_values[gwjob.compute_site] = _gather_site_values(config, gwjob.compute_site)
            htc_job = _create_job(
                subdir_template[gwjob.label],
                site_values[gwjob.compute_site],
                generic_workflow,
                gwjob,
                out_prefix,
            )
            htc_workflow.dag.add_job(htc_job)

        # Add job dependencies to the DAG.
        for job_name in generic_workflow:
            htc_workflow.dag.add_job_relationships([job_name], generic_workflow.successors(job_name))

        # If final job exists in generic workflow, create DAG final job.
        final = generic_workflow.get_final()
        if final and isinstance(final, GenericWorkflowJob):
            if final.compute_site and final.compute_site not in site_values:
                site_values[final.compute_site] = _gather_site_values(config, final.compute_site)
            final_htjob = _create_job(
                subdir_template[final.label],
                site_values[final.compute_site],
                generic_workflow,
                final,
                out_prefix,
            )
            if "post" not in final_htjob.dagcmds:
                final_htjob.dagcmds["post"] = (
                    f"{os.path.dirname(__file__)}/final_post.sh {final.name} $DAG_STATUS $RETURN"
                )
            htc_workflow.dag.add_final_job(final_htjob)
        elif final and isinstance(final, GenericWorkflow):
            raise NotImplementedError("HTCondor plugin does not support a workflow as the final job")
        elif final:
            raise TypeError(f"Invalid type for GenericWorkflow.get_final() results ({type(final)})")

        return htc_workflow
    def write(self, out_prefix):
        """Output HTCondor DAGMan files needed for workflow submission.

        Parameters
        ----------
        out_prefix : `str`
            Directory prefix for HTCondor files.
        """
        self.submit_path = out_prefix
        os.makedirs(out_prefix, exist_ok=True)

        # Write down the workflow in HTCondor format.
        self.dag.write(out_prefix, "jobs/{self.label}")
def _create_job(subdir_template, site_values, generic_workflow, gwjob, out_prefix):
    """Convert GenericWorkflow job nodes to DAG jobs.

    Parameters
    ----------
    subdir_template : `str`
        Template for making subdirs.
    site_values : `dict`
        Site specific values.
    generic_workflow : `lsst.ctrl.bps.GenericWorkflow`
        Generic workflow that is being converted.
    gwjob : `lsst.ctrl.bps.GenericWorkflowJob`
        The generic job to convert to an HTCondor job.
    out_prefix : `str`
        Directory prefix for HTCondor files.

    Returns
    -------
    htc_job : `lsst.ctrl.bps.wms.htcondor.HTCJob`
        The HTCondor job equivalent to the given generic job.
    """
    htc_job = HTCJob(gwjob.name, label=gwjob.label)

    curvals = defaultdict(str)
    curvals["label"] = gwjob.label
    if gwjob.tags:
        curvals.update(gwjob.tags)

    subdir = subdir_template.format_map(curvals)
    htc_job.subfile = Path("jobs") / subdir / f"{gwjob.name}.sub"

    htc_job_cmds = {
        "universe": "vanilla",
        "should_transfer_files": "YES",
        "when_to_transfer_output": "ON_EXIT_OR_EVICT",
        "transfer_output_files": '""',  # Set to empty string to disable.
        "transfer_executable": "False",
        "getenv": "True",
        # Exceeding memory sometimes triggers a SIGBUS or SIGSEGV error. Tell
        # HTCondor to put on hold any job which exited via a signal.
        "on_exit_hold": "ExitBySignal == true",
        "on_exit_hold_reason": 'strcat("Job raised a signal ", string(ExitSignal), ". ", '
        '"Handling signal as if job has gone over memory limit.")',
        "on_exit_hold_subcode": "34",
    }

    htc_job_cmds.update(_translate_job_cmds(site_values, generic_workflow, gwjob))

    # Job stdout, stderr, and HTCondor user log.
    for key in ("output", "error", "log"):
        htc_job_cmds[key] = htc_job.subfile.with_suffix(f".$(Cluster).{key[:3]}")
        _LOG.debug("HTCondor %s = %s", key, htc_job_cmds[key])

    htc_job_cmds.update(
        _handle_job_inputs(generic_workflow, gwjob.name, site_values["bpsUseShared"], out_prefix)
    )

    # Add the job cmds dict to the job object.
    htc_job.add_job_cmds(htc_job_cmds)

    htc_job.add_dag_cmds(_translate_dag_cmds(gwjob))

    # Add job attributes to job.
    _LOG.debug("gwjob.attrs = %s", gwjob.attrs)
    htc_job.add_job_attrs(gwjob.attrs)
    htc_job.add_job_attrs(site_values["attrs"])
    htc_job.add_job_attrs({"bps_job_quanta": create_count_summary(gwjob.quanta_counts)})
    htc_job.add_job_attrs({"bps_job_name": gwjob.name, "bps_job_label": gwjob.label})

    return htc_job
def _translate_job_cmds(cached_vals, generic_workflow, gwjob):
    """Translate the job data that have a one-to-one mapping to HTCondor
    submit commands.

    Parameters
    ----------
    cached_vals : `dict` [`str`, `Any`]
        Config values common to jobs with same label.
    generic_workflow : `lsst.ctrl.bps.GenericWorkflow`
        Generic workflow that contains the job being converted.
    gwjob : `lsst.ctrl.bps.GenericWorkflowJob`
        Generic workflow job to be converted.

    Returns
    -------
    htc_job_commands : `dict` [`str`, `Any`]
        Contains commands which can appear in the HTCondor submit description
        file.
    """
    # Values in the job script that just are name mappings.
    job_translation = {
        "mail_to": "notify_user",
        "when_to_mail": "notification",
        "request_cpus": "request_cpus",
        "priority": "priority",
        "category": "category",
        "accounting_group": "accounting_group",
        "accounting_user": "accounting_group_user",
    }

    jobcmds = {}
    for gwkey, htckey in job_translation.items():
        jobcmds[htckey] = getattr(gwjob, gwkey, None)

    # If accounting info was not set explicitly, use site settings if any.
    if not gwjob.accounting_group:
        jobcmds["accounting_group"] = cached_vals.get("accountingGroup")
    if not gwjob.accounting_user:
        jobcmds["accounting_group_user"] = cached_vals.get("accountingUser")

    # Job commands that need modification.
    if gwjob.number_of_retries:
        jobcmds["max_retries"] = f"{gwjob.number_of_retries}"

    if gwjob.retry_unless_exit:
        jobcmds["retry_until"] = f"{gwjob.retry_unless_exit}"

    if gwjob.request_disk:
        jobcmds["request_disk"] = f"{gwjob.request_disk}MB"

    if gwjob.request_memory:
        jobcmds["request_memory"] = f"{gwjob.request_memory}"

    if gwjob.memory_multiplier:
        # Do not use try-except! At the moment, BpsConfig returns an empty
        # string if it does not contain the key.
        memory_limit = cached_vals["memoryLimit"]
        if not memory_limit:
            raise RuntimeError(
                "Memory autoscaling enabled, but automatic detection of the memory limit "
                "failed; setting it explicitly with 'memoryLimit' or changing worker node "
                "search pattern 'executeMachinesPattern' might help."
            )

        # Set maximal amount of memory job can ask for.
        #
        # The check below assumes that 'memory_limit' was set to a value which
        # realistically reflects actual physical limitations of a given
        # compute resource.
        memory_max = memory_limit
        if gwjob.request_memory_max and gwjob.request_memory_max < memory_limit:
            memory_max = gwjob.request_memory_max

        # Make job ask for more memory each time it failed due to insufficient
        # memory requirements.
        jobcmds["request_memory"] = _create_request_memory_expr(
            gwjob.request_memory, gwjob.memory_multiplier, memory_max
        )

        # Periodically release jobs which are being held due to exceeding
        # memory. Stop doing that (by removing the job from the HTCondor
        # queue) after the maximal number of retries has been reached or the
        # job was already run at maximal allowed memory.
        jobcmds["periodic_release"] = _create_periodic_release_expr(
            gwjob.request_memory, gwjob.memory_multiplier, memory_max
        )
        jobcmds["periodic_remove"] = _create_periodic_remove_expr(
            gwjob.request_memory, gwjob.memory_multiplier, memory_max
        )

    # Assume concurrency_limit implemented using HTCondor concurrency limits.
    # May need to move to special site-specific implementation if sites use
    # other mechanisms.
    if gwjob.concurrency_limit:
        jobcmds["concurrency_limit"] = gwjob.concurrency_limit

    # Handle command line.
    if gwjob.executable.transfer_executable:
        jobcmds["transfer_executable"] = "True"
        jobcmds["executable"] = os.path.basename(gwjob.executable.src_uri)
    else:
        jobcmds["executable"] = _fix_env_var_syntax(gwjob.executable.src_uri)

    if gwjob.arguments:
        arguments = gwjob.arguments
        arguments = _replace_cmd_vars(arguments, gwjob)
        arguments = _replace_file_vars(cached_vals["bpsUseShared"], arguments, generic_workflow, gwjob)
        arguments = _fix_env_var_syntax(arguments)
        jobcmds["arguments"] = arguments

    # Add extra "pass-thru" job commands.
    if gwjob.profile:
        for key, val in gwjob.profile.items():
            jobcmds[key] = htc_escape(val)
    for key, val in cached_vals["profile"].items():
        jobcmds[key] = htc_escape(val)

    return jobcmds
def _translate_dag_cmds(gwjob):
    """Translate job values into DAGMan commands.

    Parameters
    ----------
    gwjob : `lsst.ctrl.bps.GenericWorkflowJob`
        Job containing values to be translated.

    Returns
    -------
    dagcmds : `dict` [`str`, `Any`]
        DAGMan commands for the job.
    """
    # Values in the dag script that just are name mappings.
    dag_translation = {"abort_on_value": "abort_dag_on", "abort_return_value": "abort_exit"}

    dagcmds = {}
    for gwkey, htckey in dag_translation.items():
        dagcmds[htckey] = getattr(gwjob, gwkey, None)

    # Still to be coded: vars "pre_cmdline", "post_cmdline"
    return dagcmds
def _fix_env_var_syntax(oldstr):
    """Change <ENV:...> placeholders to HTCondor environment variable syntax.

    Parameters
    ----------
    oldstr : `str`
        String in which environment variable syntax is to be fixed.

    Returns
    -------
    newstr : `str`
        Given string with environment variable syntax fixed.
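
    Examples
    --------
    A minimal doctest sketch showing the rewrite (the placeholder names are
    made up for illustration):

    >>> _fix_env_var_syntax("cd <ENV:HOME> && echo <ENV:USER>")
    'cd $ENV(HOME) && echo $ENV(USER)'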
779 """
780 newstr = oldstr
781 for key in re.findall(r"<ENV:([^>]+)>", oldstr):
782 newstr = newstr.replace(rf"<ENV:{key}>", f"$ENV({key})")
783 return newstr
def _replace_file_vars(use_shared, arguments, workflow, gwjob):
    """Replace file placeholders in command line arguments with correct
    physical file names.

    Parameters
    ----------
    use_shared : `bool`
        Whether HTCondor can assume shared filesystem.
    arguments : `str`
        Arguments string in which to replace file placeholders.
    workflow : `lsst.ctrl.bps.GenericWorkflow`
        Generic workflow that contains file information.
    gwjob : `lsst.ctrl.bps.GenericWorkflowJob`
        The job corresponding to the arguments.

    Returns
    -------
    arguments : `str`
        Given arguments string with file placeholders replaced.
    """
    # Replace input file placeholders with paths.
    for gwfile in workflow.get_job_inputs(gwjob.name, data=True, transfer_only=False):
        if not gwfile.wms_transfer:
            # Must assume the full URI if the file appears on the command
            # line and the WMS is not responsible for transferring it.
            uri = gwfile.src_uri
        elif use_shared:
            if gwfile.job_shared:
                # Have shared filesystems and jobs can share file.
                uri = gwfile.src_uri
            else:
                # Taking advantage of inside knowledge. Not future-proof.
                # Temporary fix until there is a job wrapper that pulls files
                # within the job.
                if gwfile.name == "butlerConfig" and Path(gwfile.src_uri).suffix != ".yaml":
                    uri = "butler.yaml"
                else:
                    uri = os.path.basename(gwfile.src_uri)
        else:  # Using push transfer
            uri = os.path.basename(gwfile.src_uri)
        arguments = arguments.replace(f"<FILE:{gwfile.name}>", uri)

    # Replace output file placeholders with paths.
    for gwfile in workflow.get_job_outputs(gwjob.name, data=True, transfer_only=False):
        if not gwfile.wms_transfer:
            # Must assume the full URI if the file appears on the command
            # line and the WMS is not responsible for transferring it.
            uri = gwfile.src_uri
        elif use_shared:
            if gwfile.job_shared:
                # Have shared filesystems and jobs can share file.
                uri = gwfile.src_uri
            else:
                uri = os.path.basename(gwfile.src_uri)
        else:  # Using push transfer
            uri = os.path.basename(gwfile.src_uri)
        arguments = arguments.replace(f"<FILE:{gwfile.name}>", uri)
    return arguments
def _replace_cmd_vars(arguments, gwjob):
    """Replace format-style placeholders in arguments.

    Parameters
    ----------
    arguments : `str`
        Arguments string in which to replace placeholders.
    gwjob : `lsst.ctrl.bps.GenericWorkflowJob`
        Job containing values to be used to replace placeholders
        (in particular gwjob.cmdvals).

    Returns
    -------
    arguments : `str`
        Given arguments string with placeholders replaced.
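
    Examples
    --------
    A minimal doctest sketch using a stand-in object for the generic workflow
    job (the placeholder name below is made up; only ``cmdvals`` is consulted
    by this function):

    >>> from types import SimpleNamespace
    >>> gwjob = SimpleNamespace(cmdvals={"butlerConfig": "repo/butler.yaml"})
    >>> _replace_cmd_vars("pipetask run -b {butlerConfig}", gwjob)
    'pipetask run -b repo/butler.yaml'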
861 """
862 try:
863 arguments = arguments.format(**gwjob.cmdvals)
864 except (KeyError, TypeError): # TypeError in case None instead of {}
865 _LOG.error(
866 "Could not replace command variables:\narguments: %s\ncmdvals: %s", arguments, gwjob.cmdvals
867 )
868 raise
869 return arguments
def _handle_job_inputs(generic_workflow: GenericWorkflow, job_name: str, use_shared: bool, out_prefix: str):
    """Add job input files from generic workflow to job.

    Parameters
    ----------
    generic_workflow : `lsst.ctrl.bps.GenericWorkflow`
        The generic workflow (e.g., has executable name and arguments).
    job_name : `str`
        Unique name for the job.
    use_shared : `bool`
        Whether job has access to files via shared filesystem.
    out_prefix : `str`
        The root directory into which all WMS-specific files are written.

    Returns
    -------
    htc_commands : `dict` [`str`, `str`]
        HTCondor commands for the job submission script.
    """
    htc_commands = {}
    inputs = []
    for gwf_file in generic_workflow.get_job_inputs(job_name, data=True, transfer_only=True):
        _LOG.debug("src_uri=%s", gwf_file.src_uri)

        uri = Path(gwf_file.src_uri)

        # Note if use_shared and job_shared, don't need to transfer file.

        if not use_shared:  # Copy file using push to job.
            inputs.append(str(uri.relative_to(out_prefix)))
        elif not gwf_file.job_shared:  # Job requires its own copy.
            # If using a shared filesystem, but the job still needs its own
            # copy, use HTCondor's curl plugin for a local copy.

            # The execution butler is represented as a directory, which the
            # curl plugin does not handle. Taking advantage of inside
            # knowledge for a temporary fix until there is a job wrapper that
            # pulls files within the job.
            if gwf_file.name == "butlerConfig":
                # The execution butler directory doesn't normally exist until
                # the submit phase so checking for suffix instead of using
                # is_dir(). If another non-yaml file exists, it would have a
                # different gwf_file.name.
                if uri.suffix == ".yaml":  # Single file, so just copy.
                    inputs.append(f"file://{uri}")
                else:
                    inputs.append(f"file://{uri / 'butler.yaml'}")
                    inputs.append(f"file://{uri / 'gen3.sqlite3'}")
            elif uri.is_dir():
                raise RuntimeError(
                    f"HTCondor plugin cannot transfer directories locally within job {gwf_file.src_uri}"
                )
            else:
                inputs.append(f"file://{uri}")

    if inputs:
        htc_commands["transfer_input_files"] = ",".join(inputs)
        _LOG.debug("transfer_input_files=%s", htc_commands["transfer_input_files"])
    return htc_commands
def _report_from_path(wms_path):
    """Gather run information from a given run directory.

    Parameters
    ----------
    wms_path : `str`
        The directory containing the submit side files (e.g., HTCondor files).

    Returns
    -------
    run_reports : `dict` [`str`, `lsst.ctrl.bps.WmsRunReport`]
        Run information for the detailed report. The key is the HTCondor id
        and the value is a collection of report information for that run.
    message : `str`
        Message to be printed with the summary report.
    """
    wms_workflow_id, jobs, message = _get_info_from_path(wms_path)
    if wms_workflow_id == MISSING_ID:
        run_reports = {}
    else:
        run_reports = _create_detailed_report_from_jobs(wms_workflow_id, jobs)
    return run_reports, message
def _report_from_id(wms_workflow_id, hist, schedds=None):
    """Gather run information using workflow id.

    Parameters
    ----------
    wms_workflow_id : `str`
        Limit to specific run based on id.
    hist : `float`
        Limit history search to this many days.
    schedds : `dict` [ `str`, `htcondor.Schedd` ], optional
        HTCondor schedulers to query for job information. If None
        (default), all queries will be run against the local scheduler only.

    Returns
    -------
    run_reports : `dict` [`str`, `lsst.ctrl.bps.WmsRunReport`]
        Run information for the detailed report. The key is the HTCondor id
        and the value is a collection of report information for that run.
    message : `str`
        Message to be printed with the summary report.
    """
    dag_constraint = 'regexp("dagman$", Cmd)'
    try:
        cluster_id = int(float(wms_workflow_id))
    except ValueError:
        dag_constraint += f' && GlobalJobId == "{wms_workflow_id}"'
    else:
        dag_constraint += f" && ClusterId == {cluster_id}"

    # With the current implementation of the condor_* functions the query will
    # always return only one match per Scheduler.
    #
    # Even in the highly unlikely situation where HTCondor history (which
    # condor_search queries too) is long enough to have jobs from before the
    # cluster ids were rolled over (and as a result there is more than one job
    # with the same cluster id) they will not show up in the results.
    schedd_dag_info = condor_search(constraint=dag_constraint, hist=hist, schedds=schedds)
    if len(schedd_dag_info) == 0:
        run_reports = {}
        message = ""
    elif len(schedd_dag_info) == 1:
        _, dag_info = schedd_dag_info.popitem()
        dag_id, dag_ad = dag_info.popitem()

        # Create a mapping between jobs and their classads. The keys will be
        # of format 'ClusterId.ProcId'.
        job_info = {dag_id: dag_ad}

        # Find jobs (nodes) belonging to that DAGMan job.
        job_constraint = f"DAGManJobId == {int(float(dag_id))}"
        schedd_job_info = condor_search(constraint=job_constraint, hist=hist, schedds=schedds)
        if schedd_job_info:
            _, node_info = schedd_job_info.popitem()
            job_info.update(node_info)

        # Collect additional pieces of information about jobs using HTCondor
        # files in the submission directory.
        _, path_jobs, message = _get_info_from_path(dag_ad["Iwd"])
        _update_jobs(job_info, path_jobs)

        run_reports = _create_detailed_report_from_jobs(dag_id, job_info)
    else:
        ids = [ad["GlobalJobId"] for dag_info in schedd_dag_info.values() for ad in dag_info.values()]
        run_reports = {}
        message = (
            f"More than one job matches id '{wms_workflow_id}', "
            f"their global ids are: {', '.join(ids)}. Rerun with one of the global ids"
        )
    return run_reports, message
def _get_info_from_path(wms_path):
    """Gather run information from a given run directory.

    Parameters
    ----------
    wms_path : `str`
        Directory containing HTCondor files.

    Returns
    -------
    wms_workflow_id : `str`
        The run id which is a DAGMan job id.
    jobs : `dict` [`str`, `dict` [`str`, `Any`]]
        Information about jobs read from files in the given directory.
        The key is the HTCondor id and the value is a dictionary of HTCondor
        keys and values.
    message : `str`
        Message to be printed with the summary report.
    """
    messages = []
    try:
        wms_workflow_id, jobs = read_dag_log(wms_path)
        _LOG.debug("_get_info_from_path: from dag log %s = %s", wms_workflow_id, jobs)
        _update_jobs(jobs, read_node_status(wms_path))
        _LOG.debug("_get_info_from_path: after node status %s = %s", wms_workflow_id, jobs)

        # Add more info for the DAGMan job.
        job = jobs[wms_workflow_id]
        job.update(read_dag_status(wms_path))

        job["total_jobs"], job["state_counts"] = _get_state_counts_from_jobs(wms_workflow_id, jobs)
        if "bps_run" not in job:
            _add_run_info(wms_path, job)

        message = htc_check_dagman_output(wms_path)
        if message:
            messages.append(message)
        _LOG.debug(
            "_get_info: id = %s, total_jobs = %s", wms_workflow_id, jobs[wms_workflow_id]["total_jobs"]
        )

        # Add extra pieces of information which cannot be found in HTCondor
        # generated files like 'GlobalJobId'.
        #
        # Do not treat absence of this file as a serious error. Neither runs
        # submitted with earlier versions of the plugin nor runs submitted
        # with the Pegasus plugin will have it at the moment. However, once
        # enough time passes and the Pegasus plugin has its own report()
        # method (instead of sneakily using HTCondor's one), the lack of that
        # file should be treated as seriously as the lack of any other file.
        try:
            job_info = read_dag_info(wms_path)
        except FileNotFoundError as exc:
            message = f"Warn: Some information may not be available: {exc}"
            messages.append(message)
        else:
            schedd_name = next(iter(job_info))
            job_ad = next(iter(job_info[schedd_name].values()))
            job.update(job_ad)
    except FileNotFoundError:
        message = f"Could not find HTCondor files in '{wms_path}'"
        _LOG.warning(message)
        messages.append(message)
        wms_workflow_id = MISSING_ID
        jobs = {}

    message = "\n".join([msg for msg in messages if msg])
    return wms_workflow_id, jobs, message
def _create_detailed_report_from_jobs(wms_workflow_id, jobs):
    """Gather run information to be used in generating summary reports.

    Parameters
    ----------
    wms_workflow_id : `str`
        The run id to create the report for.
    jobs : `dict` [`str`, `dict` [`str`, `Any`]]
        Mapping HTCondor job id to job information.

    Returns
    -------
    run_reports : `dict` [`str`, `lsst.ctrl.bps.WmsRunReport`]
        Run information for the detailed report. The key is the given HTCondor
        id and the value is a collection of report information for that run.
    """
    _LOG.debug("_create_detailed_report: id = %s, job = %s", wms_workflow_id, jobs[wms_workflow_id])
    dag_job = jobs[wms_workflow_id]
    report = WmsRunReport(
        wms_id=f"{dag_job['ClusterId']}.{dag_job['ProcId']}",
        global_wms_id=dag_job.get("GlobalJobId", "MISS"),
        path=dag_job["Iwd"],
        label=dag_job.get("bps_job_label", "MISS"),
        run=dag_job.get("bps_run", "MISS"),
        project=dag_job.get("bps_project", "MISS"),
        campaign=dag_job.get("bps_campaign", "MISS"),
        payload=dag_job.get("bps_payload", "MISS"),
        operator=_get_owner(dag_job),
        run_summary=_get_run_summary(dag_job),
        state=_htc_status_to_wms_state(dag_job),
        jobs=[],
        total_number_jobs=dag_job["total_jobs"],
        job_state_counts=dag_job["state_counts"],
    )

    for job_id, job_info in jobs.items():
        try:
            if job_info["ClusterId"] != int(float(wms_workflow_id)):
                job_report = WmsJobReport(
                    wms_id=job_id,
                    name=job_info.get("DAGNodeName", job_id),
                    label=job_info.get("bps_job_label", pegasus_name_to_label(job_info["DAGNodeName"])),
                    state=_htc_status_to_wms_state(job_info),
                )
                if job_report.label == "init":
                    job_report.label = "pipetaskInit"
                report.jobs.append(job_report)
        except KeyError as ex:
            _LOG.error("Job missing key '%s': %s", str(ex), job_info)
            raise

    run_reports = {report.wms_id: report}
    _LOG.debug("_create_detailed_report: run_reports = %s", run_reports)
    return run_reports
def _summary_report(user, hist, pass_thru, schedds=None):
    """Gather run information to be used in generating summary reports.

    Parameters
    ----------
    user : `str`
        Run lookup restricted to given user.
    hist : `float`
        How many previous days to search for run information.
    pass_thru : `str`
        Advanced users can define the HTCondor constraint to be used
        when searching queue and history.
    schedds : `dict` [ `str`, `htcondor.Schedd` ], optional
        HTCondor schedulers to query for job information. If None
        (default), the query will be run against the local scheduler only.

    Returns
    -------
    run_reports : `dict` [`str`, `lsst.ctrl.bps.WmsRunReport`]
        Run information for the summary report. The keys are HTCondor ids and
        the values are collections of report information for each run.
    message : `str`
        Message to be printed with the summary report.
    """
    # Only doing a summary report, so only look for DAGMan jobs.
    if pass_thru:
        constraint = pass_thru
    else:
        # Notes:
        # * bps_isjob == 'True' isn't getting set for DAG jobs that are
        #   manually restarted.
        # * Any job with DAGManJobID isn't a DAG job.
        constraint = 'bps_isjob == "True" && JobUniverse == 7'
        if user:
            constraint += f' && (Owner == "{user}" || bps_operator == "{user}")'

    job_info = condor_search(constraint=constraint, hist=hist, schedds=schedds)

    # Have list of DAGMan jobs, need to get run_report info.
    run_reports = {}
    for jobs in job_info.values():
        for job_id, job in jobs.items():
            total_jobs, state_counts = _get_state_counts_from_dag_job(job)
            # If the counts could not be obtained from the queue information
            # (e.g., due to the Kerberos bug), try reading them from a file.
            if total_jobs == 0:
                try:
                    job.update(read_dag_status(job["Iwd"]))
                    total_jobs, state_counts = _get_state_counts_from_dag_job(job)
                except StopIteration:
                    pass  # Don't kill the report if HTCondor files can't be found.

            if "bps_run" not in job:
                _add_run_info(job["Iwd"], job)
            report = WmsRunReport(
                wms_id=job_id,
                global_wms_id=job["GlobalJobId"],
                path=job["Iwd"],
                label=job.get("bps_job_label", "MISS"),
                run=job.get("bps_run", "MISS"),
                project=job.get("bps_project", "MISS"),
                campaign=job.get("bps_campaign", "MISS"),
                payload=job.get("bps_payload", "MISS"),
                operator=_get_owner(job),
                run_summary=_get_run_summary(job),
                state=_htc_status_to_wms_state(job),
                jobs=[],
                total_number_jobs=total_jobs,
                job_state_counts=state_counts,
            )
            run_reports[report.global_wms_id] = report

    return run_reports, ""
def _add_run_info(wms_path, job):
    """Find BPS run information elsewhere for runs without bps attributes.

    Parameters
    ----------
    wms_path : `str`
        Path to submit files for the run.
    job : `dict` [`str`, `Any`]
        HTCondor dag job information.

    Raises
    ------
    StopIteration
        If the file it is looking for cannot be found. Permission errors
        are caught and the job's run is marked with an error.
    """
    path = Path(wms_path) / "jobs"
    try:
        subfile = next(path.glob("**/*.sub"))
    except (StopIteration, PermissionError):
        job["bps_run"] = "Unavailable"
    else:
        _LOG.debug("_add_run_info: subfile = %s", subfile)
        try:
            with open(subfile, "r", encoding="utf-8") as fh:
                for line in fh:
                    if line.startswith("+bps_"):
                        m = re.match(r"\+(bps_[^\s]+)\s*=\s*(.+)$", line)
                        if m:
                            _LOG.debug("Matching line: %s", line)
                            job[m.group(1)] = m.group(2).replace('"', "")
                        else:
                            _LOG.debug("Could not parse attribute: %s", line)
        except PermissionError:
            job["bps_run"] = "PermissionError"
    _LOG.debug("After adding job = %s", job)
def _get_owner(job):
    """Get the owner of a dag job.

    Parameters
    ----------
    job : `dict` [`str`, `Any`]
        HTCondor dag job information.

    Returns
    -------
    owner : `str`
        Owner of the dag job.
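
    Examples
    --------
    A minimal doctest sketch with hand-built classad dictionaries (the owner
    name is made up; 'bps_operator' takes precedence over 'Owner'):

    >>> _get_owner({"bps_operator": "jdoe"})
    'jdoe'
    >>> _get_owner({"Owner": "jdoe"})
    'jdoe'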
1277 """
1278 owner = job.get("bps_operator", None)
1279 if not owner:
1280 owner = job.get("Owner", None)
1281 if not owner:
1282 _LOG.warning("Could not get Owner from htcondor job: %s", job)
1283 owner = "MISS"
1284 return owner
def _get_run_summary(job):
    """Get the run summary for a job.

    Parameters
    ----------
    job : `dict` [`str`, `Any`]
        HTCondor dag job information.

    Returns
    -------
    summary : `str`
        Number of jobs per PipelineTask label in approximate pipeline order.
        Format: <label>:<count>[;<label>:<count>]+
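
    Examples
    --------
    A minimal doctest sketch with a hand-built classad dictionary (the labels
    are made up; note the 'init' workaround applied below):

    >>> _get_run_summary({"bps_job_summary": "init:1;measure:10"})
    'pipetaskInit:1;measure:10'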
1300 """
1301 summary = job.get("bps_job_summary", job.get("bps_run_summary", None))
1302 if not summary:
1303 summary, _ = summary_from_dag(job["Iwd"])
1304 if not summary:
1305 _LOG.warning("Could not get run summary for htcondor job: %s", job)
1306 _LOG.debug("_get_run_summary: summary=%s", summary)
1308 # Workaround sometimes using init vs pipetaskInit
1309 summary = summary.replace("init:", "pipetaskInit:")
1311 if "pegasus_version" in job and "pegasus" not in summary:
1312 summary += ";pegasus:0"
1314 return summary
def _get_state_counts_from_jobs(wms_workflow_id, jobs):
    """Count number of jobs per WMS state.

    Parameters
    ----------
    wms_workflow_id : `str`
        HTCondor job id.
    jobs : `dict` [`str`, `Any`]
        HTCondor dag job information.

    Returns
    -------
    total_count : `int`
        Total number of dag nodes.
    state_counts : `dict` [`lsst.ctrl.bps.WmsStates`, `int`]
        Keys are the different WMS states and values are counts of jobs
        that are in that WMS state.
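
    Examples
    --------
    A minimal doctest sketch with hand-built classad dictionaries (the ids
    and attribute values are made up; '9999.0' plays the DAGMan job):

    >>> jobs = {
    ...     "9999.0": {"NodesTotal": 3},
    ...     "1.0": {"ClusterId": 1, "JobStatus": 4, "ExitCode": 0},
    ...     "2.0": {"ClusterId": 2, "JobStatus": 2},
    ... }
    >>> total, counts = _get_state_counts_from_jobs("9999.0", jobs)
    >>> total, counts[WmsStates.SUCCEEDED], counts[WmsStates.RUNNING]
    (3, 1, 1)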
1334 """
1335 state_counts = dict.fromkeys(WmsStates, 0)
1337 for jid, jinfo in jobs.items():
1338 if jid != wms_workflow_id:
1339 state_counts[_htc_status_to_wms_state(jinfo)] += 1
1341 total_counted = sum(state_counts.values())
1342 if "NodesTotal" in jobs[wms_workflow_id]:
1343 total_count = jobs[wms_workflow_id]["NodesTotal"]
1344 else:
1345 total_count = total_counted
1347 state_counts[WmsStates.UNREADY] += total_count - total_counted
1349 return total_count, state_counts
def _get_state_counts_from_dag_job(job):
    """Count number of jobs per WMS state.

    Parameters
    ----------
    job : `dict` [`str`, `Any`]
        HTCondor dag job information.

    Returns
    -------
    total_count : `int`
        Total number of dag nodes.
    state_counts : `dict` [`lsst.ctrl.bps.WmsStates`, `int`]
        Keys are the different WMS states and values are counts of jobs
        that are in that WMS state.
    """
    _LOG.debug("_get_state_counts_from_dag_job: job = %s %s", type(job), len(job))
    state_counts = dict.fromkeys(WmsStates, 0)
    if "DAG_NodesReady" in job:
        state_counts = {
            WmsStates.UNREADY: job.get("DAG_NodesUnready", 0),
            WmsStates.READY: job.get("DAG_NodesReady", 0),
            WmsStates.HELD: job.get("JobProcsHeld", 0),
            WmsStates.SUCCEEDED: job.get("DAG_NodesDone", 0),
            WmsStates.FAILED: job.get("DAG_NodesFailed", 0),
            WmsStates.MISFIT: job.get("DAG_NodesPre", 0) + job.get("DAG_NodesPost", 0),
        }
        total_jobs = job.get("DAG_NodesTotal")
        _LOG.debug("_get_state_counts_from_dag_job: from DAG_* keys, total_jobs = %s", total_jobs)
    elif "NodesFailed" in job:
        state_counts = {
            WmsStates.UNREADY: job.get("NodesUnready", 0),
            WmsStates.READY: job.get("NodesReady", 0),
            WmsStates.HELD: job.get("JobProcsHeld", 0),
            WmsStates.SUCCEEDED: job.get("NodesDone", 0),
            WmsStates.FAILED: job.get("NodesFailed", 0),
            WmsStates.MISFIT: job.get("NodesPre", 0) + job.get("NodesPost", 0),
        }
        try:
            total_jobs = job.get("NodesTotal")
        except KeyError as ex:
            _LOG.error("Job missing %s. job = %s", str(ex), job)
            raise
        _LOG.debug("_get_state_counts_from_dag_job: from NODES* keys, total_jobs = %s", total_jobs)
    else:
        # With Kerberos job auth and the Kerberos bug, a warning here would
        # be printed for every DAG.
        _LOG.debug("Can't get job state counts %s", job["Iwd"])
        total_jobs = 0

    _LOG.debug("total_jobs = %s, state_counts: %s", total_jobs, state_counts)
    return total_jobs, state_counts
def _htc_status_to_wms_state(job):
    """Convert HTCondor job status to generic wms state.

    Parameters
    ----------
    job : `dict` [`str`, `Any`]
        HTCondor job information.

    Returns
    -------
    wms_state : `lsst.ctrl.bps.WmsStates`
        The equivalent WmsState to given job's status.
    """
    wms_state = WmsStates.MISFIT
    if "JobStatus" in job:
        wms_state = _htc_job_status_to_wms_state(job)
    elif "NodeStatus" in job:
        wms_state = _htc_node_status_to_wms_state(job)
    return wms_state
def _htc_job_status_to_wms_state(job):
    """Convert HTCondor job status to generic wms state.

    Parameters
    ----------
    job : `dict` [`str`, `Any`]
        HTCondor job information.

    Returns
    -------
    wms_state : `lsst.ctrl.bps.WmsStates`
        The equivalent WmsState to given job's status.
    """
    _LOG.debug(
        "htc_job_status_to_wms_state: %s=%s, %s", job["ClusterId"], job["JobStatus"], type(job["JobStatus"])
    )
    job_status = int(job["JobStatus"])
    wms_state = WmsStates.MISFIT

    _LOG.debug("htc_job_status_to_wms_state: job_status = %s", job_status)
    if job_status == JobStatus.IDLE:
        wms_state = WmsStates.PENDING
    elif job_status == JobStatus.RUNNING:
        wms_state = WmsStates.RUNNING
    elif job_status == JobStatus.REMOVED:
        wms_state = WmsStates.DELETED
    elif job_status == JobStatus.COMPLETED:
        if (
            job.get("ExitBySignal", False)
            or job.get("ExitCode", 0)
            or job.get("ExitSignal", 0)
            or job.get("DAG_Status", 0)
            or job.get("ReturnValue", 0)
        ):
            wms_state = WmsStates.FAILED
        else:
            wms_state = WmsStates.SUCCEEDED
    elif job_status == JobStatus.HELD:
        wms_state = WmsStates.HELD

    return wms_state
def _htc_node_status_to_wms_state(job):
    """Convert HTCondor node status to generic wms state.

    Parameters
    ----------
    job : `dict` [`str`, `Any`]
        HTCondor job information.

    Returns
    -------
    wms_state : `lsst.ctrl.bps.WmsStates`
        The equivalent WmsState to given node's status.
    """
    wms_state = WmsStates.MISFIT

    status = job["NodeStatus"]
    if status == NodeStatus.NOT_READY:
        wms_state = WmsStates.UNREADY
    elif status == NodeStatus.READY:
        wms_state = WmsStates.READY
    elif status == NodeStatus.PRERUN:
        wms_state = WmsStates.MISFIT
    elif status == NodeStatus.SUBMITTED:
        if job["JobProcsHeld"]:
            wms_state = WmsStates.HELD
        elif job["StatusDetails"] == "not_idle":
            wms_state = WmsStates.RUNNING
        elif job["JobProcsQueued"]:
            wms_state = WmsStates.PENDING
    elif status == NodeStatus.POSTRUN:
        wms_state = WmsStates.MISFIT
    elif status == NodeStatus.DONE:
        wms_state = WmsStates.SUCCEEDED
    elif status == NodeStatus.ERROR:
        # Use the job's exit status instead of the post script's exit status.
        if "DAGMAN error 0" in job["StatusDetails"]:
            wms_state = WmsStates.SUCCEEDED
        else:
            wms_state = WmsStates.FAILED

    return wms_state
def _update_jobs(jobs1, jobs2):
    """Update jobs1 with info in jobs2.

    (Basically an update for nested dictionaries.)

    Parameters
    ----------
    jobs1 : `dict` [`str`, `dict` [`str`, `Any`]]
        HTCondor job information to be updated.
    jobs2 : `dict` [`str`, `dict` [`str`, `Any`]]
        Additional HTCondor job information.
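
    Examples
    --------
    A minimal doctest sketch with made-up job ids and attributes:

    >>> jobs1 = {"1.0": {"JobStatus": 1}}
    >>> jobs2 = {"1.0": {"JobStatus": 2}, "2.0": {"NodeStatus": 3}}
    >>> _update_jobs(jobs1, jobs2)
    >>> jobs1 == {"1.0": {"JobStatus": 2}, "2.0": {"NodeStatus": 3}}
    True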
1524 """
1525 for jid, jinfo in jobs2.items():
1526 if jid in jobs1:
1527 jobs1[jid].update(jinfo)
1528 else:
1529 jobs1[jid] = jinfo
def _wms_id_type(wms_id):
    """Determine the type of the WMS id.

    Parameters
    ----------
    wms_id : `str`
        WMS id identifying a job.

    Returns
    -------
    id_type : `lsst.ctrl.bps.htcondor.WmsIdType`
        Type of WMS id.
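
    Examples
    --------
    A minimal doctest sketch (the global id below is made up, and the second
    check assumes no file by that name exists in the current directory):

    >>> _wms_id_type("1234.0") == WmsIdType.LOCAL
    True
    >>> _wms_id_type("sched1#1234.0#157") == WmsIdType.GLOBAL
    True
    >>> _wms_id_type(None) == WmsIdType.UNKNOWN
    True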
1544 """
1545 try:
1546 int(float(wms_id))
1547 except ValueError:
1548 wms_path = Path(wms_id)
1549 if wms_path.exists():
1550 id_type = WmsIdType.PATH
1551 else:
1552 id_type = WmsIdType.GLOBAL
1553 except TypeError:
1554 id_type = WmsIdType.UNKNOWN
1555 else:
1556 id_type = WmsIdType.LOCAL
1557 return id_type
def _wms_id_to_cluster(wms_id):
    """Convert WMS id to cluster id.

    Parameters
    ----------
    wms_id : `int` or `float` or `str`
        HTCondor job id or path.

    Returns
    -------
    schedd_ad : `classad.ClassAd`
        ClassAd describing the scheduler managing the job with the given id.
    cluster_id : `int`
        HTCondor cluster id.
    id_type : `lsst.ctrl.bps.htcondor.WmsIdType`
        The type of the provided id.
    """
    coll = htcondor.Collector()

    schedd_ad = None
    cluster_id = None
    id_type = _wms_id_type(wms_id)
    if id_type == WmsIdType.LOCAL:
        schedd_ad = coll.locate(htcondor.DaemonTypes.Schedd)
        cluster_id = int(float(wms_id))
    elif id_type == WmsIdType.GLOBAL:
        constraint = f'GlobalJobId == "{wms_id}"'
        schedd_ads = {ad["Name"]: ad for ad in coll.locateAll(htcondor.DaemonTypes.Schedd)}
        schedds = [htcondor.Schedd(ad) for ad in schedd_ads.values()]
        queries = [schedd.xquery(requirements=constraint, projection=["ClusterId"]) for schedd in schedds]
        results = {
            query.tag(): dict(ads[0])
            for query in htcondor.poll(queries)
            if (ads := query.nextAdsNonBlocking())
        }
        if results:
            schedd_name = next(iter(results))
            schedd_ad = schedd_ads[schedd_name]
            cluster_id = results[schedd_name]["ClusterId"]
    elif id_type == WmsIdType.PATH:
        try:
            job_info = read_dag_info(wms_id)
        except (FileNotFoundError, PermissionError, IOError):
            pass
        else:
            schedd_name = next(iter(job_info))
            job_id = next(iter(job_info[schedd_name]))
            schedd_ad = coll.locate(htcondor.DaemonTypes.Schedd, schedd_name)
            cluster_id = int(float(job_id))
    else:
        pass
    return schedd_ad, cluster_id, id_type
def _create_periodic_release_expr(memory, multiplier, limit):
    """Construct an HTCondor ClassAd expression for releasing held jobs.

    The expression instructs HTCondor to release any job which was put on
    hold due to exceeding memory requirements back to the job queue provided
    it satisfies all of the conditions below:

    * number of run attempts did not reach allowable number of retries,
    * the memory requirements in the last failed run attempt did not reach
      the specified memory limit.

    Parameters
    ----------
    memory : `int`
        Requested memory in MB.
    multiplier : `float`
        Memory growth rate between retries.
    limit : `int`
        Memory limit.

    Returns
    -------
    expr : `str`
        A string representing an HTCondor ClassAd expression for releasing
        jobs which have been held due to exceeding the memory requirements.
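
    Examples
    --------
    A sketch of the expression generated for a job requesting 2048 MB with a
    memory multiplier of 2.0 and an 8192 MB limit (the numbers are made up;
    the expected output is wrapped here for readability):

    >>> print(_create_periodic_release_expr(2048, 2.0, 8192))  # doctest: +NORMALIZE_WHITESPACE
    JobStatus == 5 && (HoldReasonCode =?= 34 && HoldReasonSubCode =?= 0
    || HoldReasonCode =?= 3 && HoldReasonSubCode =?= 34)
    && NumJobStarts <= JobMaxRetries
    && min({int(2048 * pow(2.0, NumJobStarts - 1)), 8192}) < 8192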
1639 """
1640 is_retry_allowed = "NumJobStarts <= JobMaxRetries"
1641 was_below_limit = f"min({{int({memory} * pow({multiplier}, NumJobStarts - 1)), {limit}}}) < {limit}"
1643 # Job ClassAds attributes 'HoldReasonCode' and 'HoldReasonSubCode' are
1644 # UNDEFINED if job is not HELD (i.e. when 'JobStatus' is not 5).
1645 # The special comparison operators ensure that all comparisons below will
1646 # evaluate to FALSE in this case.
1647 #
1648 # Note:
1649 # May not be strictly necessary. Operators '&&' and '||' are not strict so
1650 # the entire expression should evaluate to FALSE when the job is not HELD.
1651 # According to ClassAd evaluation semantics FALSE && UNDEFINED is FALSE,
1652 # but better safe than sorry.
1653 was_mem_exceeded = (
1654 "JobStatus == 5 "
1655 "&& (HoldReasonCode =?= 34 && HoldReasonSubCode =?= 0 "
1656 "|| HoldReasonCode =?= 3 && HoldReasonSubCode =?= 34)"
1657 )
1659 expr = f"{was_mem_exceeded} && {is_retry_allowed} && {was_below_limit}"
1660 return expr
def _create_periodic_remove_expr(memory, multiplier, limit):
    """Construct an HTCondor ClassAd expression for removing jobs from the
    queue.

    The expression instructs HTCondor to remove from the job queue any job
    which was put on hold due to exceeding memory requirements provided it
    satisfies any of the conditions below:

    * allowable number of retries was reached,
    * the memory requirements during the last failed run attempt reached
      the specified memory limit.

    Parameters
    ----------
    memory : `int`
        Requested memory in MB.
    multiplier : `float`
        Memory growth rate between retries.
    limit : `int`
        Memory limit.

    Returns
    -------
    expr : `str`
        A string representing an HTCondor ClassAd expression for removing
        jobs which were run at the maximal allowable memory and still
        exceeded the memory requirements.
    """
    is_retry_disallowed = "NumJobStarts > JobMaxRetries"
    was_limit_reached = f"min({{int({memory} * pow({multiplier}, NumJobStarts - 1)), {limit}}}) == {limit}"

    # Job ClassAd attributes 'HoldReasonCode' and 'HoldReasonSubCode' are
    # UNDEFINED if job is not HELD (i.e. when 'JobStatus' is not 5).
    # The special comparison operators ensure that all comparisons below will
    # evaluate to FALSE in this case.
    #
    # Note:
    # May not be strictly necessary. Operators '&&' and '||' are not strict so
    # the entire expression should evaluate to FALSE when the job is not HELD.
    # According to ClassAd evaluation semantics FALSE && UNDEFINED is FALSE,
    # but better safe than sorry.
    was_mem_exceeded = (
        "JobStatus == 5 "
        "&& (HoldReasonCode =?= 34 && HoldReasonSubCode =?= 0 "
        "|| HoldReasonCode =?= 3 && HoldReasonSubCode =?= 34)"
    )

    expr = f"{was_mem_exceeded} && ({is_retry_disallowed} || {was_limit_reached})"
    return expr
def _create_request_memory_expr(memory, multiplier, limit):
    """Construct an HTCondor ClassAd expression for safe memory scaling.

    Parameters
    ----------
    memory : `int`
        Requested memory in MB.
    multiplier : `float`
        Memory growth rate between retries.
    limit : `int`
        Memory limit.

    Returns
    -------
    expr : `str`
        A string representing an HTCondor ClassAd expression enabling safe
        memory scaling between job retries.
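
    Examples
    --------
    A sketch of the expression generated for a job requesting 2048 MB with a
    memory multiplier of 2.0 and an 8192 MB limit (the numbers are made up;
    the expected output is wrapped here for readability):

    >>> print(_create_request_memory_expr(2048, 2.0, 8192))  # doctest: +NORMALIZE_WHITESPACE
    (LastJobStatus =?= 5 && (LastHoldReasonCode =?= 34 && LastHoldReasonSubCode =?= 0
    || LastHoldReasonCode =?= 3 && LastHoldReasonSubCode =?= 34))
    ? min({int(2048 * pow(2.0, NumJobStarts)), 8192})
    : max({2048, MemoryUsage ?: 0})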
1730 """
1731 # The check if the job was held due to exceeding memory requirements
1732 # will be made *after* job was released back to the job queue (is in
1733 # the IDLE state), hence the need to use `Last*` job ClassAds instead of
1734 # the ones describing job's current state.
1735 #
1736 # Also, 'Last*' job ClassAds attributes are UNDEFINED when a job is
1737 # initially put in the job queue. The special comparison operators ensure
1738 # that all comparisons below will evaluate to FALSE in this case.
1739 was_mem_exceeded = (
1740 "LastJobStatus =?= 5 "
1741 "&& (LastHoldReasonCode =?= 34 && LastHoldReasonSubCode =?= 0 "
1742 "|| LastHoldReasonCode =?= 3 && LastHoldReasonSubCode =?= 34)"
1743 )
1745 # If job runs the first time or was held for reasons other than exceeding
1746 # the memory, set the required memory to the requested value or use
1747 # the memory value measured by HTCondor (MemoryUsage) depending on
1748 # whichever is greater.
1749 expr = (
1750 f"({was_mem_exceeded}) "
1751 f"? min({{int({memory} * pow({multiplier}, NumJobStarts)), {limit}}}) "
1752 f": max({{{memory}, MemoryUsage ?: 0}})"
1753 )
1754 return expr
def _locate_schedds(locate_all=False):
    """Locate scheduler daemons in an HTCondor pool.

    Parameters
    ----------
    locate_all : `bool`, optional
        If True, all available schedulers in the HTCondor pool will be
        located. False by default which means that the search will be limited
        to looking for the scheduler running on the local host.

    Returns
    -------
    schedds : `dict` [`str`, `htcondor.Schedd`]
        A mapping between scheduler names and Python objects allowing for
        interacting with them.
    """
    coll = htcondor.Collector()

    schedd_ads = []
    if locate_all:
        schedd_ads.extend(coll.locateAll(htcondor.DaemonTypes.Schedd))
    else:
        schedd_ads.append(coll.locate(htcondor.DaemonTypes.Schedd))
    return {ad["Name"]: htcondor.Schedd(ad) for ad in schedd_ads}
def _gather_site_values(config, compute_site):
    """Gather values specific to given site.

    Parameters
    ----------
    config : `lsst.ctrl.bps.BpsConfig`
        BPS configuration that includes necessary submit/runtime
        information.
    compute_site : `str`
        Compute site name.

    Returns
    -------
    site_values : `dict` [`str`, `Any`]
        Values specific to the given site.
    """
    site_values = {"attrs": {}, "profile": {}}
    search_opts = {}
    if compute_site:
        search_opts["curvals"] = {"curr_site": compute_site}

    # Determine the hard limit for the memory requirement.
    found, limit = config.search("memoryLimit", opt=search_opts)
    if not found:
        search_opts["default"] = DEFAULT_HTC_EXEC_PATT
        _, patt = config.search("executeMachinesPattern", opt=search_opts)
        del search_opts["default"]

        # To reduce the amount of data, ignore dynamic slots (if any) as,
        # by definition, they cannot have more memory than
        # the partitionable slot they are part of.
        constraint = f'SlotType != "Dynamic" && regexp("{patt}", Machine)'
        pool_info = condor_status(constraint=constraint)
        try:
            limit = max(int(info["TotalSlotMemory"]) for info in pool_info.values())
        except ValueError:
            _LOG.debug("No execute machine in the pool matches %s", patt)
    if limit:
        config[".bps_defined.memory_limit"] = limit

    _, site_values["bpsUseShared"] = config.search("bpsUseShared", opt={"default": False})
    site_values["memoryLimit"] = limit

    found, value = config.search("accountingGroup", opt=search_opts)
    if found:
        site_values["accountingGroup"] = value
    found, value = config.search("accountingUser", opt=search_opts)
    if found:
        site_values["accountingUser"] = value

    key = f".site.{compute_site}.profile.condor"
    if key in config:
        for key, val in config[key].items():
            if key.startswith("+"):
                site_values["attrs"][key[1:]] = val
            else:
                site_values["profile"][key] = val

    return site_values