lsst.ctrl.pool  16.0+10
parallel.py
#!/usr/bin/env python

from __future__ import print_function
from builtins import object

import re
import os
import os.path
import stat
import sys
import pipes
import tempfile
import argparse
import traceback
import contextlib
from lsst.pipe.base import CmdLineTask, TaskRunner
from .pool import startPool, Pool, NODE, abortOnError, setBatchType
from . import log  # register pickle functions for log

__all__ = ["Batch", "PbsBatch", "SlurmBatch", "SmpBatch", "BATCH_TYPES", "BatchArgumentParser",
           "BatchCmdLineTask", "BatchPoolTask", ]

UMASK = 0o002  # umask to set

# Functions to convert a list of arguments to a quoted shell command, provided by Dave Abrahams
# http://stackoverflow.com/questions/967443/python-module-to-shellquote-unshellquote
_quote_pos = re.compile('(?=[^-0-9a-zA-Z_./\n])')


def shQuote(arg):
    r"""Quote the argument for the shell.

    >>> shQuote('\t')
    '\\\t'
    >>> shQuote('foo bar')
    'foo\\ bar'
    """
    # This is the logic emacs uses
    if arg:
        return _quote_pos.sub('\\\\', arg).replace('\n', "'\n'")
    else:
        return "''"


def shCommandFromArgs(args):
    """Convert a list of shell arguments to a shell command-line"""
    return ' '.join([shQuote(a) for a in args])

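# Illustrative behaviour of shQuote/shCommandFromArgs (values made up): arguments
# containing shell metacharacters come back escaped, so the joined command can be
# pasted into bash unchanged, e.g.
#
#     shCommandFromArgs(["ls", "my file.txt"])   # -> 'ls my\ file.txt'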

def processStats():
    """Collect Linux-specific process statistics

    Parses the /proc/self/status file (N.B. Linux-specific!) into a dict
    which is returned.
    """
    result = {}
    with open("/proc/self/status") as f:
        for line in f:
            key, _, value = line.partition(":")
            result[key] = value.strip()
    return result


def printProcessStats():
    """Print the process statistics to the log"""
    from lsst.log import Log
    log = Log.getDefaultLogger()
    log.info("Process stats for %s: %s" % (NODE, processStats()))

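# Example use of processStats (illustrative; Linux only): the dict keys are the
# field names from /proc/self/status, e.g. peak memory use can be read as
#
#     processStats().get("VmPeak", "unknown")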

class Batch(object):
    """Base class for batch submission"""

    def __init__(self, outputDir=None, numNodes=0, numProcsPerNode=0, numCores=0, queue=None, jobName=None,
                 walltime=0.0, dryrun=False, doExec=False, mpiexec="", submit=None, options=None,
                 verbose=False):
        """!Constructor

        @param outputDir: output directory, or None
        @param numNodes: number of nodes
        @param numProcsPerNode: number of processors per node
        @param numCores: number of cores (Slurm, SMP only)
        @param queue: name of queue, or None
        @param jobName: name of job, or None
        @param walltime: maximum wall clock time for job
        @param dryrun: Dry run (only print actions that would be taken)?
        @param doExec: exec the script instead of submitting to batch system?
        @param mpiexec: options for mpiexec
        @param submit: command-line options for batch submission (e.g., for qsub, sbatch)
        @param options: options to append to script header (e.g., #PBS or #SBATCH)
        @param verbose: produce verbose output?
        """
        if (numNodes <= 0 or numProcsPerNode <= 0) and numCores <= 0:
            raise RuntimeError("Must specify numNodes+numProcs or numCores")

        self.outputDir = outputDir
        self.numNodes = numNodes
        self.numProcsPerNode = numProcsPerNode
        self.numCores = numCores
        self.queue = queue
        self.jobName = jobName
        self.walltime = walltime
        self.dryrun = dryrun
        self.doExec = doExec
        self.mpiexec = mpiexec
        self.submit = submit
        self.options = options
        self.verbose = verbose

    def shebang(self):
        return "#!/bin/bash"

    def preamble(self, walltime=None):
        """Return preamble string for script to be submitted

        Most batch systems allow you to embed submission options as comments here.
        """
        raise NotImplementedError("Not implemented for base class")

    def execution(self, command):
        """Return execution string for script to be submitted"""
        script = [exportEnv(),
                  "umask %03o" % UMASK,
                  "cd %s" % pipes.quote(os.getcwd()),
                  ]
        if self.verbose:
            script += ["echo \"mpiexec is at: $(which mpiexec)\"",
                       "ulimit -a",
                       "echo 'umask: ' $(umask)",
                       "eups list -s",
                       "export",
                       "date",
                       ]
        script += ["mpiexec %s %s" % (self.mpiexec, command)]
        if self.verbose:
            script += ["date",
                       "echo Done.",
                       ]
        return "\n".join(script)

    def createScript(self, command, walltime=None):
        """!Create script to be submitted

        @param command: command to run
        @param walltime: maximum wall clock time, overrides value to constructor
        @return name of script on filesystem
        """
        fd, scriptName = tempfile.mkstemp()
        with os.fdopen(fd, "w") as f:
            f.write(self.shebang())
            f.write('\n')
            f.write(self.preamble(walltime))
            f.write('\n')
            f.write(self.execution(command))
            f.write('\n')

        os.chmod(scriptName, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
        return scriptName

    def submitCommand(self, scriptName):
        """!Return command to submit script

        @param scriptName: name of script on filesystem
        """
        raise NotImplementedError("No implementation for base class")

    def run(self, command, walltime=None):
        """!Run the batch system

        Creates and submits the script to execute the provided command

        @param command: command to run
        @param walltime: maximum wall clock time, overrides value to constructor
        @return name of script on filesystem
        """
        scriptName = self.createScript(command, walltime=walltime)
        command = self.submitCommand(scriptName)
        if self.dryrun:
            print("Would run: %s" % command)
        elif self.doExec:
            os.execl(scriptName, scriptName)
        else:
            os.system(command)
        return scriptName


class PbsBatch(Batch):
    """Batch submission with PBS"""

    def preamble(self, walltime=None):
        if walltime is None:
            walltime = self.walltime
        if walltime <= 0:
            raise RuntimeError("Non-positive walltime: %s (did you forget '--time'?)" % (walltime,))
        if self.numNodes <= 0 or self.numProcsPerNode <= 0:
            raise RuntimeError(
                "Number of nodes (--nodes=%d) or number of processors per node (--procs=%d) not set" %
                (self.numNodes, self.numProcsPerNode))
        if self.numCores > 0:
            raise RuntimeError("PBS does not support setting the number of cores")
        return "\n".join([
            "#PBS %s" % self.options if self.options is not None else "",
            "#PBS -l nodes=%d:ppn=%d" % (self.numNodes, self.numProcsPerNode),
            "#PBS -l walltime=%d" % walltime if walltime is not None else "",
            "#PBS -o %s" % self.outputDir if self.outputDir is not None else "",
            "#PBS -N %s" % self.jobName if self.jobName is not None else "",
            "#PBS -q %s" % self.queue if self.queue is not None else "",
            "#PBS -j oe",
            "#PBS -W umask=%03o" % UMASK,
        ])

    def submitCommand(self, scriptName):
        return "qsub %s -V %s" % (self.submit if self.submit is not None else "", scriptName)


class SlurmBatch(Batch):
    """Batch submission with Slurm"""

    @staticmethod
    def formatWalltime(walltime):
        """Format walltime (in seconds) as days-hours:minutes"""
        secInDay = 3600*24
        secInHour = 3600
        secInMinute = 60
        days = walltime//secInDay
        walltime -= days*secInDay
        hours = walltime//secInHour
        walltime -= hours*secInHour
        minutes = walltime//secInMinute
        walltime -= minutes*secInMinute
        if walltime > 0:
            minutes += 1
        return "%d-%d:%d" % (days, hours, minutes)

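    # Worked example for formatWalltime (illustrative): 3661 s is 0 days, 1 hour
    # and 61 s left over; the residual seconds round the minutes up, so
    #
    #     SlurmBatch.formatWalltime(3661)   # -> "0-1:2"
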
    def preamble(self, walltime=None):
        if walltime is None:
            walltime = self.walltime
        if walltime <= 0:
            raise RuntimeError("Non-positive walltime: %s (did you forget '--time'?)" % (walltime,))
        if (self.numNodes <= 0 or self.numProcsPerNode <= 0) and self.numCores <= 0:
            raise RuntimeError(
                "Number of nodes (--nodes=%d) and number of processors per node (--procs=%d) not set OR "
                "number of cores (--cores=%d) not set" % (self.numNodes, self.numProcsPerNode, self.numCores))
        if self.numCores > 0 and (self.numNodes > 0 or self.numProcsPerNode > 0):
            raise RuntimeError("Must set either --nodes,--procs or --cores: not both")

        outputDir = self.outputDir if self.outputDir is not None else os.getcwd()
        filename = os.path.join(outputDir, (self.jobName if self.jobName is not None else "slurm") + ".o%j")
        return "\n".join([("#SBATCH --nodes=%d" % self.numNodes) if self.numNodes > 0 else "",
                          ("#SBATCH --ntasks-per-node=%d" % self.numProcsPerNode) if
                          self.numProcsPerNode > 0 else "",
                          ("#SBATCH --ntasks=%d" % self.numCores) if self.numCores > 0 else "",
                          "#SBATCH --time=%s" % self.formatWalltime(walltime),
                          "#SBATCH --job-name=%s" % self.jobName if self.jobName is not None else "",
                          "#SBATCH -p %s" % self.queue if self.queue is not None else "",
                          "#SBATCH --output=%s" % filename,
                          "#SBATCH --error=%s" % filename,
                          "#SBATCH %s" % self.options if self.options is not None else "",
                          ])

    def submitCommand(self, scriptName):
        return "sbatch %s %s" % (self.submit if self.submit is not None else "", scriptName)


class SmpBatch(Batch):
    """Not-really-Batch submission with multiple cores on the current node

    The job is run immediately.
    """

    def __init__(self, *args, **kwargs):
        super(SmpBatch, self).__init__(*args, **kwargs)
        if self.numNodes in (0, 1) and self.numProcsPerNode > 0 and self.numCores == 0:
            # --nodes=1 --procs=NN being used as a synonym for --cores=NN
            self.numNodes = 0
            self.numCores = self.numProcsPerNode
            self.numProcsPerNode = 0
        if self.numNodes > 0 or self.numProcsPerNode > 0:
            raise RuntimeError("SMP does not support the --nodes and --procs command-line options; "
                               "use --cores to specify the number of cores to use")
        if self.numCores > 1:
            self.mpiexec = "%s -n %d" % (self.mpiexec if self.mpiexec is not None else "", self.numCores)
        else:
            self.mpiexec = ""

    def preamble(self, walltime=None):
        return ""

    def submitCommand(self, scriptName):
        return "exec %s" % scriptName


BATCH_TYPES = {'none': None,
               'None': None,
               'pbs': PbsBatch,
               'slurm': SlurmBatch,
               'smp': SmpBatch,
               }  # Mapping batch type --> Batch class

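# Illustrative sketch (not executed): look up a Batch class by name and submit a
# command with it.  The keys of BATCH_TYPES are exactly the --batch-type choices
# offered by BatchArgumentParser below; the command string here is made up.
#
#     batch = BATCH_TYPES["slurm"](numNodes=2, numProcsPerNode=12, queue="normal",
#                                  jobName="demo", walltime=3600, dryrun=True)
#     batch.run("echo hello")   # dryrun=True only prints the sbatch command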

class BatchArgumentParser(argparse.ArgumentParser):
    """An argument parser to get relevant parameters for batch submission

    We want to be able to display the help for a 'parent' ArgumentParser
    along with the batch-specific options we introduce in this class, but
    we don't want to swallow the parent (i.e., ArgumentParser(parents=[parent]))
    because we want to save the list of arguments that this particular
    BatchArgumentParser doesn't parse, so they can be passed on to a different
    program (though we also want to parse them to check that they can be parsed).
    """

    def __init__(self, parent=None, *args, **kwargs):
        super(BatchArgumentParser, self).__init__(*args, **kwargs)
        self._parent = parent
        group = self.add_argument_group("Batch submission options")
        group.add_argument("--queue", help="Queue name")
        group.add_argument("--job", help="Job name")
        group.add_argument("--nodes", type=int, default=0, help="Number of nodes")
        group.add_argument("--procs", type=int, default=0, help="Number of processors per node")
        group.add_argument("--cores", type=int, default=0, help="Number of cores (Slurm/SMP only)")
        group.add_argument("--time", type=float, default=0,
                           help="Expected execution time per element (sec)")
        group.add_argument("--batch-type", dest="batchType", choices=list(BATCH_TYPES.keys()), default="smp",
                           help="Batch system to use")
        group.add_argument("--batch-verbose", dest="batchVerbose", action="store_true", default=False,
                           help=("Enable verbose output in batch script "
                                 "(including system environment information at batch start)?"))
        group.add_argument("--batch-output", dest="batchOutput", help="Output directory")
        group.add_argument("--batch-submit", dest="batchSubmit", help="Batch submission command-line flags")
        group.add_argument("--batch-options", dest="batchOptions", help="Header options for batch script")
        group.add_argument("--batch-profile", dest="batchProfile", action="store_true", default=False,
                           help="Enable profiling on batch job?")
        group.add_argument("--batch-stats", dest="batchStats", action="store_true", default=False,
                           help="Print process stats on completion (Linux only)?")
        group.add_argument("--dry-run", dest="dryrun", default=False, action="store_true",
                           help="Dry run?")
        group.add_argument("--do-exec", dest="doExec", default=False, action="store_true",
                           help="Exec script instead of submit to batch system?")
        group.add_argument("--mpiexec", default="", help="mpiexec options")

    def parse_args(self, config=None, args=None, namespace=None, **kwargs):
        args, leftover = super(BatchArgumentParser, self).parse_known_args(args=args, namespace=namespace)
        args.parent = None
        args.leftover = None
        if len(leftover) > 0:
            # Save any leftovers for the parent
            if self._parent is None:
                self.error("Unrecognised arguments: %s" % leftover)
            args.parent = self._parent.parse_args(config, args=leftover, **kwargs)
            args.leftover = leftover
        args.batch = self.makeBatch(args)
        return args

    def makeBatch(self, args):
        """Create a Batch object from the command-line arguments"""
        # argMapping is a dict that maps Batch init kwarg names to parsed arguments attribute *names*
        argMapping = {'outputDir': 'batchOutput',
                      'numNodes': 'nodes',
                      'numProcsPerNode': 'procs',
                      'numCores': 'cores',
                      'walltime': 'time',
                      'queue': 'queue',
                      'jobName': 'job',
                      'dryrun': 'dryrun',
                      'doExec': 'doExec',
                      'mpiexec': 'mpiexec',
                      'submit': 'batchSubmit',
                      'options': 'batchOptions',
                      'verbose': 'batchVerbose',
                      }

        if BATCH_TYPES[args.batchType] is None:
            return None

        # kwargs is a dict that maps Batch init kwarg names to parsed arguments attribute *values*
        kwargs = {k: getattr(args, v) for k, v in argMapping.items()}
        return BATCH_TYPES[args.batchType](**kwargs)

    def format_help(self):
        text = """This is a script for queue submission of a wrapped script.

Use this program name and ignore that for the wrapped script (it will be
passed on to the batch system). Arguments for *both* this wrapper script and the
wrapped script are valid (if an argument is required for the wrapped script, it
is required for the wrapper as well).

*** Batch system submission wrapper:

"""
        text += super(BatchArgumentParser, self).format_help()
        if self._parent is not None:
            text += """

*** Wrapped script:

"""
            text += self._parent.format_help()
        return text

    def format_usage(self):
        if self._parent is not None:
            prog = self._parent.prog
            self._parent.prog = self.prog
            usage = self._parent.format_usage()
            self._parent.prog = prog
            return usage
        return super(BatchArgumentParser, self).format_usage()


def exportEnv():
    """Generate bash script to regenerate the current environment"""
    output = ""
    for key, val in os.environ.items():
        if key in ("DISPLAY",):
            continue
        if val.startswith("() {"):
            # This is a function.
            # "Two parentheses, a single space, and a brace"
            # is exactly the same criterion as bash uses.

            # From 2014-09-25, the function name is prefixed by 'BASH_FUNC_'
            # and suffixed by '()', which we have to remove.
            if key.startswith("BASH_FUNC_") and key.endswith("()"):
                key = key[10:-2]

            output += "{key} {val}\nexport -f {key}\n".format(key=key, val=val)
        else:
            # This is a variable.
            output += "export {key}='{val}'\n".format(key=key, val=val.replace("'", "'\"'\"'"))
    return output

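# Illustrative output of exportEnv (variable name and value made up): a value
# containing a single quote is closed, escaped and reopened so bash reads it back
# verbatim; GREETING="it's fine" becomes
#
#     export GREETING='it'"'"'s fine'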

class BatchCmdLineTask(CmdLineTask):

    @classmethod
    def parseAndSubmit(cls, args=None, **kwargs):
        taskParser = cls._makeArgumentParser(doBatch=True, add_help=False)
        batchParser = BatchArgumentParser(parent=taskParser)
        batchArgs = batchParser.parse_args(config=cls.ConfigClass(), args=args, override=cls.applyOverrides,
                                           **kwargs)

        if not cls.RunnerClass(cls, batchArgs.parent).precall(batchArgs.parent):  # Write config, schema
            taskParser.error("Error in task preparation")

        setBatchType(batchArgs.batch)

        if batchArgs.batch is None:  # don't use a batch system
            sys.argv = [sys.argv[0]] + batchArgs.leftover  # Remove all batch arguments

            return cls.parseAndRun()
        else:
            numCores = batchArgs.cores if batchArgs.cores > 0 else batchArgs.nodes*batchArgs.procs
            walltime = cls.batchWallTime(batchArgs.time, batchArgs.parent, numCores)

            command = cls.batchCommand(batchArgs)
            batchArgs.batch.run(command, walltime=walltime)

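    # Illustrative invocation of parseAndSubmit (script name and data selection
    # are made up; the batch flags are those defined by BatchArgumentParser above):
    #
    #     myDriverTask.py /path/to/repo --id visit=1234 \
    #         --batch-type=slurm --nodes=2 --procs=12 --time=300 --job=demo
    #
    # Anything the batch parser does not recognise is kept as 'leftover' and
    # passed through to the wrapped task on the compute nodes.
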
    @classmethod
    def batchWallTime(cls, time, parsedCmd, numCores):
        """!Return walltime request for batch job

        Subclasses should override if the walltime should be calculated
        differently (e.g., addition of some serial time).

        @param cls: Class
        @param time: Requested time per iteration
        @param parsedCmd: Results of argument parsing
        @param numCores: Number of cores
        """
        numTargets = len(cls.RunnerClass.getTargetList(parsedCmd))
        return time*numTargets/float(numCores)

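    # Worked example for batchWallTime (numbers made up): with --time=300
    # (seconds per target), 500 targets and 100 cores, the request is
    # 300*500/100 = 1500 s of walltime.
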
    @classmethod
    def batchCommand(cls, args):
        """!Return command to run CmdLineTask

        @param cls: Class
        @param args: Parsed batch job arguments (from BatchArgumentParser)
        """
        job = args.job if args.job is not None else "job"
        module = cls.__module__
        script = ("import os; os.umask(%#05o); " +
                  "import lsst.base; lsst.base.disableImplicitThreading(); " +
                  "import lsst.ctrl.pool.log; lsst.ctrl.pool.log.jobLog(\"%s\"); ") % (UMASK, job)

        if args.batchStats:
            script += ("import lsst.ctrl.pool.parallel; import atexit; " +
                       "atexit.register(lsst.ctrl.pool.parallel.printProcessStats); ")

        script += "import %s; %s.%s.parseAndRun();" % (module, module, cls.__name__)

        profilePre = "import cProfile; import os; cProfile.run(\"\"\""
        profilePost = "\"\"\", filename=\"profile-" + job + "-%s-%d.dat\" % (os.uname()[1], os.getpid()))"

        return ("python -c '" + (profilePre if args.batchProfile else "") + script +
                (profilePost if args.batchProfile else "") + "' " + shCommandFromArgs(args.leftover) +
                " --noExit")

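    # Roughly, for a task MyTask in module "mymodule" (names made up) with job
    # name "demo" and no profiling, batchCommand produces something like
    #
    #     python -c 'import os; os.umask(0o002); ...;
    #                import mymodule; mymodule.MyTask.parseAndRun();' <leftover args> --noExit
    #
    # i.e. the wrapped task is re-imported and run on the compute nodes with the
    # task-specific (leftover) arguments appended.
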
    @contextlib.contextmanager
    def logOperation(self, operation, catch=False, trace=True):
        """!Provide a context manager for logging an operation

        @param operation: description of operation (string)
        @param catch: Catch all exceptions?
        @param trace: Log a traceback of caught exception?

        Note that if 'catch' is True, all exceptions are swallowed, but there may
        be other side-effects such as undefined variables.
        """
        self.log.info("%s: Start %s" % (NODE, operation))
        try:
            yield
        except:
            if catch:
                cls, e, _ = sys.exc_info()
                self.log.warn("%s: Caught %s while %s: %s" % (NODE, cls.__name__, operation, e))
                if trace:
                    self.log.info("%s: Traceback:\n%s" % (NODE, traceback.format_exc()))
                return
            raise
        finally:
            self.log.info("%s: Finished %s" % (NODE, operation))

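    # Illustrative use of logOperation from a subclass (operation string and body
    # are made up):
    #
    #     with self.logOperation("matching catalogues", catch=True):
    #         doSomethingThatMightFail()
    #
    # With catch=True a failure is logged (with a traceback unless trace=False)
    # and execution continues after the with-block.
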

class BatchPoolTask(BatchCmdLineTask):
    """Starts a BatchCmdLineTask with an MPI process pool

    Use this subclass of BatchCmdLineTask if you want to use the Pool directly.
    """
    @classmethod
    @abortOnError
    def parseAndRun(cls, *args, **kwargs):
        """Run with an MPI process pool"""
        pool = startPool()
        super(BatchPoolTask, cls).parseAndRun(*args, **kwargs)
        pool.exit()

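# Minimal sketch of a BatchPoolTask subclass (all names below are hypothetical;
# see BatchTaskRunner.run and __call__ for how the pool is driven).  A method
# mapped over the pool receives the pool's cache object as its first argument:
#
#     class MyPoolTask(BatchPoolTask):
#         def runSomething(self, itemList):
#             pool = Pool()
#             return pool.map(self.processOne, itemList)
#
#         def processOne(self, cache, item):
#             ...  # work on a single item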

class BatchTaskRunner(TaskRunner):
    """Run a Task individually on a list of inputs using the MPI process pool"""

    def __init__(self, *args, **kwargs):
        """Constructor

        Warn if the user specified multiprocessing.
        """
        TaskRunner.__init__(self, *args, **kwargs)
        if self.numProcesses > 1:
            self.log.warn("Multiprocessing arguments (-j %d) ignored since using batch processing" %
                          self.numProcesses)
            self.numProcesses = 1

    def run(self, parsedCmd):
        """Run the task on all targets

        Sole input is the result of parsing the command-line with the ArgumentParser.

        Output is None if 'precall' failed; otherwise it is a list of calling ourself
        on each element of the target list from the 'getTargetList' method.
        """
        resultList = None

        import multiprocessing
        self.prepareForMultiProcessing()
        pool = Pool()

        if self.precall(parsedCmd):
            targetList = self.getTargetList(parsedCmd)
            if len(targetList) > 0:
                parsedCmd.log.info("Processing %d targets with a pool of %d processes..." %
                                   (len(targetList), pool.size))
                # Run the task using self.__call__
                resultList = pool.map(self, targetList)
            else:
                parsedCmd.log.warn("Not running the task because there is no data to process; "
                                   "you may preview data using \"--show data\"")
                resultList = []

        return resultList

    @abortOnError
    def __call__(self, cache, args):
        """Run the Task on a single target

        Strips out the process pool 'cache' argument.

        'args' are those arguments provided by the getTargetList method.

        Brings down the entire job if an exception is not caught (i.e., --doraise).
        """
        return TaskRunner.__call__(self, args)


class BatchParallelTask(BatchCmdLineTask):
    """Runs the BatchCmdLineTask in parallel

    Use this subclass of BatchCmdLineTask if you don't need to use the Pool
    directly, but just want to iterate over many objects (like a multi-node
    version of the '-j' command-line argument).
    """
    RunnerClass = BatchTaskRunner

    @classmethod
    def _makeArgumentParser(cls, *args, **kwargs):
        """Build an ArgumentParser

        Removes the batch-specific parts in order to delegate to the parent classes.
        """
        kwargs.pop("doBatch", False)
        kwargs.pop("add_help", False)
        return super(BatchCmdLineTask, cls)._makeArgumentParser(*args, **kwargs)

    @classmethod
    def parseAndRun(cls, *args, **kwargs):
        """Parse an argument list and run the command

        This is the entry point when we run in earnest, so start the process pool
        so that the worker nodes don't go any further.
        """
        pool = startPool()
        results = super(BatchParallelTask, cls).parseAndRun(*args, **kwargs)
        pool.exit()
        return results
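
# Minimal sketch of a BatchParallelTask subclass and its entry points (the task,
# config class and attributes below are hypothetical; the interface comes from
# lsst.pipe.base.CmdLineTask):
#
#     class MyTask(BatchParallelTask):
#         ConfigClass = MyConfig
#         _DefaultName = "myTask"
#         ...
#
# A driver script calls MyTask.parseAndSubmit() to submit the job, and the
# generated batch command calls MyTask.parseAndRun() on the compute nodes.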