from __future__ import absolute_import, division, print_function

import abc
import argparse
import collections
import fnmatch
import itertools
import os
import re
import shlex
import shutil
import sys
import textwrap

from builtins import object
from builtins import range
from builtins import str
from builtins import zip

from future.utils import with_metaclass

# NOTE(review): the lsst imports below are reconstructed from the aliases the
# visible code uses (lsstLog, dafPersist, pexConfig, lsst.utils) -- confirm
# against the original module header.
import lsst.utils
import lsst.pex.config as pexConfig
import lsst.pex.config.history
import lsst.log as lsstLog
import lsst.daf.persistence as dafPersist
# Public API of this module.
__all__ = ["ArgumentParser", "ConfigFileAction", "ConfigValueAction", "DataIdContainer",
           "DatasetArgument", "ConfigDatasetType", "InputOnlyArgumentParser"]
# Names of environment variables that supply default repository root paths.
DEFAULT_INPUT_NAME = "PIPE_INPUT_ROOT"
DEFAULT_CALIB_NAME = "PIPE_CALIB_ROOT"
DEFAULT_OUTPUT_NAME = "PIPE_OUTPUT_ROOT"


def _fixPath(defName, path):
    """Apply environment variable as default root, if present, and abspath.

    Parameters
    ----------
    defName : `str`
        Name of environment variable containing default root path; if the
        environment variable does not exist then the path is relative to
        the current working directory.
    path : `str`
        Path relative to default root path.

    Returns
    -------
    abspath : `str` or `None`
        Path that has been expanded, or `None` if the environment variable
        does not exist and path is `None`.
    """
    defRoot = os.environ.get(defName)
    if defRoot is None:
        if path is None:
            return None
        return os.path.abspath(path)
    return os.path.abspath(os.path.join(defRoot, path or ""))
80 """Container for data IDs and associated data references. 89 Override for data IDs that require special handling to be converted to ``data references``, and specify 90 the override class as ``ContainerClass`` for ``add_id_argument``. (If you don't want the argument parser 91 to compute data references, you may use this class and specify ``doMakeDataRefList=False`` in 102 """Set actual dataset type, once it is known. 112 """Validate data IDs and cast them to the correct type (modify idList in place). 116 butler : `lsst.daf.persistence.Butler` 120 raise RuntimeError(
"Must call setDatasetType first")
122 idKeyTypeDict = butler.getKeys(datasetType=self.
datasetType, level=self.
level)
124 raise KeyError(
"Cannot get keys for datasetType %s at level %s" % (self.
datasetType, self.
level))
126 for dataDict
in self.
idList:
127 for key, strVal
in dataDict.items():
129 keyType = idKeyTypeDict[key]
134 log = lsstLog.Log.getDefaultLogger()
135 log.warn(
"Unexpected ID %s; guessing type is \"%s\"" %
136 (key,
'str' if keyType == str
else keyType))
137 idKeyTypeDict[key] = keyType
141 castVal = keyType(strVal)
143 raise TypeError(
"Cannot cast value %r to %s for ID key %r" % (strVal, keyType, key,))
144 dataDict[key] = castVal
147 """Compute refList based on idList. 152 Results of parsing command-line (with ``butler`` and ``log`` elements). 156 Not called if ``add_id_argument`` called with ``doMakeDataRefList=False``. 159 raise RuntimeError(
"Must call setDatasetType first")
160 butler = namespace.butler
161 for dataId
in self.
idList:
162 refList = dafPersist.searchDataRefs(butler, datasetType=self.
datasetType,
163 level=self.
level, dataId=dataId)
165 namespace.log.warn(
"No data found for dataId=%s", dataId)
171 """data ID argument, used by `ArgumentParser.add_id_argument`. 176 Name of identifier (argument name without dashes). 178 Type of dataset; specify a string for a fixed dataset type or a `DatasetArgument` for a dynamic 179 dataset type (e.g. one specified by a command-line argument). 181 Level of dataset, for `~lsst.daf.persistence.Butler`. 182 doMakeDataRefList : `bool`, optional 183 If `True` (default), construct data references. 184 ContainerClass : class, optional 185 Class to contain data IDs and data references; the default class will work for many kinds of data, 186 but you may have to override to compute some kinds of data references. Default is `DataIdContainer`. 189 def __init__(self, name, datasetType, level, doMakeDataRefList=True, ContainerClass=DataIdContainer):
190 if name.startswith(
"-"):
191 raise RuntimeError(
"Name %s must not start with -" % (name,))
201 """`True` if the dataset type is dynamic (that is, specified on the command line).""" 202 return isinstance(self.
datasetType, DynamicDatasetType)
205 """Get the dataset type as a string. 224 """Abstract base class for a dataset type determined from parsed command-line arguments. 228 """Add a command-line argument to specify dataset type name, if wanted. 232 parser : `ArgumentParser` 233 Argument parser to add the argument to. 235 Name of data ID argument, without the leading ``"--"``, e.g. ``"id"``. 239 The default implementation does nothing 245 """Get the dataset type as a string, based on parsed command-line arguments. 252 raise NotImplementedError(
"Subclasses must override")
256 """Dataset type specified by a command-line argument. 260 name : `str`, optional 261 Name of command-line argument (including leading "--", if appropriate) whose value is the dataset 262 type. If `None`, uses ``--idName_dstype`` where idName is the name of the data ID argument (e.g. 264 help : `str`, optional 265 Help string for the command-line argument. 266 default : obj, optional 267 Default value. If `None`, then the command-line option is required. This argument isignored if the 268 command-line argument is positional (name does not start with "-") because positional arguments do 269 not support default values. 274 help="dataset type to process from input data repository",
277 DynamicDatasetType.__init__(self)
283 """Get the dataset type as a string, from the appropriate command-line argument. 295 argName = self.
name.lstrip(
"-")
296 return getattr(namespace, argName)
299 """Add a command-line argument to specify the dataset type name. 303 parser : `ArgumentParser` 310 Also sets the `name` attribute if it is currently `None`. 312 help = self.
help if self.
help else "dataset type for %s" % (idName,)
313 if self.
name is None:
314 self.
name =
"--%s_dstype" % (idName,)
315 requiredDict = dict()
316 if self.
name.startswith(
"-"):
317 requiredDict = dict(required=self.
default is None)
326 """Dataset type specified by a config parameter. 331 Name of config option whose value is the dataset type. 335 DynamicDatasetType.__init__(self)
339 """Return the dataset type as a string, from the appropriate config field 343 namespace : `argparse.Namespace` 348 keyList = self.
name.split(
".")
349 value = namespace.config
352 value = getattr(value, key)
354 raise RuntimeError(
"Cannot find config parameter %r" % (self.
name,))
359 """Argument parser for command-line tasks that is based on `argparse.ArgumentParser`. 364 Name of top-level task; used to identify camera-specific override files. 365 usage : `str`, optional 366 Command-line usage signature. 368 Additional keyword arguments for `argparse.ArgumentParser`. 372 Users may wish to add additional arguments before calling `parse_args`. 380 """Require an output directory to be specified (`bool`).""" 382 def __init__(self, name, usage="%(prog)s input [options]
", **kwargs): 385 argparse.ArgumentParser.__init__(self,
387 fromfile_prefix_chars=
'@',
388 epilog=textwrap.dedent(
"""Notes: 389 * --config, --configfile, --id, --loglevel and @file may appear multiple times; 390 all values are used, in order left to right 391 * @file reads command-line options from the specified file: 392 * data may be distributed among multiple lines (e.g. one option per line) 393 * data after # is treated as a comment and ignored 394 * blank lines and lines starting with # are ignored 395 * To specify multiple values for an option, do not use = after the option name: 396 * right: --configfile foo bar 397 * wrong: --configfile=foo bar 399 formatter_class=argparse.RawDescriptionHelpFormatter,
401 self.add_argument(metavar=
'input', dest=
"rawInput",
402 help=
"path to input data repository, relative to $%s" % (DEFAULT_INPUT_NAME,))
403 self.add_argument(
"--calib", dest=
"rawCalib",
404 help=
"path to input calibration repository, relative to $%s" %
405 (DEFAULT_CALIB_NAME,))
406 self.add_argument(
"--output", dest=
"rawOutput",
407 help=
"path to output data repository (need not exist), relative to $%s" %
408 (DEFAULT_OUTPUT_NAME,))
409 self.add_argument(
"--rerun", dest=
"rawRerun", metavar=
"[INPUT:]OUTPUT",
410 help=
"rerun name: sets OUTPUT to ROOT/rerun/OUTPUT; " 411 "optionally sets ROOT to ROOT/rerun/INPUT")
412 self.add_argument(
"-c",
"--config", nargs=
"*", action=ConfigValueAction,
413 help=
"config override(s), e.g. -c foo=newfoo bar.baz=3", metavar=
"NAME=VALUE")
414 self.add_argument(
"-C",
"--configfile", dest=
"configfile", nargs=
"*", action=ConfigFileAction,
415 help=
"config override file(s)")
416 self.add_argument(
"-L",
"--loglevel", nargs=
"*", action=LogLevelAction,
417 help=
"logging level; supported levels are [trace|debug|info|warn|error|fatal]",
418 metavar=
"LEVEL|COMPONENT=LEVEL")
419 self.add_argument(
"--longlog", action=
"store_true", help=
"use a more verbose format for the logging")
420 self.add_argument(
"--debug", action=
"store_true", help=
"enable debugging output?")
421 self.add_argument(
"--doraise", action=
"store_true",
422 help=
"raise an exception on error (else log a message and continue)?")
423 self.add_argument(
"--noExit", action=
"store_true",
424 help=
"Do not exit even upon failure (i.e. return a struct to the calling script)")
425 self.add_argument(
"--profile", help=
"Dump cProfile statistics to filename")
426 self.add_argument(
"--show", nargs=
"+", default=(),
427 help=
"display the specified information to stdout and quit " 428 "(unless run is specified).")
429 self.add_argument(
"-j",
"--processes", type=int, default=1, help=
"Number of processes to use")
430 self.add_argument(
"-t",
"--timeout", type=float,
431 help=
"Timeout for multiprocessing; maximum wall time (sec)")
432 self.add_argument(
"--clobber-output", action=
"store_true", dest=
"clobberOutput", default=
False,
433 help=(
"remove and re-create the output directory if it already exists " 434 "(safe with -j, but not all other forms of parallel execution)"))
435 self.add_argument(
"--clobber-config", action=
"store_true", dest=
"clobberConfig", default=
False,
436 help=(
"backup and then overwrite existing config files instead of checking them " 437 "(safe with -j, but not all other forms of parallel execution)"))
438 self.add_argument(
"--no-backup-config", action=
"store_true", dest=
"noBackupConfig", default=
False,
439 help=
"Don't copy config to file~N backup.")
440 self.add_argument(
"--clobber-versions", action=
"store_true", dest=
"clobberVersions", default=
False,
441 help=(
"backup and then overwrite existing package versions instead of checking" 442 "them (safe with -j, but not all other forms of parallel execution)"))
443 self.add_argument(
"--no-versions", action=
"store_true", dest=
"noVersions", default=
False,
444 help=
"don't check package versions; useful for development")
445 lsstLog.configure_prop(
""" 446 log4j.rootLogger=INFO, A1 447 log4j.appender.A1=ConsoleAppender 448 log4j.appender.A1.Target=System.out 449 log4j.appender.A1.layout=PatternLayout 450 log4j.appender.A1.layout.ConversionPattern=%c %p: %m%n 453 def add_id_argument(self, name, datasetType, help, level=None, doMakeDataRefList=True,
454 ContainerClass=DataIdContainer):
455 """Add a data ID argument. 461 Data ID argument (including leading dashes, if wanted). 462 datasetType : `str` or `DynamicDatasetType`-type 463 Type of dataset. Supply a string for a fixed dataset type. For a dynamically determined dataset 464 type, supply a `DynamicDatasetType`, such a `DatasetArgument`. 466 Help string for the argument. 467 level : object, optional 468 Level of dataset, for the `~lsst.daf.persistence.Butler`. 469 doMakeDataRefList : bool, optional 470 If `True` (default), construct data references. 471 ContainerClass : class, optional 472 Data ID container class to use to contain results; override the default if you need a special 473 means of computing data references from data IDs 477 If ``datasetType`` is an instance of `DatasetArgument`, then add a second argument to specify the 480 The associated data is put into ``namespace.<dataIdArgument.name>`` as an instance of ContainerClass; 481 the container includes fields: 483 - ``idList``: a list of data ID dicts. 484 - ``refList``: a list of `~lsst.daf.persistence.Butler` data references (empty if 485 ``doMakeDataRefList`` is `False`). 487 argName = name.lstrip(
"-")
490 raise RuntimeError(
"Data ID argument %s already exists" % (name,))
491 if argName
in set((
"camera",
"config",
"butler",
"log",
"obsPkg")):
492 raise RuntimeError(
"Data ID argument %s is a reserved name" % (name,))
494 self.add_argument(name, nargs=
"*", action=IdValueAction, help=help,
495 metavar=
"KEY=VALUE1[^VALUE2[^VALUE3...]")
499 datasetType=datasetType,
501 doMakeDataRefList=doMakeDataRefList,
502 ContainerClass=ContainerClass,
505 if dataIdArgument.isDynamicDatasetType:
506 datasetType.addArgument(parser=self, idName=argName)
# Method of ArgumentParser (class statement not visible in this fragment).
# NOTE(review): portions of this method are reconstructed; not every
# statement was visible in the source fragment.
def parse_args(self, config, args=None, log=None, override=None):
    """Parse arguments for a command-line task.

    Parameters
    ----------
    config : `lsst.pex.config.Config`
        Config for the task being run.
    args : `list`, optional
        Argument list; if `None` then ``sys.argv[1:]`` is used.
    log : `lsst.log.Log`, optional
        `~lsst.log.Log` instance; if `None` use the default log.
    override : callable, optional
        A config override function. It must take the root config object as
        its only argument and must modify the config in place. This function
        is called after camera-specific overrides files are applied, and
        before command-line config overrides are applied (thus allowing the
        user the final word).

    Returns
    -------
    namespace : `argparse.Namespace`
        A `~argparse.Namespace` instance containing fields:

        - ``camera``: camera name.
        - ``config``: the supplied config with all overrides applied,
          validated and frozen.
        - ``butler``: a `lsst.daf.persistence.Butler` for the data.
        - An entry for each of the data ID arguments registered by
          `add_id_argument`, the value of which is a
          `~lsst.pipe.base.DataIdArgument` that includes public elements
          ``idList`` and ``refList``.
        - ``log``: a `lsst.log` Log.
        - An entry for each command-line argument, with the following
          exceptions:

          - config is the supplied config, suitably updated.
          - configfile, id and loglevel are all missing.
        - ``obsPkg``: name of the ``obs_`` package for this camera.
    """
    if args is None:
        args = sys.argv[1:]

    if len(args) < 1 or args[0].startswith("-") or args[0].startswith("@"):
        self.print_help()
        if len(args) == 1 and args[0] in ("-h", "--help"):
            self.exit()
        else:
            self.exit("%s: error: Must specify input as first argument" % self.prog)

    # note that --rerun may change namespace.input, but if it does we
    # verify the new input has the same mapper class
    namespace = argparse.Namespace()
    namespace.input = _fixPath(DEFAULT_INPUT_NAME, args[0])
    if not os.path.isdir(namespace.input):
        self.error("Error: input=%r not found" % (namespace.input,))

    namespace.config = config
    namespace.log = log if log is not None else lsstLog.Log.getDefaultLogger()
    mapperClass = dafPersist.Butler.getMapperClass(namespace.input)
    namespace.camera = mapperClass.getCameraName()
    namespace.obsPkg = mapperClass.getPackageName()

    self.handleCamera(namespace)

    self._applyInitialOverrides(namespace)
    if override is not None:
        override(namespace.config)

    # add data ID containers to namespace
    for dataIdArgument in self._dataIdArgDict.values():
        setattr(namespace, dataIdArgument.name, dataIdArgument.ContainerClass(level=dataIdArgument.level))

    namespace = argparse.ArgumentParser.parse_args(self, args=args, namespace=namespace)
    del namespace.configfile

    self._parseDirectories(namespace)

    if namespace.clobberOutput:
        if namespace.output is None:
            self.error("--clobber-output is only valid with --output or --rerun")
        elif namespace.output == namespace.input:
            self.error("--clobber-output is not valid when the output and input repos are the same")
        if os.path.exists(namespace.output):
            namespace.log.info("Removing output repo %s for --clobber-output", namespace.output)
            shutil.rmtree(namespace.output)

    namespace.log.debug("input=%s", namespace.input)
    namespace.log.debug("calib=%s", namespace.calib)
    namespace.log.debug("output=%s", namespace.output)

    obeyShowArgument(namespace.show, namespace.config, exit=False)

    # no output directory specified via any mechanism
    if self.requireOutput and namespace.output is None and namespace.rerun is None:
        self.error("no output directory specified.\n"
                   "An output directory must be specified with the --output or --rerun\n"
                   "command-line arguments.\n")

    butlerArgs = {}  # common arguments for butler elements
    if namespace.calib:
        butlerArgs = {'mapperArgs': {'calibRoot': namespace.calib}}
    if namespace.output:
        outputs = {'root': namespace.output, 'mode': 'rw'}
        inputs = {'root': namespace.input}
        inputs.update(butlerArgs)
        outputs.update(butlerArgs)
        namespace.butler = dafPersist.Butler(inputs=inputs, outputs=outputs)
    else:
        outputs = {'root': namespace.input, 'mode': 'rw'}
        outputs.update(butlerArgs)
        namespace.butler = dafPersist.Butler(outputs=outputs)

    # convert data in each of the identifier lists to proper types; done
    # after constructing the butler because that is slow
    self._processDataIds(namespace)

    if "data" in namespace.show:
        for dataIdName in self._dataIdArgDict.keys():
            for dataRef in getattr(namespace, dataIdName).refList:
                print("%s dataRef.dataId = %s" % (dataIdName, dataRef.dataId))

    if namespace.show and "run" not in namespace.show:
        sys.exit(0)

    if namespace.debug:
        try:
            import debug
            assert debug  # silence pyflakes
        except ImportError:
            sys.stderr.write("Warning: no 'debug' module found\n")
            namespace.debug = False

    del namespace.loglevel

    if namespace.longlog:
        lsstLog.configure_prop("""
log4j.rootLogger=INFO, A1
log4j.appender.A1=ConsoleAppender
log4j.appender.A1.Target=System.out
log4j.appender.A1.layout=PatternLayout
log4j.appender.A1.layout.ConversionPattern=%-5p %d{yyyy-MM-ddThh:mm:ss.sss} %c (%X{LABEL})(%F:%L)- %m%n
""")
    del namespace.longlog

    namespace.config.validate()
    namespace.config.freeze()

    return namespace
# Method of ArgumentParser (class statement not visible in this fragment).
def _parseDirectories(self, namespace):
    """Parse input, output and calib directories.

    This allows for hacking the directories, e.g., to include a "rerun".
    Modifications are made to the 'namespace' object in-place.

    Parameters
    ----------
    namespace : `argparse.Namespace`
        Parsed namespace; ``rawInput``, ``rawCalib``, ``rawOutput`` and
        ``rawRerun`` are consumed (deleted) and ``input``, ``calib``,
        ``output`` and ``rerun`` are set.
    """
    mapperClass = dafPersist.Butler.getMapperClass(_fixPath(DEFAULT_INPUT_NAME, namespace.rawInput))
    namespace.calib = _fixPath(DEFAULT_CALIB_NAME, namespace.rawCalib)

    # if an output directory is specified, process it and assign it to the namespace
    if namespace.rawOutput:
        namespace.output = _fixPath(DEFAULT_OUTPUT_NAME, namespace.rawOutput)
    else:
        namespace.output = None

    # process the rerun argument; --rerun and --output are mutually exclusive
    if namespace.rawRerun:
        if namespace.output:
            self.error("Error: cannot specify both --output and --rerun")
        namespace.rerun = namespace.rawRerun.split(":")
        rerunDir = [os.path.join(namespace.input, "rerun", dd) for dd in namespace.rerun]
        modifiedInput = False
        if len(rerunDir) == 2:
            # INPUT:OUTPUT form: both become rerun subdirectories
            namespace.input, namespace.output = rerunDir
            modifiedInput = True
        elif len(rerunDir) == 1:
            namespace.output = rerunDir[0]
            if os.path.exists(os.path.join(namespace.output, "_parent")):
                namespace.input = os.path.realpath(os.path.join(namespace.output, "_parent"))
                modifiedInput = True
        else:
            self.error("Error: invalid argument for --rerun: %s" % namespace.rerun)
        if modifiedInput and dafPersist.Butler.getMapperClass(namespace.input) != mapperClass:
            self.error("Error: input directory specified by --rerun must have the same mapper as INPUT")
    else:
        namespace.rerun = None
    del namespace.rawInput
    del namespace.rawCalib
    del namespace.rawOutput
    del namespace.rawRerun
# Method of ArgumentParser (class statement not visible in this fragment).
def _processDataIds(self, namespace):
    """Process the parsed data for each data ID argument in a `~argparse.Namespace`.

    Processing includes:

    - Validate data ID keys.
    - Cast the data ID values to the correct type.
    - Compute data references from data IDs.

    Parameters
    ----------
    namespace : `argparse.Namespace`
        Parsed namespace. These attributes are read:

        - ``butler``
        - ``log``
        - ``config``, if any dynamic dataset types are set by a config
          parameter.
        - Dataset type arguments (e.g. ``id_dstype``), if any dynamic
          dataset types are specified by such.

        And modifies these attributes:

        - ``<name>`` for each data ID argument registered using
          `add_id_argument`.
    """
    for dataIdArgument in self._dataIdArgDict.values():
        dataIdContainer = getattr(namespace, dataIdArgument.name)
        dataIdContainer.setDatasetType(dataIdArgument.getDatasetType(namespace))
        if dataIdArgument.doMakeDataRefList:
            try:
                dataIdContainer.castDataIds(butler=namespace.butler)
            except (KeyError, TypeError) as e:
                # failure of castDataIds indicates invalid command args
                self.error(e)
            # failure of makeDataRefList indicates a bug, which wants a traceback
            dataIdContainer.makeDataRefList(namespace)
# Method of ArgumentParser (class statement not visible in this fragment).
def _applyInitialOverrides(self, namespace):
    """Apply obs-package-specific and camera-specific config override files, if found.

    Parameters
    ----------
    namespace : `argparse.Namespace`
        Parsed namespace. These attributes are read:

        - ``obsPkg``
        - ``camera``
        - ``config``
        - ``log``

    Notes
    -----
    Look in the package namespace.obsPkg for files:

    - ``config/<task_name>.py``
    - ``config/<camera_name>/<task_name>.py``

    and load if found.
    """
    obsPkgDir = lsst.utils.getPackageDir(namespace.obsPkg)
    fileName = self._name + ".py"
    for filePath in (
        os.path.join(obsPkgDir, "config", fileName),
        os.path.join(obsPkgDir, "config", namespace.camera, fileName),
    ):
        if os.path.exists(filePath):
            # typo "overrride" in the original log message fixed here
            namespace.log.info("Loading config override file %r", filePath)
            namespace.config.load(filePath)
        else:
            namespace.log.debug("Config override file does not exist: %r", filePath)
759 """Perform camera-specific operations before parsing the command-line. 763 namespace : `argparse.Namespace` 764 Namespace (an ) with the following fields: 766 - ``camera``: the camera name. 767 - ``config``: the config passed to parse_args, with no overrides applied. 768 - ``obsPkg``: the ``obs_`` package for this camera. 769 - ``log``: a `lsst.log` Log. 773 The default implementation does nothing. 778 """Allow files of arguments referenced by ``@<path>`` to contain multiple values on each line. 783 Line of text read from an argument file. 785 arg_line = arg_line.strip()
786 if not arg_line
or arg_line.startswith(
"#"):
788 for arg
in shlex.split(arg_line, comments=
True, posix=
True):
794 """Add a "--reuse-outputs-from SUBTASK" option to the argument parser. 796 CmdLineTasks that can be restarted at an intermediate step using outputs 797 from earlier (but still internal) steps should use this method to allow 798 the user to control whether that happens when outputs from earlier steps 804 A sequence of string names (by convention, top-level subtasks) that 805 identify the steps that could be skipped when their outputs are 806 already present. The list is ordered, so when the user specifies 807 one step on the command line, all previous steps may be skipped as 808 well. In addition to the choices provided, users may pass "all" 809 to indicate that all steps may be thus skipped. 811 When this method is called, the ``namespace`` object returned by 812 ``parse_args`` will contain a ``reuse`` attribute containing a list of 813 all steps that should be skipped if their outputs are already present. 814 If no steps should be skipped, the ``reuse`` will be an empty list. 816 choices = list(choices)
817 choices.append(
"all")
818 self.add_argument(
"--reuse-outputs-from", dest=
"reuse", choices=choices,
819 default=[], action=ReuseAction,
820 help=(
"Skip the given subtask and its predecessors and reuse their outputs " 821 "if those outputs already exist. Use 'all' to specify all subtasks."))
825 """`ArgumentParser` for command-line tasks that don't write any output. 828 requireOutput =
def getTaskDict(config, taskDict=None, baseName=""):
    """Get a dictionary of task info for all subtasks in a config.

    Parameters
    ----------
    config : `lsst.pex.config.Config`
        Configuration to process.
    taskDict : `dict`, optional
        Users should not specify this argument. Supports recursion; if
        provided, taskDict is updated in place, else a new `dict` is
        started.
    baseName : `str`, optional
        Users should not specify this argument. It is only used for
        recursion: if a non-empty string then a period is appended and the
        result is used as a prefix for additional entries in taskDict;
        otherwise no prefix is used.

    Returns
    -------
    taskDict : `dict`
        Keys are config field names, values are task names.

    Notes
    -----
    This function is designed to be called recursively. The user should
    call with only a config (leaving taskDict and baseName at their default
    values).
    """
    if taskDict is None:
        taskDict = dict()
    for fieldName, field in config.items():
        if hasattr(field, "value") and hasattr(field, "target"):
            subConfig = field.value
            if isinstance(subConfig, pexConfig.Config):
                subBaseName = "%s.%s" % (baseName, fieldName) if baseName else fieldName
                try:
                    taskName = "%s.%s" % (field.target.__module__, field.target.__name__)
                except Exception:
                    taskName = repr(field.target)
                taskDict[subBaseName] = taskName
                getTaskDict(config=subConfig, taskDict=taskDict, baseName=subBaseName)
    return taskDict
873 """Process arguments specified with ``--show`` (but ignores ``"data"``). 877 showOpts : `list` of `str` 878 List of options passed to ``--show``. 881 exit : bool, optional 882 Exit if ``"run"`` isn't included in ``showOpts``. 886 Supports the following options in showOpts: 888 - ``config[=PAT]``. Dump all the config entries, or just the ones that match the glob pattern. 889 - ``history=PAT``. Show where the config entries that match the glob pattern were set. 890 - ``tasks``. Show task hierarchy. 891 - ``data``. Ignored; to be processed by caller. 892 - ``run``. Keep going (the default behaviour is to exit if --show is specified). 894 Calls ``sys.exit(1)`` if any other option found. 899 for what
in showOpts:
900 showCommand, showArgs = what.split(
"=", 1)
if "=" in what
else (what,
"")
902 if showCommand ==
"config":
903 matConfig = re.search(
r"^(?:config.)?(.+)?", showArgs)
904 pattern = matConfig.group(1)
906 class FilteredStream(object):
907 """A file object that only prints lines that match the glob "pattern" 909 N.b. Newlines are silently discarded and reinserted; crude but effective. 912 def __init__(self, pattern):
914 mat = re.search(
r"(.*):NOIGNORECASE$", pattern)
917 pattern = mat.group(1)
918 self._pattern = re.compile(fnmatch.translate(pattern))
920 if pattern != pattern.lower():
921 print(
u"Matching \"%s\" without regard to case " 922 "(append :NOIGNORECASE to prevent this)" % (pattern,), file=sys.stdout)
923 self._pattern = re.compile(fnmatch.translate(pattern), re.IGNORECASE)
925 def write(self, showStr):
926 showStr = showStr.rstrip()
928 matchStr = showStr.split(
"\n")[-1].split(
"=")[0]
929 if self._pattern.search(matchStr):
930 print(
u"\n" + showStr)
932 fd = FilteredStream(pattern)
936 config.saveToStream(fd,
"config")
937 elif showCommand ==
"history":
938 matHistory = re.search(
r"^(?:config.)?(.+)?", showArgs)
939 pattern = matHistory.group(1)
941 print(
"Please provide a value with --show history (e.g. history=XXX)", file=sys.stderr)
944 pattern = pattern.split(
".")
945 cpath, cname = pattern[:-1], pattern[-1]
947 for i, cpt
in enumerate(cpath):
949 hconfig = getattr(hconfig, cpt)
950 except AttributeError:
951 print(
"Error: configuration %s has no subconfig %s" %
952 (
".".join([
"config"] + cpath[:i]), cpt), file=sys.stderr)
957 print(pexConfig.history.format(hconfig, cname))
959 print(
"Error: %s has no field %s" % (
".".join([
"config"] + cpath), cname), file=sys.stderr)
962 elif showCommand ==
"data":
964 elif showCommand ==
"run":
966 elif showCommand ==
"tasks":
969 print(
u"Unknown value for show: %s (choose from '%s')" %
970 (what,
"', '".join(
"config[=XXX] data history=XXX tasks run".split())), file=sys.stderr)
973 if exit
and "run" not in showOpts:
978 """Print task hierarchy to stdout. 982 config : `lsst.pex.config.Config` 983 Configuration to process. 988 fieldNameList = sorted(taskDict.keys())
989 for fieldName
in fieldNameList:
990 taskName = taskDict[fieldName]
991 print(
u"%s: %s" % (fieldName, taskName))
995 """argparse action callback to override config parameters using name=value pairs from the command-line. 998 def __call__(self, parser, namespace, values, option_string):
999 """Override one or more config name value pairs. 1003 parser : `argparse.ArgumentParser` 1005 namespace : `argparse.Namespace` 1006 Parsed command. The ``namespace.config`` attribute is updated. 1008 A list of ``configItemName=value`` pairs. 1009 option_string : `str` 1010 Option value specified by the user. 1012 if namespace.config
is None:
1014 for nameValue
in values:
1015 name, sep, valueStr = nameValue.partition(
"=")
1017 parser.error(
"%s value %s must be in form name=value" % (option_string, nameValue))
1022 except AttributeError:
1023 parser.error(
"no config field: %s" % (name,))
1026 value = eval(valueStr, {})
1028 parser.error(
"cannot parse %r as a value for %s" % (valueStr, name))
1031 except Exception
as e:
1032 parser.error(
"cannot set config.%s=%r: %s" % (name, value, e))
1036 """argparse action to load config overrides from one or more files. 1039 def __call__(self, parser, namespace, values, option_string=None):
1040 """Load one or more files of config overrides. 1044 parser : `argparse.ArgumentParser` 1046 namespace : `argparse.Namespace` 1047 Parsed command. The following attributes are updated by this method: ``namespace.config``. 1049 A list of data config file paths. 1050 option_string : `str`, optional 1051 Option value specified by the user. 1053 if namespace.config
is None:
1055 for configfile
in values:
1057 namespace.config.load(configfile)
1058 except Exception
as e:
1059 parser.error(
"cannot load config file %r: %s" % (configfile, e))
1063 """argparse action callback to process a data ID into a dict. 1066 def __call__(self, parser, namespace, values, option_string):
1067 """Parse ``--id`` data and append results to ``namespace.<argument>.idList``. 1071 parser : `ArgumentParser` 1073 namespace : `argparse.Namespace` 1074 Parsed command (an instance of argparse.Namespace). The following attributes are updated: 1076 - ``<idName>.idList``, where ``<idName>`` is the name of the ID argument, for instance ``"id"`` 1077 for ID argument ``--id``. 1079 A list of data IDs; see Notes below. 1080 option_string : `str` 1081 Option value specified by the user. 1085 The data format is:: 1087 key1=value1_1[^value1_2[^value1_3...] key2=value2_1[^value2_2[^value2_3...]... 1089 The values (e.g. ``value1_1``) may either be a string, or of the form ``"int..int"`` 1090 (e.g. ``"1..3"``) which is interpreted as ``"1^2^3"`` (inclusive, unlike a python range). 1091 So ``"0^2..4^7..9"`` is equivalent to ``"0^2^3^4^7^8^9"``. You may also specify a stride: 1092 ``"1..5:2"`` is ``"1^3^5"``. 1094 The cross product is computed for keys with multiple values. For example:: 1096 --id visit 1^2 ccd 1,1^2,2 1098 results in the following data ID dicts being appended to ``namespace.<argument>.idList``: 1100 {"visit":1, "ccd":"1,1"} 1101 {"visit":2, "ccd":"1,1"} 1102 {"visit":1, "ccd":"2,2"} 1103 {"visit":2, "ccd":"2,2"} 1105 if namespace.config
is None:
1107 idDict = collections.OrderedDict()
1108 for nameValue
in values:
1109 name, sep, valueStr = nameValue.partition(
"=")
1111 parser.error(
"%s appears multiple times in one ID argument: %s" % (name, option_string))
1113 for v
in valueStr.split(
"^"):
1114 mat = re.search(
r"^(\d+)\.\.(\d+)(?::(\d+))?$", v)
1116 v1 = int(mat.group(1))
1117 v2 = int(mat.group(2))
1119 v3 = int(v3)
if v3
else 1
1120 for v
in range(v1, v2 + 1, v3):
1121 idDict[name].append(str(v))
1123 idDict[name].append(v)
1125 iterList = [idDict[key]
for key
in idDict.keys()]
1126 idDictList = [collections.OrderedDict(zip(idDict.keys(), valList))
1127 for valList
in itertools.product(*iterList)]
1129 argName = option_string.lstrip(
"-")
1130 ident = getattr(namespace, argName)
1131 ident.idList += idDictList
1135 """argparse action to set log level. 1138 def __call__(self, parser, namespace, values, option_string):
1143 parser : `ArgumentParser` 1145 namespace : `argparse.Namespace` 1146 Parsed command. This argument is not used. 1148 List of trace levels; each item must be of the form ``component_name=level`` or ``level``, 1149 where ``level`` is a keyword (not case sensitive) or an integer. 1150 option_string : `str` 1151 Option value specified by the user. 1153 permittedLevelList = (
'TRACE',
'DEBUG',
'INFO',
'WARN',
'ERROR',
'FATAL')
1154 permittedLevelSet = set(permittedLevelList)
1155 for componentLevel
in values:
1156 component, sep, levelStr = componentLevel.partition(
"=")
1158 levelStr, component = component,
None 1159 logLevelUpr = levelStr.upper()
1160 if logLevelUpr
in permittedLevelSet:
1161 logLevel = getattr(lsstLog.Log, logLevelUpr)
1163 parser.error(
"loglevel=%r not one of %s" % (levelStr, permittedLevelList))
1164 if component
is None:
1165 namespace.log.setLevel(logLevel)
1167 lsstLog.Log.getLogger(component).setLevel(logLevel)
1171 """argparse action associated with ArgumentPraser.addReuseOption.""" 1173 def __call__(self, parser, namespace, value, option_string):
1175 value = self.choices[-2]
1176 index = self.choices.index(value)
1177 namespace.reuse = self.choices[:index + 1]
1181 """Set an instance attribute (like `setattr` but accepting hierarchical names such as ``foo.bar.baz``). 1186 Object whose attribute is to be set. 1188 Name of attribute to set. 1190 New value for the attribute. 1194 For example if name is ``foo.bar.baz`` then ``item.foo.bar.baz`` is set to the specified value. 1197 subnameList = name.split(
".")
1198 for subname
in subnameList[:-1]:
1199 subitem = getattr(subitem, subname)
1200 setattr(subitem, subnameList[-1], value)
1204 """Get an attribute (like `getattr` but accepts hierarchical names such as ``foo.bar.baz``). 1209 Object whose attribute is to be returned. 1211 Name of the attribute to get. 1216 If name is ``foo.bar.baz then the return value is ``item.foo.bar.baz``. 1219 for subname
in name.split(
"."):
1220 subitem = getattr(subitem, subname)
def addArgument(self, parser, idName)
def getTaskDict(config, taskDict=None, baseName="")
def __init__(self, level=None)
def getDottedAttr(item, name)
def __init__(self, name=None, help="dataset type to process from input data repository", default=None)
def getDatasetType(self, namespace)
def addArgument(self, parser, idName)
def makeDataRefList(self, namespace)
def setDottedAttr(item, name, value)
def __call__(self, parser, namespace, values, option_string=None)
def getDatasetType(self, namespace)
def obeyShowArgument(showOpts, config=None, exit=False)
def addReuseOption(self, choices)
def getDatasetType(self, namespace)
std::string getPackageDir(std::string const &packageName)
def _processDataIds(self, namespace)
def getDatasetType(self, namespace)
def convert_arg_line_to_args(self, arg_line)
def isDynamicDatasetType(self)
def handleCamera(self, namespace)
def __init__(self, name, datasetType, level, doMakeDataRefList=True, ContainerClass=DataIdContainer)
def __call__(self, parser, namespace, value, option_string)
def _applyInitialOverrides(self, namespace)
def castDataIds(self, butler)
def __init__(self, name, usage="%(prog)s input [options]", kwargs)
def __call__(self, parser, namespace, values, option_string)
def setDatasetType(self, datasetType)
def showTaskHierarchy(config)
def __call__(self, parser, namespace, values, option_string)
def __call__(self, parser, namespace, values, option_string)
def parse_args(self, config, args=None, log=None, override=None)
def _parseDirectories(self, namespace)
def add_id_argument(self, name, datasetType, help, level=None, doMakeDataRefList=True, ContainerClass=DataIdContainer)