Coverage for python/lsst/ctrl/mpexec/cli/script/run.py : 14%

# This file is part of ctrl_mpexec.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import logging

from lsst.daf.butler.cli.cliLog import CliLog
from ... import CmdLineFwk, TaskFactory

_log = logging.getLogger(__name__.partition(".")[2])


def run(do_raise,
        graph_fixup,
        init_only,
        log_level,
        no_versions,
        processes,
        profile,
        qgraph,
        register_dataset_types,
        skip_init_writes,
        timeout,
        butler_config,
        input,
        output,
        output_run,
        extend_run,
        replace_run,
        prune_replaced,
        data_query,
        skip_existing,
        debug,
        fail_fast):
52 """Implements the command line interface `pipetask run` subcommand, should
53 only be called by command line tools and unit test code that test this
54 function.
    Parameters
    ----------
    do_raise : `bool`
        Raise an exception in the case of an error.
    graph_fixup : `str`
        The name of the class or factory method which makes an instance used
        for execution graph fixup.
    init_only : `bool`
        If true, do not actually run; just register dataset types and/or save
        init outputs.
    log_level : `list` of `tuple`
        Per-component logging levels. Each item in the list is a tuple
        (component, level), where `component` is a logger name (an empty
        string or `None` selects the root logger) and `level` is a logging
        level name: one of CRITICAL, ERROR, WARNING, INFO, or DEBUG (case
        insensitive).
    no_versions : `bool`
        If true, do not save or check package versions.
    processes : `int`
        The number of processes to use.
    profile : `str`
        File name to dump cProfile information to.
    qgraph : `lsst.pipe.base.QuantumGraph`
        A QuantumGraph generated by a previous subcommand.
    register_dataset_types : `bool`
        If true, register DatasetTypes that do not already exist in the
        Registry.
    skip_init_writes : `bool`
        If true, do not write collection-wide 'init output' datasets (e.g.
        schemas).
    timeout : `int`
        Timeout for multiprocessing; maximum wall time (sec).
    butler_config : `str`, `dict`, or `lsst.daf.butler.Config`
        If `str`, `butler_config` is the path location of the gen3
        butler/registry config file. If `dict`, `butler_config` is key value
        pairs used to init or update the `lsst.daf.butler.Config` instance.
        If `Config`, it is the object used to configure a Butler.
    input : `str`
        Comma-separated names of the input collection(s). An entry may include
        a colon (:); in that case the string before the colon is a dataset
        type name that restricts the search in that collection.
    output : `str`
        Name of the output CHAINED collection. This may either be an existing
        CHAINED collection to use as both input and output (if `input` is
        `None`), or a new CHAINED collection created to include all inputs
        (if `input` is not `None`). In both cases, the collection's children
        will start with an output RUN collection that directly holds all new
        datasets (see `output_run`).
    output_run : `str`
        Name of the new output RUN collection. If not provided then `output`
        must be provided and a new RUN collection will be created by appending
        a timestamp to the value passed with `output`. If this collection
        already exists then `extend_run` must be passed.
    extend_run : `bool`
        Instead of creating a new RUN collection, insert datasets into either
        the one given by `output_run` (if provided) or the first child
        collection of `output` (which must be of type RUN).
    replace_run : `bool`
        Before creating a new RUN collection in an existing CHAINED
        collection, remove the first child collection (which must be of type
        RUN). This can be used to repeatedly write to the same (parent)
        collection during development, but it does not delete the datasets
        associated with the replaced run unless `prune_replaced` is also
        True. Requires `output`, and `extend_run` must be `None`.
    prune_replaced : "unstore", "purge", or `None`
        If not `None`, delete the datasets in the collection replaced by
        `replace_run`, either just from the datastore ("unstore") or by
        removing them and the RUN completely ("purge"). Requires
        `replace_run`.
    data_query : `str`
        User query selection expression.
    skip_existing : `bool`
        If all Quantum outputs already exist in the output RUN collection then
        that Quantum will be excluded from the QuantumGraph. Requires the
        `run` command's `--extend-run` flag to be set.
    debug : `bool`
        If true, enable debugging output using the lsstDebug facility (imports
        debug.py).
    fail_fast : `bool`
        If true then stop processing at the first error; otherwise process as
        many tasks as possible.
    """

    if log_level is not None:
        CliLog.setLogLevels(log_level)

    class RunArgs:
        """A container class for arguments to CmdLineFwk.runPipeline, whose
        API (currently) is written to accept inputs from argparse in a generic
        container class.
        """

        def __init__(self,
                     do_raise,
                     graph_fixup,
                     init_only,
                     no_versions,
                     processes,
                     profile,
                     skip_init_writes,
                     timeout,
                     register_dataset_types,
                     butler_config,
                     input,
                     output,
                     output_run,
                     extend_run,
                     replace_run,
                     prune_replaced,
                     data_query,
                     skip_existing,
                     debug,
                     fail_fast):
            self.do_raise = do_raise
            self.graph_fixup = graph_fixup
            self.init_only = init_only
            self.no_versions = no_versions
            self.processes = processes
            self.profile = profile
            self.skip_init_writes = skip_init_writes
            self.timeout = timeout
            self.register_dataset_types = register_dataset_types
            self.butler_config = butler_config
            self.input = input
            self.output = output
            self.output_run = output_run
            self.extend_run = extend_run
            self.replace_run = replace_run
            self.prune_replaced = prune_replaced
            self.data_query = data_query
            self.skip_existing = skip_existing
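            # Note the attribute-name change here: the CLI exposes this option
            # as "debug", but the container stores it as "enableLsstDebug".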
            self.enableLsstDebug = debug
            self.fail_fast = fail_fast

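    # Collect the CLI options into the argparse-style namespace that
    # CmdLineFwk.runPipeline expects; the QuantumGraph itself is passed
    # separately below.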
    args = RunArgs(do_raise=do_raise, graph_fixup=graph_fixup, init_only=init_only, no_versions=no_versions,
                   processes=processes, profile=profile, skip_init_writes=skip_init_writes, timeout=timeout,
                   register_dataset_types=register_dataset_types, butler_config=butler_config, input=input,
                   output=output, output_run=output_run, extend_run=extend_run, replace_run=replace_run,
                   prune_replaced=prune_replaced, data_query=data_query, skip_existing=skip_existing,
                   debug=debug, fail_fast=fail_fast)

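    # Execute the QuantumGraph produced by an earlier subcommand:
    # CmdLineFwk.runPipeline drives the run, with a TaskFactory supplied for
    # constructing the pipeline tasks.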
    f = CmdLineFwk()
    taskFactory = TaskFactory()
    f.runPipeline(qgraph, taskFactory, args)
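
# Illustrative call shape only (not part of the original module): a unit test
# or CLI wrapper would pass a previously built QuantumGraph plus the Butler
# and collection options straight through. All values below are hypothetical
# placeholders, not defaults of this package.
#
#     run(do_raise=True, graph_fixup=None, init_only=False, log_level=None,
#         no_versions=False, processes=1, profile=None, qgraph=my_qgraph,
#         register_dataset_types=True, skip_init_writes=False, timeout=3600,
#         butler_config="repo/butler.yaml", input="my/input/collection",
#         output="my/output/chain", output_run=None, extend_run=False,
#         replace_run=False, prune_replaced=None, data_query="",
#         skip_existing=False, debug=False, fail_fast=False)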