# This file is part of daf_butler.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import logging
import re
import tempfile

__all__ = ('ButlerS3URI',)

from typing import (
    TYPE_CHECKING,
    Optional,
    Any,
    Callable,
    Iterator,
    List,
    Tuple,
    Union,
)

from ..utils import time_this
from .utils import NoTransaction
from ._butlerUri import ButlerURI
from .s3utils import getS3Client, s3CheckFileExists, bucketExists

from botocore.exceptions import ClientError
from http.client import ImproperConnectionState, HTTPException
from urllib3.exceptions import RequestError, HTTPError

if TYPE_CHECKING:
    try:
        import boto3
    except ImportError:
        pass
    from ..datastore import DatastoreTransaction
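# Note: boto3 is imported above only for static type checking. At runtime
# the client is obtained lazily through getS3Client() (see the client
# property below), so the boto3 import stays deferred.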

# https://pypi.org/project/backoff/
try:
    import backoff
except ImportError:
    class Backoff():
        @staticmethod
        def expo(func: Callable, *args: Any, **kwargs: Any) -> Callable:
            return func

        @staticmethod
        def on_exception(func: Callable, *args: Any, **kwargs: Any) -> Callable:
            return func

    backoff = Backoff
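# With this no-op stand-in, a decoration such as
#
#     @backoff.on_exception(backoff.expo, retryable_client_errors,
#                           max_time=max_retry_time)
#
# simply returns the wrapped function unchanged, so the module still
# imports and runs (without retries) when backoff is not installed.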

# Settings for the "backoff" retry decorators. These retries are belt-and-
# suspenders along with the retries built into Boto3, to account for
# semantic differences in errors between S3-like providers.
retryable_io_errors = (
    # http.client
    ImproperConnectionState, HTTPException,
    # urllib3.exceptions
    RequestError, HTTPError,
    # built-ins
    TimeoutError, ConnectionError)
retryable_client_errors = (
    # botocore.exceptions
    ClientError,
    # built-ins
    PermissionError)
all_retryable_errors = retryable_client_errors + retryable_io_errors
max_retry_time = 60


log = logging.getLogger(__name__)
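# For reference: backoff.on_exception with backoff.expo retries a failing
# call with exponentially growing waits, giving up once max_time (here 60
# seconds) has elapsed.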

class ButlerS3URI(ButlerURI):
    """S3 URI implementation class."""

    @property
    def client(self) -> boto3.client:
        """Client object to address remote resource."""
        # Defer import for circular dependencies
        return getS3Client()

    @backoff.on_exception(backoff.expo, retryable_client_errors, max_time=max_retry_time)
    def exists(self) -> bool:
        """Check that the S3 resource exists."""
        if self.is_root:
            # Only check for the bucket since the path is irrelevant
            return bucketExists(self.netloc)
        exists, _ = s3CheckFileExists(self, client=self.client)
        return exists

    @backoff.on_exception(backoff.expo, retryable_client_errors, max_time=max_retry_time)
    def size(self) -> int:
        """Return the size of the resource in bytes."""
        if self.dirLike:
            return 0
        exists, sz = s3CheckFileExists(self, client=self.client)
        if not exists:
            raise FileNotFoundError(f"Resource {self} does not exist")
        return sz

    @backoff.on_exception(backoff.expo, retryable_client_errors, max_time=max_retry_time)
    def remove(self) -> None:
        """Remove the resource."""
        # https://github.com/boto/boto3/issues/507 - there is no
        # way of knowing if the file was actually deleted except
        # for checking all the keys again; the response is HTTP 204
        # (No Content) every time
        self.client.delete_object(Bucket=self.netloc, Key=self.relativeToPathRoot)

    @backoff.on_exception(backoff.expo, all_retryable_errors, max_time=max_retry_time)
    def read(self, size: int = -1) -> bytes:
        """Read the contents of the resource."""
        args = {}
        if size > 0:
            args["Range"] = f"bytes=0-{size-1}"
        try:
            response = self.client.get_object(Bucket=self.netloc,
                                              Key=self.relativeToPathRoot,
                                              **args)
        except (self.client.exceptions.NoSuchKey, self.client.exceptions.NoSuchBucket) as err:
            raise FileNotFoundError(f"No such resource: {self}") from err
        with time_this(log, msg="Read from %s", args=(self,)):
            body = response["Body"].read()
        response["Body"].close()
        return body
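    # Semantics sketch for the ranged read above: read(size=10) sends
    # "Range: bytes=0-9", i.e. only the first 10 bytes of the object are
    # requested from S3.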

    @backoff.on_exception(backoff.expo, all_retryable_errors, max_time=max_retry_time)
    def write(self, data: bytes, overwrite: bool = True) -> None:
        """Write the supplied data to the resource."""
        if not overwrite:
            if self.exists():
                raise FileExistsError(f"Remote resource {self} exists and overwrite has been disabled")
        with time_this(log, msg="Write to %s", args=(self,)):
            self.client.put_object(Bucket=self.netloc, Key=self.relativeToPathRoot,
                                   Body=data)

    @backoff.on_exception(backoff.expo, all_retryable_errors, max_time=max_retry_time)
    def mkdir(self) -> None:
        """Write a directory key to S3."""
        if not bucketExists(self.netloc):
            raise ValueError(f"Bucket {self.netloc} does not exist for {self}!")

        if not self.dirLike:
            raise ValueError(f"Can not create a 'directory' for file-like URI {self}")

        # Don't create an S3 key when the root is at the top level of a bucket
        if not self.path == "/":
            self.client.put_object(Bucket=self.netloc, Key=self.relativeToPathRoot)
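    # S3 has no real directories; the key written above serves as an empty
    # placeholder object so the path behaves directory-like in listings
    # (relativeToPathRoot is assumed to end with "/" for a dirLike URI).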

    @backoff.on_exception(backoff.expo, all_retryable_errors, max_time=max_retry_time)
    def _as_local(self) -> Tuple[str, bool]:
        """Download object from S3 and place in temporary directory.

        Returns
        -------
        path : `str`
            Path to local temporary file.
        temporary : `bool`
            Always returns `True`. This is always a temporary file.
        """
        with tempfile.NamedTemporaryFile(suffix=self.getExtension(), delete=False) as tmpFile:
            with time_this(log, msg="Downloading %s to local file", args=(self,)):
                self.client.download_fileobj(self.netloc, self.relativeToPathRoot, tmpFile)
        return tmpFile.name, True
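    # Because delete=False is passed to NamedTemporaryFile, the file
    # outlives the context manager; cleanup is left to the caller
    # (typically via ButlerURI.as_local, which knows from the True flag
    # returned here that the file is temporary).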

    @backoff.on_exception(backoff.expo, all_retryable_errors, max_time=max_retry_time)
    def transfer_from(self, src: ButlerURI, transfer: str = "copy",
                      overwrite: bool = False,
                      transaction: Optional[Union[DatastoreTransaction, NoTransaction]] = None) -> None:
        """Transfer the current resource to an S3 bucket.

        Parameters
        ----------
        src : `ButlerURI`
            Source URI.
        transfer : `str`
            Mode to use for transferring the resource. Supports the following
            options: copy.
        overwrite : `bool`, optional
            Allow an existing file to be overwritten. Defaults to `False`.
        transaction : `DatastoreTransaction`, optional
            Currently unused.
        """
        # Fail early to prevent delays if remote resources are requested
        if transfer not in self.transferModes:
            raise ValueError(f"Transfer mode '{transfer}' not supported by URI scheme {self.scheme}")

        # Existence checks cost time, so do not make them unless we know
        # that debugging is enabled.
        if log.isEnabledFor(logging.DEBUG):
            log.debug("Transferring %s [exists: %s] -> %s [exists: %s] (transfer=%s)",
                      src, src.exists(), self, self.exists(), transfer)

        if not overwrite and self.exists():
            raise FileExistsError(f"Destination path '{self}' already exists.")

        if transfer == "auto":
            transfer = self.transferDefault

        timer_msg = "Transfer from %s to %s"
        timer_args = (src, self)

        if isinstance(src, type(self)):
            # Looks like an S3 remote URI, so we can use direct copy.
            # Note that boto3.resource.meta.copy is cleverer than the
            # low-level copy_object.
            copy_source = {
                "Bucket": src.netloc,
                "Key": src.relativeToPathRoot,
            }
            with time_this(log, msg=timer_msg, args=timer_args):
                self.client.copy_object(CopySource=copy_source, Bucket=self.netloc,
                                        Key=self.relativeToPathRoot)
        else:
            # Use a local file and upload it
            with src.as_local() as local_uri:
                # resource.meta.upload_file seems like the right thing
                # but we have a low-level client
                with time_this(log, msg=timer_msg, args=timer_args):
                    with open(local_uri.ospath, "rb") as fh:
                        self.client.put_object(Bucket=self.netloc,
                                               Key=self.relativeToPathRoot, Body=fh)

        # This was an explicit move requested from a remote resource,
        # so try to remove that resource
        if transfer == "move":
            # Transactions do not work here
            src.remove()
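    # Usage sketch (hypothetical paths): uploading a local file would
    # look like
    #
    #     dest = ButlerURI("s3://some-bucket/data/file.fits")
    #     dest.transfer_from(ButlerURI("/tmp/file.fits"), transfer="copy")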

    @backoff.on_exception(backoff.expo, all_retryable_errors, max_time=max_retry_time)
    def walk(self, file_filter: Optional[Union[str, re.Pattern]] = None
             ) -> Iterator[Union[List, Tuple[ButlerURI, List[str], List[str]]]]:
        """Walk the directory tree returning matching files and directories.

        Parameters
        ----------
        file_filter : `str` or `re.Pattern`, optional
            Regex to filter out files from the list before it is returned.

        Yields
        ------
        dirpath : `ButlerURI`
            Current directory being examined.
        dirnames : `list` of `str`
            Names of subdirectories within dirpath.
        filenames : `list` of `str`
            Names of all the files within dirpath.
        """
        # We pretend that S3 uses directories and files and not simply keys
        if not (self.isdir() or self.is_root):
            raise ValueError(f"Can not walk a non-directory URI: {self}")

        if isinstance(file_filter, str):
            file_filter = re.compile(file_filter)

        s3_paginator = self.client.get_paginator('list_objects_v2')

        # Limit each query to a single "directory" to match os.walk.
        # We could download all keys at once with no delimiter and work
        # it out locally, but that could lead to large memory usage for
        # millions of keys and would also make the initial call to this
        # method potentially very slow. If making this method look like
        # os.walk were not required, we could query all keys with
        # pagination and return them in groups of 1000, but that would
        # be a different interface, since we can't guarantee we would get
        # them all grouped properly across the 1000-key limit boundary.
        prefix = self.relativeToPathRoot if not self.is_root else ""
        prefix_len = len(prefix)
        dirnames = []
        filenames = []
        files_there = False

        for page in s3_paginator.paginate(Bucket=self.netloc, Prefix=prefix, Delimiter="/"):
            # All results are returned as full key names and we must
            # convert them back to the root form. The prefix is fixed
            # and delimited, so that is a simple trim.

            # Directories are reported in the CommonPrefixes result,
            # which reports the entire key and must be stripped.
            found_dirs = [dir["Prefix"][prefix_len:] for dir in page.get("CommonPrefixes", ())]
            dirnames.extend(found_dirs)

            found_files = [file["Key"][prefix_len:] for file in page.get("Contents", ())]
            if found_files:
                files_there = True
            if file_filter is not None:
                found_files = [f for f in found_files if file_filter.search(f)]

            filenames.extend(found_files)

        # Directories do not exist, so we can't test for them. If no files
        # or directories were found, though, this URI effectively does not
        # exist, and we should match os.walk() behavior and yield [].
        if not dirnames and not files_there:
            yield []
        else:
            yield self, dirnames, filenames

        for dir in dirnames:
            new_uri = self.join(dir)
            yield from new_uri.walk(file_filter)
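# A minimal end-to-end sketch (hypothetical bucket and key; assumes the
# ButlerURI factory dispatches s3:// URIs to ButlerS3URI and that AWS
# credentials are configured):
#
#     uri = ButlerURI("s3://example-bucket/some/key.txt")
#     uri.write(b"hello")
#     assert uri.exists() and uri.size() == 5
#     print(uri.read())
#     uri.remove()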