Hide keyboard shortcuts

Hot-keys on this page

r m x p   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

1

2

3

4

5

6

7

8

9

10

11

12

13

14

15

16

17

18

19

20

21

22

23

24

25

26

27

28

29

30

31

32

33

34

35

36

37

38

39

40

41

42

43

44

45

46

47

48

49

50

51

52

53

54

55

56

57

58

59

60

61

62

63

64

65

66

67

68

69

70

71

72

73

74

75

76

77

78

79

80

81

82

83

84

85

86

87

88

89

90

91

92

93

94

95

96

97

98

99

100

101

102

103

104

105

106

107

108

109

110

111

112

113

114

115

116

117

118

119

120

121

122

123

124

125

126

127

128

129

130

131

132

133

134

135

136

137

138

139

140

141

142

143

144

145

146

147

148

149

150

151

152

153

154

155

156

157

158

159

160

161

162

163

164

165

166

167

168

169

170

171

172

173

174

175

176

177

178

179

180

181

182

183

184

185

186

187

188

189

190

191

192

193

194

195

196

197

198

199

200

201

#!/usr/bin/env python 

 

# 

# LSST Data Management System 

# Copyright 2008, 2009, 2010 LSST Corporation. 

# 

# This product includes software developed by the 

# LSST Project (http://www.lsst.org/). 

# 

# This program is free software: you can redistribute it and/or modify 

# it under the terms of the GNU General Public License as published by 

# the Free Software Foundation, either version 3 of the License, or 

# (at your option) any later version. 

# 

# This program is distributed in the hope that it will be useful, 

# but WITHOUT ANY WARRANTY; without even the implied warranty of 

# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

# GNU General Public License for more details. 

# 

# You should have received a copy of the LSST License Statement and 

# the GNU General Public License along with this program. If not, 

# see <http://www.lsstcorp.org/LegalNotices/>. 

# 

from builtins import object, super 

 

 

from . import Policy 

 

"""This module defines the Mapper base class.""" 

 

 

class Mapper(object):
    """Mapper is a base class for all mappers.

    Subclasses may define the following methods:

    map_{datasetType}(self, dataId, write)
        Map a dataset id for the given dataset type into a ButlerLocation.
        If write=True, this mapping is for an output dataset.

    query_{datasetType}(self, key, format, dataId)
        Return the possible values for the format fields that would produce
        datasets at the granularity of key in combination with the provided
        partial dataId.

    std_{datasetType}(self, item)
        Standardize an object of the given data set type.

    Methods that must be overridden:

    keys(self)
        Return a list of the keys that can be used in data ids.

    Other public methods:

    __init__(self)

    getDatasetTypes(self)

    map(self, datasetType, dataId, write=False)

    queryMetadata(self, datasetType, key, format, dataId)

    canStandardize(self, datasetType)

    standardize(self, datasetType, item, dataId)

    validate(self, dataId)
    """

    @staticmethod
    def Mapper(cfg):
        '''Instantiate a Mapper from a configuration.

        In some cases the cfg may have already been instantiated into a
        Mapper; this is allowed and the input variable is simply returned.

        :param cfg: the cfg for this mapper. It is recommended this be created
                    by calling Mapper.cfg()
        :return: a Mapper instance
        '''
        # A Policy cfg names the concrete mapper class under 'cls'; anything
        # else is assumed to already be a constructed Mapper and is passed
        # through unchanged.
        if isinstance(cfg, Policy):
            return cfg['cls'](cfg)
        return cfg

    def __new__(cls, *args, **kwargs):
        """Create a new Mapper, saving arguments for pickling.

        This is in __new__ instead of __init__ to save the user
        from having to save the arguments themselves (either explicitly,
        or by calling the super's __init__ with all their
        *args, **kwargs). The resulting pickling system (of __new__,
        __getstate__ and __setstate__) is similar to how __reduce__
        is usually used, except that we save the user from any
        responsibility (except when overriding __new__, but that
        is not common).
        """
        self = super().__new__(cls)
        # Captured constructor arguments; __getstate__/__setstate__ use these
        # so subclasses get pickling support for free.
        self._arguments = (args, kwargs)
        return self

    def __init__(self, **kwargs):
        pass

    def __getstate__(self):
        # Pickle only the original constructor arguments (see __new__).
        return self._arguments

    def __setstate__(self, state):
        # Restore by re-running __init__ with the original arguments.
        self._arguments = state
        args, kwargs = state
        self.__init__(*args, **kwargs)

    def keys(self):
        """Return a list of the keys that can be used in data ids.

        Must be overridden by subclasses.
        """
        raise NotImplementedError("keys() unimplemented")

    def queryMetadata(self, datasetType, format, dataId):
        """Get possible values for keys given a partial data id.

        :param datasetType: see documentation about the use of datasetType
        :param format: the key or keys whose possible values are requested
        :param dataId: see documentation about the use of dataId
        :return: the result of the subclass's query_{datasetType} method
        """
        # Dispatch to the subclass-provided query_{datasetType} method;
        # raises AttributeError if the dataset type has no query method.
        func = getattr(self, 'query_' + datasetType)
        return func(format, self.validate(dataId))

    def getDatasetTypes(self):
        """Return a list of the mappable dataset types."""
        # A dataset type is mappable iff a map_{datasetType} method exists;
        # strip the "map_" prefix to recover the type name.
        return [attr[4:] for attr in dir(self) if attr.startswith("map_")]

    def map(self, datasetType, dataId, write=False):
        """Map a data id using the mapping method for its dataset type.

        Parameters
        ----------
        datasetType : string
            The datasetType to map
        dataId : DataId instance
            The dataId to use when mapping
        write : bool, optional
            Indicates if the map is being performed for a read operation
            (False) or a write operation (True)

        Returns
        -------
        ButlerLocation or a list of ButlerLocation
            The location(s) found for the map operation. If write is True, a
            list is returned. If write is False a single ButlerLocation is
            returned.

        Raises
        ------
        NoResults
            If no location was found for this map operation, the derived
            mapper class may raise a lsst.daf.persistence.NoResults
            exception. Butler catches this and will look in the next
            Repository if there is one.
        """
        func = getattr(self, 'map_' + datasetType)
        return func(self.validate(dataId), write)

    def canStandardize(self, datasetType):
        """Return true if this mapper can standardize an object of the given
        dataset type."""
        return hasattr(self, 'std_' + datasetType)

    def standardize(self, datasetType, item, dataId):
        """Standardize an object using the standardization method for its data
        set type, if it exists."""
        # No std_{datasetType} method means no standardization is needed;
        # return the item unchanged.
        if hasattr(self, 'std_' + datasetType):
            func = getattr(self, 'std_' + datasetType)
            return func(item, self.validate(dataId))
        return item

    def validate(self, dataId):
        """Validate a dataId's contents.

        If the dataId is valid, return it. If an invalid component can be
        transformed into a valid one, copy the dataId, fix the component, and
        return the copy. Otherwise, raise an exception."""
        # Base class accepts everything; subclasses override to enforce rules.
        return dataId

    def backup(self, datasetType, dataId):
        """Rename any existing object with the given type and dataId.

        Not implemented in the base mapper.
        """
        raise NotImplementedError("Base-class Mapper does not implement backups")

    def getRegistry(self):
        """Get the registry"""
        return None