#
# LSST Data Management System
#
# Copyright 2008-2017 AURA/LSST.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program. If not,
# see <https://www.lsstcorp.org/LegalNotices/>.
#

__all__ = ["LoadIndexedReferenceObjectsConfig", "LoadIndexedReferenceObjectsTask"]

from .loadReferenceObjects import hasNanojanskyFluxUnits, convertToNanojansky, getFormatVersionFromRefCat
from lsst.meas.algorithms import getRefFluxField, LoadReferenceObjectsTask, LoadReferenceObjectsConfig
import lsst.afw.table as afwTable
import lsst.geom
import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
from .indexerRegistry import IndexerRegistry


class LoadIndexedReferenceObjectsConfig(LoadReferenceObjectsConfig):
    ref_dataset_name = pexConfig.Field(
        dtype=str,
        default='cal_ref_cat',
        doc='Name of the ingested reference dataset',
    )
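    # A typical override, e.g. in a pipeline config override file
    # (the dataset name below is hypothetical):
    #     config.ref_dataset_name = "my_ref_cat"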


class LoadIndexedReferenceObjectsTask(LoadReferenceObjectsTask):
    """Load reference objects from an indexed catalog ingested by
    IngestIndexReferenceTask.

    Parameters
    ----------
    butler : `lsst.daf.persistence.Butler`
        Data butler for reading catalogs
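
    Examples
    --------
    A minimal usage sketch (not part of this module); the repository path
    and search region below are hypothetical::

        import lsst.daf.persistence as dafPersist
        import lsst.geom
        from lsst.meas.algorithms import LoadIndexedReferenceObjectsTask

        butler = dafPersist.Butler("/path/to/repo")  # hypothetical repository
        loader = LoadIndexedReferenceObjectsTask(butler=butler)
        center = lsst.geom.SpherePoint(10.0, -5.0, lsst.geom.degrees)
        result = loader.loadSkyCircle(center, 0.2*lsst.geom.degrees, filterName="r")
        refCat = result.refCat  # an `lsst.afw.table.SimpleCatalog`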

    """
    ConfigClass = LoadIndexedReferenceObjectsConfig
    _DefaultName = 'LoadIndexedReferenceObjectsTask'

    def __init__(self, butler, *args, **kwargs):
        LoadReferenceObjectsTask.__init__(self, *args, **kwargs)
        self.dataset_config = butler.get("ref_cat_config", name=self.config.ref_dataset_name, immediate=True)
        self.indexer = IndexerRegistry[self.dataset_config.indexer.name](self.dataset_config.indexer.active)
        # This needs to come from the loader config, not the dataset_config since directory aliases can
        # change the path where the shards are found.
        self.ref_dataset_name = self.config.ref_dataset_name
        self.butler = butler

    @pipeBase.timeMethod
    def loadSkyCircle(self, ctrCoord, radius, filterName=None, epoch=None, centroids=False):
        # Find the index shards that overlap the search circle; shards on the
        # boundary of the circle need per-record trimming below.
        shardIdList, isOnBoundaryList = self.indexer.getShardIds(ctrCoord, radius)
        shards = self.getShards(shardIdList)
        # Start from the "master_schema" catalog, which carries the full schema
        # for this reference dataset, and extend it with the shard contents.
        refCat = self.butler.get('ref_cat',
                                 dataId=self.indexer.makeDataId('master_schema', self.ref_dataset_name),
                                 immediate=True)

        # load the catalog, one shard at a time
        for shard, isOnBoundary in zip(shards, isOnBoundaryList):
            if shard is None:
                continue
            if isOnBoundary:
                refCat.extend(self._trimToCircle(shard, ctrCoord, radius))
            else:
                refCat.extend(shard)

        # make sure catalog is contiguous: must do this before PM calculations
        if not refCat.isContiguous():
            refCat = refCat.copy(True)

        # apply proper motion corrections
        if epoch is not None and "pm_ra" in refCat.schema:
            # check for a catalog in a non-standard format
            if isinstance(refCat.schema["pm_ra"].asKey(), afwTable.KeyAngle):
                self.applyProperMotions(refCat, epoch)
            else:
                self.log.warn("Catalog pm_ra field is not an Angle; not applying proper motion")

        # update version=0 style refcats to have nJy fluxes
        if self.dataset_config.format_version == 0 or not hasNanojanskyFluxUnits(refCat.schema):
            self.log.warn("Found version 0 reference catalog with old style units in schema.")
            self.log.warn("run `meas_algorithms/bin/convert_refcat_to_nJy.py` to convert fluxes to nJy.")
            self.log.warn("See RFC-575 for more details.")
            refCat = convertToNanojansky(refCat, self.log)
        else:
            # For version >= 1, the version should be in the catalog header,
            # too, and should be consistent with the version in the config.
            catVersion = getFormatVersionFromRefCat(refCat)
            if catVersion != self.dataset_config.format_version:
                raise RuntimeError(f"Format version in reference catalog ({catVersion}) does not match"
                                   f" format_version field in config ({self.dataset_config.format_version})")

        self._addFluxAliases(refCat.schema)
        fluxField = getRefFluxField(schema=refCat.schema, filterName=filterName)

        if centroids:
            # add and initialize centroid and hasCentroid fields (these are
            # added after loading to avoid wasting space in the saved catalogs)
            # the new fields are automatically initialized to (nan, nan) and
            # False so no need to set them explicitly
            mapper = afwTable.SchemaMapper(refCat.schema, True)
            mapper.addMinimalSchema(refCat.schema, True)
            mapper.editOutputSchema().addField("centroid_x", type=float)
            mapper.editOutputSchema().addField("centroid_y", type=float)
            mapper.editOutputSchema().addField("hasCentroid", type="Flag")
            expandedCat = afwTable.SimpleCatalog(mapper.getOutputSchema())
            expandedCat.extend(refCat, mapper=mapper)
            refCat = expandedCat

        # return reference catalog
        return pipeBase.Struct(
            refCat=refCat,
            fluxField=fluxField,
        )

    def getShards(self, shardIdList):
        """Get shards by ID.

        Parameters
        ----------
        shardIdList : `list` of `int`
            A list of integer shard ids.

        Returns
        -------
        catalogs : `list` of `lsst.afw.table.SimpleCatalog`
            A list of reference catalogs, one for each shard in shardIdList
            that exists in the repository; missing shards are skipped.
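
        Examples
        --------
        A sketch mirroring how `loadSkyCircle` obtains shard ids from the
        indexer (``ctrCoord`` and ``radius`` stand in for a search region)::

            shardIdList, isOnBoundaryList = self.indexer.getShardIds(ctrCoord, radius)
            shards = self.getShards(shardIdList)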

        """
        shards = []
        for shardId in shardIdList:
            if self.butler.datasetExists('ref_cat',
                                         dataId=self.indexer.makeDataId(shardId, self.ref_dataset_name)):
                shards.append(self.butler.get('ref_cat',
                                              dataId=self.indexer.makeDataId(shardId, self.ref_dataset_name),
                                              immediate=True))
        return shards

    def _trimToCircle(self, refCat, ctrCoord, radius):
        """Trim a reference catalog to a circular aperture.

        Parameters
        ----------
        refCat : `lsst.afw.table.SimpleCatalog`
            Reference catalog to be trimmed.
        ctrCoord : `lsst.geom.SpherePoint`
            ICRS center of search region.
        radius : `lsst.geom.Angle`
            Radius of search region.

        Returns
        -------
        catalog : `lsst.afw.table.SimpleCatalog`
            Catalog containing objects that fall in the circular aperture.
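
        Examples
        --------
        `loadSkyCircle` uses this to trim shards that straddle the search
        region boundary; a minimal sketch::

            trimmed = self._trimToCircle(shard, ctrCoord, radius)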

        """
        tempCat = type(refCat)(refCat.schema)
        for record in refCat:
            if record.getCoord().separation(ctrCoord) < radius:
                tempCat.append(record)
        return tempCat