Coverage for python/lsst/meas/algorithms/loadIndexedReferenceObjects.py: 23%

64 statements  

« prev     ^ index     » next       coverage.py v6.4.1, created at 2022-07-11 07:09 +0000

1# 

2# LSST Data Management System 

3# 

4# Copyright 2008-2017 AURA/LSST. 

5# 

6# This product includes software developed by the 

7# LSST Project (http://www.lsst.org/). 

8# 

9# This program is free software: you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation, either version 3 of the License, or 

12# (at your option) any later version. 

13# 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18# 

19# You should have received a copy of the LSST License Statement and 

20# the GNU General Public License along with this program. If not, 

21# see <https://www.lsstcorp.org/LegalNotices/>. 

22# 

23 

24__all__ = ["LoadIndexedReferenceObjectsConfig", "LoadIndexedReferenceObjectsTask"] 

25 

26from .loadReferenceObjects import hasNanojanskyFluxUnits, convertToNanojansky, getFormatVersionFromRefCat 

27from lsst.meas.algorithms import getRefFluxField, LoadReferenceObjectsTask, LoadReferenceObjectsConfig 

28import lsst.afw.table as afwTable 

29import lsst.pex.config as pexConfig 

30import lsst.pipe.base as pipeBase 

31from .indexerRegistry import IndexerRegistry 

32 

33 

class LoadIndexedReferenceObjectsConfig(LoadReferenceObjectsConfig):
    """Configuration for LoadIndexedReferenceObjectsTask."""

    ref_dataset_name = pexConfig.Field(
        doc='Name of the ingested reference dataset',
        dtype=str,
        default='cal_ref_cat',
    )

40 

41 

class LoadIndexedReferenceObjectsTask(LoadReferenceObjectsTask):
    """Load reference objects from an indexed catalog ingested by
    IngestIndexReferenceTask.

    Parameters
    ----------
    butler : `lsst.daf.persistence.Butler`
        Data butler for reading catalogs
    """
    ConfigClass = LoadIndexedReferenceObjectsConfig
    _DefaultName = 'LoadIndexedReferenceObjectsTask'

    def __init__(self, butler, *args, **kwargs):
        LoadReferenceObjectsTask.__init__(self, *args, **kwargs)
        self.dataset_config = butler.get("ref_cat_config", name=self.config.ref_dataset_name, immediate=True)
        self.indexer = IndexerRegistry[self.dataset_config.indexer.name](self.dataset_config.indexer.active)
        # The dataset name needs to come from the loader config, not the
        # dataset_config, since directory aliases can change the path where
        # the shards are found.
        self.ref_dataset_name = self.config.ref_dataset_name
        self.butler = butler

    @pipeBase.timeMethod
    def loadSkyCircle(self, ctrCoord, radius, filterName=None, epoch=None, centroids=False):
        """Load reference objects that lie within a circular region on the sky.

        Parameters
        ----------
        ctrCoord : `lsst.geom.SpherePoint`
            ICRS center of the search region.
        radius : `lsst.geom.Angle`
            Radius of the search region.
        filterName : `str`, optional
            Name of the filter whose flux field should be reported, or `None`
            for the default.
        epoch : optional
            Epoch passed to ``applyProperMotions``, or `None` to skip the
            correction.
        centroids : `bool`, optional
            If `True`, add (blank-initialized) ``centroid_x``, ``centroid_y``
            and ``hasCentroid`` fields to the returned catalog.

        Returns
        -------
        result : `lsst.pipe.base.Struct`
            Struct with ``refCat`` (the loaded, contiguous catalog) and
            ``fluxField`` (flux field name for ``filterName``).

        Raises
        ------
        RuntimeError
            If the format version in the catalog header disagrees with the
            ``format_version`` in the dataset config (version >= 1 only).
        """
        shardIdList, isOnBoundaryList = self.indexer.getShardIds(ctrCoord, radius)
        shards = self.getShards(shardIdList)
        # Start from the master schema so the result is well-formed even if
        # no shard overlaps the search region.
        refCat = self.butler.get('ref_cat',
                                 dataId=self.indexer.makeDataId('master_schema', self.ref_dataset_name),
                                 immediate=True)

        # Load the catalog, one shard at a time: interior shards can be taken
        # whole, shards on the circle's boundary must be trimmed.
        for shard, isOnBoundary in zip(shards, isOnBoundaryList):
            if shard is None:
                continue
            if isOnBoundary:
                refCat.extend(self._trimToCircle(shard, ctrCoord, radius))
            else:
                refCat.extend(shard)

        # Make sure catalog is contiguous: must do this before PM calculations.
        if not refCat.isContiguous():
            refCat = refCat.copy(True)

        # Apply proper motion corrections.
        self.applyProperMotions(refCat, epoch)

        # Update version=0 style refcats to have nJy fluxes.
        if self.dataset_config.format_version == 0 or not hasNanojanskyFluxUnits(refCat.schema):
            self.log.warning("Found version 0 reference catalog with old style units in schema.")
            self.log.warning("run `meas_algorithms/bin/convert_refcat_to_nJy.py` to convert fluxes to nJy.")
            self.log.warning("See RFC-575 for more details.")
            refCat = convertToNanojansky(refCat, self.log)
        else:
            # For version >= 1, the version should be in the catalog header,
            # too, and should be consistent with the version in the config.
            catVersion = getFormatVersionFromRefCat(refCat)
            if catVersion != self.dataset_config.format_version:
                raise RuntimeError(f"Format version in reference catalog ({catVersion}) does not match"
                                   f" format_version field in config ({self.dataset_config.format_version})")

        self._addFluxAliases(refCat.schema)
        fluxField = getRefFluxField(schema=refCat.schema, filterName=filterName)

        if centroids:
            # Add and initialize centroid and hasCentroid fields (these are
            # added after loading to avoid wasting space in the saved catalogs).
            # The new fields are automatically initialized to (nan, nan) and
            # False so no need to set them explicitly.
            mapper = afwTable.SchemaMapper(refCat.schema, True)
            mapper.addMinimalSchema(refCat.schema, True)
            mapper.editOutputSchema().addField("centroid_x", type=float)
            mapper.editOutputSchema().addField("centroid_y", type=float)
            mapper.editOutputSchema().addField("hasCentroid", type="Flag")
            expandedCat = afwTable.SimpleCatalog(mapper.getOutputSchema())
            expandedCat.extend(refCat, mapper=mapper)
            refCat = expandedCat

        # return reference catalog
        return pipeBase.Struct(
            refCat=refCat,
            fluxField=fluxField,
        )

    def getShards(self, shardIdList):
        """Get shards by ID.

        Parameters
        ----------
        shardIdList : `list` of `int`
            A list of integer shard ids.

        Returns
        -------
        catalogs : `list` of `lsst.afw.table.SimpleCatalog` or `None`
            One entry per entry in ``shardIdList``: the shard's catalog, or
            `None` if that shard does not exist in the repository.
        """
        shards = []
        for shardId in shardIdList:
            # Build the dataId once per shard (it was previously constructed
            # twice: once for the existence check and once for the get).
            dataId = self.indexer.makeDataId(shardId, self.ref_dataset_name)
            if self.butler.datasetExists('ref_cat', dataId=dataId):
                shards.append(self.butler.get('ref_cat', dataId=dataId, immediate=True))
            else:
                # Append a placeholder for missing shards so the result stays
                # aligned with shardIdList: loadSkyCircle zips this list with
                # isOnBoundaryList and skips None entries. Silently dropping
                # missing shards would pair later shards with the wrong
                # boundary flags.
                shards.append(None)
        return shards

    def _trimToCircle(self, refCat, ctrCoord, radius):
        """Trim a reference catalog to a circular aperture.

        Parameters
        ----------
        refCat : `lsst.afw.table.SimpleCatalog`
            Reference catalog to be trimmed.
        ctrCoord : `lsst.geom.SpherePoint`
            ICRS center of search region.
        radius : `lsst.geom.Angle`
            Radius of search region.

        Returns
        -------
        catalog : `lsst.afw.table.SimpleCatalog`
            Catalog containing objects that fall in the circular aperture.
        """
        # Build a new catalog of the same concrete type/schema and copy only
        # records whose coordinates lie strictly inside the radius.
        tempCat = type(refCat)(refCat.schema)
        for record in refCat:
            if record.getCoord().separation(ctrCoord) < radius:
                tempCat.append(record)
        return tempCat

167 return tempCat