#
# LSST Data Management System
#
# Copyright 2008-2017 AURA/LSST.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program. If not,
# see <https://www.lsstcorp.org/LegalNotices/>.
#

__all__ = ["LoadIndexedReferenceObjectsConfig", "LoadIndexedReferenceObjectsTask"]

from .loadReferenceObjects import hasNanojanskyFluxUnits, convertToNanojansky, getFormatVersionFromRefCat
from lsst.meas.algorithms import getRefFluxField, LoadReferenceObjectsTask, LoadReferenceObjectsConfig
import lsst.afw.table as afwTable
import lsst.geom
import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
from .indexerRegistry import IndexerRegistry


class LoadIndexedReferenceObjectsConfig(LoadReferenceObjectsConfig):
    ref_dataset_name = pexConfig.Field(
        dtype=str,
        default='cal_ref_cat',
        doc='Name of the ingested reference dataset'
    )
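# A sketch of how this config is typically used: to point the loader at a
# different ingested reference catalog, override `ref_dataset_name` with the
# name used at ingest time (the dataset name below is only a placeholder):
#
#     config = LoadIndexedReferenceObjectsConfig()
#     config.ref_dataset_name = "gaia_dr2_20200414"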

 
class LoadIndexedReferenceObjectsTask(LoadReferenceObjectsTask):
    """Load reference objects from an indexed catalog ingested by
    IngestIndexReferenceTask.

    Parameters
    ----------
    butler : `lsst.daf.persistence.Butler`
        Data butler for reading catalogs
    """
    ConfigClass = LoadIndexedReferenceObjectsConfig
    _DefaultName = 'LoadIndexedReferenceObjectsTask'

    def __init__(self, butler, *args, **kwargs):
        LoadReferenceObjectsTask.__init__(self, *args, **kwargs)
        self.dataset_config = butler.get("ref_cat_config", name=self.config.ref_dataset_name, immediate=True)
        self.indexer = IndexerRegistry[self.dataset_config.indexer.name](self.dataset_config.indexer.active)
        # This needs to come from the loader config, not the dataset_config since directory aliases can
        # change the path where the shards are found.
        self.ref_dataset_name = self.config.ref_dataset_name
        self.butler = butler

    @pipeBase.timeMethod
    def loadSkyCircle(self, ctrCoord, radius, filterName=None, epoch=None, centroids=False):
        shardIdList, isOnBoundaryList = self.indexer.getShardIds(ctrCoord, radius)
        shards = self.getShards(shardIdList)
        refCat = self.butler.get('ref_cat',
                                 dataId=self.indexer.makeDataId('master_schema', self.ref_dataset_name),
                                 immediate=True)

        # load the catalog, one shard at a time
        for shard, isOnBoundary in zip(shards, isOnBoundaryList):
            if shard is None:
                continue
            if isOnBoundary:
                refCat.extend(self._trimToCircle(shard, ctrCoord, radius))
            else:
                refCat.extend(shard)

        # apply proper motion corrections
        if epoch is not None and "pm_ra" in refCat.schema:
            # check for a catalog in a non-standard format
            if isinstance(refCat.schema["pm_ra"].asKey(), lsst.afw.table.KeyAngle):
                self.applyProperMotions(refCat, epoch)
            else:
                self.log.warn("Catalog pm_ra field is not an Angle; not applying proper motion")

        # update version=0 style refcats to have nJy fluxes
        if self.dataset_config.format_version == 0 or not hasNanojanskyFluxUnits(refCat.schema):
            self.log.warn("Found version 0 reference catalog with old style units in schema.")
            self.log.warn("run `meas_algorithms/bin/convert_refcat_to_nJy.py` to convert fluxes to nJy.")
            self.log.warn("See RFC-575 for more details.")
            refCat = convertToNanojansky(refCat, self.log)
        else:
            # For version >= 1, the version should be in the catalog header,
            # too, and should be consistent with the version in the config.
            catVersion = getFormatVersionFromRefCat(refCat)
            if catVersion != self.dataset_config.format_version:
                raise RuntimeError(f"Format version in reference catalog ({catVersion}) does not match"
                                   f" format_version field in config ({self.dataset_config.format_version})")

        self._addFluxAliases(refCat.schema)
        fluxField = getRefFluxField(schema=refCat.schema, filterName=filterName)

        if centroids:
            # add and initialize centroid and hasCentroid fields (these are
            # added after loading to avoid wasting space in the saved catalogs)
            # the new fields are automatically initialized to (nan, nan) and
            # False so no need to set them explicitly
            mapper = afwTable.SchemaMapper(refCat.schema, True)
            mapper.addMinimalSchema(refCat.schema, True)
            mapper.editOutputSchema().addField("centroid_x", type=float)
            mapper.editOutputSchema().addField("centroid_y", type=float)
            mapper.editOutputSchema().addField("hasCentroid", type="Flag")
            expandedCat = afwTable.SimpleCatalog(mapper.getOutputSchema())
            expandedCat.extend(refCat, mapper=mapper)
            refCat = expandedCat

        # make sure catalog is contiguous
        if not refCat.isContiguous():
            refCat = refCat.copy(True)

        # return reference catalog
        return pipeBase.Struct(
            refCat=refCat,
            fluxField=fluxField,
        )

    def getShards(self, shardIdList):
        """Get shards by ID.

        Parameters
        ----------
        shardIdList : `list` of `int`
            A list of integer shard ids.

        Returns
        -------
        catalogs : `list` of `lsst.afw.table.SimpleCatalog`
            A list of reference catalogs, one for each entry in shardIdList.
        """
        shards = []
        for shardId in shardIdList:
            if self.butler.datasetExists('ref_cat',
                                         dataId=self.indexer.makeDataId(shardId, self.ref_dataset_name)):
                shards.append(self.butler.get('ref_cat',
                                              dataId=self.indexer.makeDataId(shardId, self.ref_dataset_name),
                                              immediate=True))
        return shards

    def _trimToCircle(self, refCat, ctrCoord, radius):
        """Trim a reference catalog to a circular aperture.

        Parameters
        ----------
        refCat : `lsst.afw.table.SimpleCatalog`
            Reference catalog to be trimmed.
        ctrCoord : `lsst.geom.SpherePoint`
            ICRS center of search region.
        radius : `lsst.geom.Angle`
            Radius of search region.

        Returns
        -------
        catalog : `lsst.afw.table.SimpleCatalog`
            Catalog containing objects that fall in the circular aperture.
        """
        tempCat = type(refCat)(refCat.schema)
        for record in refCat:
            if record.getCoord().separation(ctrCoord) < radius:
                tempCat.append(record)
        return tempCat
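

# A minimal usage sketch, assuming a Gen2 butler repository that already holds
# a reference catalog ingested under the default name 'cal_ref_cat'; the
# repository path, search center, radius, and filter name below are
# illustrative placeholders.
#
#     import lsst.daf.persistence as dafPersist
#     import lsst.geom
#
#     butler = dafPersist.Butler("/path/to/repo")
#     loader = LoadIndexedReferenceObjectsTask(butler=butler)
#
#     ctrCoord = lsst.geom.SpherePoint(215.6, 53.0, lsst.geom.degrees)
#     result = loader.loadSkyCircle(ctrCoord, 0.2*lsst.geom.degrees, filterName="r")
#     refCat = result.refCat        # contiguous lsst.afw.table.SimpleCatalog
#     fluxField = result.fluxField  # flux field name matching the requested filter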