import functools
import pandas as pd
import numpy as np

from collections import defaultdict

import lsst.geom
import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
import lsst.afw.table as afwTable
from lsst.meas.base import SingleFrameMeasurementTask
from lsst.pipe.base import CmdLineTask, ArgumentParser, DataIdContainer
from lsst.pipe.base import connectionTypes
from lsst.coadd.utils.coaddDataIdContainer import CoaddDataIdContainer

from .parquetTable import ParquetTable
from .multiBandUtils import makeMergeArgumentParser, MergeSourcesRunner
from .functors import CompositeFunctor, RAColumn, DecColumn, Column
def flattenFilters(df, filterDict, noDupCols=['coord_ra', 'coord_dec'], camelCase=False):
    """Flattens a dataframe with multilevel column index.
    """
    newDf = pd.DataFrame()
    for filt, filtShort in filterDict.items():
        subdf = df[filt]
        columnFormat = '{0}{1}' if camelCase else '{0}_{1}'
        newColumns = {c: columnFormat.format(filtShort, c)
                      for c in subdf.columns if c not in noDupCols}
        cols = list(newColumns.keys())
        newDf = pd.concat([newDf, subdf[cols].rename(columns=newColumns)], axis=1)

    newDf = pd.concat([subdf[noDupCols], newDf], axis=1)
    return newDf
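

# Illustrative sketch (not part of the pipeline): a toy demonstration of what
# ``flattenFilters`` does to a two-level (filter, column) index. The filter
# names and the filterDict below are made-up placeholders; a module-level
# ``import pandas as pd`` is assumed to exist in the header.
def _exampleFlattenFilters():
    import pandas as pd

    # Two per-filter sub-tables sharing the coordinate columns.
    perFilter = {
        'HSC-G': pd.DataFrame({'coord_ra': [10.0], 'coord_dec': [-5.0], 'PsfFlux': [1.2]}),
        'HSC-R': pd.DataFrame({'coord_ra': [10.0], 'coord_dec': [-5.0], 'PsfFlux': [3.4]}),
    }
    df = pd.concat(perFilter, axis=1, names=['filter', 'column'])

    # Map full filter names to the short tags used in the flattened column names.
    filterDict = {'HSC-G': 'g', 'HSC-R': 'r'}
    flat = flattenFilters(df, filterDict, camelCase=True)
    # Expected columns: coord_ra, coord_dec, gPsfFlux, rPsfFlux
    return flat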
class WriteObjectTableConfig(pexConfig.Config):
    priorityList = pexConfig.ListField(
        dtype=str,
        default=[],
        doc="Priority-ordered list of bands for the merge."
    )
    engine = pexConfig.Field(
        dtype=str,
        default="pyarrow",
        doc="Parquet engine for writing (pyarrow or fastparquet)"
    )
    coaddName = pexConfig.Field(
        dtype=str,
        default="deep",
        doc="Name of coadd"
    )

    def validate(self):
        pexConfig.Config.validate(self)
        if len(self.priorityList) == 0:
            raise RuntimeError("No priority list provided")
81 """Write filter-merged source tables to parquet
83 _DefaultName =
"writeObjectTable"
84 ConfigClass = WriteObjectTableConfig
85 RunnerClass = MergeSourcesRunner
88 inputDatasets = (
'forced_src',
'meas',
'ref')
93 def __init__(self, butler=None, schema=None, **kwargs):
97 CmdLineTask.__init__(self, **kwargs)
    def runDataRef(self, patchRefList):
        """!
        @brief Merge coadd sources from multiple bands. Calls @ref run which must be defined in
        subclasses that inherit from MergeSourcesTask.
        @param[in] patchRefList list of data references for each filter
        """
        catalogs = dict(self.readCatalog(patchRef) for patchRef in patchRefList)
        dataId = patchRefList[0].dataId
        mergedCatalog = self.run(catalogs, tract=dataId['tract'], patch=dataId['patch'])
        self.write(patchRefList[0], mergedCatalog)
    @classmethod
    def _makeArgumentParser(cls):
        """Create a suitable ArgumentParser.

        We will use the ArgumentParser to get a list of data
        references for patches; the RunnerClass will sort them into lists
        of data references for the same patch.

        References first of self.inputDatasets, rather than
        self.inputDataset
        """
        return makeMergeArgumentParser(cls._DefaultName, cls.inputDatasets[0])
124 """Read input catalogs
126 Read all the input datasets given by the 'inputDatasets'
131 patchRef : `lsst.daf.persistence.ButlerDataRef`
132 Data reference for patch
136 Tuple consisting of filter name and a dict of catalogs, keyed by
139 filterName = patchRef.dataId[
"filter"]
142 catalog = patchRef.get(self.config.coaddName +
"Coadd_" + dataset, immediate=
True)
143 self.log.info(
"Read %d sources from %s for filter %s: %s" %
144 (len(catalog), dataset, filterName, patchRef.dataId))
145 catalogDict[dataset] = catalog
146 return filterName, catalogDict
    def run(self, catalogs, tract, patch):
        """Merge multiple catalogs.

        Parameters
        ----------
        catalogs : `dict`
            Mapping from filter names to dict of catalogs.
        tract : `int`
            tractId to use for the tractId column.
        patch : `str`
            patchId to use for the patchId column.

        Returns
        -------
        catalog : `lsst.pipe.tasks.parquetTable.ParquetTable`
            Merged dataframe, with each column prefixed by
            `filter_tag(filt)`, wrapped in the parquet writer shim class.
        """
        dfs = []
        for filt, tableDict in catalogs.items():
            for dataset, table in tableDict.items():
                # Convert afwTable to pandas DataFrame, indexed by source id
                df = table.asAstropy().to_pandas().set_index('id', drop=True)

                # Sort columns by name, to ensure matching schema among patches
                df = df.reindex(sorted(df.columns), axis=1)
                df['tractId'] = tract
                df['patchId'] = patch

                # Make columns a 3-level MultiIndex
                df.columns = pd.MultiIndex.from_tuples([(dataset, filt, c) for c in df.columns],
                                                       names=('dataset', 'filter', 'column'))
                dfs.append(df)

        catalog = functools.reduce(lambda d1, d2: d1.join(d2), dfs)
        return ParquetTable(dataFrame=catalog)
    def write(self, patchRef, catalog):
        """Write the output.

        Parameters
        ----------
        catalog : `ParquetTable`
            Catalog to write.
        patchRef : `lsst.daf.persistence.ButlerDataRef`
            Data reference for patch
        """
        patchRef.put(catalog, self.config.coaddName + "Coadd_" + self.outputDataset)
        # since the filter isn't actually part of the data ID for the dataset we're saving,
        # it's confusing to see it in the log message, even if the butler simply ignores it.
        mergeDataId = patchRef.dataId.copy()
        del mergeDataId["filter"]
        self.log.info("Wrote merged catalog: %s" % (mergeDataId,))
204 """No metadata to write, and not sure how to write it for a list of dataRefs.
class WriteSourceTableConfig(pexConfig.Config):
    doApplyExternalPhotoCalib = pexConfig.Field(
        dtype=bool,
        default=False,
        doc=("Add local photoCalib columns from the calexp.photoCalib? Should only set True if "
             "generating Source Tables from older src tables which do not already have local calib columns")
    )
    doApplyExternalSkyWcs = pexConfig.Field(
        dtype=bool,
        default=False,
        doc=("Add local WCS columns from the calexp.wcs? Should only set True if "
             "generating Source Tables from older src tables which do not already have local calib columns")
    )
225 """Write source table to parquet
227 _DefaultName =
"writeSourceTable"
228 ConfigClass = WriteSourceTableConfig
231 src = dataRef.get(
'src')
232 if self.config.doApplyExternalPhotoCalib
or self.config.doApplyExternalSkyWcs:
235 ccdVisitId = dataRef.get(
'ccdExposureId')
236 result = self.
run(src, ccdVisitId=ccdVisitId)
237 dataRef.put(result.table,
'source')
    def run(self, catalog, ccdVisitId=None):
        """Convert `src` catalog to parquet

        Parameters
        ----------
        catalog : `afwTable.SourceCatalog`
            catalog to be converted
        ccdVisitId : `int`
            ccdVisitId to be added as a column

        Returns
        -------
        result : `lsst.pipe.base.Struct`
            ``table``
                `ParquetTable` version of the input catalog
        """
        self.log.info("Generating parquet table from src catalog")
        df = catalog.asAstropy().to_pandas().set_index('id', drop=True)
        df['ccdVisitId'] = ccdVisitId
        return pipeBase.Struct(table=ParquetTable(dataFrame=df))
261 """Add columns with local calibration evaluated at each centroid
263 for backwards compatibility with old repos.
264 This exists for the purpose of converting old src catalogs
265 (which don't have the expected local calib columns) to Source Tables.
269 catalog: `afwTable.SourceCatalog`
270 catalog to which calib columns will be added
271 dataRef: `lsst.daf.persistence.ButlerDataRef
272 for fetching the calibs from disk.
276 newCat: `afwTable.SourceCatalog`
277 Source Catalog with requested local calib columns
279 mapper = afwTable.SchemaMapper(catalog.schema)
280 measureConfig = SingleFrameMeasurementTask.ConfigClass()
281 measureConfig.doReplaceWithNoise =
False
284 exposure = dataRef.get(
'calexp_sub',
287 mapper = afwTable.SchemaMapper(catalog.schema)
288 mapper.addMinimalSchema(catalog.schema,
True)
289 schema = mapper.getOutputSchema()
291 exposureIdInfo = dataRef.get(
"expIdInfo")
292 measureConfig.plugins.names = []
293 if self.config.doApplyExternalSkyWcs:
294 plugin =
'base_LocalWcs'
296 raise RuntimeError(f
"{plugin} already in src catalog. Set doApplyExternalSkyWcs=False")
298 measureConfig.plugins.names.add(plugin)
300 if self.config.doApplyExternalPhotoCalib:
301 plugin =
'base_LocalPhotoCalib'
303 raise RuntimeError(f
"{plugin} already in src catalog. Set doApplyExternalPhotoCalib=False")
305 measureConfig.plugins.names.add(plugin)
307 measurement = SingleFrameMeasurementTask(config=measureConfig, schema=schema)
308 newCat = afwTable.SourceCatalog(schema)
309 newCat.extend(catalog, mapper=mapper)
310 measurement.run(measCat=newCat, exposure=exposure, exposureId=exposureIdInfo.expId)
314 """No metadata to write.
319 def _makeArgumentParser(cls):
321 parser.add_id_argument(
"--id",
'src',
322 help=
"data ID, e.g. --id visit=12345 ccd=0")
327 """Calculate columns from ParquetTable
329 This object manages and organizes an arbitrary set of computations
330 on a catalog. The catalog is defined by a
331 `lsst.pipe.tasks.parquetTable.ParquetTable` object (or list thereof), such as a
332 `deepCoadd_obj` dataset, and the computations are defined by a collection
333 of `lsst.pipe.tasks.functor.Functor` objects (or, equivalently,
334 a `CompositeFunctor`).
336 After the object is initialized, accessing the `.df` attribute (which
337 holds the `pandas.DataFrame` containing the results of the calculations) triggers
338 computation of said dataframe.
340 One of the conveniences of using this object is the ability to define a desired common
341 filter for all functors. This enables the same functor collection to be passed to
342 several different `PostprocessAnalysis` objects without having to change the original
343 functor collection, since the `filt` keyword argument of this object triggers an
344 overwrite of the `filt` property for all functors in the collection.
346 This object also allows a list of refFlags to be passed, and defines a set of default
347 refFlags that are always included even if not requested.
349 If a list of `ParquetTable` object is passed, rather than a single one, then the
350 calculations will be mapped over all the input catalogs. In principle, it should
351 be straightforward to parallelize this activity, but initial tests have failed
352 (see TODO in code comments).
356 parq : `lsst.pipe.tasks.ParquetTable` (or list of such)
357 Source catalog(s) for computation
359 functors : `list`, `dict`, or `lsst.pipe.tasks.functors.CompositeFunctor`
360 Computations to do (functors that act on `parq`).
361 If a dict, the output
362 DataFrame will have columns keyed accordingly.
363 If a list, the column keys will come from the
364 `.shortname` attribute of each functor.
366 filt : `str` (optional)
367 Filter in which to calculate. If provided,
368 this will overwrite any existing `.filt` attribute
369 of the provided functors.
371 flags : `list` (optional)
372 List of flags (per-band) to include in output table.
374 refFlags : `list` (optional)
375 List of refFlags (only reference band) to include in output table.
    _defaultRefFlags = []
    _defaultFuncs = (('coord_ra', RAColumn()),
                     ('coord_dec', DecColumn()))

    def __init__(self, parq, functors, filt=None, flags=None, refFlags=None):
        self.parq = parq
        self.functors = functors

        self.filt = filt
        self.flags = list(flags) if flags is not None else []
        self.refFlags = list(self._defaultRefFlags)
        if refFlags is not None:
            self.refFlags += list(refFlags)

        self._df = None

    @property
    def func(self):
        additionalFuncs = dict(self._defaultFuncs)
        additionalFuncs.update({flag: Column(flag, dataset='ref') for flag in self.refFlags})
        additionalFuncs.update({flag: Column(flag, dataset='meas') for flag in self.flags})

        if isinstance(self.functors, CompositeFunctor):
            func = self.functors
        else:
            func = CompositeFunctor(self.functors)

        func.funcDict.update(additionalFuncs)
        func.filt = self.filt
        return func

    @property
    def noDupCols(self):
        return [name for name, func in self.func.funcDict.items() if func.noDup or func.dataset == 'ref']

    @property
    def df(self):
        if self._df is None:
            self.compute()
        return self._df

    def compute(self, dropna=False, pool=None):
        # Map the functor collection over one or more input tables
        if type(self.parq) in (list, tuple):
            if pool is None:
                dflist = [self.func(parq, dropna=dropna) for parq in self.parq]
            else:
                # TODO: mapping the computation over a pool currently fails
                # (see the note about parallelization in the class docstring).
                dflist = pool.map(functools.partial(self.func, dropna=dropna), self.parq)
            self._df = pd.concat(dflist)
        else:
            self._df = self.func(self.parq, dropna=dropna)
        return self._df
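

# Illustrative sketch (not part of the pipeline): the ``compute`` pattern above,
# i.e. applying one callable (the "functor collection") to several catalogs and
# concatenating the per-catalog results. Plain DataFrames and a plain function
# stand in for ParquetTable and CompositeFunctor here.
def _exampleMapOverCatalogs():
    import numpy as np
    import pandas as pd

    def func(table):
        # A stand-in "functor": compute one output column per input catalog.
        return pd.DataFrame({'mag': -2.5 * np.log10(table['flux'])})

    catalogs = [pd.DataFrame({'flux': [100.0, 1000.0]}),
                pd.DataFrame({'flux': [10.0]})]
    dflist = [func(parq) for parq in catalogs]
    return pd.concat(dflist)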
class TransformCatalogBaseConfig(pexConfig.Config):
    functorFile = pexConfig.Field(
        dtype=str,
        doc='Path to YAML file specifying functors to be computed',
        default=None,
    )
451 """Base class for transforming/standardizing a catalog
453 by applying functors that convert units and apply calibrations.
454 The purpose of this task is to perform a set of computations on
455 an input `ParquetTable` dataset (such as `deepCoadd_obj`) and write the
456 results to a new dataset (which needs to be declared in an `outputDataset`
459 The calculations to be performed are defined in a YAML file that specifies
460 a set of functors to be computed, provided as
461 a `--functorFile` config parameter. An example of such a YAML file
486 - base_InputCount_value
489 functor: DeconvolvedMoments
494 - merge_measurement_i
495 - merge_measurement_r
496 - merge_measurement_z
497 - merge_measurement_y
498 - merge_measurement_g
499 - base_PixelFlags_flag_inexact_psfCenter
502 The names for each entry under "func" will become the names of columns in the
503 output dataset. All the functors referenced are defined in `lsst.pipe.tasks.functors`.
504 Positional arguments to be passed to each functor are in the `args` list,
505 and any additional entries for each column other than "functor" or "args" (e.g., `'filt'`,
506 `'dataset'`) are treated as keyword arguments to be passed to the functor initialization.
508 The "refFlags" entry is shortcut for a bunch of `Column` functors with the original column and
509 taken from the `'ref'` dataset.
511 The "flags" entry will be expanded out per band.
513 Note, if `'filter'` is provided as part of the `dataId` when running this task (even though
514 `deepCoadd_obj` does not use `'filter'`), then this will override the `filt` kwargs
515 provided in the YAML file, and the calculations will be done in that filter.
517 This task uses the `lsst.pipe.tasks.postprocess.PostprocessAnalysis` object
518 to organize and excecute the calculations.
522 def _DefaultName(self):
523 raise NotImplementedError(
'Subclass must define "_DefaultName" attribute')
527 raise NotImplementedError(
'Subclass must define "outputDataset" attribute')
531 raise NotImplementedError(
'Subclass must define "inputDataset" attribute')
535 raise NotImplementedError(
'Subclass must define "ConfigClass" attribute')
    def runDataRef(self, dataRef):
        parq = dataRef.get()
        funcs = self.getFunctors()
        df = self.run(parq, funcs=funcs, dataId=dataRef.dataId)
        self.write(df, dataRef)
        return df

    def run(self, parq, funcs=None, dataId=None):
        """Do postprocessing calculations

        Takes a `ParquetTable` object and dataId,
        returns a dataframe with results of postprocessing calculations.

        Parameters
        ----------
        parq : `lsst.pipe.tasks.parquetTable.ParquetTable`
            ParquetTable from which calculations are done.
        funcs : `lsst.pipe.tasks.functors.Functors`
            Functors to apply to the table's columns
        dataId : dict, optional
            Used to add a `patchId` column to the output dataframe.

        Returns
        -------
        `pandas.DataFrame`
        """
        self.log.info("Transforming/standardizing the source table dataId: %s", dataId)

        filt = dataId.get('filter', None)
        df = self.transform(filt, parq, funcs, dataId).df
        self.log.info("Made a table of %d columns and %d rows", len(df.columns), len(df))
        return df

    def getFunctors(self):
        funcs = CompositeFunctor.from_file(self.config.functorFile)
        funcs.update(dict(PostprocessAnalysis._defaultFuncs))
        return funcs

    def getAnalysis(self, parq, funcs=None, filt=None):
        if funcs is None:
            funcs = self.getFunctors()
        return PostprocessAnalysis(parq, funcs, filt=filt)

    def transform(self, filt, parq, funcs, dataId):
        analysis = self.getAnalysis(parq, funcs=funcs, filt=filt)
        df = analysis.df
        if dataId is not None:
            for key, value in dataId.items():
                df[str(key)] = value

        return pipeBase.Struct(
            df=df,
            analysis=analysis
        )

    def writeMetadata(self, dataRef):
        """No metadata to write.
        """
        pass
class TransformObjectCatalogConfig(TransformCatalogBaseConfig):
    coaddName = pexConfig.Field(
        dtype=str,
        default="deep",
        doc="Name of coadd"
    )
    filterMap = pexConfig.DictField(
        keytype=str,
        itemtype=str,
        doc=("Dictionary mapping full filter name to short one for column name munging. "
             "These filters determine the output columns no matter what filters the "
             "input data actually contain.")
    )
    camelCase = pexConfig.Field(
        dtype=bool,
        doc=("Write per-filter column names with camelCase, else underscore. "
             "For example: gPsfFlux instead of g_PsfFlux.")
    )
    multilevelOutput = pexConfig.Field(
        dtype=bool,
        doc=("Whether results dataframe should have a multilevel column index (True) or be flat "
             "and name-munged (False).")
    )
633 """Compute Flatted Object Table as defined in the DPDD
635 Do the same set of postprocessing calculations on all bands
637 This is identical to `TransformCatalogBaseTask`, except for that it does the
638 specified functor calculations for all filters present in the
639 input `deepCoadd_obj` table. Any specific `"filt"` keywords specified
640 by the YAML file will be superceded.
642 _DefaultName =
"transformObjectCatalog"
643 ConfigClass = TransformObjectCatalogConfig
645 inputDataset =
'deepCoadd_obj'
646 outputDataset =
'objectTable'
649 def _makeArgumentParser(cls):
652 ContainerClass=CoaddDataIdContainer,
653 help=
"data ID, e.g. --id tract=12345 patch=1,2")
    def run(self, parq, funcs=None, dataId=None):
        dfDict = {}
        analysisDict = {}
        templateDf = pd.DataFrame()

        # Perform the calculation for each filter present in the input table
        # that is also listed in config.filterMap
        for filt in parq.columnLevelNames['filter']:
            if filt not in self.config.filterMap:
                self.log.info("Ignoring %s data in the input", filt)
                continue
            self.log.info("Transforming the catalog of filter %s", filt)
            result = self.transform(filt, parq, funcs, dataId)
            dfDict[filt] = result.df
            analysisDict[filt] = result.analysis
            templateDf = result.df

        # Fill NaNs in columns of other wanted filters
        for filt in self.config.filterMap:
            if filt not in dfDict:
                self.log.info("Adding empty columns for filter %s", filt)
                dfDict[filt] = pd.DataFrame().reindex_like(templateDf)

        # This makes a multilevel column index, with filter as first level
        df = pd.concat(dfDict, axis=1, names=['filter', 'column'])

        if not self.config.multilevelOutput:
            noDupCols = list(set.union(*[set(v.noDupCols) for v in analysisDict.values()]))
            if dataId is not None:
                noDupCols += list(dataId.keys())
            df = flattenFilters(df, self.config.filterMap, noDupCols=noDupCols,
                                camelCase=self.config.camelCase)

        self.log.info("Made a table of %d columns and %d rows", len(df.columns), len(df))
        return df
696 """Make self.refList from self.idList
698 Generate a list of data references given tract and/or patch.
699 This was adapted from `TractQADataIdContainer`, which was
700 `TractDataIdContainer` modifie to not require "filter".
701 Only existing dataRefs are returned.
703 def getPatchRefList(tract):
704 return [namespace.butler.dataRef(datasetType=self.datasetType,
706 patch=
"%d,%d" % patch.getIndex())
for patch
in tract]
708 tractRefs = defaultdict(list)
709 for dataId
in self.idList:
712 if "tract" in dataId:
713 tractId = dataId[
"tract"]
714 if "patch" in dataId:
715 tractRefs[tractId].append(namespace.butler.dataRef(datasetType=self.datasetType,
717 patch=dataId[
'patch']))
719 tractRefs[tractId] += getPatchRefList(skymap[tractId])
721 tractRefs = dict((tract.getId(), tractRefs.get(tract.getId(), []) + getPatchRefList(tract))
724 for tractRefList
in tractRefs.values():
725 existingRefs = [ref
for ref
in tractRefList
if ref.datasetExists()]
726 outputRefList.append(existingRefs)
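

# Illustrative sketch (not part of the pipeline): the grouping-then-filtering
# pattern used by ``makeDataRefList`` above, with plain dicts standing in for
# data references and a simple flag standing in for ``datasetExists``.
def _exampleGroupAndFilter():
    from collections import defaultdict

    dataIds = [{'tract': 1, 'patch': '0,0', 'exists': True},
               {'tract': 1, 'patch': '0,1', 'exists': False},
               {'tract': 2, 'patch': '1,1', 'exists': True}]

    tractRefs = defaultdict(list)
    for dataId in dataIds:
        tractRefs[dataId['tract']].append(dataId)

    # Keep only "existing" entries, one list per tract.
    return [[ref for ref in refs if ref['exists']] for refs in tractRefs.values()]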
class ConsolidateObjectTableConfig(pexConfig.Config):
    coaddName = pexConfig.Field(
        dtype=str,
        default="deep",
        doc="Name of coadd"
    )


class ConsolidateObjectTableTask(CmdLineTask):
    """Write patch-merged source tables to a tract-level parquet file.
    """
    _DefaultName = "consolidateObjectTable"
    ConfigClass = ConsolidateObjectTableConfig

    inputDataset = 'objectTable'
    outputDataset = 'objectTable_tract'

    @classmethod
    def _makeArgumentParser(cls):
        parser = ArgumentParser(name=cls._DefaultName)
        parser.add_id_argument("--id", cls.inputDataset,
                               help="data ID, e.g. --id tract=12345",
                               ContainerClass=TractObjectDataIdContainer)
        return parser

    def runDataRef(self, patchRefList):
        df = pd.concat([patchRef.get().toDataFrame() for patchRef in patchRefList])
        patchRefList[0].put(ParquetTable(dataFrame=df), self.outputDataset)

    def writeMetadata(self, dataRef):
        """No metadata to write.
        """
        pass
class TransformSourceTableConfig(TransformCatalogBaseConfig):
    pass


class TransformSourceTableTask(TransformCatalogBaseTask):
    """Transform/standardize a source catalog.
    """
    _DefaultName = "transformSourceTable"
    ConfigClass = TransformSourceTableConfig

    inputDataset = 'source'
    outputDataset = 'sourceTable'

    def writeMetadata(self, dataRef):
        """No metadata to write.
        """
        pass

    @classmethod
    def _makeArgumentParser(cls):
        parser = ArgumentParser(name=cls._DefaultName)
        parser.add_id_argument("--id", datasetType=cls.inputDataset,
                               help="data ID, e.g. --id visit=12345 ccd=0")
        return parser
class ConsolidateVisitSummaryConnections(pipeBase.PipelineTaskConnections,
                                         dimensions=("instrument", "visit",),
                                         defaultTemplates={}):
    calexp = connectionTypes.Input(
        doc="Processed exposures used for metadata",
        name="calexp",
        storageClass="ExposureF",
        dimensions=("instrument", "visit", "detector"),
        deferLoad=True,
        multiple=True,
    )
    visitSummary = connectionTypes.Output(
        doc="Consolidated visit-level exposure metadata",
        name="visitSummary",
        storageClass="ExposureCatalog",
        dimensions=("instrument", "visit"),
    )


class ConsolidateVisitSummaryConfig(pipeBase.PipelineTaskConfig,
                                    pipelineConnections=ConsolidateVisitSummaryConnections):
    """Config for ConsolidateVisitSummaryTask"""
    pass
820 """Task to consolidate per-detector visit metadata.
822 This task aggregates the following metadata from all the detectors in a
823 single visit into an exposure catalog:
827 - The physical_filter and band (if available).
828 - The psf size, shape, and effective area at the center of the detector.
829 - The corners of the bounding box in right ascension/declination.
831 Other quantities such as Psf, ApCorrMap, and TransmissionCurve are not
832 persisted here because of storage concerns, and because of their limited
833 utility as summary statistics.
835 Tests for this task are performed in ci_hsc_gen3.
837 _DefaultName =
"consolidateVisitSummary"
838 ConfigClass = ConsolidateVisitSummaryConfig
841 def _makeArgumentParser(cls):
844 parser.add_id_argument(
"--id",
"calexp",
845 help=
"data ID, e.g. --id visit=12345",
846 ContainerClass=VisitDataIdContainer)
850 """No metadata to persist, so override to remove metadata persistance.
855 """No config to persist, so override to remove config persistance.
860 visit = dataRefList[0].dataId[
'visit']
862 self.log.debug(
"Concatenating metadata from %d per-detector calexps (visit %d)" %
863 (len(dataRefList), visit))
865 expCatalog = self._combineExposureMetadata(visit, dataRefList, isGen3=
False)
867 dataRefList[0].put(expCatalog,
'visitSummary', visit=visit)
870 dataRefs = butlerQC.get(inputRefs.calexp)
871 visit = dataRefs[0].dataId.byName()[
'visit']
873 self.log.debug(
"Concatenating metadata from %d per-detector calexps (visit %d)" %
874 (len(dataRefs), visit))
878 butlerQC.put(expCatalog, outputRefs.visitSummary)
    def _combineExposureMetadata(self, visit, dataRefs, isGen3=True):
        """Make a combined exposure catalog from a list of dataRefs.

        Parameters
        ----------
        visit : `int`
            Visit identification number.
        dataRefs : `list`
            List of calexp dataRefs in visit. May be list of
            `lsst.daf.persistence.ButlerDataRef` (Gen2) or
            `lsst.daf.butler.DeferredDatasetHandle` (Gen3).
        isGen3 : `bool`, optional
            Specifies if this is a Gen3 list of datarefs.

        Returns
        -------
        visitSummary : `lsst.afw.table.ExposureCatalog`
            Exposure catalog with per-detector summary information.
        """
        schema = afwTable.ExposureTable.makeMinimalSchema()
        schema.addField('visit', type='I', doc='Visit number')
        schema.addField('detector_id', type='I', doc='Detector number')
        schema.addField('physical_filter', type='String', size=32, doc='Physical filter')
        schema.addField('band', type='String', size=32, doc='Name of band')
        schema.addField('psfSigma', type='F',
                        doc='PSF model second-moments determinant radius (center of chip) (pixel)')
        schema.addField('psfArea', type='F',
                        doc='PSF model effective area (center of chip) (pixel**2)')
        schema.addField('psfIxx', type='F',
                        doc='PSF model Ixx (center of chip) (pixel**2)')
        schema.addField('psfIyy', type='F',
                        doc='PSF model Iyy (center of chip) (pixel**2)')
        schema.addField('psfIxy', type='F',
                        doc='PSF model Ixy (center of chip) (pixel**2)')
        schema.addField('raCorners', type='ArrayD', size=4,
                        doc='Right Ascension of bounding box corners (degrees)')
        schema.addField('decCorners', type='ArrayD', size=4,
                        doc='Declination of bounding box corners (degrees)')

        cat = afwTable.ExposureCatalog(schema)
        cat.resize(len(dataRefs))
        # For Gen2 only a tiny cutout is needed; the metadata is attached to the exposure.
        gen2_read_bbox = lsst.geom.Box2I(lsst.geom.Point2I(0, 0), lsst.geom.Point2I(1, 1))

        for i, dataRef in enumerate(dataRefs):
            if isGen3:
                visitInfo = dataRef.get(component='visitInfo')
                filter_ = dataRef.get(component='filter')
                psf = dataRef.get(component='psf')
                wcs = dataRef.get(component='wcs')
                photoCalib = dataRef.get(component='photoCalib')
                detector = dataRef.get(component='detector')
                bbox = dataRef.get(component='bbox')
                validPolygon = dataRef.get(component='validPolygon')
            else:
                exp = dataRef.get(datasetType='calexp_sub', bbox=gen2_read_bbox)
                visitInfo = exp.getInfo().getVisitInfo()
                filter_ = exp.getFilter()
                psf = exp.getPsf()
                wcs = exp.getWcs()
                photoCalib = exp.getPhotoCalib()
                detector = exp.getDetector()
                bbox = dataRef.get(datasetType='calexp_bbox')
                validPolygon = exp.getInfo().getValidPolygon()

            rec = cat[i]
            rec.setBBox(bbox)
            rec.setVisitInfo(visitInfo)
            rec.setWcs(wcs)
            rec.setPhotoCalib(photoCalib)
            rec.setDetector(detector)
            rec.setValidPolygon(validPolygon)

            rec['visit'] = visit
            rec['physical_filter'] = filter_.getName()
            rec['detector_id'] = detector.getId()
            shape = psf.computeShape(bbox.getCenter())
            rec['psfSigma'] = shape.getDeterminantRadius()
            rec['psfIxx'] = shape.getIxx()
            rec['psfIyy'] = shape.getIyy()
            rec['psfIxy'] = shape.getIxy()
            im = psf.computeKernelImage(bbox.getCenter())
            # Effective area (n_eff) of the PSF: sum(im) / sum(im**2)
            rec['psfArea'] = np.sum(im.array)/np.sum(im.array**2.)

            sph_pts = wcs.pixelToSky(lsst.geom.Box2D(bbox).getCorners())
            rec['raCorners'][:] = [sph.getRa().asDegrees() for sph in sph_pts]
            rec['decCorners'][:] = [sph.getDec().asDegrees() for sph in sph_pts]

        return cat
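

# Illustrative sketch (not part of the pipeline): the effective-area statistic
# computed above, sum(im) / sum(im**2), evaluated for a unit-sum Gaussian kernel.
# For a Gaussian of width sigma the result is close to 4 * pi * sigma**2.
def _examplePsfEffectiveArea(sigma=2.0, halfSize=15):
    import numpy as np

    y, x = np.mgrid[-halfSize:halfSize + 1, -halfSize:halfSize + 1]
    im = np.exp(-(x**2 + y**2) / (2.0 * sigma**2))
    im /= im.sum()  # normalize like a PSF kernel image

    area = np.sum(im) / np.sum(im**2)
    # area is approximately 4 * pi * sigma**2 (about 50.3 for sigma=2)
    return area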
980 """DataIdContainer that groups sensor-level id's by visit
984 """Make self.refList from self.idList
986 Generate a list of data references grouped by visit.
990 namespace : `argparse.Namespace`
991 Namespace used by `lsst.pipe.base.CmdLineTask` to parse command line arguments
994 visitRefs = defaultdict(list)
995 for dataId
in self.idList:
996 if "visit" in dataId:
997 visitId = dataId[
"visit"]
999 subset = namespace.butler.subset(self.datasetType, dataId=dataId)
1000 visitRefs[visitId].extend([dataRef
for dataRef
in subset])
1003 for refList
in visitRefs.values():
1004 existingRefs = [ref
for ref
in refList
if ref.datasetExists()]
1006 outputRefList.append(existingRefs)
class ConsolidateSourceTableTask(CmdLineTask):
    """Concatenate `sourceTable` list into a per-visit `sourceTable_visit`.
    """
    _DefaultName = 'consolidateSourceTable'
    ConfigClass = ConsolidateSourceTableConfig

    inputDataset = 'sourceTable'
    outputDataset = 'sourceTable_visit'

    def runDataRef(self, dataRefList):
        self.log.info("Concatenating %s per-detector Source Tables", len(dataRefList))
        df = pd.concat([dataRef.get().toDataFrame() for dataRef in dataRefList])
        dataRefList[0].put(ParquetTable(dataFrame=df), self.outputDataset)

    @classmethod
    def _makeArgumentParser(cls):
        parser = ArgumentParser(name=cls._DefaultName)
        parser.add_id_argument("--id", cls.inputDataset,
                               help="data ID, e.g. --id visit=12345",
                               ContainerClass=VisitDataIdContainer)
        return parser

    def writeMetadata(self, dataRef):
        """No metadata to write.
        """
        pass

    def writeConfig(self, butler, clobber=False, doBackup=True):
        """No config to write.
        """
        pass