Coverage for python / lsst / pipe / tasks / prettyPictureMaker / _equalizers.py: 17%

59 statements  

« prev     ^ index     » next       coverage.py v7.13.5, created at 2026-05-07 08:39 +0000

1# This file is part of pipe_tasks. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (https://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# This program is free software: you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation, either version 3 of the License, or 

12# (at your option) any later version. 

13# 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18# 

19# You should have received a copy of the GNU General Public License 

20# along with this program. If not, see <https://www.gnu.org/licenses/>. 

21 

22__all__ = ("tone_equalizer", "contrast_equalizer") 

23 

24import numpy as np 

25import cv2 

26from numpy.typing import NDArray 

27from scipy.ndimage import gaussian_filter 

28 

29 

30from ._localContrast import levelPadder, makeLapPyramid 

31from .types import FloatImagePlane 

32 

33 

34def _eigf_variance_analysis_no_mask(guide: FloatImagePlane, sigma: float) -> NDArray: 

35 """Computes average and variance of guide using Gaussian filtering. 

36 

37 Parameters 

38 ---------- 

39 guide : `FloatImagePlane` 

40 2D array representing the guide image. 

41 sigma : `float` 

42 Standard deviation for Gaussian kernel. 

43 

44 Returns 

45 ------- 

46 result : `numpy.ndarray` 

47 Array where each pixel has [average, variance]. 

48 """ 

49 # Compute average of guide 

50 mu_guide = gaussian_filter(guide, sigma=sigma) 

51 

52 # Compute average of squared guide values 

53 guide_squared = guide**2 

54 mu_guide_squared = gaussian_filter(guide_squared, sigma=sigma) 

55 

56 # Calculate variance as E[guide^2] - (E[guide])^2 

57 var_guide = mu_guide_squared - mu_guide**2 

58 

59 # Combine into an output array with shape (height, width, 2) 

60 output = np.stack((mu_guide, var_guide), axis=2) 

61 

62 return output 

63 

64 

65def _eigf_blending_no_mask(image: FloatImagePlane, av: NDArray, feathering: float, filter_type: int) -> None: 

66 """Applies blending without a mask using averages and variances. 

67 

68 Parameters 

69 ---------- 

70 image : `FloatImagePlane` 

71 2D input image array. Modified in-place. 

72 av : `numpy.ndarray` 

73 Array with shape (height, width, 2) containing averages and variances. 

74 feathering : `float` 

75 Feathering parameter for blending. 

76 filter_type : `int` 

77 Blending type: 0 for linear, 1 for geometric mean. 

78 """ 

79 # Reshape 'av' to match image dimensions 

80 av_reshaped = av.reshape(image.shape[0], image.shape[1], -1) 

81 

82 avg_g = av_reshaped[..., 0] 

83 var_g = av_reshaped[..., 1] 

84 

85 norm_g = np.maximum(avg_g * image, 1e-6) 

86 normalized_var_guide = var_g / norm_g 

87 

88 a = normalized_var_guide / (normalized_var_guide + feathering) 

89 b = avg_g - a * avg_g 

90 

91 # Apply blending 

92 if filter_type == 0: # Linear blending 

93 image[:] = np.maximum(image * a + b, np.finfo(float).min) 

94 else: # Geometric mean blending 

95 image[:] *= np.maximum(image * a + b, np.finfo(float).min) 

96 image[:] = np.sqrt(image[:]) 

97 

98 

def _fast_eigf_surface_blur(
    image: FloatImagePlane, sigma: float, feathering: float, iterations: int = 1, filter_type: int = 1
) -> None:
    """Apply an exposure-independent guided surface blur in-place.

    Parameters
    ----------
    image : `FloatImagePlane`
        Input image array of shape (height, width). Modified in-place.
    sigma : `float`
        Standard deviation for the Gaussian kernel.
    feathering : `float`
        Feathering parameter passed through to the blending stage.
    iterations : `int`, optional
        Number of iterations to model diffusion. Default is 1.
    filter_type : `int`, optional
        Blending type: 0 for linear, 1 for geometric mean. Default is 1.
    """
    # Reduce sigma as if the image were down-scaled by a factor in [1, 4].
    # NOTE(review): no actual resampling is performed here — only the
    # effective sigma is adjusted.
    scale_factor = np.clip(sigma, 1.0, 4.0)
    effective_sigma = np.maximum(sigma / scale_factor, 1.0)

    # Each pass recomputes the local statistics of the current image and
    # blends the image toward its guided surface, modelling diffusion.
    for _ in range(iterations):
        stats = _eigf_variance_analysis_no_mask(image, effective_sigma)
        _eigf_blending_no_mask(image, stats.reshape(-1, 2), feathering, filter_type)

125 

126 

def tone_equalizer(
    image: FloatImagePlane,
    tone_factors: list[float],
    weight: float,
    sigma: float,
    feathering: float,
    iterations: int = 1,
    filter_type: int = 1,
) -> FloatImagePlane:
    """Enhance image brightness using exposure-dependent correction.

    Brightness is adjusted by summing Gaussian-weighted corrections, one per
    exposure level, where the per-pixel exposure is estimated with an
    edge-aware guided blur. Ten exposure centers evenly span [0, 1]. The
    input image itself is not modified; a copy is blurred to build the
    exposure map.

    Parameters
    ----------
    image : `FloatImagePlane`
        Input image array of shape (height, width).
    tone_factors : `list` of `float`
        List of 10 tone correction factors, one for each exposure level.
    weight : `float`
        Width of the Gaussian kernel for exposure weighting.
    sigma : `float`
        Standard deviation for Gaussian blur of luminance.
    feathering : `float`
        Feathering parameter for exposure-independent guided blur.
    iterations : `int`, optional
        Number of iterations for the blur process. Default is 1.
    filter_type : `int`, optional
        Blending type: 0 for linear, 1 for geometric mean. Default is 1.

    Returns
    -------
    result : `FloatImagePlane`
        Image with brightness adjusted based on tone factors.
    """
    # Estimate per-pixel exposure with an edge-preserving guided blur of a
    # copy of the input (the blur works in-place).
    exposure = np.copy(image)
    _fast_eigf_surface_blur(exposure, sigma, feathering, iterations, filter_type)

    # Ten evenly spaced exposure anchors on [0, 1], one per tone factor.
    centers = np.linspace(0, 1, 10)

    # Accumulate a Gaussian-weighted correction for each anchor: pixels whose
    # exposure is near a center receive (up to) that center's full factor.
    correction = np.zeros_like(exposure)
    for center, factor in zip(centers, tone_factors):
        correction += np.exp(-1 * (exposure - center) ** 2 / (2 * weight**2)) * factor

    return image + correction

173 

174 

def contrast_equalizer(image: FloatImagePlane, contrast_factors: list[float]) -> FloatImagePlane:
    """Enhance image contrast using Laplacian pyramid adjustment.

    This function performs contrast equalization by modifying the Laplacian
    pyramid coefficients of the input image. Each level of the pyramid
    corresponds to a different spatial scale, allowing for scale-dependent
    contrast adjustments. A padded copy of the input image is created for
    processing.

    Parameters
    ----------
    image : `FloatImagePlane`
        Input image array of shape (height, width).
    contrast_factors : `list` of `float`
        List of factors to multiply each pyramid level. Values > 1 increase
        contrast, values < 1 decrease contrast. The list should specify
        factors for the largest scales first; unspecified levels use a factor
        of 1.0.

    Returns
    -------
    result : `FloatImagePlane`
        Image with contrast adjusted at multiple spatial scales.
    """
    # Deepest pyramid level such that both dimensions can still be halved.
    maxLevel = int(np.min(np.log2(image.shape)))
    # Extra border so repeated pyrDown/pyrUp round-trips stay well defined.
    support = 1 << (maxLevel - 1)
    # Per-level padding needed to keep every level's dimensions even
    # (levelPadder is a project helper; see _localContrast).
    padY_amounts = levelPadder(image.shape[0] + support, maxLevel)
    padX_amounts = levelPadder(image.shape[1] + support, maxLevel)
    # Replicate-pad bottom and right edges by `support` pixels; the original
    # image occupies the top-left corner and is cropped back out at the end.
    imagePadded = cv2.copyMakeBorder(
        image, *(0, support), *(0, support), cv2.BORDER_REPLICATE, None, None
    ).astype(image.dtype)
    # Build the Laplacian pyramid; presumably ordered finest level first with
    # the low-frequency residual last — TODO confirm against makeLapPyramid.
    lap = makeLapPyramid(imagePadded, padY_amounts, padX_amounts, None, None)
    # Scale detail coefficients. The offset of 2 skips the residual (lap[-1])
    # and walks from the coarsest detail level (largest scales) toward finer
    # ones, matching "largest scales first" in contrast_factors.
    for i, factor in enumerate(contrast_factors):
        i = i + 2
        if i > len(lap):
            # More factors than detail levels; ignore the excess.
            break
        lap[-1 * i] *= factor
    # Collapse the pyramid: start from the residual and repeatedly upsample,
    # trimming the per-level padding before adding the next detail level.
    output = lap[-1]
    for i in range(-2, -1 * len(lap) - 1, -1):
        upsampled = cv2.pyrUp(output)
        # Remove the padding introduced at the (i+1)-th level; pyrUp doubles
        # dimensions, hence the factor of 2.
        upsampled = upsampled[
            : upsampled.shape[0] - 2 * padY_amounts[i + 1], : upsampled.shape[1] - 2 * padX_amounts[i + 1]
        ]
        output = lap[i] + upsampled
    # Crop away the `support` border to restore the original shape.
    return output[: image.shape[0], : image.shape[1]]