Coverage for estats/stats/Minkowski.py: 73%


164 statements  

# Copyright (C) 2019 ETH Zurich
# Institute for Particle Physics and Astrophysics
# Author: Dominik Zuercher

import numpy as np
import healpy as hp

def context():
    """
    Defines the parameters used by the plugin.
    """
    stat_type = 'multi'

    required = ['Minkowski_max', 'Minkowski_min', 'Minkowski_steps',
                'Minkowski_sliced_bins', 'NSIDE',
                'scales', 'selected_scales', 'no_V0']
    defaults = [4.0, -4.0, 10, 10, 1024,
                [31.6, 29.0, 26.4, 23.7, 21.1, 18.5, 15.8, 13.2,
                 10.5, 7.9, 5.3, 2.6],
                [31.6, 29.0, 26.4, 23.7, 21.1, 18.5,
                 15.8, 13.2, 10.5],
                False]
    types = ['float', 'float', 'int', 'int', 'int', 'list', 'list', 'bool']
    return required, defaults, types, stat_type
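# Illustrative usage sketch (not part of the original module): assemble the
# ctx dictionary that the functions below expect from the defaults returned
# by context(). The estats plugin framework normally performs this step.
def _example_build_ctx():
    required, defaults, _, _ = context()
    ctx = dict(zip(required, defaults))
    return ctx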

def Minkowski_proper(kappa, weights, ctx):
    """
    Proper calculation of the Minkowski functionals.
    Involves a lot of alm decompositions and is therefore quite slow.
    For forward modelling use the Minkowski function instead.
    :param kappa: A Healpix convergence map
    :param weights: A Healpix map with pixel weights
    :param ctx: Context instance
    :return: Minkowski functionals as V0, V1, V2.
    """
    mask = weights > 0

    alms = hp.sphtfunc.map2alm(
        kappa, use_weights=False, lmax=2*hp.npix2nside(kappa.size))

    # first order covariant derivatives
    # d_phi is d_phi / sin(theta)
    _, d_theta, d_phi = hp.sphtfunc.alm2map_der1(
        alms, hp.npix2nside(kappa.size), lmax=2*hp.npix2nside(kappa.size))
    d_theta[np.isclose(weights, 0.0)] = hp.UNSEEN
    d_phi[np.isclose(weights, 0.0)] = hp.UNSEEN

    # second order covariant derivatives
    alms_d_phi = hp.sphtfunc.map2alm(d_phi, lmax=2*hp.npix2nside(kappa.size))
    alms_d_theta = hp.sphtfunc.map2alm(
        d_theta, lmax=2*hp.npix2nside(kappa.size))
    _, _, d_phi_phi = hp.sphtfunc.alm2map_der1(
        alms_d_phi, hp.npix2nside(kappa.size),
        lmax=2*hp.npix2nside(kappa.size))
    _, d_theta_theta, d_theta_phi = hp.sphtfunc.alm2map_der1(
        alms_d_theta, hp.npix2nside(kappa.size),
        lmax=2*hp.npix2nside(kappa.size))

    # need the inverted tangent (cotangent) at each pixel location
    theta, _ = hp.pix2ang(hp.npix2nside(kappa.size), np.arange(kappa.size))
    cot_theta = 1.0 / np.tan(theta)

    # covariant derivatives
    cd_theta = d_theta
    cd_phi = d_phi
    cd_theta_theta = d_theta_theta
    cd_phi_phi = d_phi_phi + cot_theta * d_theta
    cd_theta_phi = d_theta_phi - cot_theta * d_phi

    # masking
    kappa_m = kappa[mask]
    cd_theta = cd_theta[mask]
    cd_phi = cd_phi[mask]
    cd_theta_theta = cd_theta_theta[mask]
    cd_phi_phi = cd_phi_phi[mask]
    cd_theta_phi = cd_theta_phi[mask]

    # calculate integrands I1, I2
    denom = np.sum(kappa_m)
    norm = np.power(cd_theta, 2.) + np.power(cd_phi, 2.)
    I1 = np.sqrt(norm) / (4. * denom)
    I2 = (2. * cd_theta * cd_phi * cd_theta_phi - np.power(cd_phi, 2.)
          * cd_theta_theta - np.power(cd_theta, 2.)
          * cd_phi_phi) / (norm * 2. * np.pi * denom)

    # calculate Minkowski functionals
    thresholds = np.linspace(
        ctx['Minkowski_min'], ctx['Minkowski_max'],
        ctx['Minkowski_steps'])

    sigma_0 = np.std(kappa_m)
    thresholds *= sigma_0

    # Minkowski calculation
    res = np.zeros(ctx['Minkowski_steps'] * 3)
    for it, thres in enumerate(thresholds):
        # calc the three MFs
        V0 = np.sum(kappa_m >= thres) / denom
        delta_func = np.isclose(kappa_m, thres, rtol=0.0, atol=1e-6)
        V1 = np.sum(I1[delta_func])
        V2 = np.sum(I2[delta_func])
        res[it * 3] = V0
        res[it * 3 + 1] = V1
        res[it * 3 + 2] = V2

    # reordering
    V0 = res[0::3]
    V1 = res[1::3]
    V2 = res[2::3]
    res = np.append(V0, np.append(V1, V2))
    return res
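# Illustrative usage sketch (not part of the original module): run
# Minkowski_proper on a Gaussian random map with uniform weights and the
# default plugin parameters. The map resolution and the flat power spectrum
# are arbitrary choices made for this example only.
def _example_minkowski_proper():
    required, defaults, _, _ = context()
    ctx = dict(zip(required, defaults))
    nside = 512
    kappa = hp.synfast(np.ones(2 * nside), nside)
    weights = np.ones(hp.nside2npix(nside))
    res = Minkowski_proper(kappa, weights, ctx)
    # res stacks the functionals as V0, V1, V2, each with Minkowski_steps
    # threshold bins
    return res.reshape(3, ctx['Minkowski_steps'])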

def Minkowski(kappa_w, weights, ctx):
    """
    Calculates Minkowski functionals on a convergence map.
    This is a very crude approximation that will not match theoretical
    predictions! Preferable for forward modelling due to its speed.
    :param kappa_w: A Healpix convergence map
    :param weights: A Healpix map with pixel weights
    :param ctx: Context instance
    :return: Minkowski functionals as V0, V1, V2.
    """

    # number of chunks the pixel loops are split into (limits peak memory)
    num_chunks = 1

    ell = kappa_w.size
    nside = hp.get_nside(kappa_w)
    ran = np.arange(ell, dtype=np.int32)
    mask = weights > 0.0
    ran = ran[mask]

    # restrict to pixels whose neighbours all lie inside the mask
    # (no UNSEEN neighbours)
    counter = 0
    to_keep = np.ones_like(ran, dtype=bool)
    for r in np.array_split(ran, num_chunks):
        low = counter
        high = counter + len(r)
        neighbours = hp.get_all_neighbours(nside, r)[[1, 3, 5, 7]]
        edges = np.any(kappa_w[neighbours] < -1e20, axis=0)
        to_keep[low:high] = np.logical_and(
            to_keep[low:high], np.logical_not(edges))
        counter += len(r)
    ran = ran[to_keep]

    # calculate first order derivatives (with neighbours only)
    deriv_phi = np.zeros(ell, dtype=np.float32)
    deriv_theta = np.zeros(ell, dtype=np.float32)
    for r in np.array_split(ran, num_chunks):
        neighbours = hp.get_all_neighbours(nside, r)[[1, 3, 5, 7]]
        deriv_phi[r] = -kappa_w[neighbours[0]] + kappa_w[neighbours[2]]
        deriv_theta[r] = -kappa_w[neighbours[3]] + kappa_w[neighbours[1]]

    # ran was already filtered above, so reduce to_keep to the same length
    # (all remaining entries are True at this point)
    to_keep = to_keep[to_keep]

    # calculate second order derivatives
    V1 = np.zeros_like(ran, dtype=np.float32)
    V2 = np.zeros_like(ran, dtype=np.float32)
    counter = 0
    for r in np.array_split(ran, num_chunks):
        low = counter
        high = counter + len(r)

        # calculate V1
        ##############
        V1[low:high] = np.power(
            deriv_theta[r], 2.) + np.power(deriv_phi[r], 2.)

        neighbours = hp.get_all_neighbours(nside, r)[[1, 3, 5, 7]]

        # calculate V2 part by part to save RAM
        #######################################
        # term 1
        deriv_phi_theta = -deriv_phi[neighbours[3]] + deriv_phi[neighbours[1]]
        V2[low:high] = 2. * deriv_phi[r] * deriv_phi_theta
        to_keep[low:high] = np.logical_and(
            to_keep[low:high], np.abs(deriv_phi_theta) < 1e10)
        V2[low:high] *= deriv_theta[r]

        # term 2
        deriv_theta_theta = -deriv_theta[neighbours[3]] \
            + deriv_theta[neighbours[1]]
        V2[low:high] -= np.power(deriv_phi[r], 2.) * deriv_theta_theta
        to_keep[low:high] = np.logical_and(
            to_keep[low:high], np.abs(deriv_theta_theta) < 1e10)

        # term 3
        deriv_phi_phi = -deriv_phi[neighbours[0]] + deriv_phi[neighbours[2]]
        V2[low:high] -= np.power(deriv_theta[r], 2.) * deriv_phi_phi
        to_keep[low:high] = np.logical_and(
            to_keep[low:high], np.abs(deriv_phi_phi) < 1e10)

        # advance the chunk offset (only relevant for num_chunks > 1)
        counter += len(r)

    # removing extreme derivatives
    kappa_m = kappa_w[ran[to_keep]]
    V1 = V1[to_keep]
    V2 = V2[to_keep]

    with np.errstate(divide='ignore'):
        V2 = np.divide(V2, V1)
        V1 = np.sqrt(V1)

    # averaged standard deviation and normalization
    sigma_0 = np.std(kappa_m)

    thresholds = np.linspace(ctx['Minkowski_min'],
                             ctx['Minkowski_max'], ctx['Minkowski_steps'])
    thresholds *= sigma_0

    # Minkowski calculation
    res = np.zeros(ctx['Minkowski_steps'] * 3, dtype=np.float32)
    for it, thres in enumerate(thresholds):
        # calc the three MFs
        V0_ = np.sum(kappa_m >= thres)
        delta_func = (kappa_m > (thres - 1e-6)) & (kappa_m < (thres + 1e-6))
        V1_ = np.sum(V1[delta_func])
        V2_ = np.sum(V2[delta_func])
        res[it * 3] = V0_
        res[it * 3 + 1] = V1_
        res[it * 3 + 2] = V2_

    # reordering
    V0 = res[0::3]
    V1 = res[1::3]
    V2 = res[2::3]
    res = np.append(V0, np.append(V1, V2))

    return res
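# Illustrative usage sketch (not part of the original module): run the fast,
# approximate estimator on a partially masked Gaussian random map. Masked
# pixels are set to healpy's UNSEEN sentinel, which is what the edge check
# above (kappa_w < -1e20) relies on; the mask chosen here is arbitrary.
def _example_minkowski_fast():
    required, defaults, _, _ = context()
    ctx = dict(zip(required, defaults))
    nside = 512
    npix = hp.nside2npix(nside)
    kappa_w = hp.synfast(np.ones(2 * nside), nside)
    weights = np.ones(npix)
    weights[:npix // 4] = 0.0
    kappa_w[weights == 0.0] = hp.UNSEEN
    res = Minkowski(kappa_w, weights, ctx)
    return res.reshape(3, ctx['Minkowski_steps'])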

def process(data, ctx, scale_to_unity=False):
    # flatten every block of num_of_scales consecutive rows into a single
    # row, i.e. concatenate the data vectors of all scales
    num_of_scales = len(ctx['scales'])

    new_data = np.zeros(
        (int(data.shape[0] / num_of_scales), data.shape[1]
         * num_of_scales))
    for jj in range(int(data.shape[0] / num_of_scales)):
        new_data[jj, :] = data[jj * num_of_scales:
                               (jj + 1) * num_of_scales, :].ravel()
    return new_data
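# Illustrative sketch (not part of the original module): process() flattens
# every block of len(ctx['scales']) consecutive rows into one row. With the
# 12 default scales and 30 values per row (3 functionals x 10 thresholds),
# a (24, 30) input becomes a (2, 360) output.
def _example_process():
    required, defaults, _, _ = context()
    ctx = dict(zip(required, defaults))
    num_of_scales = len(ctx['scales'])
    data = np.arange(2. * num_of_scales * 30).reshape(2 * num_of_scales, 30)
    return process(data, ctx).shape   # (2, 360)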

def slice(ctx):
    # number of data vectors for each scale
    mult = 3
    # number of scales
    num_of_scales = len(ctx['scales'])
    # either mean or sum, for how to assemble the data into the bins
    operation = 'mean'

    n_bins_sliced = ctx['Minkowski_sliced_bins']

    return num_of_scales, n_bins_sliced, operation, mult
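# Illustrative sketch (not part of the original module): with the default
# parameters slice() reports 12 scales, 10 sliced bins per scale, a 'mean'
# operation for assembling the data into bins, and 3 data vectors
# (V0, V1, V2) per scale.
def _example_slice():
    required, defaults, _, _ = context()
    ctx = dict(zip(required, defaults))
    return slice(ctx)   # (12, 10, 'mean', 3)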

def decide_binning_scheme(data, meta, bin, ctx):
    # For Minkowski perform simple equal bin width splitting.
    # Same splitting for each smoothing scale.
    range_edges = [ctx['Minkowski_min'], ctx['Minkowski_max']]
    n_bins_original = ctx['Minkowski_steps']
    num_of_scales = len(ctx['scales'])
    n_bins_sliced = ctx['Minkowski_sliced_bins']
    bin_centers = np.zeros((num_of_scales, n_bins_sliced))
    bin_edge_indices = np.zeros((num_of_scales, n_bins_sliced + 1))

    orig_bin_values = np.linspace(
        range_edges[0], range_edges[1], n_bins_original)

    per_bin = n_bins_original // n_bins_sliced
    remain = n_bins_original % n_bins_sliced
    remain_front = remain // 2
    remain_back = remain_front + remain % 2

    # Get edge indices
    bin_edge_indices_temp = np.arange(
        remain_front, n_bins_original - remain_back, per_bin)
    bin_edge_indices_temp[0] -= remain_front
    bin_edge_indices_temp = np.append(
        bin_edge_indices_temp, n_bins_original)

    # Get bin central values
    bin_centers_temp = np.zeros(0)
    for jj in range(len(bin_edge_indices_temp) - 1):
        bin_centers_temp = np.append(bin_centers_temp, np.nanmean(
            orig_bin_values[bin_edge_indices_temp[jj]:
                            bin_edge_indices_temp[jj + 1]]))

    # Assign splitting to each scale
    for scale in range(num_of_scales):
        bin_centers[scale, :] = bin_centers_temp
        bin_edge_indices[scale, :] = bin_edge_indices_temp

    return bin_edge_indices, bin_centers
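# Illustrative sketch (not part of the original module): with the default 10
# threshold steps and 10 sliced bins, every sliced bin contains exactly one
# original bin, so the edge indices are 0, 1, ..., 10 for every scale and the
# bin centers equal the threshold values linspace(-4, 4, 10) (in units of
# sigma_0). The unused data/meta/bin arguments can be anything here.
def _example_binning_scheme():
    required, defaults, _, _ = context()
    ctx = dict(zip(required, defaults))
    bin_edge_indices, bin_centers = decide_binning_scheme(None, None, None, ctx)
    return bin_edge_indices.shape, bin_centers.shape   # ((12, 11), (12, 10))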

def filter(ctx):
    filter = np.zeros(0)
    for scale in ctx['scales']:
        if scale in ctx['selected_scales']:
            f = np.asarray([True] * ctx['Minkowski_sliced_bins'])
        else:
            f = np.asarray([False] * ctx['Minkowski_sliced_bins'])

        f = np.tile(f, 3)
        if ctx['no_V0']:
            f[:ctx['Minkowski_sliced_bins']] = False
        filter = np.append(filter, f)
    return filter
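# Illustrative sketch (not part of the original module): with the defaults
# (12 scales, 9 of them selected, 10 sliced bins, no_V0=False) filter()
# returns a flat mask of length 12 * 3 * 10 = 360 in which the 3 * 10
# entries of every selected scale are flagged and all other entries are zero.
def _example_filter():
    required, defaults, _, _ = context()
    ctx = dict(zip(required, defaults))
    f = filter(ctx)
    return f.shape, int(np.sum(f))   # ((360,), 270)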