Coverage for estats/stats/CrossStarletMinkowski.py: 19%

Shortcuts on this page

r m x   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

72 statements  

# Copyright (C) 2019 ETH Zurich
# Institute for Particle Physics and Astrophysics
# Author: Dominik Zuercher

5import numpy as np 

6import healpy as hp 

7from estats.stats import CrossMinkowski 

8 

9 

def context():
    """
    Defines the parameters used by the plugin.

    :return: (required parameter names, their default values,
              their types, statistic type)
    """
    stat_type = 'convergence-cross'

    # default starlet filter scales (also the default selection)
    starlet_scales = [48, 65, 89, 121, 164, 223, 303, 412, 560,
                      761, 1034, 1405, 1910, 2597, 3530,
                      4799, 6523, 8867, 12053, 16384]

    required = ['Minkowski_max', 'Minkowski_min', 'Minkowski_steps',
                'Minkowski_sliced_bins', 'Starlet_scales',
                'Starlet_selected_scales',
                'NSIDE', 'no_V0']
    defaults = [4.0, -4.0, 10, 10,
                starlet_scales, list(starlet_scales),
                1024, False]
    types = ['float', 'float', 'int', 'int', 'list', 'list', 'int', 'bool']
    return required, defaults, types, stat_type

29 

30 

def CrossStarletMinkowski(map_w, weights, ctx):
    """
    Performs the starlet-wavelet decomposition of the map and calculates
    the Minkowski functionals in each filter band.

    :param map_w: A Healpix convergence map
    :param weights: A Healpix map with pixel weights (integer >=0)
    :param ctx: Context instance
    :return: Minkowski functionals per filter band, shape
             (num filter bands, Minkowski_steps * 3) — presumably the
             three functionals V0, V1, V2 stacked per band
    :raises ImportError: if the esd package is not installed
    """

    try:
        from esd import esd
    except ImportError:
        raise ImportError(
            "Did not find esd package. "
            "It is required for this module to work properly. "
            "Download from: "
            "https://cosmo-gitlab.phys.ethz.ch/cosmo_public/esd")

    # one row per starlet scale, 3 * Minkowski_steps values per row
    wavelet_counts = np.zeros((len(ctx['Starlet_scales']),
                               ctx['Minkowski_steps'] * 3))

    # build decomposition
    # (skip the first map that contains the remaining small scales)
    wave_iter = esd.calc_wavelet_decomp_iter(
        map_w, l_bins=ctx['Starlet_scales'])
    counter = 0
    for ii, wmap in enumerate(wave_iter):
        if ii == 0:
            continue
        # reapply mask
        wmap[np.isclose(weights, 0)] = hp.UNSEEN

        # calc Minkowski functionals for this filter band
        minks = CrossMinkowski.CrossMinkowski(wmap, weights, ctx)
        wavelet_counts[counter] = minks
        counter += 1

    return wavelet_counts

71 

72 

def process(data, ctx, scale_to_unity=False):
    """
    Groups every ``num_of_scales`` consecutive rows of ``data`` into a
    single flattened row (one long data vector per realisation).

    :param data: 2D array whose row count is a multiple of the number
                 of starlet scales
    :param ctx: Context instance
    :param scale_to_unity: accepted for interface compatibility; unused
    :return: 2D float array of shape
             (rows / num_scales, cols * num_scales)
    """
    num_of_scales = len(ctx['Starlet_scales'])
    n_rows_out = data.shape[0] // num_of_scales

    # float output buffer, matching the original np.zeros behaviour
    stacked = np.zeros((n_rows_out, data.shape[1] * num_of_scales))
    for idx in range(n_rows_out):
        chunk = data[idx * num_of_scales:(idx + 1) * num_of_scales, :]
        stacked[idx, :] = chunk.ravel()
    return stacked

83 

84 

def slice(ctx):
    """
    Describes how the raw data vector is sliced into bins.

    :param ctx: Context instance
    :return: (number of scales, number of sliced bins,
              combination operation, datavectors per scale)
    """
    # three datavectors (Minkowski functionals) for each scale
    mult = 3
    # number of starlet scales
    num_of_scales = len(ctx['Starlet_scales'])
    # bin contents are combined by averaging (not summing)
    operation = 'mean'
    n_bins_sliced = ctx['Minkowski_sliced_bins']
    return num_of_scales, n_bins_sliced, operation, mult

96 

97 

def decide_binning_scheme(data, meta, bin, ctx):
    """
    Builds the binning scheme used to slice the data vectors.

    Performs a simple equal-width splitting of the original Minkowski
    threshold range; the identical splitting is applied to every
    smoothing scale. ``data``, ``meta`` and ``bin`` are accepted for
    interface compatibility but are not used.

    :param ctx: Context instance
    :return: (bin edge indices, shape (num scales, n_sliced + 1);
              bin central values, shape (num scales, n_sliced))
    """
    n_bins_original = ctx['Minkowski_steps']
    n_bins_sliced = ctx['Minkowski_sliced_bins']
    num_of_scales = len(ctx['Starlet_scales'])

    # threshold values of the original (fine) binning
    orig_bin_values = np.linspace(
        ctx['Minkowski_min'], ctx['Minkowski_max'], n_bins_original)

    # distribute leftover fine bins between the first and last slice
    per_bin = n_bins_original // n_bins_sliced
    remain = n_bins_original % n_bins_sliced
    remain_front = remain // 2
    remain_back = remain_front + remain % 2

    # edge indices into the original binning
    edges = np.arange(
        remain_front, n_bins_original - remain_back, per_bin)
    edges[0] -= remain_front
    edges = np.append(edges, n_bins_original)

    # central value of each sliced bin = mean of the covered thresholds
    centers = np.asarray([
        np.nanmean(orig_bin_values[edges[jj]:edges[jj + 1]])
        for jj in range(len(edges) - 1)])

    # same scheme for every scale; float dtype matches the original
    bin_centers = np.tile(centers, (num_of_scales, 1))
    bin_edge_indices = np.tile(edges.astype(float), (num_of_scales, 1))

    return bin_edge_indices, bin_centers

136 

137 

def filter(ctx):
    """
    Builds the boolean selection mask over the full data vector.

    Scales are traversed in reversed order; for each scale the three
    Minkowski datavectors (Minkowski_sliced_bins entries each) are kept
    only if the scale is among the selected scales. If ``no_V0`` is set,
    the first Minkowski_sliced_bins entries (the first functional) of
    every scale are dropped.

    :param ctx: Context instance
    :return: 1D float array of 0/1 flags (one per data-vector entry)
    """
    n_bins = ctx['Minkowski_sliced_bins']
    selected = ctx['Starlet_selected_scales']

    # float accumulator — np.append keeps the original float dtype
    selection = np.zeros(0)
    for scale in reversed(ctx['Starlet_scales']):
        keep = scale in selected
        mask = np.full(3 * n_bins, keep, dtype=bool)
        if ctx['no_V0']:
            # drop the first functional's bins for this scale
            mask[:n_bins] = False
        selection = np.append(selection, mask)
    return selection