#ifndef INCLUDED_volk_16i_permute_and_scalar_add_a_H
#define INCLUDED_volk_16i_permute_and_scalar_add_a_H

#ifdef LV_HAVE_SSE2
#include <emmintrin.h>

static inline void volk_16i_permute_and_scalar_add_a_sse2(short* target,
                                                           short* src0,
                                                           short* permute_indexes,
                                                           short* cntl0,
                                                           short* cntl1,
                                                           short* cntl2,
                                                           short* cntl3,
                                                           short* scalars,
                                                           unsigned int num_points)
{
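    /* Computes, for each of the num_points 16-bit samples:
     *   target[i] = src0[permute_indexes[i]] + (cntl0[i] & scalars[0])
     *             + (cntl1[i] & scalars[1]) + (cntl2[i] & scalars[2])
     *             + (cntl3[i] & scalars[3])
     * processing eight samples per iteration with SSE2. */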
    const unsigned int num_bytes = num_points * 2;

    __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;

    __m128i *p_target, *p_cntl0, *p_cntl1, *p_cntl2, *p_cntl3, *p_scalars;

    short* p_permute_indexes = permute_indexes;

    p_target = (__m128i*)target;
    p_cntl0 = (__m128i*)cntl0;
    p_cntl1 = (__m128i*)cntl1;
    p_cntl2 = (__m128i*)cntl2;
    p_cntl3 = (__m128i*)cntl3;
    p_scalars = (__m128i*)scalars;
    int i = 0;

    int bound = (num_bytes >> 4);
    int leftovers = (num_bytes >> 1) & 7;
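    /* bound = number of complete 8-sample SSE blocks; leftovers = the trailing
     * num_points % 8 samples handled by the scalar tail loop below. */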
    xmm0 = _mm_load_si128(p_scalars);
    xmm1 = _mm_shufflelo_epi16(xmm0, 0);
    xmm2 = _mm_shufflelo_epi16(xmm0, 0x55);
    xmm3 = _mm_shufflelo_epi16(xmm0, 0xaa);
    xmm4 = _mm_shufflelo_epi16(xmm0, 0xff);

    xmm1 = _mm_shuffle_epi32(xmm1, 0x00);
    xmm2 = _mm_shuffle_epi32(xmm2, 0x00);
    xmm3 = _mm_shuffle_epi32(xmm3, 0x00);
    xmm4 = _mm_shuffle_epi32(xmm4, 0x00);
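    /* xmm1..xmm4 now hold scalars[0]..scalars[3] broadcast across all eight
     * 16-bit lanes of their respective registers. */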
    for (; i < bound; ++i) {
        xmm0 = _mm_setzero_si128();
        xmm5 = _mm_setzero_si128();
        xmm6 = _mm_setzero_si128();
        xmm7 = _mm_setzero_si128();
        xmm0 = _mm_insert_epi16(xmm0, src0[p_permute_indexes[0]], 0);
        xmm5 = _mm_insert_epi16(xmm5, src0[p_permute_indexes[1]], 1);
        xmm6 = _mm_insert_epi16(xmm6, src0[p_permute_indexes[2]], 2);
        xmm7 = _mm_insert_epi16(xmm7, src0[p_permute_indexes[3]], 3);
        xmm0 = _mm_insert_epi16(xmm0, src0[p_permute_indexes[4]], 4);
        xmm5 = _mm_insert_epi16(xmm5, src0[p_permute_indexes[5]], 5);
        xmm6 = _mm_insert_epi16(xmm6, src0[p_permute_indexes[6]], 6);
        xmm7 = _mm_insert_epi16(xmm7, src0[p_permute_indexes[7]], 7);
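        /* The eight permuted samples are gathered into four registers, two
         * lanes apiece; the remaining lanes stay zero, so the adds below
         * merge them into a single vector of eight samples. */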
        xmm0 = _mm_add_epi16(xmm0, xmm5);
        xmm6 = _mm_add_epi16(xmm6, xmm7);

        p_permute_indexes += 8;

        xmm0 = _mm_add_epi16(xmm0, xmm6);
        xmm5 = _mm_load_si128(p_cntl0);
        xmm6 = _mm_load_si128(p_cntl1);
        xmm7 = _mm_load_si128(p_cntl2);
        xmm5 = _mm_and_si128(xmm5, xmm1);
        xmm6 = _mm_and_si128(xmm6, xmm2);
        xmm7 = _mm_and_si128(xmm7, xmm3);
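        /* Each control vector acts as a per-sample bitmask (typically 0x0000
         * or 0xFFFF) selecting whether the corresponding broadcast scalar
         * contributes to the sum. */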
        xmm0 = _mm_add_epi16(xmm0, xmm5);

        xmm5 = _mm_load_si128(p_cntl3);

        xmm6 = _mm_add_epi16(xmm6, xmm7);

        xmm5 = _mm_and_si128(xmm5, xmm4);

        xmm0 = _mm_add_epi16(xmm0, xmm6);

        xmm0 = _mm_add_epi16(xmm0, xmm5);
        _mm_store_si128(p_target, xmm0);

        p_cntl0 += 1;
        p_cntl1 += 1;
        p_cntl2 += 1;
        p_cntl3 += 1;
        p_target += 1;
    }
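
    /* Scalar tail loop: handles the trailing num_points % 8 samples that do
     * not fill a complete SSE block. */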
    for (i = bound * 8; i < (bound * 8) + leftovers; ++i) {
        target[i] = src0[permute_indexes[i]] + (cntl0[i] & scalars[0]) +
                    (cntl1[i] & scalars[1]) + (cntl2[i] & scalars[2]) +
                    (cntl3[i] & scalars[3]);
    }
}
#endif /* LV_HAVE_SSE2 */

#ifdef LV_HAVE_GENERIC

static inline void volk_16i_permute_and_scalar_add_generic(short* target,
                                                            short* src0,
                                                            short* permute_indexes,
                                                            short* cntl0,
                                                            short* cntl1,
                                                            short* cntl2,
                                                            short* cntl3,
                                                            short* scalars,
                                                            unsigned int num_points)
{
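    /* Portable reference implementation: computes one output sample per
     * iteration, mirroring the SSE2 kernel above. */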
    const unsigned int num_bytes = num_points * 2;

    int i = 0;

    int bound = num_bytes >> 1;
    for (i = 0; i < bound; ++i) {
        target[i] = src0[permute_indexes[i]] + (cntl0[i] & scalars[0]) +
                    (cntl1[i] & scalars[1]) + (cntl2[i] & scalars[2]) +
                    (cntl3[i] & scalars[3]);
    }
}
#endif /* LV_HAVE_GENERIC */

#endif /* INCLUDED_volk_16i_permute_and_scalar_add_a_H */