#ifndef INCLUDED_volk_16i_branch_4_state_8_a_H
#define INCLUDED_volk_16i_branch_4_state_8_a_H

#ifdef LV_HAVE_SSSE3

#include <xmmintrin.h>
#include <emmintrin.h>
#include <tmmintrin.h>

static inline void volk_16i_branch_4_state_8_a_ssse3(
    short* target, short* src0, char** permuters, short* cntl2, short* cntl3, short* scalars)
{
    __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11;
    __m128i *p_target, *p_src0, *p_cntl2, *p_cntl3, *p_scalars;

    p_target = (__m128i*)target;
    p_src0 = (__m128i*)src0;
    p_cntl2 = (__m128i*)cntl2;
    p_cntl3 = (__m128i*)cntl3;
    p_scalars = (__m128i*)scalars;

    int i = 0;
    int bound = 1; /* one 4-state x 8-short batch per call */
    xmm0 = _mm_load_si128(p_scalars);

    /* splat scalars[0..3] into the low four 16-bit lanes of four registers */
    xmm1 = _mm_shufflelo_epi16(xmm0, 0);
    xmm2 = _mm_shufflelo_epi16(xmm0, 0x55);
    xmm3 = _mm_shufflelo_epi16(xmm0, 0xaa);
    xmm4 = _mm_shufflelo_epi16(xmm0, 0xff);

    /* then replicate the low dword so every 16-bit lane holds that scalar */
    xmm1 = _mm_shuffle_epi32(xmm1, 0x00);
    xmm2 = _mm_shuffle_epi32(xmm2, 0x00);
    xmm3 = _mm_shuffle_epi32(xmm3, 0x00);
    xmm4 = _mm_shuffle_epi32(xmm4, 0x00);
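    /*
     * Worked example of the broadcast above (values illustrative only):
     * if scalars = { 10, 20, 30, 40, ... }, then after the two shuffles
     *   xmm1 = { 10, 10, 10, 10, 10, 10, 10, 10 }
     *   xmm2 = { 20, 20, 20, 20, 20, 20, 20, 20 }
     *   xmm3 = { 30, 30, 30, 30, 30, 30, 30, 30 }
     *   xmm4 = { 40, 40, 40, 40, 40, 40, 40, 40 }
     * _mm_shufflelo_epi16 copies the selected word into lanes 0-3, and
     * _mm_shuffle_epi32(..., 0x00) then replicates dword 0 across the register.
     */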
    /* load the four byte-shuffle masks, one per output state */
    xmm0 = _mm_load_si128((__m128i*)permuters[0]);
    xmm6 = _mm_load_si128((__m128i*)permuters[1]);
    xmm8 = _mm_load_si128((__m128i*)permuters[2]);
    xmm10 = _mm_load_si128((__m128i*)permuters[3]);
    for (; i < bound; ++i) {
        /* gather each state's source metrics with a byte shuffle */
        xmm5 = _mm_load_si128(p_src0);
        xmm0 = _mm_shuffle_epi8(xmm5, xmm0);
        xmm6 = _mm_shuffle_epi8(xmm5, xmm6);
        xmm8 = _mm_shuffle_epi8(xmm5, xmm8);
        xmm10 = _mm_shuffle_epi8(xmm5, xmm10);
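        /*
         * Each byte of a permuter selects one byte of xmm5, so consecutive
         * byte pairs reassemble 16-bit metrics -- this is why the generic
         * version below divides the permuter byte index by 2. Note the shuffle
         * results overwrite the mask registers, which relies on the loop
         * making a single pass (bound == 1).
         */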
        /* per-state bias: state 0 gets scalars[0]+scalars[1], state 1 gets
           scalars[1], state 2 gets scalars[0], state 3 gets neither */
        xmm5 = _mm_add_epi16(xmm1, xmm2);

        xmm6 = _mm_add_epi16(xmm2, xmm6);
        xmm8 = _mm_add_epi16(xmm1, xmm8);

        xmm7 = _mm_load_si128(p_cntl2);
        xmm9 = _mm_load_si128(p_cntl3);

        xmm0 = _mm_add_epi16(xmm5, xmm0);

        /* cntl2/cntl3 words act as per-lane select masks gating scalars[2]/scalars[3] */
        xmm7 = _mm_and_si128(xmm7, xmm3);
        xmm9 = _mm_and_si128(xmm9, xmm4);
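        /*
         * The AND-based gating is branchless conditional addition: a cntl word
         * of 0xFFFF passes the scalar through, 0x0000 suppresses it, e.g.
         *   (short)(0xFFFF & s) == s   and   (short)(0x0000 & s) == 0.
         * Any other bit pattern would contribute a partial value, so the
         * control buffers are assumed to hold only those two values.
         */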
        /* remaining rounds: the next state's mask loads are interleaved with
           the previous state's accumulation to hide load latency */
        xmm5 = _mm_load_si128(&p_cntl2[1]);
        xmm11 = _mm_load_si128(&p_cntl3[1]);

        xmm7 = _mm_add_epi16(xmm7, xmm9);

        xmm5 = _mm_and_si128(xmm5, xmm3);
        xmm11 = _mm_and_si128(xmm11, xmm4);

        xmm0 = _mm_add_epi16(xmm0, xmm7); /* state 0 complete */

        xmm7 = _mm_load_si128(&p_cntl2[2]);
        xmm9 = _mm_load_si128(&p_cntl3[2]);

        xmm5 = _mm_add_epi16(xmm5, xmm11);

        xmm7 = _mm_and_si128(xmm7, xmm3);
        xmm9 = _mm_and_si128(xmm9, xmm4);

        xmm6 = _mm_add_epi16(xmm6, xmm5); /* state 1 complete */

        xmm5 = _mm_load_si128(&p_cntl2[3]);
        xmm11 = _mm_load_si128(&p_cntl3[3]);

        xmm7 = _mm_add_epi16(xmm7, xmm9);

        xmm5 = _mm_and_si128(xmm5, xmm3);
        xmm11 = _mm_and_si128(xmm11, xmm4);

        xmm8 = _mm_add_epi16(xmm8, xmm7); /* state 2 complete */

        xmm5 = _mm_add_epi16(xmm5, xmm11);
        _mm_store_si128(p_target, xmm0);
        _mm_store_si128(&p_target[1], xmm6);

        xmm10 = _mm_add_epi16(xmm5, xmm10); /* state 3 complete */

        _mm_store_si128(&p_target[2], xmm8);
        _mm_store_si128(&p_target[3], xmm10);
    }
}

#endif /*LV_HAVE_SSSE3*/
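/*
 * The generic implementation below is the portable reference for the SSSE3
 * kernel above: state i reads its metrics through permuters[i], adds the
 * statically selected scalars[0]/scalars[1], and gates scalars[2]/scalars[3]
 * with the cntl2/cntl3 masks.
 */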
#ifdef LV_HAVE_GENERIC

static inline void volk_16i_branch_4_state_8_generic(
    short* target, short* src0, char** permuters, short* cntl2, short* cntl3, short* scalars)
{
    int i = 0;
    int bound = 4; /* four states of eight shorts each */

    for (; i < bound; ++i) {
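        /*
         * Indexing notes: permuters[i][k * 2] is the byte-shuffle index of the
         * low byte of the k-th metric, so dividing by 2 converts it to a short
         * index into src0. (i + 1) % 2 selects scalars[0] for even states and
         * ((i >> 1) ^ 1) selects scalars[1] for states 0 and 1, matching the
         * fixed scalar mix in the SSSE3 kernel.
         */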
        target[i * 8] = src0[((char)permuters[i][0]) / 2] + ((i + 1) % 2 * scalars[0]) +
                        (((i >> 1) ^ 1) * scalars[1]) + (cntl2[i * 8] & scalars[2]) +
                        (cntl3[i * 8] & scalars[3]);
        target[i * 8 + 1] = src0[((char)permuters[i][1 * 2]) / 2] + ((i + 1) % 2 * scalars[0]) +
                            (((i >> 1) ^ 1) * scalars[1]) + (cntl2[i * 8 + 1] & scalars[2]) +
                            (cntl3[i * 8 + 1] & scalars[3]);
        target[i * 8 + 2] = src0[((char)permuters[i][2 * 2]) / 2] + ((i + 1) % 2 * scalars[0]) +
                            (((i >> 1) ^ 1) * scalars[1]) + (cntl2[i * 8 + 2] & scalars[2]) +
                            (cntl3[i * 8 + 2] & scalars[3]);
        target[i * 8 + 3] = src0[((char)permuters[i][3 * 2]) / 2] + ((i + 1) % 2 * scalars[0]) +
                            (((i >> 1) ^ 1) * scalars[1]) + (cntl2[i * 8 + 3] & scalars[2]) +
                            (cntl3[i * 8 + 3] & scalars[3]);
        target[i * 8 + 4] = src0[((char)permuters[i][4 * 2]) / 2] + ((i + 1) % 2 * scalars[0]) +
                            (((i >> 1) ^ 1) * scalars[1]) + (cntl2[i * 8 + 4] & scalars[2]) +
                            (cntl3[i * 8 + 4] & scalars[3]);
        target[i * 8 + 5] = src0[((char)permuters[i][5 * 2]) / 2] + ((i + 1) % 2 * scalars[0]) +
                            (((i >> 1) ^ 1) * scalars[1]) + (cntl2[i * 8 + 5] & scalars[2]) +
                            (cntl3[i * 8 + 5] & scalars[3]);
        target[i * 8 + 6] = src0[((char)permuters[i][6 * 2]) / 2] + ((i + 1) % 2 * scalars[0]) +
                            (((i >> 1) ^ 1) * scalars[1]) + (cntl2[i * 8 + 6] & scalars[2]) +
                            (cntl3[i * 8 + 6] & scalars[3]);
        target[i * 8 + 7] = src0[((char)permuters[i][7 * 2]) / 2] + ((i + 1) % 2 * scalars[0]) +
                            (((i >> 1) ^ 1) * scalars[1]) + (cntl2[i * 8 + 7] & scalars[2]) +
                            (cntl3[i * 8 + 7] & scalars[3]);
    }
}

#endif /*LV_HAVE_GENERIC*/

#endif /*INCLUDED_volk_16i_branch_4_state_8_a_H*/
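/*
 * Usage sketch (an illustration, not part of this file or the VOLK API
 * surface). All six buffers must be 16-byte aligned for the aligned
 * _mm_load_si128/_mm_store_si128 calls in the SSSE3 kernel, e.g. via
 * volk_malloc. The sizes (32 outputs, four 16-byte permuter rows, 32 control
 * words, scalars padded to a full 128-bit load) are assumptions inferred
 * from the indexing above.
 *
 *   size_t al = volk_get_alignment();
 *   short* target  = (short*)volk_malloc(32 * sizeof(short), al);
 *   short* src0    = (short*)volk_malloc(8 * sizeof(short), al);
 *   short* cntl2   = (short*)volk_malloc(32 * sizeof(short), al);
 *   short* cntl3   = (short*)volk_malloc(32 * sizeof(short), al);
 *   short* scalars = (short*)volk_malloc(8 * sizeof(short), al);
 *   char*  permuters[4]; // each row: 16 pshufb byte indices, 16-byte aligned
 *   // ... fill buffers; cntl words must be 0x0000 or 0xFFFF ...
 *   volk_16i_branch_4_state_8_generic(target, src0, permuters, cntl2, cntl3, scalars);
 *   // ... use target[0..31], then volk_free each buffer ...
 */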