46 #ifndef INCLUDED_volk_16ic_x2_dot_prod_16ic_H 47 #define INCLUDED_volk_16ic_x2_dot_prod_16ic_H 54 #ifdef LV_HAVE_GENERIC 58 result[0] =
lv_cmake((int16_t)0, (int16_t)0);
60 for (n = 0; n < num_points; n++)
#ifdef LV_HAVE_SSE2
#include <emmintrin.h>

/*!
  \brief SSE2 (aligned) dot product of two complex 16-bit integer vectors.
  Processes 4 complex points per iteration; inputs must be 16-byte aligned.
  The scalar tail and the horizontal reduction use saturated adds.
  \param out        Output: single complex result (overwritten)
  \param in_a       First input vector (16-byte aligned)
  \param in_b       Second input vector (16-byte aligned)
  \param num_points Number of complex points to process
 */
static inline void volk_16ic_x2_dot_prod_16ic_a_sse2(lv_16sc_t* out,
                                                     const lv_16sc_t* in_a,
                                                     const lv_16sc_t* in_b,
                                                     unsigned int num_points)
{
    lv_16sc_t dotProduct = lv_cmake((int16_t)0, (int16_t)0);

    const unsigned int sse_iters = num_points / 4;
    unsigned int number;

    const lv_16sc_t* _in_a = in_a;
    const lv_16sc_t* _in_b = in_b;

    if (sse_iters > 0)
        {
            __m128i a, b, c, c_sr, mask_imag, mask_real, real, imag,
                imag1, imag2, b_sl, a_sl, realcacc, imagcacc;
            __VOLK_ATTR_ALIGNED(16) lv_16sc_t dotProductVector[4];

            realcacc = _mm_setzero_si128();
            imagcacc = _mm_setzero_si128();

            /* Byte masks selecting the imaginary (odd) / real (even) 16-bit
               lanes of the interleaved complex layout. */
            mask_imag = _mm_set_epi8(0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0,
                                     0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0);
            mask_real = _mm_set_epi8(0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF,
                                     0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF);

            for (number = 0; number < sse_iters; number++)
                {
                    /* Load 4 complex points: [a3.i a3.r ... a0.i a0.r] */
                    a = _mm_load_si128((__m128i*)_in_a);
                    b = _mm_load_si128((__m128i*)_in_b);
                    /* Lane-wise products: a.r*b.r in even lanes, a.i*b.i in odd */
                    c = _mm_mullo_epi16(a, b);

                    /* Align a.i*b.i over a.r*b.r, then real = a.r*b.r - a.i*b.i
                       (valid in the even lanes; odd lanes are masked off later) */
                    c_sr = _mm_srli_si128(c, 2);
                    real = _mm_subs_epi16(c, c_sr);

                    /* Cross terms for the imaginary part: a.r*b.i + a.i*b.r */
                    b_sl = _mm_slli_si128(b, 2);
                    a_sl = _mm_slli_si128(a, 2);

                    imag1 = _mm_mullo_epi16(a, b_sl);
                    imag2 = _mm_mullo_epi16(b, a_sl);

                    imag = _mm_adds_epi16(imag1, imag2); /* saturated */

                    /* Saturated accumulation across iterations */
                    realcacc = _mm_adds_epi16(realcacc, real);
                    imagcacc = _mm_adds_epi16(imagcacc, imag);

                    _in_a += 4;
                    _in_b += 4;
                }

            /* Keep real parts in even lanes, imaginary in odd, then merge */
            realcacc = _mm_and_si128(realcacc, mask_real);
            imagcacc = _mm_and_si128(imagcacc, mask_imag);

            a = _mm_or_si128(realcacc, imagcacc);

            _mm_store_si128((__m128i*)dotProductVector, a);

            /* Horizontal reduction of the 4 partial complex sums */
            for (number = 0; number < 4; ++number)
                {
                    dotProduct = lv_cmake(
                        sat_adds16i(lv_creal(dotProduct), lv_creal(dotProductVector[number])),
                        sat_adds16i(lv_cimag(dotProduct), lv_cimag(dotProductVector[number])));
                }
        }

    /* Scalar tail for the remaining (num_points % 4) points */
    for (number = 0; number < (num_points % 4); ++number)
        {
            lv_16sc_t tmp = (*_in_a++) * (*_in_b++);
            dotProduct = lv_cmake(sat_adds16i(lv_creal(dotProduct), lv_creal(tmp)),
                                  sat_adds16i(lv_cimag(dotProduct), lv_cimag(tmp)));
        }

    *out = dotProduct;
}

#endif /* LV_HAVE_SSE2 */
#ifdef LV_HAVE_SSE2
#include <emmintrin.h>

/*!
  \brief SSE2 (unaligned) dot product of two complex 16-bit integer vectors.
  Identical algorithm to the aligned SSE2 variant but uses unaligned
  loads/stores, so the inputs need no particular alignment.
  \param out        Output: single complex result (overwritten)
  \param in_a       First input vector
  \param in_b       Second input vector
  \param num_points Number of complex points to process
 */
static inline void volk_16ic_x2_dot_prod_16ic_u_sse2(lv_16sc_t* out,
                                                     const lv_16sc_t* in_a,
                                                     const lv_16sc_t* in_b,
                                                     unsigned int num_points)
{
    lv_16sc_t dotProduct = lv_cmake((int16_t)0, (int16_t)0);

    const unsigned int sse_iters = num_points / 4;
    unsigned int number;

    const lv_16sc_t* _in_a = in_a;
    const lv_16sc_t* _in_b = in_b;

    if (sse_iters > 0)
        {
            __m128i a, b, c, c_sr, mask_imag, mask_real, real, imag,
                imag1, imag2, b_sl, a_sl, realcacc, imagcacc, result;
            __VOLK_ATTR_ALIGNED(16) lv_16sc_t dotProductVector[4];

            realcacc = _mm_setzero_si128();
            imagcacc = _mm_setzero_si128();

            /* Byte masks selecting imaginary (odd) / real (even) 16-bit lanes */
            mask_imag = _mm_set_epi8(0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0,
                                     0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0);
            mask_real = _mm_set_epi8(0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF,
                                     0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF);

            for (number = 0; number < sse_iters; number++)
                {
                    /* Unaligned loads of 4 complex points each */
                    a = _mm_loadu_si128((__m128i*)_in_a);
                    b = _mm_loadu_si128((__m128i*)_in_b);
                    c = _mm_mullo_epi16(a, b); /* a.r*b.r / a.i*b.i lane products */

                    /* real = a.r*b.r - a.i*b.i (even lanes, saturated) */
                    c_sr = _mm_srli_si128(c, 2);
                    real = _mm_subs_epi16(c, c_sr);

                    /* imag = a.r*b.i + a.i*b.r (cross terms, saturated) */
                    b_sl = _mm_slli_si128(b, 2);
                    a_sl = _mm_slli_si128(a, 2);

                    imag1 = _mm_mullo_epi16(a, b_sl);
                    imag2 = _mm_mullo_epi16(b, a_sl);

                    imag = _mm_adds_epi16(imag1, imag2);

                    realcacc = _mm_adds_epi16(realcacc, real);
                    imagcacc = _mm_adds_epi16(imagcacc, imag);

                    _in_a += 4;
                    _in_b += 4;
                }

            realcacc = _mm_and_si128(realcacc, mask_real);
            imagcacc = _mm_and_si128(imagcacc, mask_imag);

            result = _mm_or_si128(realcacc, imagcacc);

            _mm_storeu_si128((__m128i*)dotProductVector, result);

            /* Horizontal reduction of the 4 partial complex sums */
            for (number = 0; number < 4; ++number)
                {
                    dotProduct = lv_cmake(
                        sat_adds16i(lv_creal(dotProduct), lv_creal(dotProductVector[number])),
                        sat_adds16i(lv_cimag(dotProduct), lv_cimag(dotProductVector[number])));
                }
        }

    /* Scalar tail for the remaining (num_points % 4) points */
    for (number = 0; number < (num_points % 4); ++number)
        {
            lv_16sc_t tmp = (*_in_a++) * (*_in_b++);
            dotProduct = lv_cmake(sat_adds16i(lv_creal(dotProduct), lv_creal(tmp)),
                                  sat_adds16i(lv_cimag(dotProduct), lv_cimag(tmp)));
        }

    *out = dotProduct;
}

#endif /* LV_HAVE_SSE2 */
#ifdef LV_HAVE_AVX2
#include <immintrin.h>

/*!
  \brief AVX2 (unaligned) dot product of two complex 16-bit integer vectors.
  Processes 8 complex points per iteration with unaligned 256-bit loads.
  NOTE(review): the name says "axv2" — a historical typo for "avx2" — kept
  unchanged because external callers and the dispatcher bind to this symbol.
  \param out        Output: single complex result (overwritten)
  \param in_a       First input vector
  \param in_b       Second input vector
  \param num_points Number of complex points to process
 */
static inline void volk_16ic_x2_dot_prod_16ic_u_axv2(lv_16sc_t* out,
                                                     const lv_16sc_t* in_a,
                                                     const lv_16sc_t* in_b,
                                                     unsigned int num_points)
{
    lv_16sc_t dotProduct = lv_cmake((int16_t)0, (int16_t)0);

    const unsigned int avx_iters = num_points / 8;
    unsigned int number;

    const lv_16sc_t* _in_a = in_a;
    const lv_16sc_t* _in_b = in_b;

    if (avx_iters > 0)
        {
            __m256i a, b, c, c_sr, mask_imag, mask_real, real, imag,
                imag1, imag2, b_sl, a_sl, realcacc, imagcacc, result;
            __VOLK_ATTR_ALIGNED(32) lv_16sc_t dotProductVector[8];

            realcacc = _mm256_setzero_si256();
            imagcacc = _mm256_setzero_si256();

            /* Byte masks selecting imaginary (odd) / real (even) 16-bit lanes */
            mask_imag = _mm256_set_epi8(0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0,
                                        0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0,
                                        0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0,
                                        0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0);
            mask_real = _mm256_set_epi8(0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF,
                                        0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF,
                                        0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF,
                                        0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF);

            for (number = 0; number < avx_iters; number++)
                {
                    /* Unaligned loads of 8 complex points each */
                    a = _mm256_loadu_si256((__m256i*)_in_a);
                    b = _mm256_loadu_si256((__m256i*)_in_b);
                    c = _mm256_mullo_epi16(a, b); /* a.r*b.r / a.i*b.i lane products */

                    /* real = a.r*b.r - a.i*b.i (per 128-bit lane shift, saturated) */
                    c_sr = _mm256_srli_si256(c, 2);
                    real = _mm256_subs_epi16(c, c_sr);

                    /* imag = a.r*b.i + a.i*b.r (cross terms, saturated) */
                    b_sl = _mm256_slli_si256(b, 2);
                    a_sl = _mm256_slli_si256(a, 2);

                    imag1 = _mm256_mullo_epi16(a, b_sl);
                    imag2 = _mm256_mullo_epi16(b, a_sl);

                    imag = _mm256_adds_epi16(imag1, imag2);

                    realcacc = _mm256_adds_epi16(realcacc, real);
                    imagcacc = _mm256_adds_epi16(imagcacc, imag);

                    _in_a += 8;
                    _in_b += 8;
                }

            realcacc = _mm256_and_si256(realcacc, mask_real);
            imagcacc = _mm256_and_si256(imagcacc, mask_imag);

            result = _mm256_or_si256(realcacc, imagcacc);

            _mm256_storeu_si256((__m256i*)dotProductVector, result);

            /* Horizontal reduction of the 8 partial complex sums */
            for (number = 0; number < 8; ++number)
                {
                    dotProduct = lv_cmake(
                        sat_adds16i(lv_creal(dotProduct), lv_creal(dotProductVector[number])),
                        sat_adds16i(lv_cimag(dotProduct), lv_cimag(dotProductVector[number])));
                }
        }

    /* Scalar tail for the remaining (num_points % 8) points */
    for (number = 0; number < (num_points % 8); ++number)
        {
            lv_16sc_t tmp = (*_in_a++) * (*_in_b++);
            dotProduct = lv_cmake(sat_adds16i(lv_creal(dotProduct), lv_creal(tmp)),
                                  sat_adds16i(lv_cimag(dotProduct), lv_cimag(tmp)));
        }

    *out = dotProduct;
}

#endif /* LV_HAVE_AVX2 */
#ifdef LV_HAVE_AVX2
#include <immintrin.h>

/*!
  \brief AVX2 (aligned) dot product of two complex 16-bit integer vectors.
  Processes 8 complex points per iteration; inputs must be 32-byte aligned.
  NOTE(review): the name says "axv2" — a historical typo for "avx2" — kept
  unchanged because external callers and the dispatcher bind to this symbol.
  \param out        Output: single complex result (overwritten)
  \param in_a       First input vector (32-byte aligned)
  \param in_b       Second input vector (32-byte aligned)
  \param num_points Number of complex points to process
 */
static inline void volk_16ic_x2_dot_prod_16ic_a_axv2(lv_16sc_t* out,
                                                     const lv_16sc_t* in_a,
                                                     const lv_16sc_t* in_b,
                                                     unsigned int num_points)
{
    lv_16sc_t dotProduct = lv_cmake((int16_t)0, (int16_t)0);

    const unsigned int avx_iters = num_points / 8;
    unsigned int number;

    const lv_16sc_t* _in_a = in_a;
    const lv_16sc_t* _in_b = in_b;

    if (avx_iters > 0)
        {
            __m256i a, b, c, c_sr, mask_imag, mask_real, real, imag,
                imag1, imag2, b_sl, a_sl, realcacc, imagcacc, result;
            __VOLK_ATTR_ALIGNED(32) lv_16sc_t dotProductVector[8];

            realcacc = _mm256_setzero_si256();
            imagcacc = _mm256_setzero_si256();

            /* Byte masks selecting imaginary (odd) / real (even) 16-bit lanes */
            mask_imag = _mm256_set_epi8(0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0,
                                        0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0,
                                        0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0,
                                        0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0);
            mask_real = _mm256_set_epi8(0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF,
                                        0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF,
                                        0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF,
                                        0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF);

            for (number = 0; number < avx_iters; number++)
                {
                    /* Aligned loads of 8 complex points each */
                    a = _mm256_load_si256((__m256i*)_in_a);
                    b = _mm256_load_si256((__m256i*)_in_b);
                    c = _mm256_mullo_epi16(a, b); /* a.r*b.r / a.i*b.i lane products */

                    /* real = a.r*b.r - a.i*b.i (per 128-bit lane shift, saturated) */
                    c_sr = _mm256_srli_si256(c, 2);
                    real = _mm256_subs_epi16(c, c_sr);

                    /* imag = a.r*b.i + a.i*b.r (cross terms, saturated) */
                    b_sl = _mm256_slli_si256(b, 2);
                    a_sl = _mm256_slli_si256(a, 2);

                    imag1 = _mm256_mullo_epi16(a, b_sl);
                    imag2 = _mm256_mullo_epi16(b, a_sl);

                    imag = _mm256_adds_epi16(imag1, imag2);

                    realcacc = _mm256_adds_epi16(realcacc, real);
                    imagcacc = _mm256_adds_epi16(imagcacc, imag);

                    _in_a += 8;
                    _in_b += 8;
                }

            realcacc = _mm256_and_si256(realcacc, mask_real);
            imagcacc = _mm256_and_si256(imagcacc, mask_imag);

            result = _mm256_or_si256(realcacc, imagcacc);

            _mm256_store_si256((__m256i*)dotProductVector, result);

            /* Horizontal reduction of the 8 partial complex sums */
            for (number = 0; number < 8; ++number)
                {
                    dotProduct = lv_cmake(
                        sat_adds16i(lv_creal(dotProduct), lv_creal(dotProductVector[number])),
                        sat_adds16i(lv_cimag(dotProduct), lv_cimag(dotProductVector[number])));
                }
        }

    /* Scalar tail for the remaining (num_points % 8) points */
    for (number = 0; number < (num_points % 8); ++number)
        {
            lv_16sc_t tmp = (*_in_a++) * (*_in_b++);
            dotProduct = lv_cmake(sat_adds16i(lv_creal(dotProduct), lv_creal(tmp)),
                                  sat_adds16i(lv_cimag(dotProduct), lv_cimag(tmp)));
        }

    *out = dotProduct;
}

#endif /* LV_HAVE_AVX2 */
#ifdef LV_HAVE_NEON
#include <arm_neon.h>

/*!
  \brief NEON dot product of two complex 16-bit integer vectors.
  Loads 4 complex points per iteration de-interleaved (vld2): lane set 0
  holds real parts, lane set 1 imaginary parts. Vector accumulation uses
  saturating adds; the scalar tail uses plain complex multiply-accumulate.
  \param out        Output: single complex result (overwritten)
  \param in_a       First input vector
  \param in_b       Second input vector
  \param num_points Number of complex points to process
 */
static inline void volk_16ic_x2_dot_prod_16ic_neon(lv_16sc_t* out,
                                                   const lv_16sc_t* in_a,
                                                   const lv_16sc_t* in_b,
                                                   unsigned int num_points)
{
    unsigned int quarter_points = num_points / 4;
    unsigned int number;

    lv_16sc_t* a_ptr = (lv_16sc_t*)in_a;
    lv_16sc_t* b_ptr = (lv_16sc_t*)in_b;
    *out = lv_cmake((int16_t)0, (int16_t)0);

    if (quarter_points > 0)
        {
            /* val[0]: real parts, val[1]: imaginary parts */
            int16x4x2_t a_val, b_val, c_val, accumulator;
            int16x4x2_t tmp_real, tmp_imag;
            __VOLK_ATTR_ALIGNED(16) lv_16sc_t accum_result[4];
            lv_16sc_t dotProduct = lv_cmake((int16_t)0, (int16_t)0);

            accumulator.val[0] = vdup_n_s16(0);
            accumulator.val[1] = vdup_n_s16(0);

            for (number = 0; number < quarter_points; ++number)
                {
                    a_val = vld2_s16((int16_t*)a_ptr); /* a0r..a3r | a0i..a3i */
                    b_val = vld2_s16((int16_t*)b_ptr); /* b0r..b3r | b0i..b3i */

                    /* real*real and imag*imag for the real component */
                    tmp_real.val[0] = vmul_s16(a_val.val[0], b_val.val[0]);
                    tmp_real.val[1] = vmul_s16(a_val.val[1], b_val.val[1]);

                    /* cross terms for the imaginary component */
                    tmp_imag.val[0] = vmul_s16(a_val.val[0], b_val.val[1]);
                    tmp_imag.val[1] = vmul_s16(a_val.val[1], b_val.val[0]);

                    /* c = (ar*br - ai*bi) + j(ar*bi + ai*br), saturated */
                    c_val.val[0] = vqsub_s16(tmp_real.val[0], tmp_real.val[1]);
                    c_val.val[1] = vqadd_s16(tmp_imag.val[0], tmp_imag.val[1]);

                    accumulator.val[0] = vqadd_s16(accumulator.val[0], c_val.val[0]);
                    accumulator.val[1] = vqadd_s16(accumulator.val[1], c_val.val[1]);

                    a_ptr += 4;
                    b_ptr += 4;
                }

            /* Horizontal reduction of the 4 partial complex sums */
            vst2_s16((int16_t*)accum_result, accumulator);
            for (number = 0; number < 4; ++number)
                {
                    dotProduct = lv_cmake(
                        sat_adds16i(lv_creal(dotProduct), lv_creal(accum_result[number])),
                        sat_adds16i(lv_cimag(dotProduct), lv_cimag(accum_result[number])));
                }

            *out = dotProduct;
        }

    /* Scalar tail for the remaining points */
    for (number = quarter_points * 4; number < num_points; ++number)
        {
            *out += (*a_ptr++) * (*b_ptr++);
        }
}

#endif /* LV_HAVE_NEON */
#ifdef LV_HAVE_NEON
#include <arm_neon.h>

/*!
  \brief NEON dot product using fused vector multiply-accumulate
  (vmla/vmls), 4 complex points per iteration. The final horizontal sum
  uses plain (non-saturating) scalar adds, matching this variant's
  historical behavior.
  \param out        Output: single complex result (overwritten)
  \param in_a       First input vector
  \param in_b       Second input vector
  \param num_points Number of complex points to process
 */
static inline void volk_16ic_x2_dot_prod_16ic_neon_vma(lv_16sc_t* out,
                                                       const lv_16sc_t* in_a,
                                                       const lv_16sc_t* in_b,
                                                       unsigned int num_points)
{
    unsigned int quarter_points = num_points / 4;
    unsigned int number;

    lv_16sc_t* a_ptr = (lv_16sc_t*)in_a;
    lv_16sc_t* b_ptr = (lv_16sc_t*)in_b;

    /* val[0]: real parts, val[1]: imaginary parts */
    int16x4x2_t a_val, b_val, accumulator;
    int16x4x2_t tmp;
    __VOLK_ATTR_ALIGNED(16) lv_16sc_t accum_result[4];

    accumulator.val[0] = vdup_n_s16(0);
    accumulator.val[1] = vdup_n_s16(0);

    for (number = 0; number < quarter_points; ++number)
        {
            a_val = vld2_s16((int16_t*)a_ptr); /* a0r..a3r | a0i..a3i */
            b_val = vld2_s16((int16_t*)b_ptr); /* b0r..b3r | b0i..b3i */

            tmp.val[0] = vmul_s16(a_val.val[0], b_val.val[0]);
            tmp.val[1] = vmul_s16(a_val.val[1], b_val.val[0]);

            /* fused multiply-subtract / multiply-add complete the complex product:
               real = ar*br - ai*bi,  imag = ai*br + ar*bi */
            tmp.val[0] = vmls_s16(tmp.val[0], a_val.val[1], b_val.val[1]);
            tmp.val[1] = vmla_s16(tmp.val[1], a_val.val[0], b_val.val[1]);

            accumulator.val[0] = vqadd_s16(accumulator.val[0], tmp.val[0]);
            accumulator.val[1] = vqadd_s16(accumulator.val[1], tmp.val[1]);

            a_ptr += 4;
            b_ptr += 4;
        }

    /* Horizontal reduction of the 4 partial complex sums */
    vst2_s16((int16_t*)accum_result, accumulator);
    *out = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

    /* Scalar tail for the remaining points */
    for (number = quarter_points * 4; number < num_points; ++number)
        {
            *out += (*a_ptr++) * (*b_ptr++);
        }
}

#endif /* LV_HAVE_NEON */
499 #include <arm_neon.h> 503 unsigned int quarter_points = num_points / 4;
510 int16x4x2_t a_val, b_val, accumulator1, accumulator2;
513 accumulator1.val[0] = vdup_n_s16(0);
514 accumulator1.val[1] = vdup_n_s16(0);
515 accumulator2.val[0] = vdup_n_s16(0);
516 accumulator2.val[1] = vdup_n_s16(0);
518 for(number = 0; number < quarter_points; ++number)
520 a_val = vld2_s16((int16_t*)a_ptr);
521 b_val = vld2_s16((int16_t*)b_ptr);
526 accumulator1.val[0] = vmla_s16(accumulator1.val[0], a_val.val[0], b_val.val[0]);
527 accumulator2.val[0] = vmls_s16(accumulator2.val[0], a_val.val[1], b_val.val[1]);
528 accumulator1.val[1] = vmla_s16(accumulator1.val[1], a_val.val[0], b_val.val[1]);
529 accumulator2.val[1] = vmla_s16(accumulator2.val[1], a_val.val[1], b_val.val[0]);
535 accumulator1.val[0] = vqadd_s16(accumulator1.val[0], accumulator2.val[0]);
536 accumulator1.val[1] = vqadd_s16(accumulator1.val[1], accumulator2.val[1]);
538 vst2_s16((int16_t*)accum_result, accumulator1);
539 *out = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];
542 for(number = quarter_points * 4; number < num_points; ++number)
544 *out += (*a_ptr++) * (*b_ptr++);
static int16_t sat_adds16i(int16_t x, int16_t y)
Definition: saturation_arithmetic.h:29
short complex lv_16sc_t
Definition: volk_complex.h:58
static void volk_16ic_x2_dot_prod_16ic_u_sse2(lv_16sc_t *out, const lv_16sc_t *in_a, const lv_16sc_t *in_b, unsigned int num_points)
Definition: volk_16ic_x2_dot_prod_16ic.h:150
#define lv_cmake(r, i)
Definition: volk_complex.h:64
static void volk_16ic_x2_dot_prod_16ic_neon(lv_16sc_t *out, const lv_16sc_t *in_a, const lv_16sc_t *in_b, unsigned int num_points)
Definition: volk_16ic_x2_dot_prod_16ic.h:378
#define __VOLK_PREFETCH(addr)
Definition: volk_common.h:39
static void volk_16ic_x2_dot_prod_16ic_generic(lv_16sc_t *result, const lv_16sc_t *in_a, const lv_16sc_t *in_b, unsigned int num_points)
Definition: volk_16ic_x2_dot_prod_16ic.h:56
#define __VOLK_ATTR_ALIGNED(x)
Definition: volk_common.h:33
static void volk_16ic_x2_dot_prod_16ic_a_sse2(lv_16sc_t *out, const lv_16sc_t *in_a, const lv_16sc_t *in_b, unsigned int num_points)
Definition: volk_16ic_x2_dot_prod_16ic.h:73
static void volk_16ic_x2_dot_prod_16ic_neon_optvma(lv_16sc_t *out, const lv_16sc_t *in_a, const lv_16sc_t *in_b, unsigned int num_points)
Definition: volk_16ic_x2_dot_prod_16ic.h:501
static void volk_16ic_x2_dot_prod_16ic_neon_vma(lv_16sc_t *out, const lv_16sc_t *in_a, const lv_16sc_t *in_b, unsigned int num_points)
Definition: volk_16ic_x2_dot_prod_16ic.h:449
#define lv_creal(x)
Definition: volk_complex.h:83
#define lv_cimag(x)
Definition: volk_complex.h:85