58 #ifndef INCLUDED_volk_32fc_x2_dot_prod_32fc_u_H 59 #define INCLUDED_volk_32fc_x2_dot_prod_32fc_u_H 67 #ifdef LV_HAVE_GENERIC 72 float * res = (
float*) result;
73 float * in = (
float*) input;
74 float * tp = (
float*) taps;
75 unsigned int n_2_ccomplex_blocks = num_points/2;
76 unsigned int isodd = num_points & 1;
78 float sum0[2] = {0,0};
79 float sum1[2] = {0,0};
82 for(i = 0; i < n_2_ccomplex_blocks; ++
i) {
83 sum0[0] += in[0] * tp[0] - in[1] * tp[1];
84 sum0[1] += in[0] * tp[1] + in[1] * tp[0];
85 sum1[0] += in[2] * tp[2] - in[3] * tp[3];
86 sum1[1] += in[2] * tp[3] + in[3] * tp[2];
92 res[0] = sum0[0] + sum1[0];
93 res[1] = sum0[1] + sum1[1];
96 for(i = 0; i < isodd; ++
i) {
97 *result += input[num_points - 1] * taps[num_points - 1];
#if LV_HAVE_SSE && LV_HAVE_64

/*!
  \brief SSE x86-64 inline-assembly dot product, unaligned loads (movups).
  Processes two complex samples per iteration in xmm registers; the C tail
  below the asm handles an odd final sample.
  NOTE(review): relies on num_bytes arriving in rcx via the "c" constraint
  and on %= -unique local labels — reconstructed loop labels/counter from
  the canonical VOLK source; confirm against upstream.
*/
static inline void volk_32fc_x2_dot_prod_32fc_u_sse_64(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  const unsigned int num_bytes = num_points*8;
  unsigned int isodd = num_points & 1;

  __VOLK_ASM
    (
     "#  ccomplex_dotprod_generic (float* result, const float *input,\n\t"
     "#                         const float *taps, unsigned num_bytes)\n\t"
     "#    float sum0 = 0;\n\t"
     "#    float sum1 = 0;\n\t"
     "#    float sum2 = 0;\n\t"
     "#    float sum3 = 0;\n\t"
     "#    do {\n\t"
     "#      sum0 += input[0] * taps[0] - input[1] * taps[1];\n\t"
     "#      sum1 += input[0] * taps[1] + input[1] * taps[0];\n\t"
     "#      sum2 += input[2] * taps[2] - input[3] * taps[3];\n\t"
     "#      sum3 += input[2] * taps[3] + input[3] * taps[2];\n\t"
     "#      input += 4;\n\t"
     "#      taps += 4;\n\t"
     "#    } while (--n_2_ccomplex_blocks != 0);\n\t"
     "#    result[0] = sum0 + sum2;\n\t"
     "#    result[1] = sum1 + sum3;\n\t"
     "# TODO: prefetch and better scheduling\n\t"
     "  xor %%r9, %%r9\n\t"
     "  xor %%r10, %%r10\n\t"
     "  movq %%rcx, %%rax\n\t"
     "  movq %%rcx, %%r8\n\t"
     "  movq %[rsi], %%r9\n\t"
     "  movq %[rdx], %%r10\n\t"
     "  xorps %%xmm6, %%xmm6   # zero accumulators\n\t"
     "  movups 0(%%r9), %%xmm0\n\t"
     "  xorps %%xmm7, %%xmm7   # zero accumulators\n\t"
     "  movups 0(%%r10), %%xmm2\n\t"
     "  shr $5, %%rax          # rax = n_2_ccomplex_blocks / 2\n\t"
     "  shr $4, %%r8\n\t"
     "  jmp .%=L1_test\n\t"
     "  # 4 taps / loop\n\t"
     "  # something like ?? cycles / loop\n\t"
     ".%=Loop1:\n\t"
     "# complex prod: C += A * B,  w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
     "#  movups (%%r9), %%xmmA\n\t"
     "#  movups (%%r10), %%xmmB\n\t"
     "#  movups %%xmmA, %%xmmZ\n\t"
     "#  shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
     "#  mulps %%xmmB, %%xmmA\n\t"
     "#  mulps %%xmmZ, %%xmmB\n\t"
     "#  # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
     "#  xorps %%xmmPN, %%xmmA\n\t"
     "#  movups %%xmmA, %%xmmZ\n\t"
     "#  unpcklps %%xmmB, %%xmmA\n\t"
     "#  unpckhps %%xmmB, %%xmmZ\n\t"
     "#  movups %%xmmZ, %%xmmY\n\t"
     "#  shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
     "#  shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
     "#  addps %%xmmZ, %%xmmA\n\t"
     "#  addps %%xmmA, %%xmmC\n\t"
     "# A=xmm0, B=xmm2, Z=xmm4\n\t"
     "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
     "  movups 16(%%r9), %%xmm1\n\t"
     "  movups %%xmm0, %%xmm4\n\t"
     "  mulps %%xmm2, %%xmm0\n\t"
     "  shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
     "  movups 16(%%r10), %%xmm3\n\t"
     "  movups %%xmm1, %%xmm5\n\t"
     "  addps %%xmm0, %%xmm6\n\t"
     "  mulps %%xmm3, %%xmm1\n\t"
     "  shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
     "  addps %%xmm1, %%xmm6\n\t"
     "  mulps %%xmm4, %%xmm2\n\t"
     "  movups 32(%%r9), %%xmm0\n\t"
     "  addps %%xmm2, %%xmm7\n\t"
     "  mulps %%xmm5, %%xmm3\n\t"
     "  add $32, %%r9\n\t"
     "  movups 32(%%r10), %%xmm2\n\t"
     "  addps %%xmm3, %%xmm7\n\t"
     "  add $32, %%r10\n\t"
     ".%=L1_test:\n\t"
     "  dec %%rax\n\t"
     "  jge .%=Loop1\n\t"
     "  # We've handled the bulk of multiplies up to here.\n\t"
     "  # Let's sse if original n_2_ccomplex_blocks was odd.\n\t"
     "  # If so, we've got 2 more taps to do.\n\t"
     "  and $1, %%r8\n\t"
     "  je .%=Leven\n\t"
     "  # The count was odd, do 2 more taps.\n\t"
     "  # Note that we've already got mm0/mm2 preloaded\n\t"
     "  # from the main loop.\n\t"
     "  movups %%xmm0, %%xmm4\n\t"
     "  mulps %%xmm2, %%xmm0\n\t"
     "  shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
     "  addps %%xmm0, %%xmm6\n\t"
     "  mulps %%xmm4, %%xmm2\n\t"
     "  addps %%xmm2, %%xmm7\n\t"
     ".%=Leven:\n\t"
     "  # neg inversor\n\t"
     "  xorps %%xmm1, %%xmm1\n\t"
     "  mov $0x80000000, %%r9\n\t"
     "  movd %%r9, %%xmm1\n\t"
     "  shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
     "  # pfpnacc\n\t"
     "  xorps %%xmm1, %%xmm6\n\t"
     "  movups %%xmm6, %%xmm2\n\t"
     "  unpcklps %%xmm7, %%xmm6\n\t"
     "  unpckhps %%xmm7, %%xmm2\n\t"
     "  movups %%xmm2, %%xmm3\n\t"
     "  shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
     "  shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
     "  addps %%xmm2, %%xmm6\n\t"
     "  # xmm6 = r1 i2 r3 i4\n\t"
     "  movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
     "  addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
     "  movlps %%xmm6, (%[rdi]) # store low 2x32 bits (complex) to memory\n\t"
     :
     :[rsi] "r" (input), [rdx] "r" (taps), "c" (num_bytes), [rdi] "r" (result)
     :"rax", "r8", "r9", "r10"
     );

  /* The asm only covers whole pairs; add the odd trailing sample in C. */
  if(isodd) {
    *result += input[num_points - 1] * taps[num_points - 1];
  }

  return;
}

#endif /* LV_HAVE_SSE && LV_HAVE_64 */
#ifdef LV_HAVE_SSE3

#include <pmmintrin.h>

/*!
  \brief SSE3 dot product, unaligned loads. Processes 2 complex samples
  per iteration using moveldup/movehdup + addsub; odd tail handled in C.
*/
static inline void volk_32fc_x2_dot_prod_32fc_u_sse3(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  lv_32fc_t dotProduct;
  memset(&dotProduct, 0x0, 2*sizeof(float));

  unsigned int number = 0;
  const unsigned int halfPoints = num_points/2;
  unsigned int isodd = num_points & 1;

  __m128 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

  const lv_32fc_t* a = input;
  const lv_32fc_t* b = taps;

  dotProdVal = _mm_setzero_ps();

  for(;number < halfPoints; number++){

    x = _mm_loadu_ps((float*)a); // ar,ai,br,bi
    y = _mm_loadu_ps((float*)b); // cr,ci,dr,di

    yl = _mm_moveldup_ps(y);     // cr,cr,dr,dr
    yh = _mm_movehdup_ps(y);     // ci,ci,di,di

    tmp1 = _mm_mul_ps(x,yl);     // ar*cr,ai*cr,br*dr,bi*dr

    x = _mm_shuffle_ps(x,x,0xB1); // ai,ar,bi,br

    tmp2 = _mm_mul_ps(x,yh);     // ai*ci,ar*ci,bi*di,br*di

    z = _mm_addsub_ps(tmp1,tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, ...

    dotProdVal = _mm_add_ps(dotProdVal, z); // accumulate

    a += 2;
    b += 2;
  }

  __VOLK_ATTR_ALIGNED(16) lv_32fc_t dotProductVector[2];

  _mm_storeu_ps((float*)dotProductVector,dotProdVal); // spill accumulator

  dotProduct += ( dotProductVector[0] + dotProductVector[1] );

  /* odd num_points: one sample left over */
  if(isodd) {
    dotProduct += input[num_points - 1] * taps[num_points - 1];
  }

  *result = dotProduct;
}

#endif /*LV_HAVE_SSE3*/
#ifdef LV_HAVE_SSE4_1

#include <smmintrin.h>

/*!
  \brief SSE4.1 dot product, unaligned loads. Deinterleaves 4 complex
  samples into real/imag lanes and uses _mm_dp_ps for the partial sums.
*/
static inline void volk_32fc_x2_dot_prod_32fc_u_sse4_1(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  unsigned int i = 0;
  const unsigned int qtr_points = num_points/4;
  const unsigned int isodd = num_points & 3;

  __m128 xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, real0, real1, im0, im1;
  float *p_input, *p_taps;
  __m64* p_result;

  p_result = (__m64*)result;
  p_input = (float*)input;
  p_taps = (float*)taps;

  /* NOTE(review): literal wider than 64 bits — compilers truncate it to
     {0x80000000, 0}, i.e. a sign bit in lane 0 only; verify vs. upstream. */
  static const __m128i neg = {0x000000000000000080000000};

  real0 = _mm_setzero_ps();
  real1 = _mm_setzero_ps();
  im0 = _mm_setzero_ps();
  im1 = _mm_setzero_ps();

  for(; i < qtr_points; ++i) {
    xmm0 = _mm_loadu_ps(p_input);
    xmm1 = _mm_loadu_ps(p_taps);

    p_input += 4;
    p_taps += 4;

    xmm2 = _mm_loadu_ps(p_input);
    xmm3 = _mm_loadu_ps(p_taps);

    p_input += 4;
    p_taps += 4;

    xmm4 = _mm_unpackhi_ps(xmm0, xmm2);
    xmm5 = _mm_unpackhi_ps(xmm1, xmm3);
    xmm0 = _mm_unpacklo_ps(xmm0, xmm2);
    xmm2 = _mm_unpacklo_ps(xmm1, xmm3);

    // imaginary vector from input
    xmm1 = _mm_unpackhi_ps(xmm0, xmm4);
    // real vector from input
    xmm3 = _mm_unpacklo_ps(xmm0, xmm4);
    // imaginary vector from taps
    xmm0 = _mm_unpackhi_ps(xmm2, xmm5);
    // real vector from taps
    xmm2 = _mm_unpacklo_ps(xmm2, xmm5);

    xmm4 = _mm_dp_ps(xmm3, xmm2, 0xf1); // sum(rr), result in lane 0
    xmm5 = _mm_dp_ps(xmm1, xmm0, 0xf1); // sum(ii), result in lane 0

    xmm6 = _mm_dp_ps(xmm3, xmm0, 0xf2); // sum(ri), result in lane 1
    xmm7 = _mm_dp_ps(xmm1, xmm2, 0xf2); // sum(ir), result in lane 1

    real0 = _mm_add_ps(xmm4, real0);
    real1 = _mm_add_ps(xmm5, real1);
    im0 = _mm_add_ps(xmm6, im0);
    im1 = _mm_add_ps(xmm7, im1);
  }

  /* real part = sum(rr) - sum(ii): flip the sign of real1's lane 0 */
  real1 = _mm_xor_ps(real1, bit128_p(&neg)->float_vec);

  im0 = _mm_add_ps(im0, im1);
  real0 = _mm_add_ps(real0, real1);

  im0 = _mm_add_ps(im0, real0); // lane 0 = real, lane 1 = imag

  _mm_storel_pi(p_result, im0);

  /* scalar tail for num_points not a multiple of 4 */
  for(i = num_points-isodd; i < num_points; i++) {
    *result += input[i] * taps[i];
  }
}

#endif /*LV_HAVE_SSE4_1*/
#ifdef LV_HAVE_AVX

#include <immintrin.h>

/*!
  \brief AVX dot product, unaligned loads. 4 complex samples per iteration
  via moveldup/movehdup + addsub; scalar tail for the remainder.
*/
static inline void volk_32fc_x2_dot_prod_32fc_u_avx(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  unsigned int isodd = num_points & 3;
  unsigned int i = 0;
  lv_32fc_t dotProduct;
  memset(&dotProduct, 0x0, 2*sizeof(float));

  unsigned int number = 0;
  const unsigned int quarterPoints = num_points / 4;

  __m256 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

  const lv_32fc_t* a = input;
  const lv_32fc_t* b = taps;

  dotProdVal = _mm256_setzero_ps();

  for(;number < quarterPoints; number++){

    x = _mm256_loadu_ps((float*)a); // ar,ai,br,bi,er,ei,fr,fi
    y = _mm256_loadu_ps((float*)b); // cr,ci,dr,di,gr,gi,hr,hi

    yl = _mm256_moveldup_ps(y);     // cr,cr,dr,dr,gr,gr,hr,hr
    yh = _mm256_movehdup_ps(y);     // ci,ci,di,di,gi,gi,hi,hi

    tmp1 = _mm256_mul_ps(x,yl);     // ar*cr,ai*cr,br*dr,bi*dr,...

    x = _mm256_shuffle_ps(x,x,0xB1); // ai,ar,bi,br,ei,er,fi,fr

    tmp2 = _mm256_mul_ps(x,yh);     // ai*ci,ar*ci,bi*di,br*di,...

    z = _mm256_addsub_ps(tmp1,tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, ...

    dotProdVal = _mm256_add_ps(dotProdVal, z); // accumulate

    a += 4;
    b += 4;
  }

  __VOLK_ATTR_ALIGNED(32) lv_32fc_t dotProductVector[4];

  _mm256_storeu_ps((float*)dotProductVector,dotProdVal); // spill accumulator

  dotProduct += ( dotProductVector[0] + dotProductVector[1] + dotProductVector[2] + dotProductVector[3]);

  /* scalar tail for num_points not a multiple of 4 */
  for(i = num_points-isodd; i < num_points; i++) {
    dotProduct += input[i] * taps[i];
  }

  *result = dotProduct;
}

#endif /*LV_HAVE_AVX*/
#if LV_HAVE_AVX && LV_HAVE_FMA

#include <immintrin.h>

/*!
  \brief AVX + FMA dot product, unaligned loads. Same data layout as the
  AVX kernel, but folds the real multiply into _mm256_fmaddsub_ps.
*/
static inline void volk_32fc_x2_dot_prod_32fc_u_avx_fma(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  unsigned int isodd = num_points & 3;
  unsigned int i = 0;
  lv_32fc_t dotProduct;
  memset(&dotProduct, 0x0, 2*sizeof(float));

  unsigned int number = 0;
  const unsigned int quarterPoints = num_points / 4;

  __m256 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

  const lv_32fc_t* a = input;
  const lv_32fc_t* b = taps;

  dotProdVal = _mm256_setzero_ps();

  for(;number < quarterPoints; number++){

    x = _mm256_loadu_ps((float*)a);
    y = _mm256_loadu_ps((float*)b);

    yl = _mm256_moveldup_ps(y); // duplicated real parts of taps
    yh = _mm256_movehdup_ps(y); // duplicated imag parts of taps

    tmp1 = x;                   // keep un-shuffled input for the fma

    x = _mm256_shuffle_ps(x,x,0xB1); // swap re/im within each complex

    tmp2 = _mm256_mul_ps(x,yh);

    /* tmp1*yl -/+ tmp2 : real/imag interleaved complex products */
    z = _mm256_fmaddsub_ps(tmp1, yl, tmp2);

    dotProdVal = _mm256_add_ps(dotProdVal, z);

    a += 4;
    b += 4;
  }

  __VOLK_ATTR_ALIGNED(32) lv_32fc_t dotProductVector[4];

  _mm256_storeu_ps((float*)dotProductVector,dotProdVal);

  dotProduct += ( dotProductVector[0] + dotProductVector[1] + dotProductVector[2] + dotProductVector[3]);

  /* scalar tail for num_points not a multiple of 4 */
  for(i = num_points-isodd; i < num_points; i++) {
    dotProduct += input[i] * taps[i];
  }

  *result = dotProduct;
}

#endif /* LV_HAVE_AVX && LV_HAVE_FMA */
492 #ifndef INCLUDED_volk_32fc_x2_dot_prod_32fc_a_H 493 #define INCLUDED_volk_32fc_x2_dot_prod_32fc_a_H 501 #ifdef LV_HAVE_GENERIC 506 const unsigned int num_bytes = num_points*8;
508 float * res = (
float*) result;
509 float * in = (
float*) input;
510 float * tp = (
float*) taps;
511 unsigned int n_2_ccomplex_blocks = num_bytes >> 4;
512 unsigned int isodd = num_points & 1;
514 float sum0[2] = {0,0};
515 float sum1[2] = {0,0};
518 for(i = 0; i < n_2_ccomplex_blocks; ++
i) {
519 sum0[0] += in[0] * tp[0] - in[1] * tp[1];
520 sum0[1] += in[0] * tp[1] + in[1] * tp[0];
521 sum1[0] += in[2] * tp[2] - in[3] * tp[3];
522 sum1[1] += in[2] * tp[3] + in[3] * tp[2];
528 res[0] = sum0[0] + sum1[0];
529 res[1] = sum0[1] + sum1[1];
531 for(i = 0; i < isodd; ++
i) {
532 *result += input[num_points - 1] * taps[num_points - 1];
#if LV_HAVE_SSE && LV_HAVE_64

/*!
  \brief SSE x86-64 inline-assembly dot product, aligned loads (movaps) —
  input and taps must be 16-byte aligned. Odd tail handled in C.
  NOTE(review): loop labels and counter logic reconstructed from the
  canonical VOLK source; confirm against upstream.
*/
static inline void volk_32fc_x2_dot_prod_32fc_a_sse_64(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  const unsigned int num_bytes = num_points*8;
  unsigned int isodd = num_points & 1;

  __VOLK_ASM
    (
     "#  ccomplex_dotprod_generic (float* result, const float *input,\n\t"
     "#                         const float *taps, unsigned num_bytes)\n\t"
     "#    float sum0 = 0;\n\t"
     "#    float sum1 = 0;\n\t"
     "#    float sum2 = 0;\n\t"
     "#    float sum3 = 0;\n\t"
     "#    do {\n\t"
     "#      sum0 += input[0] * taps[0] - input[1] * taps[1];\n\t"
     "#      sum1 += input[0] * taps[1] + input[1] * taps[0];\n\t"
     "#      sum2 += input[2] * taps[2] - input[3] * taps[3];\n\t"
     "#      sum3 += input[2] * taps[3] + input[3] * taps[2];\n\t"
     "#      input += 4;\n\t"
     "#      taps += 4;\n\t"
     "#    } while (--n_2_ccomplex_blocks != 0);\n\t"
     "#    result[0] = sum0 + sum2;\n\t"
     "#    result[1] = sum1 + sum3;\n\t"
     "# TODO: prefetch and better scheduling\n\t"
     "  xor %%r9, %%r9\n\t"
     "  xor %%r10, %%r10\n\t"
     "  movq %%rcx, %%rax\n\t"
     "  movq %%rcx, %%r8\n\t"
     "  movq %[rsi], %%r9\n\t"
     "  movq %[rdx], %%r10\n\t"
     "  xorps %%xmm6, %%xmm6   # zero accumulators\n\t"
     "  movaps 0(%%r9), %%xmm0\n\t"
     "  xorps %%xmm7, %%xmm7   # zero accumulators\n\t"
     "  movaps 0(%%r10), %%xmm2\n\t"
     "  shr $5, %%rax          # rax = n_2_ccomplex_blocks / 2\n\t"
     "  shr $4, %%r8\n\t"
     "  jmp .%=L1_test\n\t"
     "  # 4 taps / loop\n\t"
     "  # something like ?? cycles / loop\n\t"
     ".%=Loop1:\n\t"
     "# complex prod: C += A * B,  w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
     "#  movaps (%%r9), %%xmmA\n\t"
     "#  movaps (%%r10), %%xmmB\n\t"
     "#  movaps %%xmmA, %%xmmZ\n\t"
     "#  shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
     "#  mulps %%xmmB, %%xmmA\n\t"
     "#  mulps %%xmmZ, %%xmmB\n\t"
     "#  # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
     "#  xorps %%xmmPN, %%xmmA\n\t"
     "#  movaps %%xmmA, %%xmmZ\n\t"
     "#  unpcklps %%xmmB, %%xmmA\n\t"
     "#  unpckhps %%xmmB, %%xmmZ\n\t"
     "#  movaps %%xmmZ, %%xmmY\n\t"
     "#  shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
     "#  shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
     "#  addps %%xmmZ, %%xmmA\n\t"
     "#  addps %%xmmA, %%xmmC\n\t"
     "# A=xmm0, B=xmm2, Z=xmm4\n\t"
     "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
     "  movaps 16(%%r9), %%xmm1\n\t"
     "  movaps %%xmm0, %%xmm4\n\t"
     "  mulps %%xmm2, %%xmm0\n\t"
     "  shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
     "  movaps 16(%%r10), %%xmm3\n\t"
     "  movaps %%xmm1, %%xmm5\n\t"
     "  addps %%xmm0, %%xmm6\n\t"
     "  mulps %%xmm3, %%xmm1\n\t"
     "  shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
     "  addps %%xmm1, %%xmm6\n\t"
     "  mulps %%xmm4, %%xmm2\n\t"
     "  movaps 32(%%r9), %%xmm0\n\t"
     "  addps %%xmm2, %%xmm7\n\t"
     "  mulps %%xmm5, %%xmm3\n\t"
     "  add $32, %%r9\n\t"
     "  movaps 32(%%r10), %%xmm2\n\t"
     "  addps %%xmm3, %%xmm7\n\t"
     "  add $32, %%r10\n\t"
     ".%=L1_test:\n\t"
     "  dec %%rax\n\t"
     "  jge .%=Loop1\n\t"
     "  # We've handled the bulk of multiplies up to here.\n\t"
     "  # Let's sse if original n_2_ccomplex_blocks was odd.\n\t"
     "  # If so, we've got 2 more taps to do.\n\t"
     "  and $1, %%r8\n\t"
     "  je .%=Leven\n\t"
     "  # The count was odd, do 2 more taps.\n\t"
     "  # Note that we've already got mm0/mm2 preloaded\n\t"
     "  # from the main loop.\n\t"
     "  movaps %%xmm0, %%xmm4\n\t"
     "  mulps %%xmm2, %%xmm0\n\t"
     "  shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
     "  addps %%xmm0, %%xmm6\n\t"
     "  mulps %%xmm4, %%xmm2\n\t"
     "  addps %%xmm2, %%xmm7\n\t"
     ".%=Leven:\n\t"
     "  # neg inversor\n\t"
     "  xorps %%xmm1, %%xmm1\n\t"
     "  mov $0x80000000, %%r9\n\t"
     "  movd %%r9, %%xmm1\n\t"
     "  shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
     "  # pfpnacc\n\t"
     "  xorps %%xmm1, %%xmm6\n\t"
     "  movaps %%xmm6, %%xmm2\n\t"
     "  unpcklps %%xmm7, %%xmm6\n\t"
     "  unpckhps %%xmm7, %%xmm2\n\t"
     "  movaps %%xmm2, %%xmm3\n\t"
     "  shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
     "  shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
     "  addps %%xmm2, %%xmm6\n\t"
     "  # xmm6 = r1 i2 r3 i4\n\t"
     "  movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
     "  addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
     "  movlps %%xmm6, (%[rdi]) # store low 2x32 bits (complex) to memory\n\t"
     :
     :[rsi] "r" (input), [rdx] "r" (taps), "c" (num_bytes), [rdi] "r" (result)
     :"rax", "r8", "r9", "r10"
     );

  /* The asm only covers whole pairs; add the odd trailing sample in C. */
  if(isodd) {
    *result += input[num_points - 1] * taps[num_points - 1];
  }

  return;
}

#endif /* LV_HAVE_SSE && LV_HAVE_64 */
#if LV_HAVE_SSE && LV_HAVE_32

/*!
  \brief SSE x86-32 inline-assembly dot product, aligned loads.
  NOTE(review): reads arguments through ebp frame offsets (8/12/16/20) and
  scribbles a 0x80000000 scratch word through *result before the final
  store — only valid when compiled with a frame pointer and cdecl layout;
  loop labels reconstructed from the canonical VOLK source. The unused
  `int getem = num_bytes %% 16;` local from the original has been dropped.
*/
static inline void volk_32fc_x2_dot_prod_32fc_a_sse_32(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  const unsigned int num_bytes = num_points*8;
  unsigned int isodd = num_points & 1;

  __VOLK_ASM __VOLK_VOLATILE
    (
     "  #pushl %%ebp\n\t"
     "  #movl %%esp, %%ebp\n\t"
     "  movl 12(%%ebp), %%eax # input\n\t"
     "  movl 16(%%ebp), %%edx # taps\n\t"
     "  movl 20(%%ebp), %%ecx # n_bytes\n\t"
     "  xorps %%xmm6, %%xmm6 # zero accumulators\n\t"
     "  movaps 0(%%eax), %%xmm0\n\t"
     "  xorps %%xmm7, %%xmm7 # zero accumulators\n\t"
     "  movaps 0(%%edx), %%xmm2\n\t"
     "  shrl $5, %%ecx # ecx = n_2_ccomplex_blocks / 2\n\t"
     "  jmp .%=L1_test\n\t"
     "  # 4 taps / loop\n\t"
     "  # something like ?? cycles / loop\n\t"
     ".%=Loop1:\n\t"
     "# complex prod: C += A * B,  w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
     "#  movaps (%%eax), %%xmmA\n\t"
     "#  movaps (%%edx), %%xmmB\n\t"
     "#  movaps %%xmmA, %%xmmZ\n\t"
     "#  shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
     "#  mulps %%xmmB, %%xmmA\n\t"
     "#  mulps %%xmmZ, %%xmmB\n\t"
     "#  # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
     "#  xorps %%xmmPN, %%xmmA\n\t"
     "#  movaps %%xmmA, %%xmmZ\n\t"
     "#  unpcklps %%xmmB, %%xmmA\n\t"
     "#  unpckhps %%xmmB, %%xmmZ\n\t"
     "#  movaps %%xmmZ, %%xmmY\n\t"
     "#  shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
     "#  shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
     "#  addps %%xmmZ, %%xmmA\n\t"
     "#  addps %%xmmA, %%xmmC\n\t"
     "# A=xmm0, B=xmm2, Z=xmm4\n\t"
     "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
     "  movaps 16(%%eax), %%xmm1\n\t"
     "  movaps %%xmm0, %%xmm4\n\t"
     "  mulps %%xmm2, %%xmm0\n\t"
     "  shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
     "  movaps 16(%%edx), %%xmm3\n\t"
     "  movaps %%xmm1, %%xmm5\n\t"
     "  addps %%xmm0, %%xmm6\n\t"
     "  mulps %%xmm3, %%xmm1\n\t"
     "  shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
     "  addps %%xmm1, %%xmm6\n\t"
     "  mulps %%xmm4, %%xmm2\n\t"
     "  movaps 32(%%eax), %%xmm0\n\t"
     "  addps %%xmm2, %%xmm7\n\t"
     "  mulps %%xmm5, %%xmm3\n\t"
     "  addl $32, %%eax\n\t"
     "  movaps 32(%%edx), %%xmm2\n\t"
     "  addps %%xmm3, %%xmm7\n\t"
     "  addl $32, %%edx\n\t"
     ".%=L1_test:\n\t"
     "  decl %%ecx\n\t"
     "  jge .%=Loop1\n\t"
     "  # We've handled the bulk of multiplies up to here.\n\t"
     "  # Let's sse if original n_2_ccomplex_blocks was odd.\n\t"
     "  # If so, we've got 2 more taps to do.\n\t"
     "  movl 20(%%ebp), %%ecx # n_2_ccomplex_blocks\n\t"
     "  shrl $4, %%ecx\n\t"
     "  andl $1, %%ecx\n\t"
     "  je .%=Leven\n\t"
     "  # The count was odd, do 2 more taps.\n\t"
     "  # Note that we've already got mm0/mm2 preloaded\n\t"
     "  # from the main loop.\n\t"
     "  movaps %%xmm0, %%xmm4\n\t"
     "  mulps %%xmm2, %%xmm0\n\t"
     "  shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
     "  addps %%xmm0, %%xmm6\n\t"
     "  mulps %%xmm4, %%xmm2\n\t"
     "  addps %%xmm2, %%xmm7\n\t"
     ".%=Leven:\n\t"
     "  # neg inversor\n\t"
     "  movl 8(%%ebp), %%eax \n\t"
     "  xorps %%xmm1, %%xmm1\n\t"
     "  movl $0x80000000, (%%eax)\n\t"
     "  movss (%%eax), %%xmm1\n\t"
     "  shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
     "  # pfpnacc\n\t"
     "  xorps %%xmm1, %%xmm6\n\t"
     "  movaps %%xmm6, %%xmm2\n\t"
     "  unpcklps %%xmm7, %%xmm6\n\t"
     "  unpckhps %%xmm7, %%xmm2\n\t"
     "  movaps %%xmm2, %%xmm3\n\t"
     "  shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
     "  shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
     "  addps %%xmm2, %%xmm6\n\t"
     "  # xmm6 = r1 i2 r3 i4\n\t"
     "  #movl 8(%%ebp), %%eax # @result\n\t"
     "  movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
     "  addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
     "  movlps %%xmm6, (%%eax) # store low 2x32 bits (complex) to memory\n\t"
     "  #popl %%ebp\n\t"
     :
     :
     : "eax", "ecx", "edx"
     );

  /* The asm only covers whole pairs; add the odd trailing sample in C. */
  if(isodd) {
    *result += (input[num_points - 1] * taps[num_points - 1]);
  }

  return;
}

#endif /* LV_HAVE_SSE && LV_HAVE_32 */
#ifdef LV_HAVE_SSE3

#include <pmmintrin.h>

/*!
  \brief SSE3 dot product, aligned loads (input/taps must be 16-byte
  aligned). 2 complex samples per iteration; odd tail handled in C.
*/
static inline void volk_32fc_x2_dot_prod_32fc_a_sse3(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  const unsigned int num_bytes = num_points*8;
  unsigned int isodd = num_points & 1;

  lv_32fc_t dotProduct;
  memset(&dotProduct, 0x0, 2*sizeof(float));

  unsigned int number = 0;
  const unsigned int halfPoints = num_bytes >> 4; /* = num_points/2 */

  __m128 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

  const lv_32fc_t* a = input;
  const lv_32fc_t* b = taps;

  dotProdVal = _mm_setzero_ps();

  for(;number < halfPoints; number++){

    x = _mm_load_ps((float*)a); // ar,ai,br,bi
    y = _mm_load_ps((float*)b); // cr,ci,dr,di

    yl = _mm_moveldup_ps(y);    // cr,cr,dr,dr
    yh = _mm_movehdup_ps(y);    // ci,ci,di,di

    tmp1 = _mm_mul_ps(x,yl);    // ar*cr,ai*cr,br*dr,bi*dr

    x = _mm_shuffle_ps(x,x,0xB1); // ai,ar,bi,br

    tmp2 = _mm_mul_ps(x,yh);    // ai*ci,ar*ci,bi*di,br*di

    z = _mm_addsub_ps(tmp1,tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, ...

    dotProdVal = _mm_add_ps(dotProdVal, z); // accumulate

    a += 2;
    b += 2;
  }

  __VOLK_ATTR_ALIGNED(16) lv_32fc_t dotProductVector[2];

  _mm_store_ps((float*)dotProductVector,dotProdVal); // spill accumulator

  dotProduct += ( dotProductVector[0] + dotProductVector[1] );

  /* odd num_points: one sample left over */
  if(isodd) {
    dotProduct += input[num_points - 1] * taps[num_points - 1];
  }

  *result = dotProduct;
}

#endif /*LV_HAVE_SSE3*/
#ifdef LV_HAVE_SSE4_1

#include <smmintrin.h>

/*!
  \brief SSE4.1 dot product, aligned loads. Deinterleaves 4 complex
  samples into real/imag lanes and uses _mm_dp_ps for the partial sums.
*/
static inline void volk_32fc_x2_dot_prod_32fc_a_sse4_1(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  unsigned int i = 0;
  const unsigned int qtr_points = num_points/4;
  const unsigned int isodd = num_points & 3;

  __m128 xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, real0, real1, im0, im1;
  float *p_input, *p_taps;
  __m64* p_result;

  /* NOTE(review): literal wider than 64 bits — compilers truncate it to
     {0x80000000, 0}, i.e. a sign bit in lane 0 only; verify vs. upstream. */
  static const __m128i neg = {0x000000000000000080000000};

  p_result = (__m64*)result;
  p_input = (float*)input;
  p_taps = (float*)taps;

  real0 = _mm_setzero_ps();
  real1 = _mm_setzero_ps();
  im0 = _mm_setzero_ps();
  im1 = _mm_setzero_ps();

  for(; i < qtr_points; ++i) {
    xmm0 = _mm_load_ps(p_input);
    xmm1 = _mm_load_ps(p_taps);

    p_input += 4;
    p_taps += 4;

    xmm2 = _mm_load_ps(p_input);
    xmm3 = _mm_load_ps(p_taps);

    p_input += 4;
    p_taps += 4;

    xmm4 = _mm_unpackhi_ps(xmm0, xmm2);
    xmm5 = _mm_unpackhi_ps(xmm1, xmm3);
    xmm0 = _mm_unpacklo_ps(xmm0, xmm2);
    xmm2 = _mm_unpacklo_ps(xmm1, xmm3);

    // imaginary vector from input
    xmm1 = _mm_unpackhi_ps(xmm0, xmm4);
    // real vector from input
    xmm3 = _mm_unpacklo_ps(xmm0, xmm4);
    // imaginary vector from taps
    xmm0 = _mm_unpackhi_ps(xmm2, xmm5);
    // real vector from taps
    xmm2 = _mm_unpacklo_ps(xmm2, xmm5);

    xmm4 = _mm_dp_ps(xmm3, xmm2, 0xf1); // sum(rr), result in lane 0
    xmm5 = _mm_dp_ps(xmm1, xmm0, 0xf1); // sum(ii), result in lane 0

    xmm6 = _mm_dp_ps(xmm3, xmm0, 0xf2); // sum(ri), result in lane 1
    xmm7 = _mm_dp_ps(xmm1, xmm2, 0xf2); // sum(ir), result in lane 1

    real0 = _mm_add_ps(xmm4, real0);
    real1 = _mm_add_ps(xmm5, real1);
    im0 = _mm_add_ps(xmm6, im0);
    im1 = _mm_add_ps(xmm7, im1);
  }

  /* real part = sum(rr) - sum(ii): flip the sign of real1's lane 0 */
  real1 = _mm_xor_ps(real1, bit128_p(&neg)->float_vec);

  im0 = _mm_add_ps(im0, im1);
  real0 = _mm_add_ps(real0, real1);

  im0 = _mm_add_ps(im0, real0); // lane 0 = real, lane 1 = imag

  _mm_storel_pi(p_result, im0);

  /* scalar tail for num_points not a multiple of 4 */
  for(i = num_points-isodd; i < num_points; i++) {
    *result += input[i] * taps[i];
  }
}

#endif /*LV_HAVE_SSE4_1*/
#ifdef LV_HAVE_NEON

#include <arm_neon.h>

/*!
  \brief NEON dot product. vld2q deinterleaves 4 complex samples into a
  real lane vector (val[0]) and imag lane vector (val[1]); products are
  combined with sub/add and accumulated; scalar tail for the remainder.
*/
static inline void volk_32fc_x2_dot_prod_32fc_neon(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

    unsigned int quarter_points = num_points / 4;
    unsigned int number;

    lv_32fc_t* a_ptr = (lv_32fc_t*) taps;
    lv_32fc_t* b_ptr = (lv_32fc_t*) input;

    // val[0] holds real parts, val[1] holds imaginary parts
    float32x4x2_t a_val, b_val, c_val, accumulator;
    float32x4x2_t tmp_real, tmp_imag;
    accumulator.val[0] = vdupq_n_f32(0);
    accumulator.val[1] = vdupq_n_f32(0);

    for(number = 0; number < quarter_points; ++number) {
        a_val = vld2q_f32((float*)a_ptr); // a0r..a3r | a0i..a3i
        b_val = vld2q_f32((float*)b_ptr); // b0r..b3r | b0i..b3i
        __VOLK_PREFETCH(a_ptr+8);
        __VOLK_PREFETCH(b_ptr+8);

        // real*real and imag*imag contribute to the real result
        tmp_real.val[0] = vmulq_f32(a_val.val[0], b_val.val[0]);
        tmp_real.val[1] = vmulq_f32(a_val.val[1], b_val.val[1]);

        // cross terms contribute to the imaginary result
        tmp_imag.val[0] = vmulq_f32(a_val.val[0], b_val.val[1]);
        tmp_imag.val[1] = vmulq_f32(a_val.val[1], b_val.val[0]);

        c_val.val[0] = vsubq_f32(tmp_real.val[0], tmp_real.val[1]);
        c_val.val[1] = vaddq_f32(tmp_imag.val[0], tmp_imag.val[1]);

        accumulator.val[0] = vaddq_f32(accumulator.val[0], c_val.val[0]);
        accumulator.val[1] = vaddq_f32(accumulator.val[1], c_val.val[1]);

        a_ptr += 4;
        b_ptr += 4;
    }
    lv_32fc_t accum_result[4];
    vst2q_f32((float*)accum_result, accumulator);
    *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

    // scalar tail
    for(number = quarter_points*4; number < num_points; ++number) {
        *result += (*a_ptr++) * (*b_ptr++);
    }
}

#endif /*LV_HAVE_NEON*/
#ifdef LV_HAVE_NEON

#include <arm_neon.h>

/*!
  \brief NEON dot product variant using fused multiply-accumulate
  (vmlaq/vmlsq) to halve the arithmetic instruction count versus the
  mul+add/sub form.
*/
static inline void volk_32fc_x2_dot_prod_32fc_neon_opttests(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

    unsigned int quarter_points = num_points / 4;
    unsigned int number;

    lv_32fc_t* a_ptr = (lv_32fc_t*) taps;
    lv_32fc_t* b_ptr = (lv_32fc_t*) input;

    // val[0] holds real parts, val[1] holds imaginary parts
    float32x4x2_t a_val, b_val, accumulator;
    float32x4x2_t tmp_imag;
    accumulator.val[0] = vdupq_n_f32(0);
    accumulator.val[1] = vdupq_n_f32(0);

    for(number = 0; number < quarter_points; ++number) {
        a_val = vld2q_f32((float*)a_ptr); // a0r..a3r | a0i..a3i
        b_val = vld2q_f32((float*)b_ptr); // b0r..b3r | b0i..b3i
        __VOLK_PREFETCH(a_ptr+8);
        __VOLK_PREFETCH(b_ptr+8);

        // first multiplies
        tmp_imag.val[1] = vmulq_f32(a_val.val[1], b_val.val[0]); // ai*br
        tmp_imag.val[0] = vmulq_f32(a_val.val[0], b_val.val[0]); // ar*br

        // fused multiply-accumulate of the cross/imag terms
        tmp_imag.val[1] = vmlaq_f32(tmp_imag.val[1], a_val.val[0], b_val.val[1]); // + ar*bi
        tmp_imag.val[0] = vmlsq_f32(tmp_imag.val[0], a_val.val[1], b_val.val[1]); // - ai*bi

        accumulator.val[0] = vaddq_f32(accumulator.val[0], tmp_imag.val[0]);
        accumulator.val[1] = vaddq_f32(accumulator.val[1], tmp_imag.val[1]);

        a_ptr += 4;
        b_ptr += 4;
    }
    lv_32fc_t accum_result[4];
    vst2q_f32((float*)accum_result, accumulator);
    *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

    // scalar tail
    for(number = quarter_points*4; number < num_points; ++number) {
        *result += (*a_ptr++) * (*b_ptr++);
    }
}

#endif /*LV_HAVE_NEON*/
#ifdef LV_HAVE_NEON

/*!
  \brief NEON dot product variant accumulating directly into two
  multiply-accumulate register pairs (no per-iteration add), merged once
  after the loop.
*/
static inline void volk_32fc_x2_dot_prod_32fc_neon_optfma(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

    unsigned int quarter_points = num_points / 4;
    unsigned int number;

    lv_32fc_t* a_ptr = (lv_32fc_t*) taps;
    lv_32fc_t* b_ptr = (lv_32fc_t*) input;

    // val[0] holds real parts, val[1] holds imaginary parts
    float32x4x2_t a_val, b_val, accumulator1, accumulator2;
    accumulator1.val[0] = vdupq_n_f32(0);
    accumulator1.val[1] = vdupq_n_f32(0);
    accumulator2.val[0] = vdupq_n_f32(0);
    accumulator2.val[1] = vdupq_n_f32(0);

    for(number = 0; number < quarter_points; ++number) {
        a_val = vld2q_f32((float*)a_ptr); // a0r..a3r | a0i..a3i
        b_val = vld2q_f32((float*)b_ptr); // b0r..b3r | b0i..b3i
        __VOLK_PREFETCH(a_ptr+8);
        __VOLK_PREFETCH(b_ptr+8);

        // accumulate the four partial products directly
        accumulator1.val[0] = vmlaq_f32(accumulator1.val[0], a_val.val[0], b_val.val[0]); // + ar*br
        accumulator1.val[1] = vmlaq_f32(accumulator1.val[1], a_val.val[0], b_val.val[1]); // + ar*bi
        accumulator2.val[0] = vmlsq_f32(accumulator2.val[0], a_val.val[1], b_val.val[1]); // - ai*bi
        accumulator2.val[1] = vmlaq_f32(accumulator2.val[1], a_val.val[1], b_val.val[0]); // + ai*br

        a_ptr += 4;
        b_ptr += 4;
    }
    accumulator1.val[0] = vaddq_f32(accumulator1.val[0], accumulator2.val[0]);
    accumulator1.val[1] = vaddq_f32(accumulator1.val[1], accumulator2.val[1]);
    lv_32fc_t accum_result[4];
    vst2q_f32((float*)accum_result, accumulator1);
    *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

    // scalar tail
    for(number = quarter_points*4; number < num_points; ++number) {
        *result += (*a_ptr++) * (*b_ptr++);
    }
}

#endif /*LV_HAVE_NEON*/
#ifdef LV_HAVE_NEON

/*!
  \brief NEON dot product, 2x unrolled fused multiply-accumulate variant:
  vld4q pulls 8 complex samples per iteration into four lane vectors
  (r0,i0,r1,i1), with eight accumulators merged after the loop.
*/
static inline void volk_32fc_x2_dot_prod_32fc_neon_optfmaunroll(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

    unsigned int quarter_points = num_points / 8;
    unsigned int number;

    lv_32fc_t* a_ptr = (lv_32fc_t*) taps;
    lv_32fc_t* b_ptr = (lv_32fc_t*) input;

    float32x4x4_t a_val, b_val, accumulator1, accumulator2;
    float32x4x2_t reduced_accumulator;
    accumulator1.val[0] = vdupq_n_f32(0);
    accumulator1.val[1] = vdupq_n_f32(0);
    accumulator1.val[2] = vdupq_n_f32(0);
    accumulator1.val[3] = vdupq_n_f32(0);
    accumulator2.val[0] = vdupq_n_f32(0);
    accumulator2.val[1] = vdupq_n_f32(0);
    accumulator2.val[2] = vdupq_n_f32(0);
    accumulator2.val[3] = vdupq_n_f32(0);

    // 8 input regs, 8 accumulators -> 16/16 NEON regs in use
    for(number = 0; number < quarter_points; ++number) {
        a_val = vld4q_f32((float*)a_ptr); // r0 | i0 | r1 | i1 lanes
        b_val = vld4q_f32((float*)b_ptr);
        __VOLK_PREFETCH(a_ptr+8);
        __VOLK_PREFETCH(b_ptr+8);

        // even-sample partial products
        accumulator1.val[0] = vmlaq_f32(accumulator1.val[0], a_val.val[0], b_val.val[0]); // + ar*br
        accumulator1.val[1] = vmlaq_f32(accumulator1.val[1], a_val.val[0], b_val.val[1]); // + ar*bi
        // odd-sample partial products
        accumulator1.val[2] = vmlaq_f32(accumulator1.val[2], a_val.val[2], b_val.val[2]);
        accumulator1.val[3] = vmlaq_f32(accumulator1.val[3], a_val.val[2], b_val.val[3]);

        accumulator2.val[0] = vmlsq_f32(accumulator2.val[0], a_val.val[1], b_val.val[1]); // - ai*bi
        accumulator2.val[1] = vmlaq_f32(accumulator2.val[1], a_val.val[1], b_val.val[0]); // + ai*br

        accumulator2.val[2] = vmlsq_f32(accumulator2.val[2], a_val.val[3], b_val.val[3]);
        accumulator2.val[3] = vmlaq_f32(accumulator2.val[3], a_val.val[3], b_val.val[2]);

        a_ptr += 8;
        b_ptr += 8;
    }
    // merge the eight accumulators down to one real and one imag vector
    accumulator1.val[0] = vaddq_f32(accumulator1.val[0], accumulator1.val[2]);
    accumulator1.val[1] = vaddq_f32(accumulator1.val[1], accumulator1.val[3]);
    accumulator2.val[0] = vaddq_f32(accumulator2.val[0], accumulator2.val[2]);
    accumulator2.val[1] = vaddq_f32(accumulator2.val[1], accumulator2.val[3]);
    reduced_accumulator.val[0] = vaddq_f32(accumulator1.val[0], accumulator2.val[0]);
    reduced_accumulator.val[1] = vaddq_f32(accumulator1.val[1], accumulator2.val[1]);

    lv_32fc_t accum_result[4];
    vst2q_f32((float*)accum_result, reduced_accumulator);
    *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

    // scalar tail for num_points not a multiple of 8
    for(number = quarter_points*8; number < num_points; ++number) {
        *result += (*a_ptr++) * (*b_ptr++);
    }
}

#endif /*LV_HAVE_NEON*/
#ifdef LV_HAVE_AVX

#include <immintrin.h>

/*!
  \brief AVX dot product, aligned loads (input/taps must be 32-byte
  aligned). 4 complex samples per iteration; scalar tail for remainder.
*/
static inline void volk_32fc_x2_dot_prod_32fc_a_avx(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  unsigned int isodd = num_points & 3;
  unsigned int i = 0;
  lv_32fc_t dotProduct;
  memset(&dotProduct, 0x0, 2*sizeof(float));

  unsigned int number = 0;
  const unsigned int quarterPoints = num_points / 4;

  __m256 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

  const lv_32fc_t* a = input;
  const lv_32fc_t* b = taps;

  dotProdVal = _mm256_setzero_ps();

  for(;number < quarterPoints; number++){

    x = _mm256_load_ps((float*)a); // ar,ai,br,bi,er,ei,fr,fi
    y = _mm256_load_ps((float*)b); // cr,ci,dr,di,gr,gi,hr,hi

    yl = _mm256_moveldup_ps(y);    // cr,cr,dr,dr,gr,gr,hr,hr
    yh = _mm256_movehdup_ps(y);    // ci,ci,di,di,gi,gi,hi,hi

    tmp1 = _mm256_mul_ps(x,yl);    // ar*cr,ai*cr,br*dr,bi*dr,...

    x = _mm256_shuffle_ps(x,x,0xB1); // ai,ar,bi,br,ei,er,fi,fr

    tmp2 = _mm256_mul_ps(x,yh);    // ai*ci,ar*ci,bi*di,br*di,...

    z = _mm256_addsub_ps(tmp1,tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, ...

    dotProdVal = _mm256_add_ps(dotProdVal, z); // accumulate

    a += 4;
    b += 4;
  }

  __VOLK_ATTR_ALIGNED(32) lv_32fc_t dotProductVector[4];

  _mm256_store_ps((float*)dotProductVector,dotProdVal); // spill accumulator

  dotProduct += ( dotProductVector[0] + dotProductVector[1] + dotProductVector[2] + dotProductVector[3]);

  /* scalar tail for num_points not a multiple of 4 */
  for(i = num_points-isodd; i < num_points; i++) {
    dotProduct += input[i] * taps[i];
  }

  *result = dotProduct;
}

#endif /*LV_HAVE_AVX*/
#if LV_HAVE_AVX && LV_HAVE_FMA

#include <immintrin.h>

/*!
  \brief AVX + FMA dot product, aligned loads. Same data layout as the
  aligned AVX kernel, but uses _mm256_fmaddsub_ps for the combine.
*/
static inline void volk_32fc_x2_dot_prod_32fc_a_avx_fma(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  unsigned int isodd = num_points & 3;
  unsigned int i = 0;
  lv_32fc_t dotProduct;
  memset(&dotProduct, 0x0, 2*sizeof(float));

  unsigned int number = 0;
  const unsigned int quarterPoints = num_points / 4;

  __m256 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

  const lv_32fc_t* a = input;
  const lv_32fc_t* b = taps;

  dotProdVal = _mm256_setzero_ps();

  for(;number < quarterPoints; number++){

    x = _mm256_load_ps((float*)a);
    y = _mm256_load_ps((float*)b);

    yl = _mm256_moveldup_ps(y); // duplicated real parts of taps
    yh = _mm256_movehdup_ps(y); // duplicated imag parts of taps

    tmp1 = x;                   // keep un-shuffled input for the fma

    x = _mm256_shuffle_ps(x,x,0xB1); // swap re/im within each complex

    tmp2 = _mm256_mul_ps(x,yh);

    /* tmp1*yl -/+ tmp2 : real/imag interleaved complex products */
    z = _mm256_fmaddsub_ps(tmp1, yl, tmp2);

    dotProdVal = _mm256_add_ps(dotProdVal, z);

    a += 4;
    b += 4;
  }

  __VOLK_ATTR_ALIGNED(32) lv_32fc_t dotProductVector[4];

  _mm256_store_ps((float*)dotProductVector,dotProdVal);

  dotProduct += ( dotProductVector[0] + dotProductVector[1] + dotProductVector[2] + dotProductVector[3]);

  /* scalar tail for num_points not a multiple of 4 */
  for(i = num_points-isodd; i < num_points; i++) {
    dotProduct += input[i] * taps[i];
  }

  *result = dotProduct;
}

#endif /* LV_HAVE_AVX && LV_HAVE_FMA */
static void volk_32fc_x2_dot_prod_32fc_generic(lv_32fc_t *result, const lv_32fc_t *input, const lv_32fc_t *taps, unsigned int num_points)
Definition: volk_32fc_x2_dot_prod_32fc.h:70
static void volk_32fc_x2_dot_prod_32fc_u_sse3(lv_32fc_t *result, const lv_32fc_t *input, const lv_32fc_t *taps, unsigned int num_points)
Definition: volk_32fc_x2_dot_prod_32fc.h:243
#define __VOLK_ASM
Definition: volk_common.h:40
static void volk_32fc_x2_dot_prod_32fc_a_sse3(lv_32fc_t *result, const lv_32fc_t *input, const lv_32fc_t *taps, unsigned int num_points)
Definition: volk_32fc_x2_dot_prod_32fc.h:797
#define __VOLK_VOLATILE
Definition: volk_common.h:41
#define bit128_p(x)
Definition: volk_common.h:118
static void volk_32fc_x2_dot_prod_32fc_neon_optfma(lv_32fc_t *result, const lv_32fc_t *input, const lv_32fc_t *taps, unsigned int num_points)
Definition: volk_32fc_x2_dot_prod_32fc.h:1039
static void volk_32fc_x2_dot_prod_32fc_a_generic(lv_32fc_t *result, const lv_32fc_t *input, const lv_32fc_t *taps, unsigned int num_points)
Definition: volk_32fc_x2_dot_prod_32fc.h:504
static void volk_32fc_x2_dot_prod_32fc_neon(lv_32fc_t *result, const lv_32fc_t *input, const lv_32fc_t *taps, unsigned int num_points)
Definition: volk_32fc_x2_dot_prod_32fc.h:936
#define __VOLK_PREFETCH(addr)
Definition: volk_common.h:39
for i
Definition: volk_config_fixed.tmpl.h:25
#define __VOLK_ATTR_ALIGNED(x)
Definition: volk_common.h:33
static void volk_32fc_x2_dot_prod_32fc_u_avx(lv_32fc_t *result, const lv_32fc_t *input, const lv_32fc_t *taps, unsigned int num_points)
Definition: volk_32fc_x2_dot_prod_32fc.h:380
float complex lv_32fc_t
Definition: volk_complex.h:61
__m256 float_vec
Definition: volk_common.h:112
static void volk_32fc_x2_dot_prod_32fc_neon_optfmaunroll(lv_32fc_t *result, const lv_32fc_t *input, const lv_32fc_t *taps, unsigned int num_points)
Definition: volk_32fc_x2_dot_prod_32fc.h:1084
static void volk_32fc_x2_dot_prod_32fc_a_avx(lv_32fc_t *result, const lv_32fc_t *input, const lv_32fc_t *taps, unsigned int num_points)
Definition: volk_32fc_x2_dot_prod_32fc.h:1153
static void volk_32fc_x2_dot_prod_32fc_neon_opttests(lv_32fc_t *result, const lv_32fc_t *input, const lv_32fc_t *taps, unsigned int num_points)
Definition: volk_32fc_x2_dot_prod_32fc.h:991