/* VOLK 2.5.0: volk_32fc_x2_dot_prod_32fc.h */
/* -*- c++ -*- */
/*
 * Copyright 2012, 2013, 2014 Free Software Foundation, Inc.
 *
 * This file is part of GNU Radio
 *
 * GNU Radio is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3, or (at your option)
 * any later version.
 *
 * GNU Radio is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU Radio; see the file COPYING. If not, write to
 * the Free Software Foundation, Inc., 51 Franklin Street,
 * Boston, MA 02110-1301, USA.
 */

#ifndef INCLUDED_volk_32fc_x2_dot_prod_32fc_u_H
#define INCLUDED_volk_32fc_x2_dot_prod_32fc_u_H

#include <stdio.h>
#include <string.h>
#include <volk/volk_common.h>
#include <volk/volk_complex.h>


#ifdef LV_HAVE_GENERIC


static inline void volk_32fc_x2_dot_prod_32fc_generic(lv_32fc_t* result,
                                                      const lv_32fc_t* input,
                                                      const lv_32fc_t* taps,
                                                      unsigned int num_points)
{

    float* res = (float*)result;
    float* in = (float*)input;
    float* tp = (float*)taps;
    unsigned int n_2_ccomplex_blocks = num_points / 2;

    float sum0[2] = { 0, 0 };
    float sum1[2] = { 0, 0 };
    unsigned int i = 0;

    for (i = 0; i < n_2_ccomplex_blocks; ++i) {
        sum0[0] += in[0] * tp[0] - in[1] * tp[1];
        sum0[1] += in[0] * tp[1] + in[1] * tp[0];
        sum1[0] += in[2] * tp[2] - in[3] * tp[3];
        sum1[1] += in[2] * tp[3] + in[3] * tp[2];

        in += 4;
        tp += 4;
    }

    res[0] = sum0[0] + sum1[0];
    res[1] = sum0[1] + sum1[1];

    // Cleanup if we had an odd number of points
    if (num_points & 1) {
        *result += input[num_points - 1] * taps[num_points - 1];
    }
}

#endif /*LV_HAVE_GENERIC*/
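
/*
 * Usage sketch (illustrative only, not part of the kernel): the dot product
 * is unconjugated, i.e. it computes sum(input[i] * taps[i]). For example:
 *
 *   lv_32fc_t in[2]   = { lv_cmake(1.f, 2.f), lv_cmake(3.f, 4.f) };
 *   lv_32fc_t taps[2] = { lv_cmake(5.f, 6.f), lv_cmake(7.f, 8.f) };
 *   lv_32fc_t res;
 *   volk_32fc_x2_dot_prod_32fc_generic(&res, in, taps, 2);
 *   // res = (1+2i)(5+6i) + (3+4i)(7+8i) = (-7+16i) + (-11+52i) = -18+68i
 */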


#if LV_HAVE_SSE && LV_HAVE_64

static inline void volk_32fc_x2_dot_prod_32fc_u_sse_64(lv_32fc_t* result,
                                                       const lv_32fc_t* input,
                                                       const lv_32fc_t* taps,
                                                       unsigned int num_points)
{

    const unsigned int num_bytes = num_points * 8;
    unsigned int isodd = num_points & 1;

    __VOLK_ASM(
        "# ccomplex_dotprod_generic (float* result, const float *input,\n\t"
        "# const float *taps, unsigned num_bytes)\n\t"
        "# float sum0 = 0;\n\t"
        "# float sum1 = 0;\n\t"
        "# float sum2 = 0;\n\t"
        "# float sum3 = 0;\n\t"
        "# do {\n\t"
        "# sum0 += input[0] * taps[0] - input[1] * taps[1];\n\t"
        "# sum1 += input[0] * taps[1] + input[1] * taps[0];\n\t"
        "# sum2 += input[2] * taps[2] - input[3] * taps[3];\n\t"
        "# sum3 += input[2] * taps[3] + input[3] * taps[2];\n\t"
        "# input += 4;\n\t"
        "# taps += 4; \n\t"
        "# } while (--n_2_ccomplex_blocks != 0);\n\t"
        "# result[0] = sum0 + sum2;\n\t"
        "# result[1] = sum1 + sum3;\n\t"
        "# TODO: prefetch and better scheduling\n\t"
        " xor %%r9, %%r9\n\t"
        " xor %%r10, %%r10\n\t"
        " movq %%rcx, %%rax\n\t"
        " movq %%rcx, %%r8\n\t"
        " movq %[rsi], %%r9\n\t"
        " movq %[rdx], %%r10\n\t"
        " xorps %%xmm6, %%xmm6 # zero accumulators\n\t"
        " movups 0(%%r9), %%xmm0\n\t"
        " xorps %%xmm7, %%xmm7 # zero accumulators\n\t"
        " movups 0(%%r10), %%xmm2\n\t"
        " shr $5, %%rax # rax = n_2_ccomplex_blocks / 2\n\t"
        " shr $4, %%r8\n\t"
        " jmp .%=L1_test\n\t"
        " # 4 taps / loop\n\t"
        " # something like ?? cycles / loop\n\t"
        ".%=Loop1: \n\t"
        "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
        "# movups (%%r9), %%xmmA\n\t"
        "# movups (%%r10), %%xmmB\n\t"
        "# movups %%xmmA, %%xmmZ\n\t"
        "# shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
        "# mulps %%xmmB, %%xmmA\n\t"
        "# mulps %%xmmZ, %%xmmB\n\t"
        "# # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
        "# xorps %%xmmPN, %%xmmA\n\t"
        "# movups %%xmmA, %%xmmZ\n\t"
        "# unpcklps %%xmmB, %%xmmA\n\t"
        "# unpckhps %%xmmB, %%xmmZ\n\t"
        "# movups %%xmmZ, %%xmmY\n\t"
        "# shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
        "# shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
        "# addps %%xmmZ, %%xmmA\n\t"
        "# addps %%xmmA, %%xmmC\n\t"
        "# A=xmm0, B=xmm2, Z=xmm4\n\t"
        "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
        " movups 16(%%r9), %%xmm1\n\t"
        " movups %%xmm0, %%xmm4\n\t"
        " mulps %%xmm2, %%xmm0\n\t"
        " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
        " movups 16(%%r10), %%xmm3\n\t"
        " movups %%xmm1, %%xmm5\n\t"
        " addps %%xmm0, %%xmm6\n\t"
        " mulps %%xmm3, %%xmm1\n\t"
        " shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
        " addps %%xmm1, %%xmm6\n\t"
        " mulps %%xmm4, %%xmm2\n\t"
        " movups 32(%%r9), %%xmm0\n\t"
        " addps %%xmm2, %%xmm7\n\t"
        " mulps %%xmm5, %%xmm3\n\t"
        " add $32, %%r9\n\t"
        " movups 32(%%r10), %%xmm2\n\t"
        " addps %%xmm3, %%xmm7\n\t"
        " add $32, %%r10\n\t"
        ".%=L1_test:\n\t"
        " dec %%rax\n\t"
        " jge .%=Loop1\n\t"
        " # We've handled the bulk of multiplies up to here.\n\t"
        " # Let's see if original n_2_ccomplex_blocks was odd.\n\t"
        " # If so, we've got 2 more taps to do.\n\t"
        " and $1, %%r8\n\t"
        " je .%=Leven\n\t"
        " # The count was odd, do 2 more taps.\n\t"
        " # Note that we've already got mm0/mm2 preloaded\n\t"
        " # from the main loop.\n\t"
        " movups %%xmm0, %%xmm4\n\t"
        " mulps %%xmm2, %%xmm0\n\t"
        " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
        " addps %%xmm0, %%xmm6\n\t"
        " mulps %%xmm4, %%xmm2\n\t"
        " addps %%xmm2, %%xmm7\n\t"
        ".%=Leven:\n\t"
        " # neg inversor\n\t"
        " xorps %%xmm1, %%xmm1\n\t"
        " mov $0x80000000, %%r9\n\t"
        " movd %%r9, %%xmm1\n\t"
        " shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
        " # pfpnacc\n\t"
        " xorps %%xmm1, %%xmm6\n\t"
        " movups %%xmm6, %%xmm2\n\t"
        " unpcklps %%xmm7, %%xmm6\n\t"
        " unpckhps %%xmm7, %%xmm2\n\t"
        " movups %%xmm2, %%xmm3\n\t"
        " shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
        " shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
        " addps %%xmm2, %%xmm6\n\t"
        " # xmm6 = r1 i2 r3 i4\n\t"
        " movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
        " addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
        " movlps %%xmm6, (%[rdi]) # store low 2x32 bits (complex) to memory\n\t"
        :
        : [rsi] "r"(input), [rdx] "r"(taps), "c"(num_bytes), [rdi] "r"(result)
        : "rax", "r8", "r9", "r10");


    if (isodd) {
        *result += input[num_points - 1] * taps[num_points - 1];
    }

    return;
}

#endif /* LV_HAVE_SSE && LV_HAVE_64 */
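
/*
 * For reference, a hedged C sketch of the block structure the assembly above
 * implements (reconstructed from its commented pseudo-code, so an assumed
 * equivalent rather than a literal translation): the main loop consumes 4
 * complex points (32 bytes) per pass, one leftover 2-point block is handled
 * after the loop, and a final odd point is added back in C via isodd.
 *
 *   unsigned int quads = num_bytes >> 5;            // 4-point loop passes
 *   unsigned int extra_pair = (num_bytes >> 4) & 1; // leftover 2-point block
 *   lv_32fc_t acc = lv_cmake(0.f, 0.f);
 *   for (unsigned int k = 0; k < quads * 4 + extra_pair * 2; k++)
 *       acc += input[k] * taps[k];
 *   if (isodd) // this last step really is done in C, after the asm block
 *       acc += input[num_points - 1] * taps[num_points - 1];
 */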


#ifdef LV_HAVE_SSE3

#include <pmmintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_u_sse3(lv_32fc_t* result,
                                                     const lv_32fc_t* input,
                                                     const lv_32fc_t* taps,
                                                     unsigned int num_points)
{

    lv_32fc_t dotProduct;
    memset(&dotProduct, 0x0, 2 * sizeof(float));

    unsigned int number = 0;
    const unsigned int halfPoints = num_points / 2;
    unsigned int isodd = num_points & 1;

    __m128 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

    const lv_32fc_t* a = input;
    const lv_32fc_t* b = taps;

    dotProdVal = _mm_setzero_ps();

    for (; number < halfPoints; number++) {

        x = _mm_loadu_ps((float*)a); // Load the ar + ai, br + bi as ar,ai,br,bi
        y = _mm_loadu_ps((float*)b); // Load the cr + ci, dr + di as cr,ci,dr,di

        yl = _mm_moveldup_ps(y); // Load yl with cr,cr,dr,dr
        yh = _mm_movehdup_ps(y); // Load yh with ci,ci,di,di

        tmp1 = _mm_mul_ps(x, yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr

        x = _mm_shuffle_ps(x, x, 0xB1); // Re-arrange x to be ai,ar,bi,br

        tmp2 = _mm_mul_ps(x, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di

        z = _mm_addsub_ps(tmp1,
                          tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di

        dotProdVal =
            _mm_add_ps(dotProdVal, z); // Add the complex multiplication results together

        a += 2;
        b += 2;
    }

    __VOLK_ATTR_ALIGNED(16) lv_32fc_t dotProductVector[2];

    _mm_storeu_ps((float*)dotProductVector,
                  dotProdVal); // Store the results back into the dot product vector

    dotProduct += (dotProductVector[0] + dotProductVector[1]);

    if (isodd) {
        dotProduct += input[num_points - 1] * taps[num_points - 1];
    }

    *result = dotProduct;
}

#endif /*LV_HAVE_SSE3*/
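
/*
 * The SSE3 path above leans on the moveldup/movehdup/addsub decomposition of
 * a complex multiply. A scalar sketch of the identity (illustrative only):
 *
 *   // x = xr + i*xi, y = yr + i*yi
 *   float tmp1_re = xr * yr, tmp1_im = xi * yr; // x * moveldup(y)
 *   float tmp2_re = xi * yi, tmp2_im = xr * yi; // swap(x) * movehdup(y)
 *   float z_re = tmp1_re - tmp2_re;             // addsub: subtract, even lane
 *   float z_im = tmp1_im + tmp2_im;             // addsub: add, odd lane
 *   // z_re + i*z_im == x * y
 */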

// #ifdef LV_HAVE_SSE4_1

// #include <smmintrin.h>

// static inline void volk_32fc_x2_dot_prod_32fc_u_sse4_1(lv_32fc_t* result,
//                                                        const lv_32fc_t* input,
//                                                        const lv_32fc_t* taps,
//                                                        unsigned int num_points)
// {

//     unsigned int i = 0;
//     const unsigned int qtr_points = num_points / 4;
//     const unsigned int isodd = num_points & 3;

//     __m128 xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, real0, real1, im0, im1;
//     float *p_input, *p_taps;
//     __m64* p_result;

//     p_result = (__m64*)result;
//     p_input = (float*)input;
//     p_taps = (float*)taps;

//     static const __m128i neg = { 0x000000000000000080000000 };

//     real0 = _mm_setzero_ps();
//     real1 = _mm_setzero_ps();
//     im0 = _mm_setzero_ps();
//     im1 = _mm_setzero_ps();

//     for (; i < qtr_points; ++i) {
//         xmm0 = _mm_loadu_ps(p_input);
//         xmm1 = _mm_loadu_ps(p_taps);

//         p_input += 4;
//         p_taps += 4;

//         xmm2 = _mm_loadu_ps(p_input);
//         xmm3 = _mm_loadu_ps(p_taps);

//         p_input += 4;
//         p_taps += 4;

//         xmm4 = _mm_unpackhi_ps(xmm0, xmm2);
//         xmm5 = _mm_unpackhi_ps(xmm1, xmm3);
//         xmm0 = _mm_unpacklo_ps(xmm0, xmm2);
//         xmm2 = _mm_unpacklo_ps(xmm1, xmm3);

//         // imaginary vector from input
//         xmm1 = _mm_unpackhi_ps(xmm0, xmm4);
//         // real vector from input
//         xmm3 = _mm_unpacklo_ps(xmm0, xmm4);
//         // imaginary vector from taps
//         xmm0 = _mm_unpackhi_ps(xmm2, xmm5);
//         // real vector from taps
//         xmm2 = _mm_unpacklo_ps(xmm2, xmm5);

//         xmm4 = _mm_dp_ps(xmm3, xmm2, 0xf1);
//         xmm5 = _mm_dp_ps(xmm1, xmm0, 0xf1);

//         xmm6 = _mm_dp_ps(xmm3, xmm0, 0xf2);
//         xmm7 = _mm_dp_ps(xmm1, xmm2, 0xf2);

//         real0 = _mm_add_ps(xmm4, real0);
//         real1 = _mm_add_ps(xmm5, real1);
//         im0 = _mm_add_ps(xmm6, im0);
//         im1 = _mm_add_ps(xmm7, im1);
//     }

//     real1 = _mm_xor_ps(real1, bit128_p(&neg)->float_vec);

//     im0 = _mm_add_ps(im0, im1);
//     real0 = _mm_add_ps(real0, real1);

//     im0 = _mm_add_ps(im0, real0);

//     _mm_storel_pi(p_result, im0);

//     for (i = num_points - isodd; i < num_points; i++) {
//         *result += input[i] * taps[i];
//     }
// }

// #endif /*LV_HAVE_SSE4_1*/

#ifdef LV_HAVE_AVX

#include <immintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_u_avx(lv_32fc_t* result,
                                                    const lv_32fc_t* input,
                                                    const lv_32fc_t* taps,
                                                    unsigned int num_points)
{

    unsigned int isodd = num_points & 3;
    unsigned int i = 0;
    lv_32fc_t dotProduct;
    memset(&dotProduct, 0x0, 2 * sizeof(float));

    unsigned int number = 0;
    const unsigned int quarterPoints = num_points / 4;

    __m256 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

    const lv_32fc_t* a = input;
    const lv_32fc_t* b = taps;

    dotProdVal = _mm256_setzero_ps();

    for (; number < quarterPoints; number++) {
        x = _mm256_loadu_ps((float*)a); // Load a,b,e,f as ar,ai,br,bi,er,ei,fr,fi
        y = _mm256_loadu_ps((float*)b); // Load c,d,g,h as cr,ci,dr,di,gr,gi,hr,hi

        yl = _mm256_moveldup_ps(y); // Load yl with cr,cr,dr,dr,gr,gr,hr,hr
        yh = _mm256_movehdup_ps(y); // Load yh with ci,ci,di,di,gi,gi,hi,hi

        tmp1 = _mm256_mul_ps(x, yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr ...

        x = _mm256_shuffle_ps(x, x, 0xB1); // Re-arrange x to be ai,ar,bi,br,ei,er,fi,fr

        tmp2 = _mm256_mul_ps(x, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di ...

        z = _mm256_addsub_ps(tmp1,
                             tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di

        dotProdVal = _mm256_add_ps(dotProdVal,
                                   z); // Add the complex multiplication results together

        a += 4;
        b += 4;
    }

    __VOLK_ATTR_ALIGNED(32) lv_32fc_t dotProductVector[4];

    _mm256_storeu_ps((float*)dotProductVector,
                     dotProdVal); // Store the results back into the dot product vector

    dotProduct += (dotProductVector[0] + dotProductVector[1] + dotProductVector[2] +
                   dotProductVector[3]);

    for (i = num_points - isodd; i < num_points; i++) {
        dotProduct += input[i] * taps[i];
    }

    *result = dotProduct;
}

#endif /*LV_HAVE_AVX*/

#if LV_HAVE_AVX && LV_HAVE_FMA
#include <immintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_u_avx_fma(lv_32fc_t* result,
                                                        const lv_32fc_t* input,
                                                        const lv_32fc_t* taps,
                                                        unsigned int num_points)
{

    unsigned int isodd = num_points & 3;
    unsigned int i = 0;
    lv_32fc_t dotProduct;
    memset(&dotProduct, 0x0, 2 * sizeof(float));

    unsigned int number = 0;
    const unsigned int quarterPoints = num_points / 4;

    __m256 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

    const lv_32fc_t* a = input;
    const lv_32fc_t* b = taps;

    dotProdVal = _mm256_setzero_ps();

    for (; number < quarterPoints; number++) {

        x = _mm256_loadu_ps((float*)a); // Load a,b,e,f as ar,ai,br,bi,er,ei,fr,fi
        y = _mm256_loadu_ps((float*)b); // Load c,d,g,h as cr,ci,dr,di,gr,gi,hr,hi

        yl = _mm256_moveldup_ps(y); // Load yl with cr,cr,dr,dr,gr,gr,hr,hr
        yh = _mm256_movehdup_ps(y); // Load yh with ci,ci,di,di,gi,gi,hi,hi

        tmp1 = x;

        x = _mm256_shuffle_ps(x, x, 0xB1); // Re-arrange x to be ai,ar,bi,br,ei,er,fi,fr

        tmp2 = _mm256_mul_ps(x, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di ...

        z = _mm256_fmaddsub_ps(
            tmp1, yl, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di

        dotProdVal = _mm256_add_ps(dotProdVal,
                                   z); // Add the complex multiplication results together

        a += 4;
        b += 4;
    }

    __VOLK_ATTR_ALIGNED(32) lv_32fc_t dotProductVector[4];

    _mm256_storeu_ps((float*)dotProductVector,
                     dotProdVal); // Store the results back into the dot product vector

    dotProduct += (dotProductVector[0] + dotProductVector[1] + dotProductVector[2] +
                   dotProductVector[3]);

    for (i = num_points - isodd; i < num_points; i++) {
        dotProduct += input[i] * taps[i];
    }

    *result = dotProduct;
}

#endif /*LV_HAVE_AVX && LV_HAVE_FMA*/
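
/*
 * The FMA variant merges tmp1's multiply into the addsub:
 * _mm256_fmaddsub_ps(a, b, c) yields a*b - c in even lanes and a*b + c in
 * odd lanes, so fmaddsub(x, yl, tmp2) equals addsub(mul(x, yl), tmp2) with
 * one fewer multiply and a single rounding. Scalar sketch per complex point
 * (illustrative only):
 *
 *   float z_re = xr * yr - xi * yi; // even lane: fused multiply-subtract
 *   float z_im = xi * yr + xr * yi; // odd lane:  fused multiply-add
 */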

#endif /*INCLUDED_volk_32fc_x2_dot_prod_32fc_u_H*/

#ifndef INCLUDED_volk_32fc_x2_dot_prod_32fc_a_H
#define INCLUDED_volk_32fc_x2_dot_prod_32fc_a_H

#include <stdio.h>
#include <string.h>
#include <volk/volk_common.h>
#include <volk/volk_complex.h>

#ifdef LV_HAVE_GENERIC


static inline void volk_32fc_x2_dot_prod_32fc_a_generic(lv_32fc_t* result,
                                                        const lv_32fc_t* input,
                                                        const lv_32fc_t* taps,
                                                        unsigned int num_points)
{

    const unsigned int num_bytes = num_points * 8;

    float* res = (float*)result;
    float* in = (float*)input;
    float* tp = (float*)taps;
    unsigned int n_2_ccomplex_blocks = num_bytes >> 4;

    float sum0[2] = { 0, 0 };
    float sum1[2] = { 0, 0 };
    unsigned int i = 0;

    for (i = 0; i < n_2_ccomplex_blocks; ++i) {
        sum0[0] += in[0] * tp[0] - in[1] * tp[1];
        sum0[1] += in[0] * tp[1] + in[1] * tp[0];
        sum1[0] += in[2] * tp[2] - in[3] * tp[3];
        sum1[1] += in[2] * tp[3] + in[3] * tp[2];

        in += 4;
        tp += 4;
    }

    res[0] = sum0[0] + sum1[0];
    res[1] = sum0[1] + sum1[1];

    if (num_points & 1) {
        *result += input[num_points - 1] * taps[num_points - 1];
    }
}

#endif /*LV_HAVE_GENERIC*/


#if LV_HAVE_SSE && LV_HAVE_64


static inline void volk_32fc_x2_dot_prod_32fc_a_sse_64(lv_32fc_t* result,
                                                       const lv_32fc_t* input,
                                                       const lv_32fc_t* taps,
                                                       unsigned int num_points)
{

    const unsigned int num_bytes = num_points * 8;
    unsigned int isodd = num_points & 1;

    __VOLK_ASM(
        "# ccomplex_dotprod_generic (float* result, const float *input,\n\t"
        "# const float *taps, unsigned num_bytes)\n\t"
        "# float sum0 = 0;\n\t"
        "# float sum1 = 0;\n\t"
        "# float sum2 = 0;\n\t"
        "# float sum3 = 0;\n\t"
        "# do {\n\t"
        "# sum0 += input[0] * taps[0] - input[1] * taps[1];\n\t"
        "# sum1 += input[0] * taps[1] + input[1] * taps[0];\n\t"
        "# sum2 += input[2] * taps[2] - input[3] * taps[3];\n\t"
        "# sum3 += input[2] * taps[3] + input[3] * taps[2];\n\t"
        "# input += 4;\n\t"
        "# taps += 4; \n\t"
        "# } while (--n_2_ccomplex_blocks != 0);\n\t"
        "# result[0] = sum0 + sum2;\n\t"
        "# result[1] = sum1 + sum3;\n\t"
        "# TODO: prefetch and better scheduling\n\t"
        " xor %%r9, %%r9\n\t"
        " xor %%r10, %%r10\n\t"
        " movq %%rcx, %%rax\n\t"
        " movq %%rcx, %%r8\n\t"
        " movq %[rsi], %%r9\n\t"
        " movq %[rdx], %%r10\n\t"
        " xorps %%xmm6, %%xmm6 # zero accumulators\n\t"
        " movaps 0(%%r9), %%xmm0\n\t"
        " xorps %%xmm7, %%xmm7 # zero accumulators\n\t"
        " movaps 0(%%r10), %%xmm2\n\t"
        " shr $5, %%rax # rax = n_2_ccomplex_blocks / 2\n\t"
        " shr $4, %%r8\n\t"
        " jmp .%=L1_test\n\t"
        " # 4 taps / loop\n\t"
        " # something like ?? cycles / loop\n\t"
        ".%=Loop1: \n\t"
        "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
        "# movaps (%%r9), %%xmmA\n\t"
        "# movaps (%%r10), %%xmmB\n\t"
        "# movaps %%xmmA, %%xmmZ\n\t"
        "# shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
        "# mulps %%xmmB, %%xmmA\n\t"
        "# mulps %%xmmZ, %%xmmB\n\t"
        "# # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
        "# xorps %%xmmPN, %%xmmA\n\t"
        "# movaps %%xmmA, %%xmmZ\n\t"
        "# unpcklps %%xmmB, %%xmmA\n\t"
        "# unpckhps %%xmmB, %%xmmZ\n\t"
        "# movaps %%xmmZ, %%xmmY\n\t"
        "# shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
        "# shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
        "# addps %%xmmZ, %%xmmA\n\t"
        "# addps %%xmmA, %%xmmC\n\t"
        "# A=xmm0, B=xmm2, Z=xmm4\n\t"
        "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
        " movaps 16(%%r9), %%xmm1\n\t"
        " movaps %%xmm0, %%xmm4\n\t"
        " mulps %%xmm2, %%xmm0\n\t"
        " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
        " movaps 16(%%r10), %%xmm3\n\t"
        " movaps %%xmm1, %%xmm5\n\t"
        " addps %%xmm0, %%xmm6\n\t"
        " mulps %%xmm3, %%xmm1\n\t"
        " shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
        " addps %%xmm1, %%xmm6\n\t"
        " mulps %%xmm4, %%xmm2\n\t"
        " movaps 32(%%r9), %%xmm0\n\t"
        " addps %%xmm2, %%xmm7\n\t"
        " mulps %%xmm5, %%xmm3\n\t"
        " add $32, %%r9\n\t"
        " movaps 32(%%r10), %%xmm2\n\t"
        " addps %%xmm3, %%xmm7\n\t"
        " add $32, %%r10\n\t"
        ".%=L1_test:\n\t"
        " dec %%rax\n\t"
        " jge .%=Loop1\n\t"
        " # We've handled the bulk of multiplies up to here.\n\t"
        " # Let's see if original n_2_ccomplex_blocks was odd.\n\t"
        " # If so, we've got 2 more taps to do.\n\t"
        " and $1, %%r8\n\t"
        " je .%=Leven\n\t"
        " # The count was odd, do 2 more taps.\n\t"
        " # Note that we've already got mm0/mm2 preloaded\n\t"
        " # from the main loop.\n\t"
        " movaps %%xmm0, %%xmm4\n\t"
        " mulps %%xmm2, %%xmm0\n\t"
        " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
        " addps %%xmm0, %%xmm6\n\t"
        " mulps %%xmm4, %%xmm2\n\t"
        " addps %%xmm2, %%xmm7\n\t"
        ".%=Leven:\n\t"
        " # neg inversor\n\t"
        " xorps %%xmm1, %%xmm1\n\t"
        " mov $0x80000000, %%r9\n\t"
        " movd %%r9, %%xmm1\n\t"
        " shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
        " # pfpnacc\n\t"
        " xorps %%xmm1, %%xmm6\n\t"
        " movaps %%xmm6, %%xmm2\n\t"
        " unpcklps %%xmm7, %%xmm6\n\t"
        " unpckhps %%xmm7, %%xmm2\n\t"
        " movaps %%xmm2, %%xmm3\n\t"
        " shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
        " shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
        " addps %%xmm2, %%xmm6\n\t"
        " # xmm6 = r1 i2 r3 i4\n\t"
        " movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
        " addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
        " movlps %%xmm6, (%[rdi]) # store low 2x32 bits (complex) to memory\n\t"
        :
        : [rsi] "r"(input), [rdx] "r"(taps), "c"(num_bytes), [rdi] "r"(result)
        : "rax", "r8", "r9", "r10");


    if (isodd) {
        *result += input[num_points - 1] * taps[num_points - 1];
    }

    return;
}

#endif /* LV_HAVE_SSE && LV_HAVE_64 */

#if LV_HAVE_SSE && LV_HAVE_32

static inline void volk_32fc_x2_dot_prod_32fc_a_sse_32(lv_32fc_t* result,
                                                       const lv_32fc_t* input,
                                                       const lv_32fc_t* taps,
                                                       unsigned int num_points)
{

    volk_32fc_x2_dot_prod_32fc_a_generic(result, input, taps, num_points);

#if 0
    const unsigned int num_bytes = num_points*8;
    unsigned int isodd = num_points & 1;

    __VOLK_ASM __VOLK_VOLATILE
    (
        " #pushl %%ebp\n\t"
        " #movl %%esp, %%ebp\n\t"
        " movl 12(%%ebp), %%eax # input\n\t"
        " movl 16(%%ebp), %%edx # taps\n\t"
        " movl 20(%%ebp), %%ecx # n_bytes\n\t"
        " xorps %%xmm6, %%xmm6 # zero accumulators\n\t"
        " movaps 0(%%eax), %%xmm0\n\t"
        " xorps %%xmm7, %%xmm7 # zero accumulators\n\t"
        " movaps 0(%%edx), %%xmm2\n\t"
        " shrl $5, %%ecx # ecx = n_2_ccomplex_blocks / 2\n\t"
        " jmp .%=L1_test\n\t"
        " # 4 taps / loop\n\t"
        " # something like ?? cycles / loop\n\t"
        ".%=Loop1: \n\t"
        "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
        "# movaps (%%eax), %%xmmA\n\t"
        "# movaps (%%edx), %%xmmB\n\t"
        "# movaps %%xmmA, %%xmmZ\n\t"
        "# shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
        "# mulps %%xmmB, %%xmmA\n\t"
        "# mulps %%xmmZ, %%xmmB\n\t"
        "# # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
        "# xorps %%xmmPN, %%xmmA\n\t"
        "# movaps %%xmmA, %%xmmZ\n\t"
        "# unpcklps %%xmmB, %%xmmA\n\t"
        "# unpckhps %%xmmB, %%xmmZ\n\t"
        "# movaps %%xmmZ, %%xmmY\n\t"
        "# shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
        "# shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
        "# addps %%xmmZ, %%xmmA\n\t"
        "# addps %%xmmA, %%xmmC\n\t"
        "# A=xmm0, B=xmm2, Z=xmm4\n\t"
        "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
        " movaps 16(%%eax), %%xmm1\n\t"
        " movaps %%xmm0, %%xmm4\n\t"
        " mulps %%xmm2, %%xmm0\n\t"
        " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
        " movaps 16(%%edx), %%xmm3\n\t"
        " movaps %%xmm1, %%xmm5\n\t"
        " addps %%xmm0, %%xmm6\n\t"
        " mulps %%xmm3, %%xmm1\n\t"
        " shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
        " addps %%xmm1, %%xmm6\n\t"
        " mulps %%xmm4, %%xmm2\n\t"
        " movaps 32(%%eax), %%xmm0\n\t"
        " addps %%xmm2, %%xmm7\n\t"
        " mulps %%xmm5, %%xmm3\n\t"
        " addl $32, %%eax\n\t"
        " movaps 32(%%edx), %%xmm2\n\t"
        " addps %%xmm3, %%xmm7\n\t"
        " addl $32, %%edx\n\t"
        ".%=L1_test:\n\t"
        " decl %%ecx\n\t"
        " jge .%=Loop1\n\t"
        " # We've handled the bulk of multiplies up to here.\n\t"
        " # Let's see if original n_2_ccomplex_blocks was odd.\n\t"
        " # If so, we've got 2 more taps to do.\n\t"
        " movl 20(%%ebp), %%ecx # n_2_ccomplex_blocks\n\t"
        " shrl $4, %%ecx\n\t"
        " andl $1, %%ecx\n\t"
        " je .%=Leven\n\t"
        " # The count was odd, do 2 more taps.\n\t"
        " # Note that we've already got mm0/mm2 preloaded\n\t"
        " # from the main loop.\n\t"
        " movaps %%xmm0, %%xmm4\n\t"
        " mulps %%xmm2, %%xmm0\n\t"
        " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
        " addps %%xmm0, %%xmm6\n\t"
        " mulps %%xmm4, %%xmm2\n\t"
        " addps %%xmm2, %%xmm7\n\t"
        ".%=Leven:\n\t"
        " # neg inversor\n\t"
        " movl 8(%%ebp), %%eax \n\t"
        " xorps %%xmm1, %%xmm1\n\t"
        " movl $0x80000000, (%%eax)\n\t"
        " movss (%%eax), %%xmm1\n\t"
        " shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
        " # pfpnacc\n\t"
        " xorps %%xmm1, %%xmm6\n\t"
        " movaps %%xmm6, %%xmm2\n\t"
        " unpcklps %%xmm7, %%xmm6\n\t"
        " unpckhps %%xmm7, %%xmm2\n\t"
        " movaps %%xmm2, %%xmm3\n\t"
        " shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
        " shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
        " addps %%xmm2, %%xmm6\n\t"
        " # xmm6 = r1 i2 r3 i4\n\t"
        " #movl 8(%%ebp), %%eax # @result\n\t"
        " movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
        " addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
        " movlps %%xmm6, (%%eax) # store low 2x32 bits (complex) to memory\n\t"
        " #popl %%ebp\n\t"
        :
        :
        : "eax", "ecx", "edx"
    );


    int getem = num_bytes % 16;

    if (isodd) {
        *result += (input[num_points - 1] * taps[num_points - 1]);
    }

    return;
#endif
}

#endif /*LV_HAVE_SSE && LV_HAVE_32*/

#ifdef LV_HAVE_SSE3

#include <pmmintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_a_sse3(lv_32fc_t* result,
                                                     const lv_32fc_t* input,
                                                     const lv_32fc_t* taps,
                                                     unsigned int num_points)
{

    const unsigned int num_bytes = num_points * 8;
    unsigned int isodd = num_points & 1;

    lv_32fc_t dotProduct;
    memset(&dotProduct, 0x0, 2 * sizeof(float));

    unsigned int number = 0;
    const unsigned int halfPoints = num_bytes >> 4;

    __m128 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

    const lv_32fc_t* a = input;
    const lv_32fc_t* b = taps;

    dotProdVal = _mm_setzero_ps();

    for (; number < halfPoints; number++) {

        x = _mm_load_ps((float*)a); // Load the ar + ai, br + bi as ar,ai,br,bi
        y = _mm_load_ps((float*)b); // Load the cr + ci, dr + di as cr,ci,dr,di

        yl = _mm_moveldup_ps(y); // Load yl with cr,cr,dr,dr
        yh = _mm_movehdup_ps(y); // Load yh with ci,ci,di,di

        tmp1 = _mm_mul_ps(x, yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr

        x = _mm_shuffle_ps(x, x, 0xB1); // Re-arrange x to be ai,ar,bi,br

        tmp2 = _mm_mul_ps(x, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di

        z = _mm_addsub_ps(tmp1,
                          tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di

        dotProdVal =
            _mm_add_ps(dotProdVal, z); // Add the complex multiplication results together

        a += 2;
        b += 2;
    }

    __VOLK_ATTR_ALIGNED(16) lv_32fc_t dotProductVector[2];

    _mm_store_ps((float*)dotProductVector,
                 dotProdVal); // Store the results back into the dot product vector

    dotProduct += (dotProductVector[0] + dotProductVector[1]);

    if (isodd) {
        dotProduct += input[num_points - 1] * taps[num_points - 1];
    }

    *result = dotProduct;
}

#endif /*LV_HAVE_SSE3*/


// #ifdef LV_HAVE_SSE4_1

// #include <smmintrin.h>

// static inline void volk_32fc_x2_dot_prod_32fc_a_sse4_1(lv_32fc_t* result,
//                                                        const lv_32fc_t* input,
//                                                        const lv_32fc_t* taps,
//                                                        unsigned int num_points)
// {

//     unsigned int i = 0;
//     const unsigned int qtr_points = num_points / 4;
//     const unsigned int isodd = num_points & 3;

//     __m128 xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, real0, real1, im0, im1;
//     float *p_input, *p_taps;
//     __m64* p_result;

//     static const __m128i neg = { 0x000000000000000080000000 };

//     p_result = (__m64*)result;
//     p_input = (float*)input;
//     p_taps = (float*)taps;

//     real0 = _mm_setzero_ps();
//     real1 = _mm_setzero_ps();
//     im0 = _mm_setzero_ps();
//     im1 = _mm_setzero_ps();

//     for (; i < qtr_points; ++i) {
//         xmm0 = _mm_load_ps(p_input);
//         xmm1 = _mm_load_ps(p_taps);

//         p_input += 4;
//         p_taps += 4;

//         xmm2 = _mm_load_ps(p_input);
//         xmm3 = _mm_load_ps(p_taps);

//         p_input += 4;
//         p_taps += 4;

//         xmm4 = _mm_unpackhi_ps(xmm0, xmm2);
//         xmm5 = _mm_unpackhi_ps(xmm1, xmm3);
//         xmm0 = _mm_unpacklo_ps(xmm0, xmm2);
//         xmm2 = _mm_unpacklo_ps(xmm1, xmm3);

//         // imaginary vector from input
//         xmm1 = _mm_unpackhi_ps(xmm0, xmm4);
//         // real vector from input
//         xmm3 = _mm_unpacklo_ps(xmm0, xmm4);
//         // imaginary vector from taps
//         xmm0 = _mm_unpackhi_ps(xmm2, xmm5);
//         // real vector from taps
//         xmm2 = _mm_unpacklo_ps(xmm2, xmm5);

//         xmm4 = _mm_dp_ps(xmm3, xmm2, 0xf1);
//         xmm5 = _mm_dp_ps(xmm1, xmm0, 0xf1);

//         xmm6 = _mm_dp_ps(xmm3, xmm0, 0xf2);
//         xmm7 = _mm_dp_ps(xmm1, xmm2, 0xf2);

//         real0 = _mm_add_ps(xmm4, real0);
//         real1 = _mm_add_ps(xmm5, real1);
//         im0 = _mm_add_ps(xmm6, im0);
//         im1 = _mm_add_ps(xmm7, im1);
//     }

//     real1 = _mm_xor_ps(real1, bit128_p(&neg)->float_vec);

//     im0 = _mm_add_ps(im0, im1);
//     real0 = _mm_add_ps(real0, real1);

//     im0 = _mm_add_ps(im0, real0);

//     _mm_storel_pi(p_result, im0);

//     for (i = num_points - isodd; i < num_points; i++) {
//         *result += input[i] * taps[i];
//     }
// }

// #endif /*LV_HAVE_SSE4_1*/

#ifdef LV_HAVE_NEON
#include <arm_neon.h>

static inline void volk_32fc_x2_dot_prod_32fc_neon(lv_32fc_t* result,
                                                   const lv_32fc_t* input,
                                                   const lv_32fc_t* taps,
                                                   unsigned int num_points)
{

    unsigned int quarter_points = num_points / 4;
    unsigned int number;

    lv_32fc_t* a_ptr = (lv_32fc_t*)taps;
    lv_32fc_t* b_ptr = (lv_32fc_t*)input;
    // for 2-lane vectors, 1st lane holds the real part,
    // 2nd lane holds the imaginary part
    float32x4x2_t a_val, b_val, c_val, accumulator;
    float32x4x2_t tmp_real, tmp_imag;
    accumulator.val[0] = vdupq_n_f32(0);
    accumulator.val[1] = vdupq_n_f32(0);

    for (number = 0; number < quarter_points; ++number) {
        a_val = vld2q_f32((float*)a_ptr); // a0r|a1r|a2r|a3r || a0i|a1i|a2i|a3i
        b_val = vld2q_f32((float*)b_ptr); // b0r|b1r|b2r|b3r || b0i|b1i|b2i|b3i
        __VOLK_PREFETCH(a_ptr + 8);
        __VOLK_PREFETCH(b_ptr + 8);

        // multiply the real*real and imag*imag to get real result
        // a0r*b0r|a1r*b1r|a2r*b2r|a3r*b3r
        tmp_real.val[0] = vmulq_f32(a_val.val[0], b_val.val[0]);
        // a0i*b0i|a1i*b1i|a2i*b2i|a3i*b3i
        tmp_real.val[1] = vmulq_f32(a_val.val[1], b_val.val[1]);

        // Multiply cross terms to get the imaginary result
        // a0r*b0i|a1r*b1i|a2r*b2i|a3r*b3i
        tmp_imag.val[0] = vmulq_f32(a_val.val[0], b_val.val[1]);
        // a0i*b0r|a1i*b1r|a2i*b2r|a3i*b3r
        tmp_imag.val[1] = vmulq_f32(a_val.val[1], b_val.val[0]);

        c_val.val[0] = vsubq_f32(tmp_real.val[0], tmp_real.val[1]);
        c_val.val[1] = vaddq_f32(tmp_imag.val[0], tmp_imag.val[1]);

        accumulator.val[0] = vaddq_f32(accumulator.val[0], c_val.val[0]);
        accumulator.val[1] = vaddq_f32(accumulator.val[1], c_val.val[1]);

        a_ptr += 4;
        b_ptr += 4;
    }
    lv_32fc_t accum_result[4];
    vst2q_f32((float*)accum_result, accumulator);
    *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

    // tail case
    for (number = quarter_points * 4; number < num_points; ++number) {
        *result += (*a_ptr++) * (*b_ptr++);
    }
}
#endif /*LV_HAVE_NEON*/
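
/*
 * vld2q_f32 de-interleaves the packed (re,im) stream, so the kernel above
 * accumulates in split-complex form: accumulator.val[0] holds four partial
 * real sums and accumulator.val[1] four partial imaginary sums. A scalar
 * model of one lane k per iteration (illustrative only):
 *
 *   re_acc[k] += ar[k] * br[k] - ai[k] * bi[k]; // c_val.val[0]
 *   im_acc[k] += ar[k] * bi[k] + ai[k] * br[k]; // c_val.val[1]
 *
 * vst2q_f32 then re-interleaves re_acc/im_acc into four partial complex sums
 * whose scalar total is the dot product.
 */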

#ifdef LV_HAVE_NEON
#include <arm_neon.h>
static inline void volk_32fc_x2_dot_prod_32fc_neon_opttests(lv_32fc_t* result,
                                                            const lv_32fc_t* input,
                                                            const lv_32fc_t* taps,
                                                            unsigned int num_points)
{

    unsigned int quarter_points = num_points / 4;
    unsigned int number;

    lv_32fc_t* a_ptr = (lv_32fc_t*)taps;
    lv_32fc_t* b_ptr = (lv_32fc_t*)input;
    // for 2-lane vectors, 1st lane holds the real part,
    // 2nd lane holds the imaginary part
    float32x4x2_t a_val, b_val, accumulator;
    float32x4x2_t tmp_imag;
    accumulator.val[0] = vdupq_n_f32(0);
    accumulator.val[1] = vdupq_n_f32(0);

    for (number = 0; number < quarter_points; ++number) {
        a_val = vld2q_f32((float*)a_ptr); // a0r|a1r|a2r|a3r || a0i|a1i|a2i|a3i
        b_val = vld2q_f32((float*)b_ptr); // b0r|b1r|b2r|b3r || b0i|b1i|b2i|b3i
        __VOLK_PREFETCH(a_ptr + 8);
        __VOLK_PREFETCH(b_ptr + 8);

        // do the first multiply
        tmp_imag.val[1] = vmulq_f32(a_val.val[1], b_val.val[0]);
        tmp_imag.val[0] = vmulq_f32(a_val.val[0], b_val.val[0]);

        // use multiply accumulate/subtract to get result
        tmp_imag.val[1] = vmlaq_f32(tmp_imag.val[1], a_val.val[0], b_val.val[1]);
        tmp_imag.val[0] = vmlsq_f32(tmp_imag.val[0], a_val.val[1], b_val.val[1]);

        accumulator.val[0] = vaddq_f32(accumulator.val[0], tmp_imag.val[0]);
        accumulator.val[1] = vaddq_f32(accumulator.val[1], tmp_imag.val[1]);

        // increment pointers
        a_ptr += 4;
        b_ptr += 4;
    }
    lv_32fc_t accum_result[4];
    vst2q_f32((float*)accum_result, accumulator);
    *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

    // tail case
    for (number = quarter_points * 4; number < num_points; ++number) {
        *result += (*a_ptr++) * (*b_ptr++);
    }
}
#endif /*LV_HAVE_NEON*/

#ifdef LV_HAVE_NEON
static inline void volk_32fc_x2_dot_prod_32fc_neon_optfma(lv_32fc_t* result,
                                                          const lv_32fc_t* input,
                                                          const lv_32fc_t* taps,
                                                          unsigned int num_points)
{

    unsigned int quarter_points = num_points / 4;
    unsigned int number;

    lv_32fc_t* a_ptr = (lv_32fc_t*)taps;
    lv_32fc_t* b_ptr = (lv_32fc_t*)input;
    // for 2-lane vectors, 1st lane holds the real part,
    // 2nd lane holds the imaginary part
    float32x4x2_t a_val, b_val, accumulator1, accumulator2;
    accumulator1.val[0] = vdupq_n_f32(0);
    accumulator1.val[1] = vdupq_n_f32(0);
    accumulator2.val[0] = vdupq_n_f32(0);
    accumulator2.val[1] = vdupq_n_f32(0);

    for (number = 0; number < quarter_points; ++number) {
        a_val = vld2q_f32((float*)a_ptr); // a0r|a1r|a2r|a3r || a0i|a1i|a2i|a3i
        b_val = vld2q_f32((float*)b_ptr); // b0r|b1r|b2r|b3r || b0i|b1i|b2i|b3i
        __VOLK_PREFETCH(a_ptr + 8);
        __VOLK_PREFETCH(b_ptr + 8);

        // use 2 accumulators to remove inter-instruction data dependencies
        accumulator1.val[0] = vmlaq_f32(accumulator1.val[0], a_val.val[0], b_val.val[0]);
        accumulator1.val[1] = vmlaq_f32(accumulator1.val[1], a_val.val[0], b_val.val[1]);
        accumulator2.val[0] = vmlsq_f32(accumulator2.val[0], a_val.val[1], b_val.val[1]);
        accumulator2.val[1] = vmlaq_f32(accumulator2.val[1], a_val.val[1], b_val.val[0]);
        // increment pointers
        a_ptr += 4;
        b_ptr += 4;
    }
    accumulator1.val[0] = vaddq_f32(accumulator1.val[0], accumulator2.val[0]);
    accumulator1.val[1] = vaddq_f32(accumulator1.val[1], accumulator2.val[1]);
    lv_32fc_t accum_result[4];
    vst2q_f32((float*)accum_result, accumulator1);
    *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

    // tail case
    for (number = quarter_points * 4; number < num_points; ++number) {
        *result += (*a_ptr++) * (*b_ptr++);
    }
}
#endif /*LV_HAVE_NEON*/

#ifdef LV_HAVE_NEON
static inline void volk_32fc_x2_dot_prod_32fc_neon_optfmaunroll(lv_32fc_t* result,
                                                                const lv_32fc_t* input,
                                                                const lv_32fc_t* taps,
                                                                unsigned int num_points)
{
    // NOTE: GCC does a poor job with this kernel, but the equivalent ASM code is very
    // fast

    unsigned int quarter_points = num_points / 8;
    unsigned int number;

    lv_32fc_t* a_ptr = (lv_32fc_t*)taps;
    lv_32fc_t* b_ptr = (lv_32fc_t*)input;
    // for 2-lane vectors, 1st lane holds the real part,
    // 2nd lane holds the imaginary part
    float32x4x4_t a_val, b_val, accumulator1, accumulator2;
    float32x4x2_t reduced_accumulator;
    accumulator1.val[0] = vdupq_n_f32(0);
    accumulator1.val[1] = vdupq_n_f32(0);
    accumulator1.val[2] = vdupq_n_f32(0);
    accumulator1.val[3] = vdupq_n_f32(0);
    accumulator2.val[0] = vdupq_n_f32(0);
    accumulator2.val[1] = vdupq_n_f32(0);
    accumulator2.val[2] = vdupq_n_f32(0);
    accumulator2.val[3] = vdupq_n_f32(0);

    // 8 input regs, 8 accumulators -> 16/16 neon regs are used
    for (number = 0; number < quarter_points; ++number) {
        // vld4q de-interleaves 8 complex points with stride 4:
        // val[0]=a0r|a2r|a4r|a6r, val[1]=a0i|a2i|a4i|a6i,
        // val[2]=a1r|a3r|a5r|a7r, val[3]=a1i|a3i|a5i|a7i
        a_val = vld4q_f32((float*)a_ptr);
        b_val = vld4q_f32((float*)b_ptr); // same 4-way split for the taps
        __VOLK_PREFETCH(a_ptr + 8);
        __VOLK_PREFETCH(b_ptr + 8);

        // use 2 accumulators to remove inter-instruction data dependencies
        accumulator1.val[0] = vmlaq_f32(accumulator1.val[0], a_val.val[0], b_val.val[0]);
        accumulator1.val[1] = vmlaq_f32(accumulator1.val[1], a_val.val[0], b_val.val[1]);

        accumulator1.val[2] = vmlaq_f32(accumulator1.val[2], a_val.val[2], b_val.val[2]);
        accumulator1.val[3] = vmlaq_f32(accumulator1.val[3], a_val.val[2], b_val.val[3]);

        accumulator2.val[0] = vmlsq_f32(accumulator2.val[0], a_val.val[1], b_val.val[1]);
        accumulator2.val[1] = vmlaq_f32(accumulator2.val[1], a_val.val[1], b_val.val[0]);

        accumulator2.val[2] = vmlsq_f32(accumulator2.val[2], a_val.val[3], b_val.val[3]);
        accumulator2.val[3] = vmlaq_f32(accumulator2.val[3], a_val.val[3], b_val.val[2]);
        // increment pointers
        a_ptr += 8;
        b_ptr += 8;
    }
    // reduce 8 accumulator lanes down to 2 (1 real and 1 imag)
    accumulator1.val[0] = vaddq_f32(accumulator1.val[0], accumulator1.val[2]);
    accumulator1.val[1] = vaddq_f32(accumulator1.val[1], accumulator1.val[3]);
    accumulator2.val[0] = vaddq_f32(accumulator2.val[0], accumulator2.val[2]);
    accumulator2.val[1] = vaddq_f32(accumulator2.val[1], accumulator2.val[3]);
    reduced_accumulator.val[0] = vaddq_f32(accumulator1.val[0], accumulator2.val[0]);
    reduced_accumulator.val[1] = vaddq_f32(accumulator1.val[1], accumulator2.val[1]);
    // now reduce accumulators to scalars
    lv_32fc_t accum_result[4];
    vst2q_f32((float*)accum_result, reduced_accumulator);
    *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

    // tail case
    for (number = quarter_points * 8; number < num_points; ++number) {
        *result += (*a_ptr++) * (*b_ptr++);
    }
}
#endif /*LV_HAVE_NEON*/


#ifdef LV_HAVE_AVX

#include <immintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_a_avx(lv_32fc_t* result,
                                                    const lv_32fc_t* input,
                                                    const lv_32fc_t* taps,
                                                    unsigned int num_points)
{

    unsigned int isodd = num_points & 3;
    unsigned int i = 0;
    lv_32fc_t dotProduct;
    memset(&dotProduct, 0x0, 2 * sizeof(float));

    unsigned int number = 0;
    const unsigned int quarterPoints = num_points / 4;

    __m256 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

    const lv_32fc_t* a = input;
    const lv_32fc_t* b = taps;

    dotProdVal = _mm256_setzero_ps();

    for (; number < quarterPoints; number++) {

        x = _mm256_load_ps((float*)a); // Load a,b,e,f as ar,ai,br,bi,er,ei,fr,fi
        y = _mm256_load_ps((float*)b); // Load c,d,g,h as cr,ci,dr,di,gr,gi,hr,hi

        yl = _mm256_moveldup_ps(y); // Load yl with cr,cr,dr,dr,gr,gr,hr,hr
        yh = _mm256_movehdup_ps(y); // Load yh with ci,ci,di,di,gi,gi,hi,hi

        tmp1 = _mm256_mul_ps(x, yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr ...

        x = _mm256_shuffle_ps(x, x, 0xB1); // Re-arrange x to be ai,ar,bi,br,ei,er,fi,fr

        tmp2 = _mm256_mul_ps(x, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di ...

        z = _mm256_addsub_ps(tmp1,
                             tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di

        dotProdVal = _mm256_add_ps(dotProdVal,
                                   z); // Add the complex multiplication results together

        a += 4;
        b += 4;
    }

    __VOLK_ATTR_ALIGNED(32) lv_32fc_t dotProductVector[4];

    _mm256_store_ps((float*)dotProductVector,
                    dotProdVal); // Store the results back into the dot product vector

    dotProduct += (dotProductVector[0] + dotProductVector[1] + dotProductVector[2] +
                   dotProductVector[3]);

    for (i = num_points - isodd; i < num_points; i++) {
        dotProduct += input[i] * taps[i];
    }

    *result = dotProduct;
}

#endif /*LV_HAVE_AVX*/

#if LV_HAVE_AVX && LV_HAVE_FMA
#include <immintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_a_avx_fma(lv_32fc_t* result,
                                                        const lv_32fc_t* input,
                                                        const lv_32fc_t* taps,
                                                        unsigned int num_points)
{

    unsigned int isodd = num_points & 3;
    unsigned int i = 0;
    lv_32fc_t dotProduct;
    memset(&dotProduct, 0x0, 2 * sizeof(float));

    unsigned int number = 0;
    const unsigned int quarterPoints = num_points / 4;

    __m256 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

    const lv_32fc_t* a = input;
    const lv_32fc_t* b = taps;

    dotProdVal = _mm256_setzero_ps();

    for (; number < quarterPoints; number++) {

        x = _mm256_load_ps((float*)a); // Load a,b,e,f as ar,ai,br,bi,er,ei,fr,fi
        y = _mm256_load_ps((float*)b); // Load c,d,g,h as cr,ci,dr,di,gr,gi,hr,hi

        yl = _mm256_moveldup_ps(y); // Load yl with cr,cr,dr,dr,gr,gr,hr,hr
        yh = _mm256_movehdup_ps(y); // Load yh with ci,ci,di,di,gi,gi,hi,hi

        tmp1 = x;

        x = _mm256_shuffle_ps(x, x, 0xB1); // Re-arrange x to be ai,ar,bi,br,ei,er,fi,fr

        tmp2 = _mm256_mul_ps(x, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di ...

        z = _mm256_fmaddsub_ps(
            tmp1, yl, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di

        dotProdVal = _mm256_add_ps(dotProdVal,
                                   z); // Add the complex multiplication results together

        a += 4;
        b += 4;
    }

    __VOLK_ATTR_ALIGNED(32) lv_32fc_t dotProductVector[4];

    _mm256_store_ps((float*)dotProductVector,
                    dotProdVal); // Store the results back into the dot product vector

    dotProduct += (dotProductVector[0] + dotProductVector[1] + dotProductVector[2] +
                   dotProductVector[3]);

    for (i = num_points - isodd; i < num_points; i++) {
        dotProduct += input[i] * taps[i];
    }

    *result = dotProduct;
}

#endif /*LV_HAVE_AVX && LV_HAVE_FMA*/


#endif /*INCLUDED_volk_32fc_x2_dot_prod_32fc_a_H*/
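
/*
 * Typical use goes through the VOLK dispatcher rather than a specific
 * variant. A minimal sketch, assuming the standard <volk/volk.h> API and
 * aligned buffers from volk_malloc:
 *
 *   #include <volk/volk.h>
 *
 *   unsigned int n = 1024;
 *   size_t align = volk_get_alignment();
 *   lv_32fc_t* in   = (lv_32fc_t*)volk_malloc(n * sizeof(lv_32fc_t), align);
 *   lv_32fc_t* taps = (lv_32fc_t*)volk_malloc(n * sizeof(lv_32fc_t), align);
 *   lv_32fc_t res;
 *   // ... fill in[] and taps[] ...
 *   volk_32fc_x2_dot_prod_32fc(&res, in, taps, n);
 *   volk_free(in);
 *   volk_free(taps);
 */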