volk_32fc_x2_dot_prod_32fc.h
/* -*- c++ -*- */
/*
 * Copyright 2012, 2013, 2014 Free Software Foundation, Inc.
 *
 * This file is part of GNU Radio
 *
 * GNU Radio is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3, or (at your option)
 * any later version.
 *
 * GNU Radio is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU Radio; see the file COPYING. If not, write to
 * the Free Software Foundation, Inc., 51 Franklin Street,
 * Boston, MA 02110-1301, USA.
 */

#ifndef INCLUDED_volk_32fc_x2_dot_prod_32fc_u_H
#define INCLUDED_volk_32fc_x2_dot_prod_32fc_u_H

#include <volk/volk_common.h>
#include <volk/volk_complex.h>
#include <stdio.h>
#include <string.h>


#ifdef LV_HAVE_GENERIC


static inline void volk_32fc_x2_dot_prod_32fc_generic(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

    float* res = (float*)result;
    float* in = (float*)input;
    float* tp = (float*)taps;
    unsigned int n_2_ccomplex_blocks = num_points / 2;
    unsigned int isodd = num_points & 1;

    float sum0[2] = {0, 0};
    float sum1[2] = {0, 0};
    unsigned int i = 0;

    for(i = 0; i < n_2_ccomplex_blocks; ++i) {
        sum0[0] += in[0] * tp[0] - in[1] * tp[1];
        sum0[1] += in[0] * tp[1] + in[1] * tp[0];
        sum1[0] += in[2] * tp[2] - in[3] * tp[3];
        sum1[1] += in[2] * tp[3] + in[3] * tp[2];

        in += 4;
        tp += 4;
    }

    res[0] = sum0[0] + sum1[0];
    res[1] = sum0[1] + sum1[1];

    // Cleanup if we had an odd number of points
    if(isodd) {
        *result += input[num_points - 1] * taps[num_points - 1];
    }
}

#endif /*LV_HAVE_GENERIC*/
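
/*
 * Usage sketch (illustrative only, not part of this header): drives the
 * generic kernel on two points and checks the result by hand:
 * (1+2i)(3+4i) + (5+6i)(7+8i) = (-5+10i) + (-13+82i) = -18+92i.
 * The buffers and main() below are hypothetical test scaffolding, kept
 * disabled so the header stays unchanged for real builds.
 */
#if 0
int main(void)
{
    lv_32fc_t input[2] = { lv_cmake(1.f, 2.f), lv_cmake(5.f, 6.f) };
    lv_32fc_t taps[2] = { lv_cmake(3.f, 4.f), lv_cmake(7.f, 8.f) };
    lv_32fc_t res;
    volk_32fc_x2_dot_prod_32fc_generic(&res, input, taps, 2);
    printf("%g %g\n", lv_creal(res), lv_cimag(res)); // expect -18 92
    return 0;
}
#endif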


#if LV_HAVE_SSE && LV_HAVE_64

static inline void volk_32fc_x2_dot_prod_32fc_u_sse_64(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

    const unsigned int num_bytes = num_points * 8;
    unsigned int isodd = num_points & 1;

    __VOLK_ASM
    (
        "# ccomplex_dotprod_generic (float* result, const float *input,\n\t"
        "# const float *taps, unsigned num_bytes)\n\t"
        "# float sum0 = 0;\n\t"
        "# float sum1 = 0;\n\t"
        "# float sum2 = 0;\n\t"
        "# float sum3 = 0;\n\t"
        "# do {\n\t"
        "# sum0 += input[0] * taps[0] - input[1] * taps[1];\n\t"
        "# sum1 += input[0] * taps[1] + input[1] * taps[0];\n\t"
        "# sum2 += input[2] * taps[2] - input[3] * taps[3];\n\t"
        "# sum3 += input[2] * taps[3] + input[3] * taps[2];\n\t"
        "# input += 4;\n\t"
        "# taps += 4; \n\t"
        "# } while (--n_2_ccomplex_blocks != 0);\n\t"
        "# result[0] = sum0 + sum2;\n\t"
        "# result[1] = sum1 + sum3;\n\t"
        "# TODO: prefetch and better scheduling\n\t"
        " xor %%r9, %%r9\n\t"
        " xor %%r10, %%r10\n\t"
        " movq %%rcx, %%rax\n\t"
        " movq %%rcx, %%r8\n\t"
        " movq %[rsi], %%r9\n\t"
        " movq %[rdx], %%r10\n\t"
        " xorps %%xmm6, %%xmm6 # zero accumulators\n\t"
        " movups 0(%%r9), %%xmm0\n\t"
        " xorps %%xmm7, %%xmm7 # zero accumulators\n\t"
        " movups 0(%%r10), %%xmm2\n\t"
        " shr $5, %%rax # rax = n_2_ccomplex_blocks / 2\n\t"
        " shr $4, %%r8\n\t"
        " jmp .%=L1_test\n\t"
        " # 4 taps / loop\n\t"
        " # something like ?? cycles / loop\n\t"
        ".%=Loop1: \n\t"
        "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
        "# movups (%%r9), %%xmmA\n\t"
        "# movups (%%r10), %%xmmB\n\t"
        "# movups %%xmmA, %%xmmZ\n\t"
        "# shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
        "# mulps %%xmmB, %%xmmA\n\t"
        "# mulps %%xmmZ, %%xmmB\n\t"
        "# # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
        "# xorps %%xmmPN, %%xmmA\n\t"
        "# movups %%xmmA, %%xmmZ\n\t"
        "# unpcklps %%xmmB, %%xmmA\n\t"
        "# unpckhps %%xmmB, %%xmmZ\n\t"
        "# movups %%xmmZ, %%xmmY\n\t"
        "# shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
        "# shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
        "# addps %%xmmZ, %%xmmA\n\t"
        "# addps %%xmmA, %%xmmC\n\t"
        "# A=xmm0, B=xmm2, Z=xmm4\n\t"
        "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
        " movups 16(%%r9), %%xmm1\n\t"
        " movups %%xmm0, %%xmm4\n\t"
        " mulps %%xmm2, %%xmm0\n\t"
        " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
        " movups 16(%%r10), %%xmm3\n\t"
        " movups %%xmm1, %%xmm5\n\t"
        " addps %%xmm0, %%xmm6\n\t"
        " mulps %%xmm3, %%xmm1\n\t"
        " shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
        " addps %%xmm1, %%xmm6\n\t"
        " mulps %%xmm4, %%xmm2\n\t"
        " movups 32(%%r9), %%xmm0\n\t"
        " addps %%xmm2, %%xmm7\n\t"
        " mulps %%xmm5, %%xmm3\n\t"
        " add $32, %%r9\n\t"
        " movups 32(%%r10), %%xmm2\n\t"
        " addps %%xmm3, %%xmm7\n\t"
        " add $32, %%r10\n\t"
        ".%=L1_test:\n\t"
        " dec %%rax\n\t"
        " jge .%=Loop1\n\t"
        " # We've handled the bulk of multiplies up to here.\n\t"
        " # Let's see if original n_2_ccomplex_blocks was odd.\n\t"
        " # If so, we've got 2 more taps to do.\n\t"
        " and $1, %%r8\n\t"
        " je .%=Leven\n\t"
        " # The count was odd, do 2 more taps.\n\t"
        " # Note that we've already got mm0/mm2 preloaded\n\t"
        " # from the main loop.\n\t"
        " movups %%xmm0, %%xmm4\n\t"
        " mulps %%xmm2, %%xmm0\n\t"
        " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
        " addps %%xmm0, %%xmm6\n\t"
        " mulps %%xmm4, %%xmm2\n\t"
        " addps %%xmm2, %%xmm7\n\t"
        ".%=Leven:\n\t"
        " # neg inversor\n\t"
        " xorps %%xmm1, %%xmm1\n\t"
        " mov $0x80000000, %%r9\n\t"
        " movd %%r9, %%xmm1\n\t"
        " shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
        " # pfpnacc\n\t"
        " xorps %%xmm1, %%xmm6\n\t"
        " movups %%xmm6, %%xmm2\n\t"
        " unpcklps %%xmm7, %%xmm6\n\t"
        " unpckhps %%xmm7, %%xmm2\n\t"
        " movups %%xmm2, %%xmm3\n\t"
        " shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
        " shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
        " addps %%xmm2, %%xmm6\n\t"
        " # xmm6 = r1 i2 r3 i4\n\t"
        " movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
        " addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
        " movlps %%xmm6, (%[rdi]) # store low 2x32 bits (complex) to memory\n\t"
        :
        : [rsi] "r" (input), [rdx] "r" (taps), "c" (num_bytes), [rdi] "r" (result)
        : "rax", "r8", "r9", "r10"
    );


    if(isodd) {
        *result += input[num_points - 1] * taps[num_points - 1];
    }

    return;
}

#endif /* LV_HAVE_SSE && LV_HAVE_64 */
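
/*
 * Counter arithmetic used by the asm above (explanatory note): num_bytes is
 * 8*num_points, so rax = num_bytes >> 5 is the number of unrolled loop
 * iterations (32 bytes, i.e. 4 complex points, each), r8 = num_bytes >> 4 is
 * the number of 2-point blocks, and (r8 & 1) flags one leftover 2-point
 * block handled after the loop. A final scalar fix-up then adds a trailing
 * odd point.
 */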



#ifdef LV_HAVE_SSE3

#include <pmmintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_u_sse3(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

    lv_32fc_t dotProduct;
    memset(&dotProduct, 0x0, 2*sizeof(float));

    unsigned int number = 0;
    const unsigned int halfPoints = num_points / 2;
    unsigned int isodd = num_points & 1;

    __m128 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

    const lv_32fc_t* a = input;
    const lv_32fc_t* b = taps;

    dotProdVal = _mm_setzero_ps();

    for(; number < halfPoints; number++) {

        x = _mm_loadu_ps((float*)a); // Load the ar + ai, br + bi as ar,ai,br,bi
        y = _mm_loadu_ps((float*)b); // Load the cr + ci, dr + di as cr,ci,dr,di

        yl = _mm_moveldup_ps(y); // Load yl with cr,cr,dr,dr
        yh = _mm_movehdup_ps(y); // Load yh with ci,ci,di,di

        tmp1 = _mm_mul_ps(x, yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr

        x = _mm_shuffle_ps(x, x, 0xB1); // Re-arrange x to be ai,ar,bi,br

        tmp2 = _mm_mul_ps(x, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di

        z = _mm_addsub_ps(tmp1, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di

        dotProdVal = _mm_add_ps(dotProdVal, z); // Add the complex multiplication results together

        a += 2;
        b += 2;
    }

    __VOLK_ATTR_ALIGNED(16) lv_32fc_t dotProductVector[2];

    _mm_storeu_ps((float*)dotProductVector, dotProdVal); // Store the results back into the dot product vector

    dotProduct += (dotProductVector[0] + dotProductVector[1]);

    if(isodd) {
        dotProduct += input[num_points - 1] * taps[num_points - 1];
    }

    *result = dotProduct;
}

#endif /*LV_HAVE_SSE3*/
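
/*
 * Lane-level view of the SSE3 trick above (explanatory note): for one
 * complex pair x = (ar, ai), y = (cr, ci),
 *   tmp1 = x * moveldup(y)        = (ar*cr, ai*cr)
 *   tmp2 = swap(x) * movehdup(y)  = (ai*ci, ar*ci)
 *   addsub(tmp1, tmp2)            = (ar*cr - ai*ci, ai*cr + ar*ci) = x*y,
 * so each loop iteration accumulates two full complex products.
 */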

#ifdef LV_HAVE_SSE4_1

#include <smmintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_u_sse4_1(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

    unsigned int i = 0;
    const unsigned int qtr_points = num_points / 4;
    const unsigned int isodd = num_points & 3;

    __m128 xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, real0, real1, im0, im1;
    float *p_input, *p_taps;
    __m64 *p_result;

    p_result = (__m64*)result;
    p_input = (float*)input;
    p_taps = (float*)taps;

    // Sign bit of float lane 0, used below to negate the accumulated ai*bi sum.
    static const __m128i neg = { 0x0000000080000000LL, 0 };

    real0 = _mm_setzero_ps();
    real1 = _mm_setzero_ps();
    im0 = _mm_setzero_ps();
    im1 = _mm_setzero_ps();

    for(; i < qtr_points; ++i) {
        xmm0 = _mm_loadu_ps(p_input);
        xmm1 = _mm_loadu_ps(p_taps);

        p_input += 4;
        p_taps += 4;

        xmm2 = _mm_loadu_ps(p_input);
        xmm3 = _mm_loadu_ps(p_taps);

        p_input += 4;
        p_taps += 4;

        xmm4 = _mm_unpackhi_ps(xmm0, xmm2);
        xmm5 = _mm_unpackhi_ps(xmm1, xmm3);
        xmm0 = _mm_unpacklo_ps(xmm0, xmm2);
        xmm2 = _mm_unpacklo_ps(xmm1, xmm3);

        //imaginary vector from input
        xmm1 = _mm_unpackhi_ps(xmm0, xmm4);
        //real vector from input
        xmm3 = _mm_unpacklo_ps(xmm0, xmm4);
        //imaginary vector from taps
        xmm0 = _mm_unpackhi_ps(xmm2, xmm5);
        //real vector from taps
        xmm2 = _mm_unpacklo_ps(xmm2, xmm5);

        xmm4 = _mm_dp_ps(xmm3, xmm2, 0xf1);
        xmm5 = _mm_dp_ps(xmm1, xmm0, 0xf1);

        xmm6 = _mm_dp_ps(xmm3, xmm0, 0xf2);
        xmm7 = _mm_dp_ps(xmm1, xmm2, 0xf2);

        real0 = _mm_add_ps(xmm4, real0);
        real1 = _mm_add_ps(xmm5, real1);
        im0 = _mm_add_ps(xmm6, im0);
        im1 = _mm_add_ps(xmm7, im1);
    }

    real1 = _mm_xor_ps(real1, bit128_p(&neg)->float_vec);

    im0 = _mm_add_ps(im0, im1);
    real0 = _mm_add_ps(real0, real1);

    im0 = _mm_add_ps(im0, real0);

    _mm_storel_pi(p_result, im0);

    for(i = num_points - isodd; i < num_points; i++) {
        *result += input[i] * taps[i];
    }
}

#endif /*LV_HAVE_SSE4_1*/
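
/*
 * What the dpps-based loop above accumulates (explanatory note): with the
 * deinterleaved vectors Ar, Ai (input) and Br, Bi (taps),
 *   Re(result) = dot(Ar, Br) - dot(Ai, Bi)   // real0, real1; sign via 'neg'
 *   Im(result) = dot(Ar, Bi) + dot(Ai, Br)   // im0, im1
 * The _mm_dp_ps masks 0xf1 and 0xf2 land each 4-wide dot product in lane 0
 * and lane 1 respectively, so a single _mm_storel_pi writes the complex
 * result.
 */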

#ifdef LV_HAVE_AVX

#include <immintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_u_avx(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

    unsigned int isodd = num_points & 3;
    unsigned int i = 0;
    lv_32fc_t dotProduct;
    memset(&dotProduct, 0x0, 2*sizeof(float));

    unsigned int number = 0;
    const unsigned int quarterPoints = num_points / 4;

    __m256 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

    const lv_32fc_t* a = input;
    const lv_32fc_t* b = taps;

    dotProdVal = _mm256_setzero_ps();

    for(; number < quarterPoints; number++) {
        x = _mm256_loadu_ps((float*)a); // Load a,b,e,f as ar,ai,br,bi,er,ei,fr,fi
        y = _mm256_loadu_ps((float*)b); // Load c,d,g,h as cr,ci,dr,di,gr,gi,hr,hi

        yl = _mm256_moveldup_ps(y); // Load yl with cr,cr,dr,dr,gr,gr,hr,hr
        yh = _mm256_movehdup_ps(y); // Load yh with ci,ci,di,di,gi,gi,hi,hi

        tmp1 = _mm256_mul_ps(x, yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr ...

        x = _mm256_shuffle_ps(x, x, 0xB1); // Re-arrange x to be ai,ar,bi,br,ei,er,fi,fr

        tmp2 = _mm256_mul_ps(x, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di ...

        z = _mm256_addsub_ps(tmp1, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di

        dotProdVal = _mm256_add_ps(dotProdVal, z); // Add the complex multiplication results together

        a += 4;
        b += 4;
    }

    __VOLK_ATTR_ALIGNED(32) lv_32fc_t dotProductVector[4];

    _mm256_storeu_ps((float*)dotProductVector, dotProdVal); // Store the results back into the dot product vector

    dotProduct += (dotProductVector[0] + dotProductVector[1] + dotProductVector[2] + dotProductVector[3]);

    for(i = num_points - isodd; i < num_points; i++) {
        dotProduct += input[i] * taps[i];
    }

    *result = dotProduct;
}

#endif /*LV_HAVE_AVX*/

#if LV_HAVE_AVX && LV_HAVE_FMA
#include <immintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_u_avx_fma(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

    unsigned int isodd = num_points & 3;
    unsigned int i = 0;
    lv_32fc_t dotProduct;
    memset(&dotProduct, 0x0, 2*sizeof(float));

    unsigned int number = 0;
    const unsigned int quarterPoints = num_points / 4;

    __m256 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

    const lv_32fc_t* a = input;
    const lv_32fc_t* b = taps;

    dotProdVal = _mm256_setzero_ps();

    for(; number < quarterPoints; number++) {

        x = _mm256_loadu_ps((float*)a); // Load a,b,e,f as ar,ai,br,bi,er,ei,fr,fi
        y = _mm256_loadu_ps((float*)b); // Load c,d,g,h as cr,ci,dr,di,gr,gi,hr,hi

        yl = _mm256_moveldup_ps(y); // Load yl with cr,cr,dr,dr,gr,gr,hr,hr
        yh = _mm256_movehdup_ps(y); // Load yh with ci,ci,di,di,gi,gi,hi,hi

        tmp1 = x;

        x = _mm256_shuffle_ps(x, x, 0xB1); // Re-arrange x to be ai,ar,bi,br,ei,er,fi,fr

        tmp2 = _mm256_mul_ps(x, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di ...

        z = _mm256_fmaddsub_ps(tmp1, yl, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di

        dotProdVal = _mm256_add_ps(dotProdVal, z); // Add the complex multiplication results together

        a += 4;
        b += 4;
    }

    __VOLK_ATTR_ALIGNED(32) lv_32fc_t dotProductVector[4];

    _mm256_storeu_ps((float*)dotProductVector, dotProdVal); // Store the results back into the dot product vector

    dotProduct += (dotProductVector[0] + dotProductVector[1] + dotProductVector[2] + dotProductVector[3]);

    for(i = num_points - isodd; i < num_points; i++) {
        dotProduct += input[i] * taps[i];
    }

    *result = dotProduct;
}

#endif /*LV_HAVE_AVX && LV_HAVE_FMA*/
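
/*
 * Note on the FMA variant above: _mm256_fmaddsub_ps(tmp1, yl, tmp2) fuses
 * the _mm256_mul_ps/_mm256_addsub_ps pair of the plain AVX kernel into one
 * instruction (even lanes tmp1*yl - tmp2, odd lanes tmp1*yl + tmp2) with a
 * single rounding, so its output can differ from the non-FMA kernel in the
 * last ulp.
 */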

#endif /*INCLUDED_volk_32fc_x2_dot_prod_32fc_u_H*/

#ifndef INCLUDED_volk_32fc_x2_dot_prod_32fc_a_H
#define INCLUDED_volk_32fc_x2_dot_prod_32fc_a_H

#include <volk/volk_common.h>
#include <volk/volk_complex.h>
#include <stdio.h>
#include <string.h>


#ifdef LV_HAVE_GENERIC


static inline void volk_32fc_x2_dot_prod_32fc_a_generic(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

    const unsigned int num_bytes = num_points * 8;

    float* res = (float*)result;
    float* in = (float*)input;
    float* tp = (float*)taps;
    unsigned int n_2_ccomplex_blocks = num_bytes >> 4;
    unsigned int isodd = num_points & 1;

    float sum0[2] = {0, 0};
    float sum1[2] = {0, 0};
    unsigned int i = 0;

    for(i = 0; i < n_2_ccomplex_blocks; ++i) {
        sum0[0] += in[0] * tp[0] - in[1] * tp[1];
        sum0[1] += in[0] * tp[1] + in[1] * tp[0];
        sum1[0] += in[2] * tp[2] - in[3] * tp[3];
        sum1[1] += in[2] * tp[3] + in[3] * tp[2];

        in += 4;
        tp += 4;
    }

    res[0] = sum0[0] + sum1[0];
    res[1] = sum0[1] + sum1[1];

    if(isodd) {
        *result += input[num_points - 1] * taps[num_points - 1];
    }
}

#endif /*LV_HAVE_GENERIC*/


#if LV_HAVE_SSE && LV_HAVE_64


static inline void volk_32fc_x2_dot_prod_32fc_a_sse_64(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

    const unsigned int num_bytes = num_points * 8;
    unsigned int isodd = num_points & 1;

    __VOLK_ASM
    (
        "# ccomplex_dotprod_generic (float* result, const float *input,\n\t"
        "# const float *taps, unsigned num_bytes)\n\t"
        "# float sum0 = 0;\n\t"
        "# float sum1 = 0;\n\t"
        "# float sum2 = 0;\n\t"
        "# float sum3 = 0;\n\t"
        "# do {\n\t"
        "# sum0 += input[0] * taps[0] - input[1] * taps[1];\n\t"
        "# sum1 += input[0] * taps[1] + input[1] * taps[0];\n\t"
        "# sum2 += input[2] * taps[2] - input[3] * taps[3];\n\t"
        "# sum3 += input[2] * taps[3] + input[3] * taps[2];\n\t"
        "# input += 4;\n\t"
        "# taps += 4; \n\t"
        "# } while (--n_2_ccomplex_blocks != 0);\n\t"
        "# result[0] = sum0 + sum2;\n\t"
        "# result[1] = sum1 + sum3;\n\t"
        "# TODO: prefetch and better scheduling\n\t"
        " xor %%r9, %%r9\n\t"
        " xor %%r10, %%r10\n\t"
        " movq %%rcx, %%rax\n\t"
        " movq %%rcx, %%r8\n\t"
        " movq %[rsi], %%r9\n\t"
        " movq %[rdx], %%r10\n\t"
        " xorps %%xmm6, %%xmm6 # zero accumulators\n\t"
        " movaps 0(%%r9), %%xmm0\n\t"
        " xorps %%xmm7, %%xmm7 # zero accumulators\n\t"
        " movaps 0(%%r10), %%xmm2\n\t"
        " shr $5, %%rax # rax = n_2_ccomplex_blocks / 2\n\t"
        " shr $4, %%r8\n\t"
        " jmp .%=L1_test\n\t"
        " # 4 taps / loop\n\t"
        " # something like ?? cycles / loop\n\t"
        ".%=Loop1: \n\t"
        "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
        "# movaps (%%r9), %%xmmA\n\t"
        "# movaps (%%r10), %%xmmB\n\t"
        "# movaps %%xmmA, %%xmmZ\n\t"
        "# shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
        "# mulps %%xmmB, %%xmmA\n\t"
        "# mulps %%xmmZ, %%xmmB\n\t"
        "# # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
        "# xorps %%xmmPN, %%xmmA\n\t"
        "# movaps %%xmmA, %%xmmZ\n\t"
        "# unpcklps %%xmmB, %%xmmA\n\t"
        "# unpckhps %%xmmB, %%xmmZ\n\t"
        "# movaps %%xmmZ, %%xmmY\n\t"
        "# shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
        "# shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
        "# addps %%xmmZ, %%xmmA\n\t"
        "# addps %%xmmA, %%xmmC\n\t"
        "# A=xmm0, B=xmm2, Z=xmm4\n\t"
        "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
        " movaps 16(%%r9), %%xmm1\n\t"
        " movaps %%xmm0, %%xmm4\n\t"
        " mulps %%xmm2, %%xmm0\n\t"
        " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
        " movaps 16(%%r10), %%xmm3\n\t"
        " movaps %%xmm1, %%xmm5\n\t"
        " addps %%xmm0, %%xmm6\n\t"
        " mulps %%xmm3, %%xmm1\n\t"
        " shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
        " addps %%xmm1, %%xmm6\n\t"
        " mulps %%xmm4, %%xmm2\n\t"
        " movaps 32(%%r9), %%xmm0\n\t"
        " addps %%xmm2, %%xmm7\n\t"
        " mulps %%xmm5, %%xmm3\n\t"
        " add $32, %%r9\n\t"
        " movaps 32(%%r10), %%xmm2\n\t"
        " addps %%xmm3, %%xmm7\n\t"
        " add $32, %%r10\n\t"
        ".%=L1_test:\n\t"
        " dec %%rax\n\t"
        " jge .%=Loop1\n\t"
        " # We've handled the bulk of multiplies up to here.\n\t"
        " # Let's see if original n_2_ccomplex_blocks was odd.\n\t"
        " # If so, we've got 2 more taps to do.\n\t"
        " and $1, %%r8\n\t"
        " je .%=Leven\n\t"
        " # The count was odd, do 2 more taps.\n\t"
        " # Note that we've already got mm0/mm2 preloaded\n\t"
        " # from the main loop.\n\t"
        " movaps %%xmm0, %%xmm4\n\t"
        " mulps %%xmm2, %%xmm0\n\t"
        " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
        " addps %%xmm0, %%xmm6\n\t"
        " mulps %%xmm4, %%xmm2\n\t"
        " addps %%xmm2, %%xmm7\n\t"
        ".%=Leven:\n\t"
        " # neg inversor\n\t"
        " xorps %%xmm1, %%xmm1\n\t"
        " mov $0x80000000, %%r9\n\t"
        " movd %%r9, %%xmm1\n\t"
        " shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
        " # pfpnacc\n\t"
        " xorps %%xmm1, %%xmm6\n\t"
        " movaps %%xmm6, %%xmm2\n\t"
        " unpcklps %%xmm7, %%xmm6\n\t"
        " unpckhps %%xmm7, %%xmm2\n\t"
        " movaps %%xmm2, %%xmm3\n\t"
        " shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
        " shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
        " addps %%xmm2, %%xmm6\n\t"
        " # xmm6 = r1 i2 r3 i4\n\t"
        " movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
        " addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
        " movlps %%xmm6, (%[rdi]) # store low 2x32 bits (complex) to memory\n\t"
        :
        : [rsi] "r" (input), [rdx] "r" (taps), "c" (num_bytes), [rdi] "r" (result)
        : "rax", "r8", "r9", "r10"
    );


    if(isodd) {
        *result += input[num_points - 1] * taps[num_points - 1];
    }

    return;
}

#endif /*LV_HAVE_SSE && LV_HAVE_64*/

#if LV_HAVE_SSE && LV_HAVE_32

static inline void volk_32fc_x2_dot_prod_32fc_a_sse_32(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

    volk_32fc_x2_dot_prod_32fc_a_generic(result, input, taps, num_points);

#if 0
    const unsigned int num_bytes = num_points * 8;
    unsigned int isodd = num_points & 1;

    __VOLK_ASM __VOLK_VOLATILE
    (
        " #pushl %%ebp\n\t"
        " #movl %%esp, %%ebp\n\t"
        " movl 12(%%ebp), %%eax # input\n\t"
        " movl 16(%%ebp), %%edx # taps\n\t"
        " movl 20(%%ebp), %%ecx # n_bytes\n\t"
        " xorps %%xmm6, %%xmm6 # zero accumulators\n\t"
        " movaps 0(%%eax), %%xmm0\n\t"
        " xorps %%xmm7, %%xmm7 # zero accumulators\n\t"
        " movaps 0(%%edx), %%xmm2\n\t"
        " shrl $5, %%ecx # ecx = n_2_ccomplex_blocks / 2\n\t"
        " jmp .%=L1_test\n\t"
        " # 4 taps / loop\n\t"
        " # something like ?? cycles / loop\n\t"
        ".%=Loop1: \n\t"
        "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
        "# movaps (%%eax), %%xmmA\n\t"
        "# movaps (%%edx), %%xmmB\n\t"
        "# movaps %%xmmA, %%xmmZ\n\t"
        "# shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
        "# mulps %%xmmB, %%xmmA\n\t"
        "# mulps %%xmmZ, %%xmmB\n\t"
        "# # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
        "# xorps %%xmmPN, %%xmmA\n\t"
        "# movaps %%xmmA, %%xmmZ\n\t"
        "# unpcklps %%xmmB, %%xmmA\n\t"
        "# unpckhps %%xmmB, %%xmmZ\n\t"
        "# movaps %%xmmZ, %%xmmY\n\t"
        "# shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
        "# shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
        "# addps %%xmmZ, %%xmmA\n\t"
        "# addps %%xmmA, %%xmmC\n\t"
        "# A=xmm0, B=xmm2, Z=xmm4\n\t"
        "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
        " movaps 16(%%eax), %%xmm1\n\t"
        " movaps %%xmm0, %%xmm4\n\t"
        " mulps %%xmm2, %%xmm0\n\t"
        " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
        " movaps 16(%%edx), %%xmm3\n\t"
        " movaps %%xmm1, %%xmm5\n\t"
        " addps %%xmm0, %%xmm6\n\t"
        " mulps %%xmm3, %%xmm1\n\t"
        " shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
        " addps %%xmm1, %%xmm6\n\t"
        " mulps %%xmm4, %%xmm2\n\t"
        " movaps 32(%%eax), %%xmm0\n\t"
        " addps %%xmm2, %%xmm7\n\t"
        " mulps %%xmm5, %%xmm3\n\t"
        " addl $32, %%eax\n\t"
        " movaps 32(%%edx), %%xmm2\n\t"
        " addps %%xmm3, %%xmm7\n\t"
        " addl $32, %%edx\n\t"
        ".%=L1_test:\n\t"
        " decl %%ecx\n\t"
        " jge .%=Loop1\n\t"
        " # We've handled the bulk of multiplies up to here.\n\t"
        " # Let's see if original n_2_ccomplex_blocks was odd.\n\t"
        " # If so, we've got 2 more taps to do.\n\t"
        " movl 20(%%ebp), %%ecx # n_2_ccomplex_blocks\n\t"
        " shrl $4, %%ecx\n\t"
        " andl $1, %%ecx\n\t"
        " je .%=Leven\n\t"
        " # The count was odd, do 2 more taps.\n\t"
        " # Note that we've already got mm0/mm2 preloaded\n\t"
        " # from the main loop.\n\t"
        " movaps %%xmm0, %%xmm4\n\t"
        " mulps %%xmm2, %%xmm0\n\t"
        " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
        " addps %%xmm0, %%xmm6\n\t"
        " mulps %%xmm4, %%xmm2\n\t"
        " addps %%xmm2, %%xmm7\n\t"
        ".%=Leven:\n\t"
        " # neg inversor\n\t"
        " movl 8(%%ebp), %%eax \n\t"
        " xorps %%xmm1, %%xmm1\n\t"
        " movl $0x80000000, (%%eax)\n\t"
        " movss (%%eax), %%xmm1\n\t"
        " shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
        " # pfpnacc\n\t"
        " xorps %%xmm1, %%xmm6\n\t"
        " movaps %%xmm6, %%xmm2\n\t"
        " unpcklps %%xmm7, %%xmm6\n\t"
        " unpckhps %%xmm7, %%xmm2\n\t"
        " movaps %%xmm2, %%xmm3\n\t"
        " shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
        " shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
        " addps %%xmm2, %%xmm6\n\t"
        " # xmm6 = r1 i2 r3 i4\n\t"
        " #movl 8(%%ebp), %%eax # @result\n\t"
        " movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
        " addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
        " movlps %%xmm6, (%%eax) # store low 2x32 bits (complex) to memory\n\t"
        " #popl %%ebp\n\t"
        :
        :
        : "eax", "ecx", "edx"
    );


    int getem = num_bytes % 16;

    if(isodd) {
        *result += (input[num_points - 1] * taps[num_points - 1]);
    }

    return;
#endif
}

#endif /*LV_HAVE_SSE && LV_HAVE_32*/

#ifdef LV_HAVE_SSE3

#include <pmmintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_a_sse3(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

    const unsigned int num_bytes = num_points * 8;
    unsigned int isodd = num_points & 1;

    lv_32fc_t dotProduct;
    memset(&dotProduct, 0x0, 2*sizeof(float));

    unsigned int number = 0;
    const unsigned int halfPoints = num_bytes >> 4;

    __m128 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

    const lv_32fc_t* a = input;
    const lv_32fc_t* b = taps;

    dotProdVal = _mm_setzero_ps();

    for(; number < halfPoints; number++) {

        x = _mm_load_ps((float*)a); // Load the ar + ai, br + bi as ar,ai,br,bi
        y = _mm_load_ps((float*)b); // Load the cr + ci, dr + di as cr,ci,dr,di

        yl = _mm_moveldup_ps(y); // Load yl with cr,cr,dr,dr
        yh = _mm_movehdup_ps(y); // Load yh with ci,ci,di,di

        tmp1 = _mm_mul_ps(x, yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr

        x = _mm_shuffle_ps(x, x, 0xB1); // Re-arrange x to be ai,ar,bi,br

        tmp2 = _mm_mul_ps(x, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di

        z = _mm_addsub_ps(tmp1, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di

        dotProdVal = _mm_add_ps(dotProdVal, z); // Add the complex multiplication results together

        a += 2;
        b += 2;
    }

    __VOLK_ATTR_ALIGNED(16) lv_32fc_t dotProductVector[2];

    _mm_store_ps((float*)dotProductVector, dotProdVal); // Store the results back into the dot product vector

    dotProduct += (dotProductVector[0] + dotProductVector[1]);

    if(isodd) {
        dotProduct += input[num_points - 1] * taps[num_points - 1];
    }

    *result = dotProduct;
}

#endif /*LV_HAVE_SSE3*/


#ifdef LV_HAVE_SSE4_1

#include <smmintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_a_sse4_1(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

    unsigned int i = 0;
    const unsigned int qtr_points = num_points / 4;
    const unsigned int isodd = num_points & 3;

    __m128 xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, real0, real1, im0, im1;
    float *p_input, *p_taps;
    __m64 *p_result;

    // Sign bit of float lane 0, used below to negate the accumulated ai*bi sum.
    static const __m128i neg = { 0x0000000080000000LL, 0 };

    p_result = (__m64*)result;
    p_input = (float*)input;
    p_taps = (float*)taps;

    real0 = _mm_setzero_ps();
    real1 = _mm_setzero_ps();
    im0 = _mm_setzero_ps();
    im1 = _mm_setzero_ps();

    for(; i < qtr_points; ++i) {
        xmm0 = _mm_load_ps(p_input);
        xmm1 = _mm_load_ps(p_taps);

        p_input += 4;
        p_taps += 4;

        xmm2 = _mm_load_ps(p_input);
        xmm3 = _mm_load_ps(p_taps);

        p_input += 4;
        p_taps += 4;

        xmm4 = _mm_unpackhi_ps(xmm0, xmm2);
        xmm5 = _mm_unpackhi_ps(xmm1, xmm3);
        xmm0 = _mm_unpacklo_ps(xmm0, xmm2);
        xmm2 = _mm_unpacklo_ps(xmm1, xmm3);

        //imaginary vector from input
        xmm1 = _mm_unpackhi_ps(xmm0, xmm4);
        //real vector from input
        xmm3 = _mm_unpacklo_ps(xmm0, xmm4);
        //imaginary vector from taps
        xmm0 = _mm_unpackhi_ps(xmm2, xmm5);
        //real vector from taps
        xmm2 = _mm_unpacklo_ps(xmm2, xmm5);

        xmm4 = _mm_dp_ps(xmm3, xmm2, 0xf1);
        xmm5 = _mm_dp_ps(xmm1, xmm0, 0xf1);

        xmm6 = _mm_dp_ps(xmm3, xmm0, 0xf2);
        xmm7 = _mm_dp_ps(xmm1, xmm2, 0xf2);

        real0 = _mm_add_ps(xmm4, real0);
        real1 = _mm_add_ps(xmm5, real1);
        im0 = _mm_add_ps(xmm6, im0);
        im1 = _mm_add_ps(xmm7, im1);
    }

    real1 = _mm_xor_ps(real1, bit128_p(&neg)->float_vec);

    im0 = _mm_add_ps(im0, im1);
    real0 = _mm_add_ps(real0, real1);

    im0 = _mm_add_ps(im0, real0);

    _mm_storel_pi(p_result, im0);

    for(i = num_points - isodd; i < num_points; i++) {
        *result += input[i] * taps[i];
    }
}

#endif /*LV_HAVE_SSE4_1*/

#ifdef LV_HAVE_NEON
#include <arm_neon.h>

static inline void volk_32fc_x2_dot_prod_32fc_neon(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

    unsigned int quarter_points = num_points / 4;
    unsigned int number;

    lv_32fc_t* a_ptr = (lv_32fc_t*)taps;
    lv_32fc_t* b_ptr = (lv_32fc_t*)input;
    // in the 2-vector structs below, val[0] holds the real parts
    // and val[1] holds the imaginary parts
    float32x4x2_t a_val, b_val, c_val, accumulator;
    float32x4x2_t tmp_real, tmp_imag;
    accumulator.val[0] = vdupq_n_f32(0);
    accumulator.val[1] = vdupq_n_f32(0);

    for(number = 0; number < quarter_points; ++number) {
        a_val = vld2q_f32((float*)a_ptr); // a0r|a1r|a2r|a3r || a0i|a1i|a2i|a3i
        b_val = vld2q_f32((float*)b_ptr); // b0r|b1r|b2r|b3r || b0i|b1i|b2i|b3i
        __VOLK_PREFETCH(a_ptr + 8);
        __VOLK_PREFETCH(b_ptr + 8);

        // multiply the real*real and imag*imag to get real result
        // a0r*b0r|a1r*b1r|a2r*b2r|a3r*b3r
        tmp_real.val[0] = vmulq_f32(a_val.val[0], b_val.val[0]);
        // a0i*b0i|a1i*b1i|a2i*b2i|a3i*b3i
        tmp_real.val[1] = vmulq_f32(a_val.val[1], b_val.val[1]);

        // Multiply cross terms to get the imaginary result
        // a0r*b0i|a1r*b1i|a2r*b2i|a3r*b3i
        tmp_imag.val[0] = vmulq_f32(a_val.val[0], b_val.val[1]);
        // a0i*b0r|a1i*b1r|a2i*b2r|a3i*b3r
        tmp_imag.val[1] = vmulq_f32(a_val.val[1], b_val.val[0]);

        c_val.val[0] = vsubq_f32(tmp_real.val[0], tmp_real.val[1]);
        c_val.val[1] = vaddq_f32(tmp_imag.val[0], tmp_imag.val[1]);

        accumulator.val[0] = vaddq_f32(accumulator.val[0], c_val.val[0]);
        accumulator.val[1] = vaddq_f32(accumulator.val[1], c_val.val[1]);

        a_ptr += 4;
        b_ptr += 4;
    }
    lv_32fc_t accum_result[4];
    vst2q_f32((float*)accum_result, accumulator);
    *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

    // tail case
    for(number = quarter_points * 4; number < num_points; ++number) {
        *result += (*a_ptr++) * (*b_ptr++);
    }
}
#endif /*LV_HAVE_NEON*/
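
/*
 * Layout note for the NEON kernels: vld2q_f32 deinterleaves memory
 * (a0r, a0i, a1r, a1i, ...) into val[0] = a0r|a1r|a2r|a3r and
 * val[1] = a0i|a1i|a2i|a3i, so each complex product reduces to per-lane
 * real arithmetic: re = ar*br - ai*bi, im = ar*bi + ai*br.
 */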

#ifdef LV_HAVE_NEON
#include <arm_neon.h>
static inline void volk_32fc_x2_dot_prod_32fc_neon_opttests(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

    unsigned int quarter_points = num_points / 4;
    unsigned int number;

    lv_32fc_t* a_ptr = (lv_32fc_t*)taps;
    lv_32fc_t* b_ptr = (lv_32fc_t*)input;
    // in the 2-vector structs below, val[0] holds the real parts
    // and val[1] holds the imaginary parts
    float32x4x2_t a_val, b_val, accumulator;
    float32x4x2_t tmp_imag;
    accumulator.val[0] = vdupq_n_f32(0);
    accumulator.val[1] = vdupq_n_f32(0);

    for(number = 0; number < quarter_points; ++number) {
        a_val = vld2q_f32((float*)a_ptr); // a0r|a1r|a2r|a3r || a0i|a1i|a2i|a3i
        b_val = vld2q_f32((float*)b_ptr); // b0r|b1r|b2r|b3r || b0i|b1i|b2i|b3i
        __VOLK_PREFETCH(a_ptr + 8);
        __VOLK_PREFETCH(b_ptr + 8);

        // do the first multiply
        tmp_imag.val[1] = vmulq_f32(a_val.val[1], b_val.val[0]);
        tmp_imag.val[0] = vmulq_f32(a_val.val[0], b_val.val[0]);

        // use multiply accumulate/subtract to get result
        tmp_imag.val[1] = vmlaq_f32(tmp_imag.val[1], a_val.val[0], b_val.val[1]);
        tmp_imag.val[0] = vmlsq_f32(tmp_imag.val[0], a_val.val[1], b_val.val[1]);

        accumulator.val[0] = vaddq_f32(accumulator.val[0], tmp_imag.val[0]);
        accumulator.val[1] = vaddq_f32(accumulator.val[1], tmp_imag.val[1]);

        // increment pointers
        a_ptr += 4;
        b_ptr += 4;
    }
    lv_32fc_t accum_result[4];
    vst2q_f32((float*)accum_result, accumulator);
    *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

    // tail case
    for(number = quarter_points * 4; number < num_points; ++number) {
        *result += (*a_ptr++) * (*b_ptr++);
    }
}
#endif /*LV_HAVE_NEON*/
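
/*
 * Difference from volk_32fc_x2_dot_prod_32fc_neon (explanatory note): the
 * separate multiply and add/subtract steps are folded into
 * vmlaq_f32/vmlsq_f32 multiply-accumulates, removing two explicit
 * vaddq_f32/vsubq_f32 instructions per iteration.
 */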

#ifdef LV_HAVE_NEON
static inline void volk_32fc_x2_dot_prod_32fc_neon_optfma(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

    unsigned int quarter_points = num_points / 4;
    unsigned int number;

    lv_32fc_t* a_ptr = (lv_32fc_t*)taps;
    lv_32fc_t* b_ptr = (lv_32fc_t*)input;
    // in the 2-vector structs below, val[0] holds the real parts
    // and val[1] holds the imaginary parts
    float32x4x2_t a_val, b_val, accumulator1, accumulator2;
    accumulator1.val[0] = vdupq_n_f32(0);
    accumulator1.val[1] = vdupq_n_f32(0);
    accumulator2.val[0] = vdupq_n_f32(0);
    accumulator2.val[1] = vdupq_n_f32(0);

    for(number = 0; number < quarter_points; ++number) {
        a_val = vld2q_f32((float*)a_ptr); // a0r|a1r|a2r|a3r || a0i|a1i|a2i|a3i
        b_val = vld2q_f32((float*)b_ptr); // b0r|b1r|b2r|b3r || b0i|b1i|b2i|b3i
        __VOLK_PREFETCH(a_ptr + 8);
        __VOLK_PREFETCH(b_ptr + 8);

        // use 2 accumulators to remove inter-instruction data dependencies
        accumulator1.val[0] = vmlaq_f32(accumulator1.val[0], a_val.val[0], b_val.val[0]);
        accumulator1.val[1] = vmlaq_f32(accumulator1.val[1], a_val.val[0], b_val.val[1]);
        accumulator2.val[0] = vmlsq_f32(accumulator2.val[0], a_val.val[1], b_val.val[1]);
        accumulator2.val[1] = vmlaq_f32(accumulator2.val[1], a_val.val[1], b_val.val[0]);
        // increment pointers
        a_ptr += 4;
        b_ptr += 4;
    }
    accumulator1.val[0] = vaddq_f32(accumulator1.val[0], accumulator2.val[0]);
    accumulator1.val[1] = vaddq_f32(accumulator1.val[1], accumulator2.val[1]);
    lv_32fc_t accum_result[4];
    vst2q_f32((float*)accum_result, accumulator1);
    *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

    // tail case
    for(number = quarter_points * 4; number < num_points; ++number) {
        *result += (*a_ptr++) * (*b_ptr++);
    }
}
#endif /*LV_HAVE_NEON*/
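
/*
 * Why two accumulators (explanatory note): each vmlaq_f32/vmlsq_f32 must
 * wait for the previous write to the same accumulator register, so a single
 * chain is bound by the multiply-accumulate latency. accumulator1 and
 * accumulator2 form two independent dependency chains the pipeline can
 * overlap; they are summed once after the loop.
 */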

#ifdef LV_HAVE_NEON
static inline void volk_32fc_x2_dot_prod_32fc_neon_optfmaunroll(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {
// NOTE: GCC does a poor job with this kernel, but the equivalent ASM code is very fast

    unsigned int eighth_points = num_points / 8;
    unsigned int number;

    lv_32fc_t* a_ptr = (lv_32fc_t*)taps;
    lv_32fc_t* b_ptr = (lv_32fc_t*)input;
    // vld4q below deinterleaves with stride 4, so val[0]/val[1] hold the
    // real/imag parts of even-indexed points and val[2]/val[3] those of
    // odd-indexed points
    float32x4x4_t a_val, b_val, accumulator1, accumulator2;
    float32x4x2_t reduced_accumulator;
    accumulator1.val[0] = vdupq_n_f32(0);
    accumulator1.val[1] = vdupq_n_f32(0);
    accumulator1.val[2] = vdupq_n_f32(0);
    accumulator1.val[3] = vdupq_n_f32(0);
    accumulator2.val[0] = vdupq_n_f32(0);
    accumulator2.val[1] = vdupq_n_f32(0);
    accumulator2.val[2] = vdupq_n_f32(0);
    accumulator2.val[3] = vdupq_n_f32(0);

    // 8 input regs, 8 accumulators -> 16/16 neon regs are used
    for(number = 0; number < eighth_points; ++number) {
        a_val = vld4q_f32((float*)a_ptr); // a0r|a2r|a4r|a6r || a0i|a2i|a4i|a6i || a1r|a3r|a5r|a7r || a1i|a3i|a5i|a7i
        b_val = vld4q_f32((float*)b_ptr); // same deinterleaved layout for b
        __VOLK_PREFETCH(a_ptr + 8);
        __VOLK_PREFETCH(b_ptr + 8);

        // use 2 accumulators to remove inter-instruction data dependencies
        accumulator1.val[0] = vmlaq_f32(accumulator1.val[0], a_val.val[0], b_val.val[0]);
        accumulator1.val[1] = vmlaq_f32(accumulator1.val[1], a_val.val[0], b_val.val[1]);

        accumulator1.val[2] = vmlaq_f32(accumulator1.val[2], a_val.val[2], b_val.val[2]);
        accumulator1.val[3] = vmlaq_f32(accumulator1.val[3], a_val.val[2], b_val.val[3]);

        accumulator2.val[0] = vmlsq_f32(accumulator2.val[0], a_val.val[1], b_val.val[1]);
        accumulator2.val[1] = vmlaq_f32(accumulator2.val[1], a_val.val[1], b_val.val[0]);

        accumulator2.val[2] = vmlsq_f32(accumulator2.val[2], a_val.val[3], b_val.val[3]);
        accumulator2.val[3] = vmlaq_f32(accumulator2.val[3], a_val.val[3], b_val.val[2]);
        // increment pointers
        a_ptr += 8;
        b_ptr += 8;
    }
    // reduce 8 accumulator lanes down to 2 (1 real and 1 imag)
    accumulator1.val[0] = vaddq_f32(accumulator1.val[0], accumulator1.val[2]);
    accumulator1.val[1] = vaddq_f32(accumulator1.val[1], accumulator1.val[3]);
    accumulator2.val[0] = vaddq_f32(accumulator2.val[0], accumulator2.val[2]);
    accumulator2.val[1] = vaddq_f32(accumulator2.val[1], accumulator2.val[3]);
    reduced_accumulator.val[0] = vaddq_f32(accumulator1.val[0], accumulator2.val[0]);
    reduced_accumulator.val[1] = vaddq_f32(accumulator1.val[1], accumulator2.val[1]);
    // now reduce accumulators to scalars
    lv_32fc_t accum_result[4];
    vst2q_f32((float*)accum_result, reduced_accumulator);
    *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

    // tail case
    for(number = eighth_points * 8; number < num_points; ++number) {
        *result += (*a_ptr++) * (*b_ptr++);
    }
}
#endif /*LV_HAVE_NEON*/


#ifdef LV_HAVE_AVX

#include <immintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_a_avx(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

    unsigned int isodd = num_points & 3;
    unsigned int i = 0;
    lv_32fc_t dotProduct;
    memset(&dotProduct, 0x0, 2*sizeof(float));

    unsigned int number = 0;
    const unsigned int quarterPoints = num_points / 4;

    __m256 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

    const lv_32fc_t* a = input;
    const lv_32fc_t* b = taps;

    dotProdVal = _mm256_setzero_ps();

    for(; number < quarterPoints; number++) {

        x = _mm256_load_ps((float*)a); // Load a,b,e,f as ar,ai,br,bi,er,ei,fr,fi
        y = _mm256_load_ps((float*)b); // Load c,d,g,h as cr,ci,dr,di,gr,gi,hr,hi

        yl = _mm256_moveldup_ps(y); // Load yl with cr,cr,dr,dr,gr,gr,hr,hr
        yh = _mm256_movehdup_ps(y); // Load yh with ci,ci,di,di,gi,gi,hi,hi

        tmp1 = _mm256_mul_ps(x, yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr ...

        x = _mm256_shuffle_ps(x, x, 0xB1); // Re-arrange x to be ai,ar,bi,br,ei,er,fi,fr

        tmp2 = _mm256_mul_ps(x, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di ...

        z = _mm256_addsub_ps(tmp1, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di

        dotProdVal = _mm256_add_ps(dotProdVal, z); // Add the complex multiplication results together

        a += 4;
        b += 4;
    }

    __VOLK_ATTR_ALIGNED(32) lv_32fc_t dotProductVector[4];

    _mm256_store_ps((float*)dotProductVector, dotProdVal); // Store the results back into the dot product vector

    dotProduct += (dotProductVector[0] + dotProductVector[1] + dotProductVector[2] + dotProductVector[3]);

    for(i = num_points - isodd; i < num_points; i++) {
        dotProduct += input[i] * taps[i];
    }

    *result = dotProduct;
}

#endif /*LV_HAVE_AVX*/

#if LV_HAVE_AVX && LV_HAVE_FMA
#include <immintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_a_avx_fma(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

    unsigned int isodd = num_points & 3;
    unsigned int i = 0;
    lv_32fc_t dotProduct;
    memset(&dotProduct, 0x0, 2*sizeof(float));

    unsigned int number = 0;
    const unsigned int quarterPoints = num_points / 4;

    __m256 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

    const lv_32fc_t* a = input;
    const lv_32fc_t* b = taps;

    dotProdVal = _mm256_setzero_ps();

    for(; number < quarterPoints; number++) {

        x = _mm256_load_ps((float*)a); // Load a,b,e,f as ar,ai,br,bi,er,ei,fr,fi
        y = _mm256_load_ps((float*)b); // Load c,d,g,h as cr,ci,dr,di,gr,gi,hr,hi

        yl = _mm256_moveldup_ps(y); // Load yl with cr,cr,dr,dr,gr,gr,hr,hr
        yh = _mm256_movehdup_ps(y); // Load yh with ci,ci,di,di,gi,gi,hi,hi

        tmp1 = x;

        x = _mm256_shuffle_ps(x, x, 0xB1); // Re-arrange x to be ai,ar,bi,br,ei,er,fi,fr

        tmp2 = _mm256_mul_ps(x, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di ...

        z = _mm256_fmaddsub_ps(tmp1, yl, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di

        dotProdVal = _mm256_add_ps(dotProdVal, z); // Add the complex multiplication results together

        a += 4;
        b += 4;
    }

    __VOLK_ATTR_ALIGNED(32) lv_32fc_t dotProductVector[4];

    _mm256_store_ps((float*)dotProductVector, dotProdVal); // Store the results back into the dot product vector

    dotProduct += (dotProductVector[0] + dotProductVector[1] + dotProductVector[2] + dotProductVector[3]);

    for(i = num_points - isodd; i < num_points; i++) {
        dotProduct += input[i] * taps[i];
    }

    *result = dotProduct;
}

#endif /*LV_HAVE_AVX && LV_HAVE_FMA*/


#endif /*INCLUDED_volk_32fc_x2_dot_prod_32fc_a_H*/