Vector Optimized Library of Kernels 2.0
Architecture-tuned implementations of math kernels
volk_32fc_x2_conjugate_dot_prod_32fc.h
/* -*- c++ -*- */
/*
 * Copyright 2012, 2014 Free Software Foundation, Inc.
 *
 * This file is part of GNU Radio
 *
 * GNU Radio is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3, or (at your option)
 * any later version.
 *
 * GNU Radio is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU Radio; see the file COPYING. If not, write to
 * the Free Software Foundation, Inc., 51 Franklin Street,
 * Boston, MA 02110-1301, USA.
 */

#ifndef INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_u_H
#define INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_u_H


#include <volk/volk_complex.h>

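/* This file implements the conjugate dot product kernel:
 *
 *   result = Σ_{n=0}^{num_points-1} input[n] · conj(taps[n])
 *
 * A minimal usage sketch of the dispatched API (buffer setup is illustrative;
 * assumes <volk/volk.h> for volk_malloc()/volk_free()/volk_get_alignment()):
 *
 * \code
 *   unsigned int N = 1024;
 *   lv_32fc_t* input = (lv_32fc_t*)volk_malloc(N * sizeof(lv_32fc_t), volk_get_alignment());
 *   lv_32fc_t* taps  = (lv_32fc_t*)volk_malloc(N * sizeof(lv_32fc_t), volk_get_alignment());
 *   lv_32fc_t result;
 *   // ... fill input and taps ...
 *   volk_32fc_x2_conjugate_dot_prod_32fc(&result, input, taps, N);
 *   volk_free(taps);
 *   volk_free(input);
 * \endcode
 */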
#ifdef LV_HAVE_GENERIC

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_generic(lv_32fc_t* result,
    const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points)
{
    // Each lv_32fc_t is two floats (8 bytes).
    const unsigned int num_bytes = num_points * 8;

    float* res = (float*)result;
    float* in = (float*)input;
    float* tp = (float*)taps;
    // Number of blocks of two complex samples, and whether one sample is left over.
    unsigned int n_2_ccomplex_blocks = num_bytes >> 4;
    unsigned int isodd = (num_bytes >> 3) & 1;

    // Two independent {real, imag} accumulators shorten the dependency chain.
    float sum0[2] = { 0, 0 };
    float sum1[2] = { 0, 0 };
    unsigned int i = 0;

    for (i = 0; i < n_2_ccomplex_blocks; ++i) {
        // in[n] * conj(tp[n]): real = ar⋅br + ai⋅bi, imag = ai⋅br − ar⋅bi
        sum0[0] += in[0] * tp[0] + in[1] * tp[1];
        sum0[1] += (-in[0] * tp[1]) + in[1] * tp[0];
        sum1[0] += in[2] * tp[2] + in[3] * tp[3];
        sum1[1] += (-in[2] * tp[3]) + in[3] * tp[2];

        in += 4;
        tp += 4;
    }

    res[0] = sum0[0] + sum1[0];
    res[1] = sum0[1] + sum1[1];

    // Handle the last sample if num_points is odd.
    for (i = 0; i < isodd; ++i) {
        *result += input[(num_bytes >> 3) - 1] * lv_conj(taps[(num_bytes >> 3) - 1]);
    }
}

#endif /*LV_HAVE_GENERIC*/
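/* For reference, the unrolled float arithmetic above is a behaviorally
 * equivalent, hand-optimized form of this straightforward scalar loop
 * (a sketch using the lv_* helpers; `conjugate_dot_prod_reference` is a
 * hypothetical name, not part of the VOLK API):
 *
 * \code
 *   static inline void conjugate_dot_prod_reference(lv_32fc_t* result,
 *       const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points)
 *   {
 *       lv_32fc_t sum = lv_cmake(0.f, 0.f);
 *       for (unsigned int n = 0; n < num_points; ++n)
 *           sum += input[n] * lv_conj(taps[n]);
 *       *result = sum;
 *   }
 * \endcode
 */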

#ifdef LV_HAVE_AVX

#include <immintrin.h>

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_u_avx(lv_32fc_t* result,
    const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points)
{
    // Partial sums for indices i, i+1, i+2 and i+3.
    __m256 sum_a_mult_b_real = _mm256_setzero_ps();
    __m256 sum_a_mult_b_imag = _mm256_setzero_ps();

    for (long unsigned i = 0; i < (num_points & ~3u); i += 4) {
        /* Four complex elements are processed at a time.
         * (ar + j⋅ai)*conj(br + j⋅bi) =
         * ar⋅br + ai⋅bi + j⋅(ai⋅br − ar⋅bi)
         */

        /* Load input and taps, split and duplicate real and imaginary parts of taps.
         * a:      | ai,i+3 | ar,i+3 | … | ai,i+1 | ar,i+1 | ai,i+0 | ar,i+0 |
         * b:      | bi,i+3 | br,i+3 | … | bi,i+1 | br,i+1 | bi,i+0 | br,i+0 |
         * b_real: | br,i+3 | br,i+3 | … | br,i+1 | br,i+1 | br,i+0 | br,i+0 |
         * b_imag: | bi,i+3 | bi,i+3 | … | bi,i+1 | bi,i+1 | bi,i+0 | bi,i+0 |
         */
        __m256 a = _mm256_loadu_ps((const float*)&input[i]);
        __m256 b = _mm256_loadu_ps((const float*)&taps[i]);
        __m256 b_real = _mm256_moveldup_ps(b);
        __m256 b_imag = _mm256_movehdup_ps(b);

        // Add | ai⋅br,i+3 | ar⋅br,i+3 | … | ai⋅br,i+0 | ar⋅br,i+0 | to partial sum.
        sum_a_mult_b_real = _mm256_add_ps(sum_a_mult_b_real, _mm256_mul_ps(a, b_real));
        // Add | ai⋅bi,i+3 | −ar⋅bi,i+3 | … | ai⋅bi,i+0 | −ar⋅bi,i+0 | to partial sum.
        sum_a_mult_b_imag = _mm256_addsub_ps(sum_a_mult_b_imag, _mm256_mul_ps(a, b_imag));
    }

    // Swap position of −ar⋅bi and ai⋅bi.
    sum_a_mult_b_imag = _mm256_permute_ps(sum_a_mult_b_imag, _MM_SHUFFLE(2, 3, 0, 1));
    // | ai⋅br − ar⋅bi | ar⋅br + ai⋅bi |, sum contains four such partial sums.
    __m256 sum = _mm256_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);
    /* Sum the four partial sums: Add high half of vector sum to the low one, i.e.
     * s1 + s3 and s0 + s2 …
     */
    sum = _mm256_add_ps(sum, _mm256_permute2f128_ps(sum, sum, 0x01));
    // … and now (s0 + s2) + (s1 + s3)
    sum = _mm256_add_ps(sum, _mm256_permute_ps(sum, _MM_SHUFFLE(1, 0, 3, 2)));
    // Store the low complex element (two floats) of the result.
    __m128 lower = _mm256_extractf128_ps(sum, 0);
    _mm_storel_pi((__m64*)result, lower);

    // Handle the last elements if num_points mod 4 is bigger than 0.
    for (long unsigned i = num_points & ~3u; i < num_points; ++i) {
        *result += lv_cmake(
            lv_creal(input[i]) * lv_creal(taps[i]) + lv_cimag(input[i]) * lv_cimag(taps[i]),
            lv_cimag(input[i]) * lv_creal(taps[i]) - lv_creal(input[i]) * lv_cimag(taps[i]));
    }
}

#endif /* LV_HAVE_AVX */
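/* Note on the AVX reduction above: _mm256_addsub_ps(acc, x) subtracts x in
 * the even (real) lanes and adds it in the odd (imaginary) lanes, which
 * produces the −ar⋅bi terms without a separate negation. For one complex
 * element (high lane on the left, as in the comments above):
 *
 *   sum_a_mult_b_real        = | ai⋅br  | ar⋅br  |
 *   sum_a_mult_b_imag        = | ai⋅bi  | −ar⋅bi |
 *   … after the in-lane swap = | −ar⋅bi | ai⋅bi  |
 *   sum of the two           = | ai⋅br − ar⋅bi | ar⋅br + ai⋅bi | = | Im | Re |
 */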

#ifdef LV_HAVE_SSE3

#include <xmmintrin.h>
#include <pmmintrin.h>

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_u_sse3(lv_32fc_t* result,
    const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points)
{
    // Partial sums for indices i and i+1.
    __m128 sum_a_mult_b_real = _mm_setzero_ps();
    __m128 sum_a_mult_b_imag = _mm_setzero_ps();

    for (long unsigned i = 0; i < (num_points & ~1u); i += 2) {
        /* Two complex elements are processed at a time.
         * (ar + j⋅ai)*conj(br + j⋅bi) =
         * ar⋅br + ai⋅bi + j⋅(ai⋅br − ar⋅bi)
         */

        /* Load input and taps, split and duplicate real and imaginary parts of taps.
         * a:      | ai,i+1 | ar,i+1 | ai,i+0 | ar,i+0 |
         * b:      | bi,i+1 | br,i+1 | bi,i+0 | br,i+0 |
         * b_real: | br,i+1 | br,i+1 | br,i+0 | br,i+0 |
         * b_imag: | bi,i+1 | bi,i+1 | bi,i+0 | bi,i+0 |
         */
        __m128 a = _mm_loadu_ps((const float*)&input[i]);
        __m128 b = _mm_loadu_ps((const float*)&taps[i]);
        __m128 b_real = _mm_moveldup_ps(b);
        __m128 b_imag = _mm_movehdup_ps(b);

        // Add | ai⋅br,i+1 | ar⋅br,i+1 | ai⋅br,i+0 | ar⋅br,i+0 | to partial sum.
        sum_a_mult_b_real = _mm_add_ps(sum_a_mult_b_real, _mm_mul_ps(a, b_real));
        // Add | ai⋅bi,i+1 | −ar⋅bi,i+1 | ai⋅bi,i+0 | −ar⋅bi,i+0 | to partial sum.
        sum_a_mult_b_imag = _mm_addsub_ps(sum_a_mult_b_imag, _mm_mul_ps(a, b_imag));
    }

    // Swap position of −ar⋅bi and ai⋅bi.
    sum_a_mult_b_imag = _mm_shuffle_ps(sum_a_mult_b_imag, sum_a_mult_b_imag,
                                       _MM_SHUFFLE(2, 3, 0, 1));
    // | ai⋅br − ar⋅bi | ar⋅br + ai⋅bi |, sum contains two such partial sums.
    __m128 sum = _mm_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);
    // Sum the two partial sums.
    sum = _mm_add_ps(sum, _mm_shuffle_ps(sum, sum, _MM_SHUFFLE(1, 0, 3, 2)));
    // Store the result.
    _mm_storel_pi((__m64*)result, sum);

    // Handle the last element if num_points mod 2 is 1.
    if (num_points & 1u) {
        *result += lv_cmake(
            lv_creal(input[num_points - 1]) * lv_creal(taps[num_points - 1]) +
                lv_cimag(input[num_points - 1]) * lv_cimag(taps[num_points - 1]),
            lv_cimag(input[num_points - 1]) * lv_creal(taps[num_points - 1]) -
                lv_creal(input[num_points - 1]) * lv_cimag(taps[num_points - 1]));
    }
}

#endif /*LV_HAVE_SSE3*/

#ifdef LV_HAVE_NEON
#include <arm_neon.h>
static inline void volk_32fc_x2_conjugate_dot_prod_32fc_neon(lv_32fc_t* result,
    const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points)
{
    unsigned int quarter_points = num_points / 4;
    unsigned int number;

    lv_32fc_t* a_ptr = (lv_32fc_t*)taps;
    lv_32fc_t* b_ptr = (lv_32fc_t*)input;
    // For the 2-vector structures, val[0] holds the real parts
    // and val[1] holds the imaginary parts.
    float32x4x2_t a_val, b_val, accumulator;
    float32x4x2_t tmp_imag;
    accumulator.val[0] = vdupq_n_f32(0);
    accumulator.val[1] = vdupq_n_f32(0);

    for (number = 0; number < quarter_points; ++number) {
        a_val = vld2q_f32((float*)a_ptr); // a0r|a1r|a2r|a3r || a0i|a1i|a2i|a3i
        b_val = vld2q_f32((float*)b_ptr); // b0r|b1r|b2r|b3r || b0i|b1i|b2i|b3i
        __VOLK_PREFETCH(a_ptr + 8);
        __VOLK_PREFETCH(b_ptr + 8);

        // do the first multiply
        tmp_imag.val[1] = vmulq_f32(a_val.val[1], b_val.val[0]);
        tmp_imag.val[0] = vmulq_f32(a_val.val[0], b_val.val[0]);

        // use multiply accumulate/subtract to get result
        tmp_imag.val[1] = vmlsq_f32(tmp_imag.val[1], a_val.val[0], b_val.val[1]);
        tmp_imag.val[0] = vmlaq_f32(tmp_imag.val[0], a_val.val[1], b_val.val[1]);

        accumulator.val[0] = vaddq_f32(accumulator.val[0], tmp_imag.val[0]);
        accumulator.val[1] = vaddq_f32(accumulator.val[1], tmp_imag.val[1]);

        // increment pointers
        a_ptr += 4;
        b_ptr += 4;
    }
    lv_32fc_t accum_result[4];
    vst2q_f32((float*)accum_result, accumulator);
    *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

    // tail case
    for (number = quarter_points * 4; number < num_points; ++number) {
        *result += (*a_ptr++) * lv_conj(*b_ptr++);
    }
    *result = lv_conj(*result);
}
#endif /*LV_HAVE_NEON*/
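/* Note on the NEON kernel above: a_ptr walks the taps and b_ptr the input,
 * so both the vector loop and the tail accumulate taps[n]·conj(input[n]).
 * Conjugating the accumulated sum once at the end yields the desired result,
 * because
 *
 *   conj( Σ taps[n]·conj(input[n]) ) = Σ input[n]·conj(taps[n]),
 *
 * which avoids negating a vector register in every loop iteration.
 */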

#endif /*INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_u_H*/

#ifndef INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_a_H
#define INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_a_H

#include <volk/volk_common.h>
#include <volk/volk_complex.h>
#include <stdio.h>

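/* The _a_ kernels below require VOLK-aligned buffers: _mm256_load_ps and
 * _mm_load_ps fault on pointers that are not 32- and 16-byte aligned,
 * respectively. Buffers allocated as in this sketch satisfy that:
 *
 * \code
 *   lv_32fc_t* buf = (lv_32fc_t*)volk_malloc(N * sizeof(lv_32fc_t),
 *                                            volk_get_alignment());
 * \endcode
 */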
#ifdef LV_HAVE_AVX
#include <immintrin.h>

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_a_avx(lv_32fc_t* result,
    const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points)
{
    // Partial sums for indices i, i+1, i+2 and i+3.
    __m256 sum_a_mult_b_real = _mm256_setzero_ps();
    __m256 sum_a_mult_b_imag = _mm256_setzero_ps();

    for (long unsigned i = 0; i < (num_points & ~3u); i += 4) {
        /* Four complex elements are processed at a time.
         * (ar + j⋅ai)*conj(br + j⋅bi) =
         * ar⋅br + ai⋅bi + j⋅(ai⋅br − ar⋅bi)
         */

        /* Load input and taps, split and duplicate real and imaginary parts of taps.
         * a:      | ai,i+3 | ar,i+3 | … | ai,i+1 | ar,i+1 | ai,i+0 | ar,i+0 |
         * b:      | bi,i+3 | br,i+3 | … | bi,i+1 | br,i+1 | bi,i+0 | br,i+0 |
         * b_real: | br,i+3 | br,i+3 | … | br,i+1 | br,i+1 | br,i+0 | br,i+0 |
         * b_imag: | bi,i+3 | bi,i+3 | … | bi,i+1 | bi,i+1 | bi,i+0 | bi,i+0 |
         */
        __m256 a = _mm256_load_ps((const float*)&input[i]);
        __m256 b = _mm256_load_ps((const float*)&taps[i]);
        __m256 b_real = _mm256_moveldup_ps(b);
        __m256 b_imag = _mm256_movehdup_ps(b);

        // Add | ai⋅br,i+3 | ar⋅br,i+3 | … | ai⋅br,i+0 | ar⋅br,i+0 | to partial sum.
        sum_a_mult_b_real = _mm256_add_ps(sum_a_mult_b_real, _mm256_mul_ps(a, b_real));
        // Add | ai⋅bi,i+3 | −ar⋅bi,i+3 | … | ai⋅bi,i+0 | −ar⋅bi,i+0 | to partial sum.
        sum_a_mult_b_imag = _mm256_addsub_ps(sum_a_mult_b_imag, _mm256_mul_ps(a, b_imag));
    }

    // Swap position of −ar⋅bi and ai⋅bi.
    sum_a_mult_b_imag = _mm256_permute_ps(sum_a_mult_b_imag, _MM_SHUFFLE(2, 3, 0, 1));
    // | ai⋅br − ar⋅bi | ar⋅br + ai⋅bi |, sum contains four such partial sums.
    __m256 sum = _mm256_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);
    /* Sum the four partial sums: Add high half of vector sum to the low one, i.e.
     * s1 + s3 and s0 + s2 …
     */
    sum = _mm256_add_ps(sum, _mm256_permute2f128_ps(sum, sum, 0x01));
    // … and now (s0 + s2) + (s1 + s3)
    sum = _mm256_add_ps(sum, _mm256_permute_ps(sum, _MM_SHUFFLE(1, 0, 3, 2)));
    // Store the low complex element (two floats) of the result.
    __m128 lower = _mm256_extractf128_ps(sum, 0);
    _mm_storel_pi((__m64*)result, lower);

    // Handle the last elements if num_points mod 4 is bigger than 0.
    for (long unsigned i = num_points & ~3u; i < num_points; ++i) {
        *result += lv_cmake(
            lv_creal(input[i]) * lv_creal(taps[i]) + lv_cimag(input[i]) * lv_cimag(taps[i]),
            lv_cimag(input[i]) * lv_creal(taps[i]) - lv_creal(input[i]) * lv_cimag(taps[i]));
    }
}
#endif /* LV_HAVE_AVX */

#ifdef LV_HAVE_SSE3

#include <xmmintrin.h>
#include <pmmintrin.h>

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_a_sse3(lv_32fc_t* result,
    const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points)
{
    // Partial sums for indices i and i+1.
    __m128 sum_a_mult_b_real = _mm_setzero_ps();
    __m128 sum_a_mult_b_imag = _mm_setzero_ps();

    for (long unsigned i = 0; i < (num_points & ~1u); i += 2) {
        /* Two complex elements are processed at a time.
         * (ar + j⋅ai)*conj(br + j⋅bi) =
         * ar⋅br + ai⋅bi + j⋅(ai⋅br − ar⋅bi)
         */

        /* Load input and taps, split and duplicate real and imaginary parts of taps.
         * a:      | ai,i+1 | ar,i+1 | ai,i+0 | ar,i+0 |
         * b:      | bi,i+1 | br,i+1 | bi,i+0 | br,i+0 |
         * b_real: | br,i+1 | br,i+1 | br,i+0 | br,i+0 |
         * b_imag: | bi,i+1 | bi,i+1 | bi,i+0 | bi,i+0 |
         */
        __m128 a = _mm_load_ps((const float*)&input[i]);
        __m128 b = _mm_load_ps((const float*)&taps[i]);
        __m128 b_real = _mm_moveldup_ps(b);
        __m128 b_imag = _mm_movehdup_ps(b);

        // Add | ai⋅br,i+1 | ar⋅br,i+1 | ai⋅br,i+0 | ar⋅br,i+0 | to partial sum.
        sum_a_mult_b_real = _mm_add_ps(sum_a_mult_b_real, _mm_mul_ps(a, b_real));
        // Add | ai⋅bi,i+1 | −ar⋅bi,i+1 | ai⋅bi,i+0 | −ar⋅bi,i+0 | to partial sum.
        sum_a_mult_b_imag = _mm_addsub_ps(sum_a_mult_b_imag, _mm_mul_ps(a, b_imag));
    }

    // Swap position of −ar⋅bi and ai⋅bi.
    sum_a_mult_b_imag = _mm_shuffle_ps(sum_a_mult_b_imag, sum_a_mult_b_imag,
                                       _MM_SHUFFLE(2, 3, 0, 1));
    // | ai⋅br − ar⋅bi | ar⋅br + ai⋅bi |, sum contains two such partial sums.
    __m128 sum = _mm_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);
    // Sum the two partial sums.
    sum = _mm_add_ps(sum, _mm_shuffle_ps(sum, sum, _MM_SHUFFLE(1, 0, 3, 2)));
    // Store the result.
    _mm_storel_pi((__m64*)result, sum);

    // Handle the last element if num_points mod 2 is 1.
    if (num_points & 1u) {
        *result += lv_cmake(
            lv_creal(input[num_points - 1]) * lv_creal(taps[num_points - 1]) +
                lv_cimag(input[num_points - 1]) * lv_cimag(taps[num_points - 1]),
            lv_cimag(input[num_points - 1]) * lv_creal(taps[num_points - 1]) -
                lv_creal(input[num_points - 1]) * lv_cimag(taps[num_points - 1]));
    }
}

#endif /*LV_HAVE_SSE3*/


#ifdef LV_HAVE_GENERIC


static inline void volk_32fc_x2_conjugate_dot_prod_32fc_a_generic(lv_32fc_t* result,
    const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points)
{
    // Identical to volk_32fc_x2_conjugate_dot_prod_32fc_generic above.
    const unsigned int num_bytes = num_points * 8;

    float* res = (float*)result;
    float* in = (float*)input;
    float* tp = (float*)taps;
    unsigned int n_2_ccomplex_blocks = num_bytes >> 4;
    unsigned int isodd = (num_bytes >> 3) & 1;

    float sum0[2] = { 0, 0 };
    float sum1[2] = { 0, 0 };
    unsigned int i = 0;

    for (i = 0; i < n_2_ccomplex_blocks; ++i) {
        sum0[0] += in[0] * tp[0] + in[1] * tp[1];
        sum0[1] += (-in[0] * tp[1]) + in[1] * tp[0];
        sum1[0] += in[2] * tp[2] + in[3] * tp[3];
        sum1[1] += (-in[2] * tp[3]) + in[3] * tp[2];

        in += 4;
        tp += 4;
    }

    res[0] = sum0[0] + sum1[0];
    res[1] = sum0[1] + sum1[1];

    // Handle the last sample if num_points is odd.
    for (i = 0; i < isodd; ++i) {
        *result += input[(num_bytes >> 3) - 1] * lv_conj(taps[(num_bytes >> 3) - 1]);
    }
}

#endif /*LV_HAVE_GENERIC*/

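/* The inline-assembly kernels below form conj(taps[n]) by XOR-ing the
 * `conjugator` sign mask into the imaginary lanes: XOR-ing 0x80000000 into a
 * float flips its IEEE-754 sign bit, i.e. negates it. A standalone scalar
 * illustration (a sketch, not part of these kernels):
 *
 * \code
 *   #include <stdint.h>
 *   #include <string.h>
 *
 *   static inline float negate_via_sign_bit(float x)
 *   {
 *       uint32_t u;
 *       memcpy(&u, &x, sizeof u); // type-pun without aliasing violations
 *       u ^= 0x80000000u;         // flip the IEEE-754 sign bit
 *       memcpy(&x, &u, sizeof u);
 *       return x;                 // == -x
 *   }
 * \endcode
 */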
#if LV_HAVE_SSE && LV_HAVE_64

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_a_sse(lv_32fc_t* result,
    const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points)
{
    const unsigned int num_bytes = num_points * 8;

    __VOLK_ATTR_ALIGNED(16) static const uint32_t conjugator[4] = {
        0x00000000, 0x80000000, 0x00000000, 0x80000000
    };

    __VOLK_ASM __VOLK_VOLATILE
    (
    "# ccomplex_conjugate_dotprod_generic (float* result, const float *input,\n\t"
    "# const float *taps, unsigned num_bytes)\n\t"
    "# float sum0 = 0;\n\t"
    "# float sum1 = 0;\n\t"
    "# float sum2 = 0;\n\t"
    "# float sum3 = 0;\n\t"
    "# do {\n\t"
    "# sum0 += input[0] * taps[0] - input[1] * taps[1];\n\t"
    "# sum1 += input[0] * taps[1] + input[1] * taps[0];\n\t"
    "# sum2 += input[2] * taps[2] - input[3] * taps[3];\n\t"
    "# sum3 += input[2] * taps[3] + input[3] * taps[2];\n\t"
    "# input += 4;\n\t"
    "# taps += 4; \n\t"
    "# } while (--n_2_ccomplex_blocks != 0);\n\t"
    "# result[0] = sum0 + sum2;\n\t"
    "# result[1] = sum1 + sum3;\n\t"
    "# TODO: prefetch and better scheduling\n\t"
    " xor %%r9, %%r9\n\t"
    " xor %%r10, %%r10\n\t"
    " movq %[conjugator], %%r9\n\t"
    " movq %%rcx, %%rax\n\t"
    " movaps 0(%%r9), %%xmm8\n\t"
    " movq %%rcx, %%r8\n\t"
    " movq %[rsi], %%r9\n\t"
    " movq %[rdx], %%r10\n\t"
    " xorps %%xmm6, %%xmm6 # zero accumulators\n\t"
    " movaps 0(%%r9), %%xmm0\n\t"
    " xorps %%xmm7, %%xmm7 # zero accumulators\n\t"
    " movups 0(%%r10), %%xmm2\n\t"
    " shr $5, %%rax # rax = n_2_ccomplex_blocks / 2\n\t"
    " shr $4, %%r8\n\t"
    " xorps %%xmm8, %%xmm2\n\t"
    " jmp .%=L1_test\n\t"
    " # 4 taps / loop\n\t"
    " # something like ?? cycles / loop\n\t"
    ".%=Loop1: \n\t"
    "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
    "# movaps (%%r9), %%xmmA\n\t"
    "# movaps (%%r10), %%xmmB\n\t"
    "# movaps %%xmmA, %%xmmZ\n\t"
    "# shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
    "# mulps %%xmmB, %%xmmA\n\t"
    "# mulps %%xmmZ, %%xmmB\n\t"
    "# # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
    "# xorps %%xmmPN, %%xmmA\n\t"
    "# movaps %%xmmA, %%xmmZ\n\t"
    "# unpcklps %%xmmB, %%xmmA\n\t"
    "# unpckhps %%xmmB, %%xmmZ\n\t"
    "# movaps %%xmmZ, %%xmmY\n\t"
    "# shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
    "# shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
    "# addps %%xmmZ, %%xmmA\n\t"
    "# addps %%xmmA, %%xmmC\n\t"
    "# A=xmm0, B=xmm2, Z=xmm4\n\t"
    "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
    " movaps 16(%%r9), %%xmm1\n\t"
    " movaps %%xmm0, %%xmm4\n\t"
    " mulps %%xmm2, %%xmm0\n\t"
    " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
    " movaps 16(%%r10), %%xmm3\n\t"
    " movaps %%xmm1, %%xmm5\n\t"
    " xorps %%xmm8, %%xmm3\n\t"
    " addps %%xmm0, %%xmm6\n\t"
    " mulps %%xmm3, %%xmm1\n\t"
    " shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
    " addps %%xmm1, %%xmm6\n\t"
    " mulps %%xmm4, %%xmm2\n\t"
    " movaps 32(%%r9), %%xmm0\n\t"
    " addps %%xmm2, %%xmm7\n\t"
    " mulps %%xmm5, %%xmm3\n\t"
    " add $32, %%r9\n\t"
    " movaps 32(%%r10), %%xmm2\n\t"
    " addps %%xmm3, %%xmm7\n\t"
    " add $32, %%r10\n\t"
    " xorps %%xmm8, %%xmm2\n\t"
    ".%=L1_test:\n\t"
    " dec %%rax\n\t"
    " jge .%=Loop1\n\t"
    " # We've handled the bulk of multiplies up to here.\n\t"
    " # Let's see if original n_2_ccomplex_blocks was odd.\n\t"
    " # If so, we've got 2 more taps to do.\n\t"
    " and $1, %%r8\n\t"
    " je .%=Leven\n\t"
    " # The count was odd, do 2 more taps.\n\t"
    " # Note that we've already got mm0/mm2 preloaded\n\t"
    " # from the main loop.\n\t"
    " movaps %%xmm0, %%xmm4\n\t"
    " mulps %%xmm2, %%xmm0\n\t"
    " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
    " addps %%xmm0, %%xmm6\n\t"
    " mulps %%xmm4, %%xmm2\n\t"
    " addps %%xmm2, %%xmm7\n\t"
    ".%=Leven:\n\t"
    " # negation mask\n\t"
    " xorps %%xmm1, %%xmm1\n\t"
    " mov $0x80000000, %%r9\n\t"
    " movd %%r9, %%xmm1\n\t"
    " shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
    " # pfpnacc\n\t"
    " xorps %%xmm1, %%xmm6\n\t"
    " movaps %%xmm6, %%xmm2\n\t"
    " unpcklps %%xmm7, %%xmm6\n\t"
    " unpckhps %%xmm7, %%xmm2\n\t"
    " movaps %%xmm2, %%xmm3\n\t"
    " shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
    " shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
    " addps %%xmm2, %%xmm6\n\t"
    " # xmm6 = r1 i2 r3 i4\n\t"
    " movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
    " addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
    " movlps %%xmm6, (%[rdi]) # store low 2x32 bits (complex) to memory\n\t"
    :
    : [rsi] "r" (input), [rdx] "r" (taps), "c" (num_bytes), [rdi] "r" (result), [conjugator] "r" (conjugator)
    : "rax", "r8", "r9", "r10"
    );

    int getem = num_bytes % 16;

    // Handle the last complex sample, if any (at most one iteration).
    for (; getem > 0; getem -= 8) {
        *result += (input[(num_bytes >> 3) - 1] * lv_conj(taps[(num_bytes >> 3) - 1]));
    }
}
#endif

#if LV_HAVE_SSE && LV_HAVE_32
static inline void volk_32fc_x2_conjugate_dot_prod_32fc_a_sse_32(lv_32fc_t* result,
    const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points)
{
    const unsigned int num_bytes = num_points * 8;

    __VOLK_ATTR_ALIGNED(16) static const uint32_t conjugator[4] = {
        0x00000000, 0x80000000, 0x00000000, 0x80000000
    };

    int bound = num_bytes >> 4;
    int leftovers = num_bytes % 16;

    __VOLK_ASM __VOLK_VOLATILE
    (
    " #pushl %%ebp\n\t"
    " #movl %%esp, %%ebp\n\t"
    " #movl 12(%%ebp), %%eax # input\n\t"
    " #movl 16(%%ebp), %%edx # taps\n\t"
    " #movl 20(%%ebp), %%ecx # n_bytes\n\t"
    " movaps 0(%[conjugator]), %%xmm1\n\t"
    " xorps %%xmm6, %%xmm6 # zero accumulators\n\t"
    " movaps 0(%[eax]), %%xmm0\n\t"
    " xorps %%xmm7, %%xmm7 # zero accumulators\n\t"
    " movaps 0(%[edx]), %%xmm2\n\t"
    " movl %[ecx], (%[out])\n\t"
    " shrl $5, %[ecx] # ecx = n_2_ccomplex_blocks / 2\n\t"

    " xorps %%xmm1, %%xmm2\n\t"
    " jmp .%=L1_test\n\t"
    " # 4 taps / loop\n\t"
    " # something like ?? cycles / loop\n\t"
    ".%=Loop1: \n\t"
    "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
    "# movaps (%[eax]), %%xmmA\n\t"
    "# movaps (%[edx]), %%xmmB\n\t"
    "# movaps %%xmmA, %%xmmZ\n\t"
    "# shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
    "# mulps %%xmmB, %%xmmA\n\t"
    "# mulps %%xmmZ, %%xmmB\n\t"
    "# # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
    "# xorps %%xmmPN, %%xmmA\n\t"
    "# movaps %%xmmA, %%xmmZ\n\t"
    "# unpcklps %%xmmB, %%xmmA\n\t"
    "# unpckhps %%xmmB, %%xmmZ\n\t"
    "# movaps %%xmmZ, %%xmmY\n\t"
    "# shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
    "# shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
    "# addps %%xmmZ, %%xmmA\n\t"
    "# addps %%xmmA, %%xmmC\n\t"
    "# A=xmm0, B=xmm2, Z=xmm4\n\t"
    "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
    " movaps 16(%[edx]), %%xmm3\n\t"
    " movaps %%xmm0, %%xmm4\n\t"
    " xorps %%xmm1, %%xmm3\n\t"
    " mulps %%xmm2, %%xmm0\n\t"
    " movaps 16(%[eax]), %%xmm1\n\t"
    " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
    " movaps %%xmm1, %%xmm5\n\t"
    " addps %%xmm0, %%xmm6\n\t"
    " mulps %%xmm3, %%xmm1\n\t"
    " shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
    " addps %%xmm1, %%xmm6\n\t"
    " movaps 0(%[conjugator]), %%xmm1\n\t"
    " mulps %%xmm4, %%xmm2\n\t"
    " movaps 32(%[eax]), %%xmm0\n\t"
    " addps %%xmm2, %%xmm7\n\t"
    " mulps %%xmm5, %%xmm3\n\t"
    " addl $32, %[eax]\n\t"
    " movaps 32(%[edx]), %%xmm2\n\t"
    " addps %%xmm3, %%xmm7\n\t"
    " xorps %%xmm1, %%xmm2\n\t"
    " addl $32, %[edx]\n\t"
    ".%=L1_test:\n\t"
    " decl %[ecx]\n\t"
    " jge .%=Loop1\n\t"
    " # We've handled the bulk of multiplies up to here.\n\t"
    " # Let's see if original n_2_ccomplex_blocks was odd.\n\t"
    " # If so, we've got 2 more taps to do.\n\t"
    " movl 0(%[out]), %[ecx] # n_2_ccomplex_blocks\n\t"
    " shrl $4, %[ecx]\n\t"
    " andl $1, %[ecx]\n\t"
    " je .%=Leven\n\t"
    " # The count was odd, do 2 more taps.\n\t"
    " # Note that we've already got mm0/mm2 preloaded\n\t"
    " # from the main loop.\n\t"
    " movaps %%xmm0, %%xmm4\n\t"
    " mulps %%xmm2, %%xmm0\n\t"
    " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
    " addps %%xmm0, %%xmm6\n\t"
    " mulps %%xmm4, %%xmm2\n\t"
    " addps %%xmm2, %%xmm7\n\t"
    ".%=Leven:\n\t"
    " # negation mask\n\t"
    " #movl 8(%%ebp), %[eax] \n\t"
    " xorps %%xmm1, %%xmm1\n\t"
    " movl $0x80000000, (%[out])\n\t"
    " movss (%[out]), %%xmm1\n\t"
    " shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
    " # pfpnacc\n\t"
    " xorps %%xmm1, %%xmm6\n\t"
    " movaps %%xmm6, %%xmm2\n\t"
    " unpcklps %%xmm7, %%xmm6\n\t"
    " unpckhps %%xmm7, %%xmm2\n\t"
    " movaps %%xmm2, %%xmm3\n\t"
    " shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
    " shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
    " addps %%xmm2, %%xmm6\n\t"
    " # xmm6 = r1 i2 r3 i4\n\t"
    " #movl 8(%%ebp), %[eax] # @result\n\t"
    " movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
    " addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
    " movlps %%xmm6, (%[out]) # store low 2x32 bits (complex) to memory\n\t"
    " #popl %%ebp\n\t"
    :
    : [eax] "r" (input), [edx] "r" (taps), [ecx] "r" (num_bytes), [out] "r" (result), [conjugator] "r" (conjugator)
    );

    // Handle the last complex sample, if any (at most one iteration).
    for (; leftovers > 0; leftovers -= 8) {
        *result += (input[(bound << 1)] * lv_conj(taps[(bound << 1)]));
    }
}
#endif /*LV_HAVE_SSE*/


#endif /*INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_a_H*/