volk_32fc_x2_conjugate_dot_prod_32fc.h
/* -*- c++ -*- */
/*
 * Copyright 2012, 2014 Free Software Foundation, Inc.
 *
 * This file is part of GNU Radio
 *
 * GNU Radio is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3, or (at your option)
 * any later version.
 *
 * GNU Radio is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU Radio; see the file COPYING. If not, write to
 * the Free Software Foundation, Inc., 51 Franklin Street,
 * Boston, MA 02110-1301, USA.
 */

#ifndef INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_u_H
#define INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_u_H


#include <volk/volk_complex.h>

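/*
 * This kernel computes the conjugate dot product of two complex vectors:
 *
 *     result = sum over i of (input[i] * conj(taps[i]))
 *
 * A minimal usage sketch (assuming the usual VOLK dispatcher entry point
 * volk_32fc_x2_conjugate_dot_prod_32fc and the allocation helpers from
 * <volk/volk.h>; N is a caller-chosen length):
 *
 *     lv_32fc_t* input = (lv_32fc_t*)volk_malloc(N * sizeof(lv_32fc_t),
 *                                                volk_get_alignment());
 *     lv_32fc_t* taps = (lv_32fc_t*)volk_malloc(N * sizeof(lv_32fc_t),
 *                                               volk_get_alignment());
 *     lv_32fc_t result;
 *     // ... fill input and taps ...
 *     volk_32fc_x2_conjugate_dot_prod_32fc(&result, input, taps, N);
 *     volk_free(input);
 *     volk_free(taps);
 */
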
#ifdef LV_HAVE_GENERIC

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_generic(lv_32fc_t* result,
                                                                const lv_32fc_t* input,
                                                                const lv_32fc_t* taps,
                                                                unsigned int num_points)
{
    lv_32fc_t res = lv_cmake(0.f, 0.f);
    for (unsigned int i = 0; i < num_points; ++i) {
        res += (*input++) * lv_conj((*taps++));
    }
    *result = res;
}

#endif /*LV_HAVE_GENERIC*/

#ifdef LV_HAVE_GENERIC

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_block(lv_32fc_t* result,
                                                              const lv_32fc_t* input,
                                                              const lv_32fc_t* taps,
                                                              unsigned int num_points)
{

    const unsigned int num_bytes = num_points * 8;

    float* res = (float*)result;
    float* in = (float*)input;
    float* tp = (float*)taps;
    unsigned int n_2_ccomplex_blocks = num_bytes >> 4;

    float sum0[2] = { 0, 0 };
    float sum1[2] = { 0, 0 };
    unsigned int i = 0;

    for (i = 0; i < n_2_ccomplex_blocks; ++i) {
        sum0[0] += in[0] * tp[0] + in[1] * tp[1];
        sum0[1] += (-in[0] * tp[1]) + in[1] * tp[0];
        sum1[0] += in[2] * tp[2] + in[3] * tp[3];
        sum1[1] += (-in[2] * tp[3]) + in[3] * tp[2];

        in += 4;
        tp += 4;
    }

    res[0] = sum0[0] + sum1[0];
    res[1] = sum0[1] + sum1[1];

    // num_bytes >> 3 is num_points; handle the last element if it is odd.
    if (num_bytes >> 3 & 1) {
        *result += input[(num_bytes >> 3) - 1] * lv_conj(taps[(num_bytes >> 3) - 1]);
    }
}

#endif /*LV_HAVE_GENERIC*/
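
/* The block variant above computes the same sum as the plain generic loop,
 * up to floating-point reordering: it processes two complex elements per
 * iteration into two independent accumulators (sum0, sum1), which shortens
 * the floating-point add dependency chain. One element expands as
 *
 *     (ar + j*ai) * conj(br + j*bi) = (ar*br + ai*bi) + j*(ai*br - ar*bi)
 */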

#ifdef LV_HAVE_AVX

#include <immintrin.h>

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_u_avx(lv_32fc_t* result,
                                                              const lv_32fc_t* input,
                                                              const lv_32fc_t* taps,
                                                              unsigned int num_points)
{
    // Partial sums for indices i, i+1, i+2 and i+3.
    __m256 sum_a_mult_b_real = _mm256_setzero_ps();
    __m256 sum_a_mult_b_imag = _mm256_setzero_ps();

    for (long unsigned i = 0; i < (num_points & ~3u); i += 4) {
        /* Four complex elements are processed at a time.
         * (ar + j⋅ai)*conj(br + j⋅bi) =
         * ar⋅br + ai⋅bi + j⋅(ai⋅br − ar⋅bi)
         */

        /* Load input and taps, split and duplicate the real and imaginary
         * parts of taps.
         * a:      | ai,i+3 | ar,i+3 | … | ai,i+1 | ar,i+1 | ai,i+0 | ar,i+0 |
         * b:      | bi,i+3 | br,i+3 | … | bi,i+1 | br,i+1 | bi,i+0 | br,i+0 |
         * b_real: | br,i+3 | br,i+3 | … | br,i+1 | br,i+1 | br,i+0 | br,i+0 |
         * b_imag: | bi,i+3 | bi,i+3 | … | bi,i+1 | bi,i+1 | bi,i+0 | bi,i+0 |
         */
        __m256 a = _mm256_loadu_ps((const float*)&input[i]);
        __m256 b = _mm256_loadu_ps((const float*)&taps[i]);
        __m256 b_real = _mm256_moveldup_ps(b);
        __m256 b_imag = _mm256_movehdup_ps(b);

        // Add | ai⋅br,i+3 | ar⋅br,i+3 | … | ai⋅br,i+0 | ar⋅br,i+0 | to partial sum.
        sum_a_mult_b_real = _mm256_add_ps(sum_a_mult_b_real, _mm256_mul_ps(a, b_real));
        // Add | ai⋅bi,i+3 | −ar⋅bi,i+3 | … | ai⋅bi,i+0 | −ar⋅bi,i+0 | to partial sum.
        sum_a_mult_b_imag = _mm256_addsub_ps(sum_a_mult_b_imag, _mm256_mul_ps(a, b_imag));
    }

    // Swap position of −ar⋅bi and ai⋅bi.
    sum_a_mult_b_imag = _mm256_permute_ps(sum_a_mult_b_imag, _MM_SHUFFLE(2, 3, 0, 1));
    // Each complex lane pair now holds | ai⋅br − ar⋅bi | ar⋅br + ai⋅bi |;
    // sum contains four such partial sums.
    __m256 sum = _mm256_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);
    /* Sum the four partial sums: add the high half of sum to the low one, i.e.
     * s1 + s3 and s0 + s2 …
     */
    sum = _mm256_add_ps(sum, _mm256_permute2f128_ps(sum, sum, 0x01));
    // … and now (s0 + s2) + (s1 + s3)
    sum = _mm256_add_ps(sum, _mm256_permute_ps(sum, _MM_SHUFFLE(1, 0, 3, 2)));
    // Store result.
    __m128 lower = _mm256_extractf128_ps(sum, 0);
    _mm_storel_pi((__m64*)result, lower);

    // Handle the remaining elements if num_points mod 4 is greater than zero.
    for (long unsigned i = num_points & ~3u; i < num_points; ++i) {
        *result += lv_cmake(lv_creal(input[i]) * lv_creal(taps[i]) +
                                lv_cimag(input[i]) * lv_cimag(taps[i]),
                            lv_cimag(input[i]) * lv_creal(taps[i]) -
                                lv_creal(input[i]) * lv_cimag(taps[i]));
    }
}

#endif /* LV_HAVE_AVX */
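
/* The aligned twin of this kernel, volk_32fc_x2_conjugate_dot_prod_32fc_a_avx
 * below, is identical except that it uses _mm256_load_ps instead of
 * _mm256_loadu_ps and therefore requires suitably aligned buffers (e.g. from
 * volk_malloc with volk_get_alignment()).
 */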

#ifdef LV_HAVE_SSE3

#include <pmmintrin.h>
#include <xmmintrin.h>

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_u_sse3(lv_32fc_t* result,
                                                               const lv_32fc_t* input,
                                                               const lv_32fc_t* taps,
                                                               unsigned int num_points)
{
    // Partial sums for indices i and i+1.
    __m128 sum_a_mult_b_real = _mm_setzero_ps();
    __m128 sum_a_mult_b_imag = _mm_setzero_ps();

    for (long unsigned i = 0; i < (num_points & ~1u); i += 2) {
        /* Two complex elements are processed at a time.
         * (ar + j⋅ai)*conj(br + j⋅bi) =
         * ar⋅br + ai⋅bi + j⋅(ai⋅br − ar⋅bi)
         */

        /* Load input and taps, split and duplicate the real and imaginary
         * parts of taps.
         * a:      | ai,i+1 | ar,i+1 | ai,i+0 | ar,i+0 |
         * b:      | bi,i+1 | br,i+1 | bi,i+0 | br,i+0 |
         * b_real: | br,i+1 | br,i+1 | br,i+0 | br,i+0 |
         * b_imag: | bi,i+1 | bi,i+1 | bi,i+0 | bi,i+0 |
         */
        __m128 a = _mm_loadu_ps((const float*)&input[i]);
        __m128 b = _mm_loadu_ps((const float*)&taps[i]);
        __m128 b_real = _mm_moveldup_ps(b);
        __m128 b_imag = _mm_movehdup_ps(b);

        // Add | ai⋅br,i+1 | ar⋅br,i+1 | ai⋅br,i+0 | ar⋅br,i+0 | to partial sum.
        sum_a_mult_b_real = _mm_add_ps(sum_a_mult_b_real, _mm_mul_ps(a, b_real));
        // Add | ai⋅bi,i+1 | −ar⋅bi,i+1 | ai⋅bi,i+0 | −ar⋅bi,i+0 | to partial sum.
        sum_a_mult_b_imag = _mm_addsub_ps(sum_a_mult_b_imag, _mm_mul_ps(a, b_imag));
    }

    // Swap position of −ar⋅bi and ai⋅bi.
    sum_a_mult_b_imag =
        _mm_shuffle_ps(sum_a_mult_b_imag, sum_a_mult_b_imag, _MM_SHUFFLE(2, 3, 0, 1));
    // Each complex lane pair now holds | ai⋅br − ar⋅bi | ar⋅br + ai⋅bi |;
    // sum contains two such partial sums.
    __m128 sum = _mm_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);
    // Sum the two partial sums.
    sum = _mm_add_ps(sum, _mm_shuffle_ps(sum, sum, _MM_SHUFFLE(1, 0, 3, 2)));
    // Store result.
    _mm_storel_pi((__m64*)result, sum);

    // Handle the last element if num_points mod 2 is 1.
    if (num_points & 1u) {
        *result += lv_cmake(
            lv_creal(input[num_points - 1]) * lv_creal(taps[num_points - 1]) +
                lv_cimag(input[num_points - 1]) * lv_cimag(taps[num_points - 1]),
            lv_cimag(input[num_points - 1]) * lv_creal(taps[num_points - 1]) -
                lv_creal(input[num_points - 1]) * lv_cimag(taps[num_points - 1]));
    }
}

#endif /*LV_HAVE_SSE3*/
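
/* The moveldup/movehdup/addsub sequence above is the standard SSE3 idiom for
 * complex multiplication. A scalar model of one complex lane pair, assuming
 * a = ar + j*ai and b = br + j*bi (low lane listed first):
 *
 *     sum_real += (ar*br, ai*br)   // a times duplicated real part of b
 *     sum_imag += (-ar*bi, ai*bi)  // addsub negates the even (low) lane
 *     // swap sum_imag lanes, then add:
 *     //   (ar*br + ai*bi, ai*br - ar*bi) == a * conj(b)
 */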

#ifdef LV_HAVE_NEON
#include <arm_neon.h>

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_neon(lv_32fc_t* result,
                                                             const lv_32fc_t* input,
                                                             const lv_32fc_t* taps,
                                                             unsigned int num_points)
{

    unsigned int quarter_points = num_points / 4;
    unsigned int number;

    lv_32fc_t* a_ptr = (lv_32fc_t*)taps;
    lv_32fc_t* b_ptr = (lv_32fc_t*)input;
    // vld2q_f32 de-interleaves each vector: val[0] holds the real parts,
    // val[1] the imaginary parts.
    float32x4x2_t a_val, b_val, accumulator;
    float32x4x2_t tmp_imag;
    accumulator.val[0] = vdupq_n_f32(0);
    accumulator.val[1] = vdupq_n_f32(0);

    for (number = 0; number < quarter_points; ++number) {
        a_val = vld2q_f32((float*)a_ptr); // a0r|a1r|a2r|a3r || a0i|a1i|a2i|a3i
        b_val = vld2q_f32((float*)b_ptr); // b0r|b1r|b2r|b3r || b0i|b1i|b2i|b3i
        __VOLK_PREFETCH(a_ptr + 8);
        __VOLK_PREFETCH(b_ptr + 8);

        // do the first multiply
        tmp_imag.val[1] = vmulq_f32(a_val.val[1], b_val.val[0]);
        tmp_imag.val[0] = vmulq_f32(a_val.val[0], b_val.val[0]);

        // use multiply accumulate/subtract to get result
        tmp_imag.val[1] = vmlsq_f32(tmp_imag.val[1], a_val.val[0], b_val.val[1]);
        tmp_imag.val[0] = vmlaq_f32(tmp_imag.val[0], a_val.val[1], b_val.val[1]);

        accumulator.val[0] = vaddq_f32(accumulator.val[0], tmp_imag.val[0]);
        accumulator.val[1] = vaddq_f32(accumulator.val[1], tmp_imag.val[1]);

        // increment pointers
        a_ptr += 4;
        b_ptr += 4;
    }
    lv_32fc_t accum_result[4];
    vst2q_f32((float*)accum_result, accumulator);
    *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

    // tail case
    for (number = quarter_points * 4; number < num_points; ++number) {
        *result += (*a_ptr++) * lv_conj(*b_ptr++);
    }
    *result = lv_conj(*result);
}
#endif /*LV_HAVE_NEON*/
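
/* Note on the NEON strategy above: with a_ptr = taps and b_ptr = input, the
 * main loop and the tail both accumulate taps[i] * conj(input[i]); the final
 * lv_conj() then yields the desired sum of input[i] * conj(taps[i]), since
 * conj(a * conj(b)) == b * conj(a).
 */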

#endif /*INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_u_H*/

#ifndef INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_a_H
#define INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_a_H

#include <stdio.h>
#include <volk/volk_common.h>
#include <volk/volk_complex.h>

#ifdef LV_HAVE_AVX
#include <immintrin.h>

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_a_avx(lv_32fc_t* result,
                                                              const lv_32fc_t* input,
                                                              const lv_32fc_t* taps,
                                                              unsigned int num_points)
{
    // Partial sums for indices i, i+1, i+2 and i+3.
    __m256 sum_a_mult_b_real = _mm256_setzero_ps();
    __m256 sum_a_mult_b_imag = _mm256_setzero_ps();

    for (long unsigned i = 0; i < (num_points & ~3u); i += 4) {
        /* Four complex elements are processed at a time.
         * (ar + j⋅ai)*conj(br + j⋅bi) =
         * ar⋅br + ai⋅bi + j⋅(ai⋅br − ar⋅bi)
         */

        /* Load input and taps, split and duplicate the real and imaginary
         * parts of taps.
         * a:      | ai,i+3 | ar,i+3 | … | ai,i+1 | ar,i+1 | ai,i+0 | ar,i+0 |
         * b:      | bi,i+3 | br,i+3 | … | bi,i+1 | br,i+1 | bi,i+0 | br,i+0 |
         * b_real: | br,i+3 | br,i+3 | … | br,i+1 | br,i+1 | br,i+0 | br,i+0 |
         * b_imag: | bi,i+3 | bi,i+3 | … | bi,i+1 | bi,i+1 | bi,i+0 | bi,i+0 |
         */
        __m256 a = _mm256_load_ps((const float*)&input[i]);
        __m256 b = _mm256_load_ps((const float*)&taps[i]);
        __m256 b_real = _mm256_moveldup_ps(b);
        __m256 b_imag = _mm256_movehdup_ps(b);

        // Add | ai⋅br,i+3 | ar⋅br,i+3 | … | ai⋅br,i+0 | ar⋅br,i+0 | to partial sum.
        sum_a_mult_b_real = _mm256_add_ps(sum_a_mult_b_real, _mm256_mul_ps(a, b_real));
        // Add | ai⋅bi,i+3 | −ar⋅bi,i+3 | … | ai⋅bi,i+0 | −ar⋅bi,i+0 | to partial sum.
        sum_a_mult_b_imag = _mm256_addsub_ps(sum_a_mult_b_imag, _mm256_mul_ps(a, b_imag));
    }

    // Swap position of −ar⋅bi and ai⋅bi.
    sum_a_mult_b_imag = _mm256_permute_ps(sum_a_mult_b_imag, _MM_SHUFFLE(2, 3, 0, 1));
    // Each complex lane pair now holds | ai⋅br − ar⋅bi | ar⋅br + ai⋅bi |;
    // sum contains four such partial sums.
    __m256 sum = _mm256_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);
    /* Sum the four partial sums: add the high half of sum to the low one, i.e.
     * s1 + s3 and s0 + s2 …
     */
    sum = _mm256_add_ps(sum, _mm256_permute2f128_ps(sum, sum, 0x01));
    // … and now (s0 + s2) + (s1 + s3)
    sum = _mm256_add_ps(sum, _mm256_permute_ps(sum, _MM_SHUFFLE(1, 0, 3, 2)));
    // Store result.
    __m128 lower = _mm256_extractf128_ps(sum, 0);
    _mm_storel_pi((__m64*)result, lower);

    // Handle the remaining elements if num_points mod 4 is greater than zero.
    for (long unsigned i = num_points & ~3u; i < num_points; ++i) {
        *result += lv_cmake(lv_creal(input[i]) * lv_creal(taps[i]) +
                                lv_cimag(input[i]) * lv_cimag(taps[i]),
                            lv_cimag(input[i]) * lv_creal(taps[i]) -
                                lv_creal(input[i]) * lv_cimag(taps[i]));
    }
}
#endif /* LV_HAVE_AVX */

#ifdef LV_HAVE_SSE3

#include <pmmintrin.h>
#include <xmmintrin.h>

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_a_sse3(lv_32fc_t* result,
                                                               const lv_32fc_t* input,
                                                               const lv_32fc_t* taps,
                                                               unsigned int num_points)
{
    // Partial sums for indices i and i+1.
    __m128 sum_a_mult_b_real = _mm_setzero_ps();
    __m128 sum_a_mult_b_imag = _mm_setzero_ps();

    for (long unsigned i = 0; i < (num_points & ~1u); i += 2) {
        /* Two complex elements are processed at a time.
         * (ar + j⋅ai)*conj(br + j⋅bi) =
         * ar⋅br + ai⋅bi + j⋅(ai⋅br − ar⋅bi)
         */

        /* Load input and taps, split and duplicate the real and imaginary
         * parts of taps.
         * a:      | ai,i+1 | ar,i+1 | ai,i+0 | ar,i+0 |
         * b:      | bi,i+1 | br,i+1 | bi,i+0 | br,i+0 |
         * b_real: | br,i+1 | br,i+1 | br,i+0 | br,i+0 |
         * b_imag: | bi,i+1 | bi,i+1 | bi,i+0 | bi,i+0 |
         */
        __m128 a = _mm_load_ps((const float*)&input[i]);
        __m128 b = _mm_load_ps((const float*)&taps[i]);
        __m128 b_real = _mm_moveldup_ps(b);
        __m128 b_imag = _mm_movehdup_ps(b);

        // Add | ai⋅br,i+1 | ar⋅br,i+1 | ai⋅br,i+0 | ar⋅br,i+0 | to partial sum.
        sum_a_mult_b_real = _mm_add_ps(sum_a_mult_b_real, _mm_mul_ps(a, b_real));
        // Add | ai⋅bi,i+1 | −ar⋅bi,i+1 | ai⋅bi,i+0 | −ar⋅bi,i+0 | to partial sum.
        sum_a_mult_b_imag = _mm_addsub_ps(sum_a_mult_b_imag, _mm_mul_ps(a, b_imag));
    }

    // Swap position of −ar⋅bi and ai⋅bi.
    sum_a_mult_b_imag =
        _mm_shuffle_ps(sum_a_mult_b_imag, sum_a_mult_b_imag, _MM_SHUFFLE(2, 3, 0, 1));
    // Each complex lane pair now holds | ai⋅br − ar⋅bi | ar⋅br + ai⋅bi |;
    // sum contains two such partial sums.
    __m128 sum = _mm_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);
    // Sum the two partial sums.
    sum = _mm_add_ps(sum, _mm_shuffle_ps(sum, sum, _MM_SHUFFLE(1, 0, 3, 2)));
    // Store result.
    _mm_storel_pi((__m64*)result, sum);

    // Handle the last element if num_points mod 2 is 1.
    if (num_points & 1u) {
        *result += lv_cmake(
            lv_creal(input[num_points - 1]) * lv_creal(taps[num_points - 1]) +
                lv_cimag(input[num_points - 1]) * lv_cimag(taps[num_points - 1]),
            lv_cimag(input[num_points - 1]) * lv_creal(taps[num_points - 1]) -
                lv_creal(input[num_points - 1]) * lv_cimag(taps[num_points - 1]));
    }
}

#endif /*LV_HAVE_SSE3*/


#ifdef LV_HAVE_GENERIC


static inline void volk_32fc_x2_conjugate_dot_prod_32fc_a_generic(lv_32fc_t* result,
                                                                  const lv_32fc_t* input,
                                                                  const lv_32fc_t* taps,
                                                                  unsigned int num_points)
{

    const unsigned int num_bytes = num_points * 8;

    float* res = (float*)result;
    float* in = (float*)input;
    float* tp = (float*)taps;
    unsigned int n_2_ccomplex_blocks = num_bytes >> 4;

    float sum0[2] = { 0, 0 };
    float sum1[2] = { 0, 0 };
    unsigned int i = 0;

    for (i = 0; i < n_2_ccomplex_blocks; ++i) {
        sum0[0] += in[0] * tp[0] + in[1] * tp[1];
        sum0[1] += (-in[0] * tp[1]) + in[1] * tp[0];
        sum1[0] += in[2] * tp[2] + in[3] * tp[3];
        sum1[1] += (-in[2] * tp[3]) + in[3] * tp[2];

        in += 4;
        tp += 4;
    }

    res[0] = sum0[0] + sum1[0];
    res[1] = sum0[1] + sum1[1];

    if (num_bytes >> 3 & 1) {
        *result += input[(num_bytes >> 3) - 1] * lv_conj(taps[(num_bytes >> 3) - 1]);
    }
}

#endif /*LV_HAVE_GENERIC*/


#if LV_HAVE_SSE && LV_HAVE_64

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_a_sse(lv_32fc_t* result,
                                                              const lv_32fc_t* input,
                                                              const lv_32fc_t* taps,
                                                              unsigned int num_points)
{

    const unsigned int num_bytes = num_points * 8;

    __VOLK_ATTR_ALIGNED(16)
    static const uint32_t conjugator[4] = {
        0x00000000, 0x80000000, 0x00000000, 0x80000000
    };
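    // XOR-ing a 32-bit float with 0x80000000 flips its sign bit, so applying
    // the mask above with xorps negates the imaginary lanes and thereby
    // conjugates two complex values per 128-bit vector.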

    __VOLK_ASM __VOLK_VOLATILE(
495  "# ccomplex_conjugate_dotprod_generic (float* result, const float *input,\n\t"
496  "# const float *taps, unsigned num_bytes)\n\t"
497  "# float sum0 = 0;\n\t"
498  "# float sum1 = 0;\n\t"
499  "# float sum2 = 0;\n\t"
500  "# float sum3 = 0;\n\t"
501  "# do {\n\t"
502  "# sum0 += input[0] * taps[0] - input[1] * taps[1];\n\t"
503  "# sum1 += input[0] * taps[1] + input[1] * taps[0];\n\t"
504  "# sum2 += input[2] * taps[2] - input[3] * taps[3];\n\t"
505  "# sum3 += input[2] * taps[3] + input[3] * taps[2];\n\t"
506  "# input += 4;\n\t"
507  "# taps += 4; \n\t"
508  "# } while (--n_2_ccomplex_blocks != 0);\n\t"
509  "# result[0] = sum0 + sum2;\n\t"
510  "# result[1] = sum1 + sum3;\n\t"
511  "# TODO: prefetch and better scheduling\n\t"
512  " xor %%r9, %%r9\n\t"
513  " xor %%r10, %%r10\n\t"
514  " movq %[conjugator], %%r9\n\t"
515  " movq %%rcx, %%rax\n\t"
516  " movaps 0(%%r9), %%xmm8\n\t"
517  " movq %%rcx, %%r8\n\t"
518  " movq %[rsi], %%r9\n\t"
519  " movq %[rdx], %%r10\n\t"
520  " xorps %%xmm6, %%xmm6 # zero accumulators\n\t"
521  " movaps 0(%%r9), %%xmm0\n\t"
522  " xorps %%xmm7, %%xmm7 # zero accumulators\n\t"
523  " movups 0(%%r10), %%xmm2\n\t"
524  " shr $5, %%rax # rax = n_2_ccomplex_blocks / 2\n\t"
525  " shr $4, %%r8\n\t"
526  " xorps %%xmm8, %%xmm2\n\t"
527  " jmp .%=L1_test\n\t"
528  " # 4 taps / loop\n\t"
529  " # something like ?? cycles / loop\n\t"
530  ".%=Loop1: \n\t"
531  "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
532  "# movaps (%%r9), %%xmmA\n\t"
533  "# movaps (%%r10), %%xmmB\n\t"
534  "# movaps %%xmmA, %%xmmZ\n\t"
535  "# shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
536  "# mulps %%xmmB, %%xmmA\n\t"
537  "# mulps %%xmmZ, %%xmmB\n\t"
538  "# # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
539  "# xorps %%xmmPN, %%xmmA\n\t"
540  "# movaps %%xmmA, %%xmmZ\n\t"
541  "# unpcklps %%xmmB, %%xmmA\n\t"
542  "# unpckhps %%xmmB, %%xmmZ\n\t"
543  "# movaps %%xmmZ, %%xmmY\n\t"
544  "# shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
545  "# shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
546  "# addps %%xmmZ, %%xmmA\n\t"
547  "# addps %%xmmA, %%xmmC\n\t"
548  "# A=xmm0, B=xmm2, Z=xmm4\n\t"
549  "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
550  " movaps 16(%%r9), %%xmm1\n\t"
551  " movaps %%xmm0, %%xmm4\n\t"
552  " mulps %%xmm2, %%xmm0\n\t"
553  " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
554  " movaps 16(%%r10), %%xmm3\n\t"
555  " movaps %%xmm1, %%xmm5\n\t"
556  " xorps %%xmm8, %%xmm3\n\t"
557  " addps %%xmm0, %%xmm6\n\t"
558  " mulps %%xmm3, %%xmm1\n\t"
559  " shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
560  " addps %%xmm1, %%xmm6\n\t"
561  " mulps %%xmm4, %%xmm2\n\t"
562  " movaps 32(%%r9), %%xmm0\n\t"
563  " addps %%xmm2, %%xmm7\n\t"
564  " mulps %%xmm5, %%xmm3\n\t"
565  " add $32, %%r9\n\t"
566  " movaps 32(%%r10), %%xmm2\n\t"
567  " addps %%xmm3, %%xmm7\n\t"
568  " add $32, %%r10\n\t"
569  " xorps %%xmm8, %%xmm2\n\t"
570  ".%=L1_test:\n\t"
571  " dec %%rax\n\t"
572  " jge .%=Loop1\n\t"
573  " # We've handled the bulk of multiplies up to here.\n\t"
574  " # Let's sse if original n_2_ccomplex_blocks was odd.\n\t"
575  " # If so, we've got 2 more taps to do.\n\t"
576  " and $1, %%r8\n\t"
577  " je .%=Leven\n\t"
578  " # The count was odd, do 2 more taps.\n\t"
579  " # Note that we've already got mm0/mm2 preloaded\n\t"
580  " # from the main loop.\n\t"
581  " movaps %%xmm0, %%xmm4\n\t"
582  " mulps %%xmm2, %%xmm0\n\t"
583  " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
584  " addps %%xmm0, %%xmm6\n\t"
585  " mulps %%xmm4, %%xmm2\n\t"
586  " addps %%xmm2, %%xmm7\n\t"
587  ".%=Leven:\n\t"
588  " # neg inversor\n\t"
589  " xorps %%xmm1, %%xmm1\n\t"
590  " mov $0x80000000, %%r9\n\t"
591  " movd %%r9, %%xmm1\n\t"
592  " shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
593  " # pfpnacc\n\t"
594  " xorps %%xmm1, %%xmm6\n\t"
595  " movaps %%xmm6, %%xmm2\n\t"
596  " unpcklps %%xmm7, %%xmm6\n\t"
597  " unpckhps %%xmm7, %%xmm2\n\t"
598  " movaps %%xmm2, %%xmm3\n\t"
599  " shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
600  " shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
601  " addps %%xmm2, %%xmm6\n\t"
602  " # xmm6 = r1 i2 r3 i4\n\t"
603  " movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
604  " addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
605  " movlps %%xmm6, (%[rdi]) # store low 2x32 bits (complex) "
606  "to memory\n\t"
607  :
608  : [rsi] "r"(input),
609  [rdx] "r"(taps),
610  "c"(num_bytes),
611  [rdi] "r"(result),
612  [conjugator] "r"(conjugator)
613  : "rax", "r8", "r9", "r10");

    int getem = num_bytes % 16;

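    // num_bytes % 16 is either 0 or 8, so this loop body runs at most once,
    // folding in the final odd element that the assembly loop skipped.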
    for (; getem > 0; getem -= 8) {
        *result += (input[(num_bytes >> 3) - 1] * lv_conj(taps[(num_bytes >> 3) - 1]));
    }
}
#endif

#if LV_HAVE_SSE && LV_HAVE_32
static inline void volk_32fc_x2_conjugate_dot_prod_32fc_a_sse_32(lv_32fc_t* result,
                                                                 const lv_32fc_t* input,
                                                                 const lv_32fc_t* taps,
                                                                 unsigned int num_points)
{

    const unsigned int num_bytes = num_points * 8;

    __VOLK_ATTR_ALIGNED(16)
    static const uint32_t conjugator[4] = {
        0x00000000, 0x80000000, 0x00000000, 0x80000000
    };

    int bound = num_bytes >> 4;
    int leftovers = num_bytes % 16;

    __VOLK_ASM __VOLK_VOLATILE(
641  " #pushl %%ebp\n\t"
642  " #movl %%esp, %%ebp\n\t"
643  " #movl 12(%%ebp), %%eax # input\n\t"
644  " #movl 16(%%ebp), %%edx # taps\n\t"
645  " #movl 20(%%ebp), %%ecx # n_bytes\n\t"
646  " movaps 0(%[conjugator]), %%xmm1\n\t"
647  " xorps %%xmm6, %%xmm6 # zero accumulators\n\t"
648  " movaps 0(%[eax]), %%xmm0\n\t"
649  " xorps %%xmm7, %%xmm7 # zero accumulators\n\t"
650  " movaps 0(%[edx]), %%xmm2\n\t"
651  " movl %[ecx], (%[out])\n\t"
652  " shrl $5, %[ecx] # ecx = n_2_ccomplex_blocks / 2\n\t"
653 
654  " xorps %%xmm1, %%xmm2\n\t"
655  " jmp .%=L1_test\n\t"
656  " # 4 taps / loop\n\t"
657  " # something like ?? cycles / loop\n\t"
658  ".%=Loop1: \n\t"
659  "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
660  "# movaps (%[eax]), %%xmmA\n\t"
661  "# movaps (%[edx]), %%xmmB\n\t"
662  "# movaps %%xmmA, %%xmmZ\n\t"
663  "# shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
664  "# mulps %%xmmB, %%xmmA\n\t"
665  "# mulps %%xmmZ, %%xmmB\n\t"
666  "# # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
667  "# xorps %%xmmPN, %%xmmA\n\t"
668  "# movaps %%xmmA, %%xmmZ\n\t"
669  "# unpcklps %%xmmB, %%xmmA\n\t"
670  "# unpckhps %%xmmB, %%xmmZ\n\t"
671  "# movaps %%xmmZ, %%xmmY\n\t"
672  "# shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
673  "# shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
674  "# addps %%xmmZ, %%xmmA\n\t"
675  "# addps %%xmmA, %%xmmC\n\t"
676  "# A=xmm0, B=xmm2, Z=xmm4\n\t"
677  "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
678  " movaps 16(%[edx]), %%xmm3\n\t"
679  " movaps %%xmm0, %%xmm4\n\t"
680  " xorps %%xmm1, %%xmm3\n\t"
681  " mulps %%xmm2, %%xmm0\n\t"
682  " movaps 16(%[eax]), %%xmm1\n\t"
683  " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
684  " movaps %%xmm1, %%xmm5\n\t"
685  " addps %%xmm0, %%xmm6\n\t"
686  " mulps %%xmm3, %%xmm1\n\t"
687  " shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
688  " addps %%xmm1, %%xmm6\n\t"
689  " movaps 0(%[conjugator]), %%xmm1\n\t"
690  " mulps %%xmm4, %%xmm2\n\t"
691  " movaps 32(%[eax]), %%xmm0\n\t"
692  " addps %%xmm2, %%xmm7\n\t"
693  " mulps %%xmm5, %%xmm3\n\t"
694  " addl $32, %[eax]\n\t"
695  " movaps 32(%[edx]), %%xmm2\n\t"
696  " addps %%xmm3, %%xmm7\n\t"
697  " xorps %%xmm1, %%xmm2\n\t"
698  " addl $32, %[edx]\n\t"
699  ".%=L1_test:\n\t"
700  " decl %[ecx]\n\t"
701  " jge .%=Loop1\n\t"
702  " # We've handled the bulk of multiplies up to here.\n\t"
703  " # Let's sse if original n_2_ccomplex_blocks was odd.\n\t"
704  " # If so, we've got 2 more taps to do.\n\t"
705  " movl 0(%[out]), %[ecx] # n_2_ccomplex_blocks\n\t"
706  " shrl $4, %[ecx]\n\t"
707  " andl $1, %[ecx]\n\t"
708  " je .%=Leven\n\t"
709  " # The count was odd, do 2 more taps.\n\t"
710  " # Note that we've already got mm0/mm2 preloaded\n\t"
711  " # from the main loop.\n\t"
712  " movaps %%xmm0, %%xmm4\n\t"
713  " mulps %%xmm2, %%xmm0\n\t"
714  " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
715  " addps %%xmm0, %%xmm6\n\t"
716  " mulps %%xmm4, %%xmm2\n\t"
717  " addps %%xmm2, %%xmm7\n\t"
718  ".%=Leven:\n\t"
719  " # neg inversor\n\t"
720  " #movl 8(%%ebp), %[eax] \n\t"
721  " xorps %%xmm1, %%xmm1\n\t"
722  " movl $0x80000000, (%[out])\n\t"
723  " movss (%[out]), %%xmm1\n\t"
724  " shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
725  " # pfpnacc\n\t"
726  " xorps %%xmm1, %%xmm6\n\t"
727  " movaps %%xmm6, %%xmm2\n\t"
728  " unpcklps %%xmm7, %%xmm6\n\t"
729  " unpckhps %%xmm7, %%xmm2\n\t"
730  " movaps %%xmm2, %%xmm3\n\t"
731  " shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
732  " shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
733  " addps %%xmm2, %%xmm6\n\t"
734  " # xmm6 = r1 i2 r3 i4\n\t"
735  " #movl 8(%%ebp), %[eax] # @result\n\t"
736  " movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
737  " addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
738  " movlps %%xmm6, (%[out]) # store low 2x32 bits (complex) "
739  "to memory\n\t"
740  " #popl %%ebp\n\t"
741  :
742  : [eax] "r"(input),
743  [edx] "r"(taps),
744  [ecx] "r"(num_bytes),
745  [out] "r"(result),
746  [conjugator] "r"(conjugator));

    for (; leftovers > 0; leftovers -= 8) {
        // bound << 1 is num_points rounded down to an even count, i.e. the
        // index of the single remaining element when num_points is odd.
        *result += (input[(bound << 1)] * lv_conj(taps[(bound << 1)]));
    }
}
#endif /*LV_HAVE_SSE*/


#endif /*INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_a_H*/