volk_16i_x5_add_quad_16i_x4.h
/* -*- c++ -*- */
/*
 * Copyright 2012, 2014 Free Software Foundation, Inc.
 *
 * This file is part of GNU Radio
 *
 * GNU Radio is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3, or (at your option)
 * any later version.
 *
 * GNU Radio is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU Radio; see the file COPYING. If not, write to
 * the Free Software Foundation, Inc., 51 Franklin Street,
 * Boston, MA 02110-1301, USA.
 */

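/*!
 * \page volk_16i_x5_add_quad_16i_x4
 *
 * \b Overview
 *
 * (Summary reconstructed from the kernel signatures below; the dispatcher
 * prototype follows the usual libvolk naming conventions and is an
 * illustration, not a quote of the generated header.)
 *
 * Adds the common 16-bit integer vector src0 to each of the four vectors
 * src1 through src4, producing four sum vectors:
 *
 * \code
 * target0[i] = src0[i] + src1[i]
 * target1[i] = src0[i] + src2[i]
 * target2[i] = src0[i] + src3[i]
 * target3[i] = src0[i] + src4[i]
 * \endcode
 *
 * <b>Dispatcher Prototype</b>
 * \code
 * void volk_16i_x5_add_quad_16i_x4(short* target0, short* target1,
 *                                  short* target2, short* target3,
 *                                  short* src0, short* src1, short* src2,
 *                                  short* src3, short* src4,
 *                                  unsigned int num_points);
 * \endcode
 *
 * \b Inputs
 * \li src0: The common vector added to each of the other four inputs.
 * \li src1, src2, src3, src4: The vectors to which src0 is added.
 * \li num_points: The number of 16-bit samples in each vector.
 *
 * \b Outputs
 * \li target0, target1, target2, target3: The four sum vectors.
 */
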
#ifndef INCLUDED_volk_16i_x5_add_quad_16i_x4_a_H
#define INCLUDED_volk_16i_x5_add_quad_16i_x4_a_H

#include <inttypes.h>
#include <stdio.h>

#ifdef LV_HAVE_SSE2
#include <xmmintrin.h>
#include <emmintrin.h>

static inline void
volk_16i_x5_add_quad_16i_x4_a_sse2(short* target0, short* target1, short* target2, short* target3,
                                   short* src0, short* src1, short* src2, short* src3, short* src4,
                                   unsigned int num_points)
{
  const unsigned int num_bytes = num_points * 2;

  __m128i xmm0, xmm1, xmm2, xmm3, xmm4;
  __m128i *p_target0, *p_target1, *p_target2, *p_target3, *p_src0, *p_src1, *p_src2, *p_src3, *p_src4;
  p_target0 = (__m128i*)target0;
  p_target1 = (__m128i*)target1;
  p_target2 = (__m128i*)target2;
  p_target3 = (__m128i*)target3;

  p_src0 = (__m128i*)src0;
  p_src1 = (__m128i*)src1;
  p_src2 = (__m128i*)src2;
  p_src3 = (__m128i*)src3;
  p_src4 = (__m128i*)src4;

  int i = 0;

  // Each __m128i holds 8 shorts, so the vector loop runs num_bytes/16 times;
  // the remaining (num_points mod 8) samples fall to the scalar tail below.
  // This is the aligned (_a) variant: _mm_load_si128/_mm_store_si128 require
  // all buffers to be 16-byte aligned.
  int bound = (num_bytes >> 4);
  int leftovers = (num_bytes >> 1) & 7;

  for(; i < bound; ++i) {
    xmm0 = _mm_load_si128(p_src0);
    xmm1 = _mm_load_si128(p_src1);
    xmm2 = _mm_load_si128(p_src2);
    xmm3 = _mm_load_si128(p_src3);
    xmm4 = _mm_load_si128(p_src4);

    p_src0 += 1;
    p_src1 += 1;

    // Add the common vector src0 to each of the four other inputs.
    xmm1 = _mm_add_epi16(xmm0, xmm1);
    xmm2 = _mm_add_epi16(xmm0, xmm2);
    xmm3 = _mm_add_epi16(xmm0, xmm3);
    xmm4 = _mm_add_epi16(xmm0, xmm4);

    p_src2 += 1;
    p_src3 += 1;
    p_src4 += 1;

    _mm_store_si128(p_target0, xmm1);
    _mm_store_si128(p_target1, xmm2);
    _mm_store_si128(p_target2, xmm3);
    _mm_store_si128(p_target3, xmm4);

    p_target0 += 1;
    p_target1 += 1;
    p_target2 += 1;
    p_target3 += 1;
  }
  /* Equivalent hand-written inline-asm version of the loop above,
   * kept in the source but disabled: */
  /*__VOLK_ASM __VOLK_VOLATILE
  (
  ".%=volk_16i_x5_add_quad_16i_x4_a_sse2_L1:\n\t"
  "cmp $0, %[bound]\n\t"
  "je .%=volk_16i_x5_add_quad_16i_x4_a_sse2_END\n\t"
  "movaps (%[src0]), %%xmm1\n\t"
  "movaps (%[src1]), %%xmm2\n\t"
  "movaps (%[src2]), %%xmm3\n\t"
  "movaps (%[src3]), %%xmm4\n\t"
  "movaps (%[src4]), %%xmm5\n\t"
  "add $16, %[src0]\n\t"
  "add $16, %[src1]\n\t"
  "add $16, %[src2]\n\t"
  "add $16, %[src3]\n\t"
  "add $16, %[src4]\n\t"
  "paddw %%xmm1, %%xmm2\n\t"
  "paddw %%xmm1, %%xmm3\n\t"
  "paddw %%xmm1, %%xmm4\n\t"
  "paddw %%xmm1, %%xmm5\n\t"
  "add $-1, %[bound]\n\t"
  "movaps %%xmm2, (%[target0])\n\t"
  "movaps %%xmm3, (%[target1])\n\t"
  "movaps %%xmm4, (%[target2])\n\t"
  "movaps %%xmm5, (%[target3])\n\t"
  "add $16, %[target0]\n\t"
  "add $16, %[target1]\n\t"
  "add $16, %[target2]\n\t"
  "add $16, %[target3]\n\t"
  "jmp .%=volk_16i_x5_add_quad_16i_x4_a_sse2_L1\n\t"
  ".%=volk_16i_x5_add_quad_16i_x4_a_sse2_END:\n\t"
  :
  :[bound]"r"(bound), [src0]"r"(src0), [src1]"r"(src1), [src2]"r"(src2), [src3]"r"(src3), [src4]"r"(src4), [target0]"r"(target0), [target1]"r"(target1), [target2]"r"(target2), [target3]"r"(target3)
  :"xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
  );
  */

  // Scalar tail for the last (num_points mod 8) samples.
  for(i = bound * 8; i < (bound * 8) + leftovers; ++i) {
    target0[i] = src0[i] + src1[i];
    target1[i] = src0[i] + src2[i];
    target2[i] = src0[i] + src3[i];
    target3[i] = src0[i] + src4[i];
  }
}
#endif /*LV_HAVE_SSE2*/

#ifdef LV_HAVE_NEON
#include <arm_neon.h>

static inline void
volk_16i_x5_add_quad_16i_x4_neon(short* target0, short* target1, short* target2, short* target3,
                                 short* src0, short* src1, short* src2, short* src3, short* src4,
                                 unsigned int num_points)
{
  const unsigned int eighth_points = num_points / 8;
  unsigned int number = 0;

  int16x8_t src0_vec, src1_vec, src2_vec, src3_vec, src4_vec;
  int16x8_t target0_vec, target1_vec, target2_vec, target3_vec;
  // Process 8 shorts per iteration with 128-bit NEON vectors.
  for(number = 0; number < eighth_points; ++number) {
    src0_vec = vld1q_s16(src0);
    src1_vec = vld1q_s16(src1);
    src2_vec = vld1q_s16(src2);
    src3_vec = vld1q_s16(src3);
    src4_vec = vld1q_s16(src4);

    // Add the common vector src0 to each of the four other inputs.
    target0_vec = vaddq_s16(src0_vec, src1_vec);
    target1_vec = vaddq_s16(src0_vec, src2_vec);
    target2_vec = vaddq_s16(src0_vec, src3_vec);
    target3_vec = vaddq_s16(src0_vec, src4_vec);

    vst1q_s16(target0, target0_vec);
    vst1q_s16(target1, target1_vec);
    vst1q_s16(target2, target2_vec);
    vst1q_s16(target3, target3_vec);
    src0 += 8;
    src1 += 8;
    src2 += 8;
    src3 += 8;
    src4 += 8;
    target0 += 8;
    target1 += 8;
    target2 += 8;
    target3 += 8;
  }

  // Scalar tail for the remaining (num_points mod 8) samples.
  for(number = eighth_points * 8; number < num_points; ++number) {
    *target0++ = *src0 + *src1++;
    *target1++ = *src0 + *src2++;
    *target2++ = *src0 + *src3++;
    *target3++ = *src0++ + *src4++;
  }
}

#endif /* LV_HAVE_NEON */

#ifdef LV_HAVE_GENERIC

static inline void
volk_16i_x5_add_quad_16i_x4_generic(short* target0, short* target1, short* target2, short* target3,
                                    short* src0, short* src1, short* src2, short* src3, short* src4,
                                    unsigned int num_points)
{
  unsigned int i;

  // Plain scalar fallback: one addition per output sample.
  for(i = 0; i < num_points; ++i) {
    target0[i] = src0[i] + src1[i];
    target1[i] = src0[i] + src2[i];
    target2[i] = src0[i] + src3[i];
    target3[i] = src0[i] + src4[i];
  }
}

#endif /* LV_HAVE_GENERIC */

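/* Usage sketch (illustrative; volk_malloc(), volk_get_alignment(),
 * volk_free() and the volk_16i_x5_add_quad_16i_x4 dispatcher are the
 * standard libvolk API from <volk/volk.h>; the buffer length is arbitrary):
 *
 * \code
 * #include <volk/volk.h>
 *
 * unsigned int num_points = 1024;
 * size_t align = volk_get_alignment();
 * short* buf[9]; // src0..src4, then target0..target3
 * for (int n = 0; n < 9; ++n)
 *     buf[n] = (short*)volk_malloc(num_points * sizeof(short), align);
 *
 * // ... fill buf[0] through buf[4] with input samples ...
 *
 * volk_16i_x5_add_quad_16i_x4(buf[5], buf[6], buf[7], buf[8],
 *                             buf[0], buf[1], buf[2], buf[3], buf[4],
 *                             num_points);
 *
 * for (int n = 0; n < 9; ++n)
 *     volk_free(buf[n]);
 * \endcode
 */
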
#endif /*INCLUDED_volk_16i_x5_add_quad_16i_x4_a_H*/