Vector Optimized Library of Kernels  2.0
Architecture-tuned implementations of math kernels
volk_16i_permute_and_scalar_add.h
Go to the documentation of this file.
1 /* -*- c++ -*- */
2 /*
3  * Copyright 2012, 2014 Free Software Foundation, Inc.
4  *
5  * This file is part of GNU Radio
6  *
7  * GNU Radio is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 3, or (at your option)
10  * any later version.
11  *
12  * GNU Radio is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with GNU Radio; see the file COPYING. If not, write to
19  * the Free Software Foundation, Inc., 51 Franklin Street,
20  * Boston, MA 02110-1301, USA.
21  */
22 
58 #ifndef INCLUDED_volk_16i_permute_and_scalar_add_a_H
59 #define INCLUDED_volk_16i_permute_and_scalar_add_a_H
60 
61 #include<inttypes.h>
62 #include<stdio.h>
63 
64 #ifdef LV_HAVE_SSE2
65 
66 #include<xmmintrin.h>
67 #include<emmintrin.h>
68 
/*!
 * \brief SSE2 (aligned) permute with masked scalar add.
 *
 * For each 16-bit element i (0 <= i < num_points):
 *   target[i] = src0[permute_indexes[i]]
 *             + (cntl0[i] & scalars[0]) + (cntl1[i] & scalars[1])
 *             + (cntl2[i] & scalars[2]) + (cntl3[i] & scalars[3])
 *
 * The cntl* buffers are ANDed with a broadcast scalar, i.e. they act as
 * per-element selection masks.  target and cntl0..cntl3 are accessed via
 * _mm_load_si128/_mm_store_si128 and scalars via _mm_load_si128, so those
 * buffers must be 16-byte aligned (the "_a" variant).  src0 is gathered
 * element-by-element through permute_indexes and needs no alignment.
 */
static inline void
volk_16i_permute_and_scalar_add_a_sse2(short* target, short* src0, short* permute_indexes,
 short* cntl0, short* cntl1, short* cntl2, short* cntl3,
 short* scalars, unsigned int num_points)
{

 const unsigned int num_bytes = num_points*2;

 __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;

 __m128i *p_target, *p_cntl0, *p_cntl1, *p_cntl2, *p_cntl3, *p_scalars;

 short* p_permute_indexes = permute_indexes;

 p_target = (__m128i*)target;
 p_cntl0 = (__m128i*)cntl0;
 p_cntl1 = (__m128i*)cntl1;
 p_cntl2 = (__m128i*)cntl2;
 p_cntl3 = (__m128i*)cntl3;
 p_scalars = (__m128i*)scalars;

 int i = 0;

 /* bound: number of full 8-element (128-bit) vector iterations.
    leftovers: remaining 16-bit elements, handled by the scalar tail loop. */
 int bound = (num_bytes >> 4);
 int leftovers = (num_bytes >> 1) & 7;

 xmm0 = _mm_load_si128(p_scalars);

 /* Broadcast scalars[0..3] across all 8 lanes of xmm1..xmm4 respectively:
    shufflelo replicates the chosen word through the low 4 lanes, then
    shuffle_epi32(…, 0x00) copies that low quadword to the whole register. */
 xmm1 = _mm_shufflelo_epi16(xmm0, 0);
 xmm2 = _mm_shufflelo_epi16(xmm0, 0x55);
 xmm3 = _mm_shufflelo_epi16(xmm0, 0xaa);
 xmm4 = _mm_shufflelo_epi16(xmm0, 0xff);

 xmm1 = _mm_shuffle_epi32(xmm1, 0x00);
 xmm2 = _mm_shuffle_epi32(xmm2, 0x00);
 xmm3 = _mm_shuffle_epi32(xmm3, 0x00);
 xmm4 = _mm_shuffle_epi32(xmm4, 0x00);


 for(; i < bound; ++i) {
 xmm0 = _mm_setzero_si128();
 xmm5 = _mm_setzero_si128();
 xmm6 = _mm_setzero_si128();
 xmm7 = _mm_setzero_si128();

 /* Gather 8 permuted elements.  Each of xmm0/xmm5/xmm6/xmm7 receives two
    of the 8 lanes (0&4, 1&5, 2&6, 3&7) while its other lanes stay zero,
    so summing the four registers reassembles the full permuted vector. */
 xmm0 = _mm_insert_epi16(xmm0, src0[p_permute_indexes[0]], 0);
 xmm5 = _mm_insert_epi16(xmm5, src0[p_permute_indexes[1]], 1);
 xmm6 = _mm_insert_epi16(xmm6, src0[p_permute_indexes[2]], 2);
 xmm7 = _mm_insert_epi16(xmm7, src0[p_permute_indexes[3]], 3);
 xmm0 = _mm_insert_epi16(xmm0, src0[p_permute_indexes[4]], 4);
 xmm5 = _mm_insert_epi16(xmm5, src0[p_permute_indexes[5]], 5);
 xmm6 = _mm_insert_epi16(xmm6, src0[p_permute_indexes[6]], 6);
 xmm7 = _mm_insert_epi16(xmm7, src0[p_permute_indexes[7]], 7);

 xmm0 = _mm_add_epi16(xmm0, xmm5);
 xmm6 = _mm_add_epi16(xmm6, xmm7);

 p_permute_indexes += 8;

 xmm0 = _mm_add_epi16(xmm0, xmm6);

 /* Mask each broadcast scalar by its control vector and accumulate.
    xmm5/xmm6/xmm7 are reused here; the gather values are already folded
    into xmm0 above. */
 xmm5 = _mm_load_si128(p_cntl0);
 xmm6 = _mm_load_si128(p_cntl1);
 xmm7 = _mm_load_si128(p_cntl2);

 xmm5 = _mm_and_si128(xmm5, xmm1);
 xmm6 = _mm_and_si128(xmm6, xmm2);
 xmm7 = _mm_and_si128(xmm7, xmm3);

 xmm0 = _mm_add_epi16(xmm0, xmm5);

 xmm5 = _mm_load_si128(p_cntl3);

 xmm6 = _mm_add_epi16(xmm6, xmm7);

 p_cntl0 += 1;

 xmm5 = _mm_and_si128(xmm5, xmm4);

 xmm0 = _mm_add_epi16(xmm0, xmm6);

 p_cntl1 += 1;
 p_cntl2 += 1;

 xmm0 = _mm_add_epi16(xmm0, xmm5);

 p_cntl3 += 1;

 _mm_store_si128(p_target, xmm0);

 p_target += 1;
 }

 /* Scalar tail: process the final (num_points % 8) elements. */
 for(i = bound * 8; i < (bound * 8) + leftovers; ++i) {
 target[i] = src0[permute_indexes[i]]
 + (cntl0[i] & scalars[0])
 + (cntl1[i] & scalars[1])
 + (cntl2[i] & scalars[2])
 + (cntl3[i] & scalars[3]);
 }
}
#endif /*LV_HAVE_SSE2*/
171 
172 
173 #ifdef LV_HAVE_GENERIC
/*!
 * \brief Generic (portable) permute with masked scalar add.
 *
 * For every 16-bit element i (0 <= i < num_points):
 *   target[i] = src0[permute_indexes[i]]
 *             + (cntl0[i] & scalars[0]) + (cntl1[i] & scalars[1])
 *             + (cntl2[i] & scalars[2]) + (cntl3[i] & scalars[3])
 *
 * The cntl* buffers are ANDed with the corresponding scalar, so they act
 * as per-element selection masks.  No alignment requirements.
 */
static inline void
volk_16i_permute_and_scalar_add_generic(short* target, short* src0, short* permute_indexes,
 short* cntl0, short* cntl1, short* cntl2, short* cntl3,
 short* scalars, unsigned int num_points)
{
  /* Element count recovered from the byte count, mirroring the style of
     the other implementations in this file. */
  const unsigned int num_bytes = num_points * 2;
  const int n_elems = (int)(num_bytes >> 1);
  int idx;

  for (idx = 0; idx < n_elems; ++idx) {
    /* Accumulate in int (the natural promotion width), truncate on store. */
    int acc = src0[permute_indexes[idx]];
    acc += cntl0[idx] & scalars[0];
    acc += cntl1[idx] & scalars[1];
    acc += cntl2[idx] & scalars[2];
    acc += cntl3[idx] & scalars[3];
    target[idx] = (short)acc;
  }
}
193 
194 #endif /*LV_HAVE_GENERIC*/
195 
196 #endif /*INCLUDED_volk_16i_permute_and_scalar_add_a_H*/
static void volk_16i_permute_and_scalar_add_generic(short *target, short *src0, short *permute_indexes, short *cntl0, short *cntl1, short *cntl2, short *cntl3, short *scalars, unsigned int num_points)
Definition: volk_16i_permute_and_scalar_add.h:175
static void volk_16i_permute_and_scalar_add_a_sse2(short *target, short *src0, short *permute_indexes, short *cntl0, short *cntl1, short *cntl2, short *cntl3, short *scalars, unsigned int num_points)
Definition: volk_16i_permute_and_scalar_add.h:70
for i
Definition: volk_config_fixed.tmpl.h:25