58 #ifndef INCLUDED_volk_32fc_x2_dot_prod_32fc_u_H
59 #define INCLUDED_volk_32fc_x2_dot_prod_32fc_u_H
#ifdef LV_HAVE_GENERIC

/*
 * Portable C implementation of the complex dot product:
 *   *result = sum_i input[i] * taps[i]
 * Two complex samples are processed per iteration into two independent
 * partial accumulators (sum0, sum1) so the FP adds can overlap.
 *
 * result     - pointer to a single lv_32fc_t receiving the dot product
 * input      - first input vector (num_points complex samples)
 * taps       - second input vector (num_points complex samples)
 * num_points - number of complex samples in each input
 */
static inline void volk_32fc_x2_dot_prod_32fc_generic(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  float * res = (float*) result;
  float * in = (float*) input;
  float * tp = (float*) taps;
  /* each block covers two complex samples (4 floats per buffer) */
  unsigned int n_2_ccomplex_blocks = num_points/2;
  unsigned int i = 0;

  float sum0[2] = {0,0};
  float sum1[2] = {0,0};

  for(i = 0; i < n_2_ccomplex_blocks; ++i) {
    /* (a+bi)(c+di) = (ac - bd) + (ad + bc)i, for two samples at once */
    sum0[0] += in[0] * tp[0] - in[1] * tp[1];
    sum0[1] += in[0] * tp[1] + in[1] * tp[0];
    sum1[0] += in[2] * tp[2] - in[3] * tp[3];
    sum1[1] += in[2] * tp[3] + in[3] * tp[2];

    /* advance past the two complex samples consumed this iteration */
    in += 4;
    tp += 4;
  }

  /* merge the two partial accumulators */
  res[0] = sum0[0] + sum1[0];
  res[1] = sum0[1] + sum1[1];

  /* odd sample count: the block loop above misses the final sample */
  if (num_points & 1) {
    *result += input[num_points - 1] * taps[num_points - 1];
  }
}

#endif /*LV_HAVE_GENERIC*/
#if LV_HAVE_SSE && LV_HAVE_64

/*
 * x86-64 SSE inline-assembly complex dot product over UNALIGNED buffers
 * (movups loads): *result = sum_i input[i] * taps[i].
 * The asm loop consumes 4 complex samples (32 bytes) per iteration into two
 * xmm accumulators, then folds them down to a single complex value.
 * NOTE(review): this extract is missing several lines of the original
 * function -- the `asm volatile (` opener, the `.%=Loop1:` / `.%=L1_test:`
 * labels and loop-control instructions, the r9 pointer advance, the closing
 * `);` of the asm statement, and the `if (isodd)` guard around the scalar
 * tail -- confirm against the full file before relying on this text.
 */
static inline void volk_32fc_x2_dot_prod_32fc_u_sse_64(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  const unsigned int num_bytes = num_points*8;  /* 8 bytes per complex-float sample */
  unsigned int isodd = num_points & 1;          /* one scalar sample left for the epilogue */

  /* The '#'-prefixed strings are assembler comments documenting the
     reference C algorithm the asm below implements. */
  "# ccomplex_dotprod_generic (float* result, const float *input,\n\t"
  "# const float *taps, unsigned num_bytes)\n\t"
  "# float sum0 = 0;\n\t"
  "# float sum1 = 0;\n\t"
  "# float sum2 = 0;\n\t"
  "# float sum3 = 0;\n\t"
  "# sum0 += input[0] * taps[0] - input[1] * taps[1];\n\t"
  "# sum1 += input[0] * taps[1] + input[1] * taps[0];\n\t"
  "# sum2 += input[2] * taps[2] - input[3] * taps[3];\n\t"
  "# sum3 += input[2] * taps[3] + input[3] * taps[2];\n\t"
  "# } while (--n_2_ccomplex_blocks != 0);\n\t"
  "# result[0] = sum0 + sum2;\n\t"
  "# result[1] = sum1 + sum3;\n\t"
  "# TODO: prefetch and better scheduling\n\t"
  /* r9 <- input pointer, r10 <- taps pointer; rcx carries num_bytes in */
  " xor %%r9, %%r9\n\t"
  " xor %%r10, %%r10\n\t"
  " movq %%rcx, %%rax\n\t"
  " movq %%rcx, %%r8\n\t"
  " movq %[rsi], %%r9\n\t"
  " movq %[rdx], %%r10\n\t"
  /* xmm6/xmm7 hold the two running partial-product accumulators */
  " xorps %%xmm6, %%xmm6 # zero accumulators\n\t"
  " movups 0(%%r9), %%xmm0\n\t"
  " xorps %%xmm7, %%xmm7 # zero accumulators\n\t"
  " movups 0(%%r10), %%xmm2\n\t"
  /* each loop pass consumes 32 bytes, hence the shift by 5 */
  " shr $5, %%rax # rax = n_2_ccomplex_blocks / 2\n\t"
  " jmp .%=L1_test\n\t"
  " # 4 taps / loop\n\t"
  " # something like ?? cycles / loop\n\t"
  /* Commented register-level recipe for one complex multiply-accumulate: */
  "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
  "# movups (%%r9), %%xmmA\n\t"
  "# movups (%%r10), %%xmmB\n\t"
  "# movups %%xmmA, %%xmmZ\n\t"
  "# shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
  "# mulps %%xmmB, %%xmmA\n\t"
  "# mulps %%xmmZ, %%xmmB\n\t"
  "# # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
  "# xorps %%xmmPN, %%xmmA\n\t"
  "# movups %%xmmA, %%xmmZ\n\t"
  "# unpcklps %%xmmB, %%xmmA\n\t"
  "# unpckhps %%xmmB, %%xmmZ\n\t"
  "# movups %%xmmZ, %%xmmY\n\t"
  "# shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
  "# shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
  "# addps %%xmmZ, %%xmmA\n\t"
  "# addps %%xmmA, %%xmmC\n\t"
  "# A=xmm0, B=xmm2, Z=xmm4\n\t"
  "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
  /* main loop body: two complex multiplies, software-pipelined with the
     loads for the next iteration */
  " movups 16(%%r9), %%xmm1\n\t"
  " movups %%xmm0, %%xmm4\n\t"
  " mulps %%xmm2, %%xmm0\n\t"
  " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
  " movups 16(%%r10), %%xmm3\n\t"
  " movups %%xmm1, %%xmm5\n\t"
  " addps %%xmm0, %%xmm6\n\t"
  " mulps %%xmm3, %%xmm1\n\t"
  " shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
  " addps %%xmm1, %%xmm6\n\t"
  " mulps %%xmm4, %%xmm2\n\t"
  " movups 32(%%r9), %%xmm0\n\t"
  " addps %%xmm2, %%xmm7\n\t"
  " mulps %%xmm5, %%xmm3\n\t"
  " movups 32(%%r10), %%xmm2\n\t"
  " addps %%xmm3, %%xmm7\n\t"
  " add $32, %%r10\n\t"
  " # We've handled the bulk of multiplies up to here.\n\t"
  " # Let's sse if original n_2_ccomplex_blocks was odd.\n\t"
  " # If so, we've got 2 more taps to do.\n\t"
  " # The count was odd, do 2 more taps.\n\t"
  " # Note that we've already got mm0/mm2 preloaded\n\t"
  " # from the main loop.\n\t"
  " movups %%xmm0, %%xmm4\n\t"
  " mulps %%xmm2, %%xmm0\n\t"
  " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
  " addps %%xmm0, %%xmm6\n\t"
  " mulps %%xmm4, %%xmm2\n\t"
  " addps %%xmm2, %%xmm7\n\t"
  /* build the 0,-0,0,-0 sign mask used to fix up the real parts */
  " # neg inversor\n\t"
  " xorps %%xmm1, %%xmm1\n\t"
  " mov $0x80000000, %%r9\n\t"
  " movd %%r9, %%xmm1\n\t"
  " shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
  /* horizontal reduction of the two accumulators to one complex value */
  " xorps %%xmm1, %%xmm6\n\t"
  " movups %%xmm6, %%xmm2\n\t"
  " unpcklps %%xmm7, %%xmm6\n\t"
  " unpckhps %%xmm7, %%xmm2\n\t"
  " movups %%xmm2, %%xmm3\n\t"
  " shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
  " shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
  " addps %%xmm2, %%xmm6\n\t"
  " # xmm6 = r1 i2 r3 i4\n\t"
  " movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
  " addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
  " movlps %%xmm6, (%[rdi]) # store low 2x32 bits (complex) to memory\n\t"
  /* input operands: rsi=input, rdx=taps, rcx=num_bytes, rdi=result */
  :[rsi] "r" (input), [rdx] "r" (taps), "c" (num_bytes), [rdi] "r" (result)
  /* clobbered scratch registers */
  :"rax", "r8", "r9", "r10"
  /* scalar epilogue for an odd sample count (guard missing in this extract) */
  *result += input[num_points - 1] * taps[num_points - 1];
#ifdef LV_HAVE_SSE3

#include <pmmintrin.h>

/*
 * SSE3 (unaligned) complex dot product: *result = sum_i input[i] * taps[i].
 * Processes two complex samples (4 floats) per iteration using the
 * moveldup/movehdup/addsub complex-multiply idiom.
 *
 * result     - pointer to a single lv_32fc_t receiving the dot product
 * input/taps - input vectors of num_points complex samples
 */
static inline void volk_32fc_x2_dot_prod_32fc_u_sse3(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  lv_32fc_t dotProduct;
  memset(&dotProduct, 0x0, 2*sizeof(float));

  unsigned int number = 0;
  const unsigned int halfPoints = num_points/2;  /* 2 complex samples per pass */
  unsigned int isodd = num_points & 1;           /* leftover sample for scalar tail */

  __m128 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

  const lv_32fc_t* a = input;
  const lv_32fc_t* b = taps;

  dotProdVal = _mm_setzero_ps();

  for(;number < halfPoints; number++){

    x = _mm_loadu_ps((float*)a); /* ar,ai,br,bi */
    y = _mm_loadu_ps((float*)b); /* cr,ci,dr,di */

    yl = _mm_moveldup_ps(y); /* cr,cr,dr,dr */
    yh = _mm_movehdup_ps(y); /* ci,ci,di,di */

    tmp1 = _mm_mul_ps(x,yl); /* ar*cr, ai*cr, br*dr, bi*dr */

    x = _mm_shuffle_ps(x,x,0xB1); /* swap to ai,ar,bi,br */

    tmp2 = _mm_mul_ps(x,yh); /* ai*ci, ar*ci, bi*di, br*di */

    /* ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di */
    z = _mm_addsub_ps(tmp1,tmp2);

    dotProdVal = _mm_add_ps(dotProdVal, z); /* accumulate the two products */

    a += 2;
    b += 2;
  }

  lv_32fc_t dotProductVector[2];

  _mm_storeu_ps((float*)dotProductVector,dotProdVal);

  /* fold the two partial complex sums together */
  dotProduct += ( dotProductVector[0] + dotProductVector[1] );

  if(isodd) {
    dotProduct += input[num_points - 1] * taps[num_points - 1];
  }

  *result = dotProduct;
}

#endif /*LV_HAVE_SSE3*/
#ifdef LV_HAVE_SSE4_1

#include <smmintrin.h>

/*
 * SSE4.1 (unaligned) complex dot product: *result = sum_i input[i] * taps[i].
 * Deinterleaves 4 complex samples per iteration into separate real/imag
 * vectors and uses _mm_dp_ps to form the partial dot products.
 */
static inline void volk_32fc_x2_dot_prod_32fc_u_sse4_1(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  unsigned int i = 0;
  const unsigned int qtr_points = num_points/4;  /* 4 complex samples per pass */
  const unsigned int isodd = num_points & 3;     /* 0..3 leftover samples */

  __m128 xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, real0, real1, im0, im1;
  float *p_input, *p_taps;
  __m64 *p_result;

  /* lane-0 sign mask used below to negate the accumulated sum(ai*bi), so
     real = sum(ar*br) - sum(ai*bi). Built with _mm_set_epi32 instead of
     the previous ill-formed over-wide integer literal. */
  const __m128 neg = _mm_castsi128_ps(_mm_set_epi32(0, 0, 0, (int)0x80000000));

  p_result = (__m64*)result;
  p_input = (float*)input;
  p_taps = (float*)taps;

  real0 = _mm_setzero_ps();
  real1 = _mm_setzero_ps();
  im0 = _mm_setzero_ps();
  im1 = _mm_setzero_ps();

  for(; i < qtr_points; ++i) {
    xmm0 = _mm_loadu_ps(p_input);
    xmm1 = _mm_loadu_ps(p_taps);

    p_input += 4;
    p_taps += 4;

    xmm2 = _mm_loadu_ps(p_input);
    xmm3 = _mm_loadu_ps(p_taps);

    p_input += 4;
    p_taps += 4;

    /* transpose the interleaved samples into real/imag vectors */
    xmm4 = _mm_unpackhi_ps(xmm0, xmm2);
    xmm5 = _mm_unpackhi_ps(xmm1, xmm3);
    xmm0 = _mm_unpacklo_ps(xmm0, xmm2);
    xmm2 = _mm_unpacklo_ps(xmm1, xmm3);

    /* imaginary vector from input */
    xmm1 = _mm_unpackhi_ps(xmm0, xmm4);
    /* real vector from input */
    xmm3 = _mm_unpacklo_ps(xmm0, xmm4);
    /* imaginary vector from taps */
    xmm0 = _mm_unpackhi_ps(xmm2, xmm5);
    /* real vector from taps */
    xmm2 = _mm_unpacklo_ps(xmm2, xmm5);

    /* sum(ar*br) and sum(ai*bi), each delivered into lane 0 */
    xmm4 = _mm_dp_ps(xmm3, xmm2, 0xf1);
    xmm5 = _mm_dp_ps(xmm1, xmm0, 0xf1);

    /* sum(ar*bi) and sum(ai*br), each delivered into lane 1 */
    xmm6 = _mm_dp_ps(xmm3, xmm0, 0xf2);
    xmm7 = _mm_dp_ps(xmm1, xmm2, 0xf2);

    real0 = _mm_add_ps(xmm4, real0);
    real1 = _mm_add_ps(xmm5, real1);
    im0 = _mm_add_ps(xmm6, im0);
    im1 = _mm_add_ps(xmm7, im1);
  }

  /* real part = sum(ar*br) - sum(ai*bi): flip the sign of real1 lane 0 */
  real1 = _mm_xor_ps(real1, neg);

  im0 = _mm_add_ps(im0, im1);
  real0 = _mm_add_ps(real0, real1);

  /* lane 0 now holds the real part, lane 1 the imaginary part */
  im0 = _mm_add_ps(im0, real0);

  _mm_storel_pi(p_result, im0);

  /* scalar tail for the leftover samples */
  for(i = num_points-isodd; i < num_points; i++) {
    *result += input[i] * taps[i];
  }
}

#endif /*LV_HAVE_SSE4_1*/
#ifdef LV_HAVE_AVX

#include <immintrin.h>

/*
 * AVX (unaligned) complex dot product: *result = sum_i input[i] * taps[i].
 * Processes four complex samples (8 floats) per iteration using the
 * moveldup/movehdup/addsub complex-multiply idiom on 256-bit registers.
 */
static inline void volk_32fc_x2_dot_prod_32fc_u_avx(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  unsigned int isodd = num_points & 3;  /* 0..3 leftover samples */
  unsigned int i = 0;
  lv_32fc_t dotProduct;
  memset(&dotProduct, 0x0, 2*sizeof(float));

  unsigned int number = 0;
  const unsigned int quarterPoints = num_points / 4;

  __m256 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

  const lv_32fc_t* a = input;
  const lv_32fc_t* b = taps;

  dotProdVal = _mm256_setzero_ps();

  for(;number < quarterPoints; number++){
    x = _mm256_loadu_ps((float*)a); /* ar,ai,br,bi ... */
    y = _mm256_loadu_ps((float*)b); /* cr,ci,dr,di ... */

    yl = _mm256_moveldup_ps(y); /* cr,cr,dr,dr ... */
    yh = _mm256_movehdup_ps(y); /* ci,ci,di,di ... */

    tmp1 = _mm256_mul_ps(x,yl); /* ar*cr, ai*cr, br*dr, bi*dr ... */

    x = _mm256_shuffle_ps(x,x,0xB1); /* swap to ai,ar,bi,br ... */

    tmp2 = _mm256_mul_ps(x,yh); /* ai*ci, ar*ci, bi*di, br*di ... */

    /* per-pair complex products: even lanes subtract, odd lanes add */
    z = _mm256_addsub_ps(tmp1,tmp2);

    dotProdVal = _mm256_add_ps(dotProdVal, z);

    a += 4;
    b += 4;
  }

  lv_32fc_t dotProductVector[4];

  _mm256_storeu_ps((float*)dotProductVector,dotProdVal);

  /* fold the four partial complex sums together */
  dotProduct += ( dotProductVector[0] + dotProductVector[1] + dotProductVector[2] + dotProductVector[3]);

  /* scalar tail for the leftover samples */
  for(i = num_points-isodd; i < num_points; i++) {
    dotProduct += input[i] * taps[i];
  }

  *result = dotProduct;
}

#endif /*LV_HAVE_AVX*/
#if LV_HAVE_AVX && LV_HAVE_FMA
#include <immintrin.h>

/*
 * AVX+FMA (unaligned) complex dot product: *result = sum_i input[i]*taps[i].
 * Same lane layout as the AVX version, but the multiply and add/sub of the
 * real products are fused via _mm256_fmaddsub_ps.
 */
static inline void volk_32fc_x2_dot_prod_32fc_u_avx_fma(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  unsigned int isodd = num_points & 3;  /* 0..3 leftover samples */
  unsigned int i = 0;
  lv_32fc_t dotProduct;
  memset(&dotProduct, 0x0, 2*sizeof(float));

  unsigned int number = 0;
  const unsigned int quarterPoints = num_points / 4;

  __m256 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

  const lv_32fc_t* a = input;
  const lv_32fc_t* b = taps;

  dotProdVal = _mm256_setzero_ps();

  for(;number < quarterPoints; number++){

    x = _mm256_loadu_ps((float*)a); /* ar,ai,br,bi ... */
    y = _mm256_loadu_ps((float*)b); /* cr,ci,dr,di ... */

    yl = _mm256_moveldup_ps(y); /* cr,cr,dr,dr ... */
    yh = _mm256_movehdup_ps(y); /* ci,ci,di,di ... */

    /* keep the unswapped input for the fused multiply below */
    tmp1 = x;

    x = _mm256_shuffle_ps(x,x,0xB1); /* swap to ai,ar,bi,br ... */

    tmp2 = _mm256_mul_ps(x,yh); /* ai*ci, ar*ci, bi*di, br*di ... */

    /* tmp1*yl -/+ tmp2: even lanes = ar*cr - ai*ci, odd = ai*cr + ar*ci */
    z = _mm256_fmaddsub_ps(tmp1, yl,tmp2);

    dotProdVal = _mm256_add_ps(dotProdVal, z);

    a += 4;
    b += 4;
  }

  lv_32fc_t dotProductVector[4];

  _mm256_storeu_ps((float*)dotProductVector,dotProdVal);

  /* fold the four partial complex sums together */
  dotProduct += ( dotProductVector[0] + dotProductVector[1] + dotProductVector[2] + dotProductVector[3]);

  /* scalar tail for the leftover samples */
  for(i = num_points-isodd; i < num_points; i++) {
    dotProduct += input[i] * taps[i];
  }

  *result = dotProduct;
}

#endif /*LV_HAVE_AVX && LV_HAVE_FMA*/
491 #ifndef INCLUDED_volk_32fc_x2_dot_prod_32fc_a_H
492 #define INCLUDED_volk_32fc_x2_dot_prod_32fc_a_H
#ifdef LV_HAVE_GENERIC

/*
 * Portable C implementation (aligned-API variant) of the complex dot
 * product: *result = sum_i input[i] * taps[i].
 * Identical math to the unaligned generic version; the block count is
 * derived from the byte length (num_bytes >> 4 = pairs of complex samples).
 */
static inline void volk_32fc_x2_dot_prod_32fc_a_generic(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  const unsigned int num_bytes = num_points*8;  /* 8 bytes per complex sample */

  float * res = (float*) result;
  float * in = (float*) input;
  float * tp = (float*) taps;
  /* 16 bytes per block = two complex samples */
  unsigned int n_2_ccomplex_blocks = num_bytes >> 4;
  unsigned int i = 0;

  float sum0[2] = {0,0};
  float sum1[2] = {0,0};

  for(i = 0; i < n_2_ccomplex_blocks; ++i) {
    /* (a+bi)(c+di) = (ac - bd) + (ad + bc)i, two samples per pass */
    sum0[0] += in[0] * tp[0] - in[1] * tp[1];
    sum0[1] += in[0] * tp[1] + in[1] * tp[0];
    sum1[0] += in[2] * tp[2] - in[3] * tp[3];
    sum1[1] += in[2] * tp[3] + in[3] * tp[2];

    /* advance past the two complex samples consumed this iteration */
    in += 4;
    tp += 4;
  }

  /* merge the two partial accumulators */
  res[0] = sum0[0] + sum1[0];
  res[1] = sum0[1] + sum1[1];

  /* odd sample count: fold in the final scalar product */
  if (num_points & 1) {
    *result += input[num_points - 1] * taps[num_points - 1];
  }
}

#endif /*LV_HAVE_GENERIC*/
#if LV_HAVE_SSE && LV_HAVE_64

/*
 * x86-64 SSE inline-assembly complex dot product over ALIGNED buffers
 * (movaps loads): *result = sum_i input[i] * taps[i].
 * Structurally identical to the unaligned _u_sse_64 variant, but requires
 * 16-byte aligned input/taps because of the aligned loads.
 * NOTE(review): this extract is missing several lines of the original
 * function -- the `asm volatile (` opener, the `.%=Loop1:` / `.%=L1_test:`
 * labels and loop control, the r9 pointer advance, the closing `);` of the
 * asm statement, and the `if (isodd)` guard around the scalar tail --
 * confirm against the full file before relying on this text.
 */
static inline void volk_32fc_x2_dot_prod_32fc_a_sse_64(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  const unsigned int num_bytes = num_points*8;  /* 8 bytes per complex-float sample */
  unsigned int isodd = num_points & 1;          /* one scalar sample left for the epilogue */

  /* The '#'-prefixed strings are assembler comments documenting the
     reference C algorithm the asm below implements. */
  "# ccomplex_dotprod_generic (float* result, const float *input,\n\t"
  "# const float *taps, unsigned num_bytes)\n\t"
  "# float sum0 = 0;\n\t"
  "# float sum1 = 0;\n\t"
  "# float sum2 = 0;\n\t"
  "# float sum3 = 0;\n\t"
  "# sum0 += input[0] * taps[0] - input[1] * taps[1];\n\t"
  "# sum1 += input[0] * taps[1] + input[1] * taps[0];\n\t"
  "# sum2 += input[2] * taps[2] - input[3] * taps[3];\n\t"
  "# sum3 += input[2] * taps[3] + input[3] * taps[2];\n\t"
  "# } while (--n_2_ccomplex_blocks != 0);\n\t"
  "# result[0] = sum0 + sum2;\n\t"
  "# result[1] = sum1 + sum3;\n\t"
  "# TODO: prefetch and better scheduling\n\t"
  /* r9 <- input pointer, r10 <- taps pointer; rcx carries num_bytes in */
  " xor %%r9, %%r9\n\t"
  " xor %%r10, %%r10\n\t"
  " movq %%rcx, %%rax\n\t"
  " movq %%rcx, %%r8\n\t"
  " movq %[rsi], %%r9\n\t"
  " movq %[rdx], %%r10\n\t"
  /* xmm6/xmm7 hold the two running partial-product accumulators */
  " xorps %%xmm6, %%xmm6 # zero accumulators\n\t"
  " movaps 0(%%r9), %%xmm0\n\t"
  " xorps %%xmm7, %%xmm7 # zero accumulators\n\t"
  " movaps 0(%%r10), %%xmm2\n\t"
  /* each loop pass consumes 32 bytes, hence the shift by 5 */
  " shr $5, %%rax # rax = n_2_ccomplex_blocks / 2\n\t"
  " jmp .%=L1_test\n\t"
  " # 4 taps / loop\n\t"
  " # something like ?? cycles / loop\n\t"
  /* Commented register-level recipe for one complex multiply-accumulate: */
  "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
  "# movaps (%%r9), %%xmmA\n\t"
  "# movaps (%%r10), %%xmmB\n\t"
  "# movaps %%xmmA, %%xmmZ\n\t"
  "# shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
  "# mulps %%xmmB, %%xmmA\n\t"
  "# mulps %%xmmZ, %%xmmB\n\t"
  "# # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
  "# xorps %%xmmPN, %%xmmA\n\t"
  "# movaps %%xmmA, %%xmmZ\n\t"
  "# unpcklps %%xmmB, %%xmmA\n\t"
  "# unpckhps %%xmmB, %%xmmZ\n\t"
  "# movaps %%xmmZ, %%xmmY\n\t"
  "# shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
  "# shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
  "# addps %%xmmZ, %%xmmA\n\t"
  "# addps %%xmmA, %%xmmC\n\t"
  "# A=xmm0, B=xmm2, Z=xmm4\n\t"
  "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
  /* main loop body: two complex multiplies, software-pipelined with the
     loads for the next iteration */
  " movaps 16(%%r9), %%xmm1\n\t"
  " movaps %%xmm0, %%xmm4\n\t"
  " mulps %%xmm2, %%xmm0\n\t"
  " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
  " movaps 16(%%r10), %%xmm3\n\t"
  " movaps %%xmm1, %%xmm5\n\t"
  " addps %%xmm0, %%xmm6\n\t"
  " mulps %%xmm3, %%xmm1\n\t"
  " shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
  " addps %%xmm1, %%xmm6\n\t"
  " mulps %%xmm4, %%xmm2\n\t"
  " movaps 32(%%r9), %%xmm0\n\t"
  " addps %%xmm2, %%xmm7\n\t"
  " mulps %%xmm5, %%xmm3\n\t"
  " movaps 32(%%r10), %%xmm2\n\t"
  " addps %%xmm3, %%xmm7\n\t"
  " add $32, %%r10\n\t"
  " # We've handled the bulk of multiplies up to here.\n\t"
  " # Let's sse if original n_2_ccomplex_blocks was odd.\n\t"
  " # If so, we've got 2 more taps to do.\n\t"
  " # The count was odd, do 2 more taps.\n\t"
  " # Note that we've already got mm0/mm2 preloaded\n\t"
  " # from the main loop.\n\t"
  " movaps %%xmm0, %%xmm4\n\t"
  " mulps %%xmm2, %%xmm0\n\t"
  " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
  " addps %%xmm0, %%xmm6\n\t"
  " mulps %%xmm4, %%xmm2\n\t"
  " addps %%xmm2, %%xmm7\n\t"
  /* build the 0,-0,0,-0 sign mask used to fix up the real parts */
  " # neg inversor\n\t"
  " xorps %%xmm1, %%xmm1\n\t"
  " mov $0x80000000, %%r9\n\t"
  " movd %%r9, %%xmm1\n\t"
  " shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
  /* horizontal reduction of the two accumulators to one complex value */
  " xorps %%xmm1, %%xmm6\n\t"
  " movaps %%xmm6, %%xmm2\n\t"
  " unpcklps %%xmm7, %%xmm6\n\t"
  " unpckhps %%xmm7, %%xmm2\n\t"
  " movaps %%xmm2, %%xmm3\n\t"
  " shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
  " shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
  " addps %%xmm2, %%xmm6\n\t"
  " # xmm6 = r1 i2 r3 i4\n\t"
  " movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
  " addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
  " movlps %%xmm6, (%[rdi]) # store low 2x32 bits (complex) to memory\n\t"
  /* input operands: rsi=input, rdx=taps, rcx=num_bytes, rdi=result */
  :[rsi] "r" (input), [rdx] "r" (taps), "c" (num_bytes), [rdi] "r" (result)
  /* clobbered scratch registers */
  :"rax", "r8", "r9", "r10"
  /* scalar epilogue for an odd sample count (guard missing in this extract) */
  *result += input[num_points - 1] * taps[num_points - 1];
#if LV_HAVE_SSE && LV_HAVE_32

/*
 * 32-bit x86 SSE inline-assembly complex dot product over aligned buffers.
 * Arguments are fetched from the stack frame via ebp offsets:
 *   8(%ebp)=result, 12(%ebp)=input, 16(%ebp)=taps, 20(%ebp)=num_bytes.
 * NOTE(review): this extract is missing several lines of the original
 * function -- the `asm volatile (` opener, the loop labels and loop
 * control, the odd-count branch, the closing `);`, and the `if` guard
 * around the scalar tail -- confirm against the full file.
 */
static inline void volk_32fc_x2_dot_prod_32fc_a_sse_32(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  const unsigned int num_bytes = num_points*8;  /* 8 bytes per complex-float sample */
  unsigned int isodd = num_points & 1;          /* one scalar sample left for the epilogue */

  /* load the stack-passed arguments into scratch registers */
  " #movl %%esp, %%ebp\n\t"
  " movl 12(%%ebp), %%eax # input\n\t"
  " movl 16(%%ebp), %%edx # taps\n\t"
  " movl 20(%%ebp), %%ecx # n_bytes\n\t"
  /* xmm6/xmm7 hold the two running partial-product accumulators */
  " xorps %%xmm6, %%xmm6 # zero accumulators\n\t"
  " movaps 0(%%eax), %%xmm0\n\t"
  " xorps %%xmm7, %%xmm7 # zero accumulators\n\t"
  " movaps 0(%%edx), %%xmm2\n\t"
  /* each loop pass consumes 32 bytes, hence the shift by 5 */
  " shrl $5, %%ecx # ecx = n_2_ccomplex_blocks / 2\n\t"
  " jmp .%=L1_test\n\t"
  " # 4 taps / loop\n\t"
  " # something like ?? cycles / loop\n\t"
  /* Commented register-level recipe for one complex multiply-accumulate: */
  "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
  "# movaps (%%eax), %%xmmA\n\t"
  "# movaps (%%edx), %%xmmB\n\t"
  "# movaps %%xmmA, %%xmmZ\n\t"
  "# shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
  "# mulps %%xmmB, %%xmmA\n\t"
  "# mulps %%xmmZ, %%xmmB\n\t"
  "# # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
  "# xorps %%xmmPN, %%xmmA\n\t"
  "# movaps %%xmmA, %%xmmZ\n\t"
  "# unpcklps %%xmmB, %%xmmA\n\t"
  "# unpckhps %%xmmB, %%xmmZ\n\t"
  "# movaps %%xmmZ, %%xmmY\n\t"
  "# shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
  "# shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
  "# addps %%xmmZ, %%xmmA\n\t"
  "# addps %%xmmA, %%xmmC\n\t"
  "# A=xmm0, B=xmm2, Z=xmm4\n\t"
  "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
  /* main loop body: two complex multiplies, software-pipelined with the
     loads for the next iteration */
  " movaps 16(%%eax), %%xmm1\n\t"
  " movaps %%xmm0, %%xmm4\n\t"
  " mulps %%xmm2, %%xmm0\n\t"
  " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
  " movaps 16(%%edx), %%xmm3\n\t"
  " movaps %%xmm1, %%xmm5\n\t"
  " addps %%xmm0, %%xmm6\n\t"
  " mulps %%xmm3, %%xmm1\n\t"
  " shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
  " addps %%xmm1, %%xmm6\n\t"
  " mulps %%xmm4, %%xmm2\n\t"
  " movaps 32(%%eax), %%xmm0\n\t"
  " addps %%xmm2, %%xmm7\n\t"
  " mulps %%xmm5, %%xmm3\n\t"
  " addl $32, %%eax\n\t"
  " movaps 32(%%edx), %%xmm2\n\t"
  " addps %%xmm3, %%xmm7\n\t"
  " addl $32, %%edx\n\t"
  " # We've handled the bulk of multiplies up to here.\n\t"
  " # Let's sse if original n_2_ccomplex_blocks was odd.\n\t"
  " # If so, we've got 2 more taps to do.\n\t"
  /* recompute the block count and test its low bit for the odd case */
  " movl 20(%%ebp), %%ecx # n_2_ccomplex_blocks\n\t"
  " shrl $4, %%ecx\n\t"
  " andl $1, %%ecx\n\t"
  " # The count was odd, do 2 more taps.\n\t"
  " # Note that we've already got mm0/mm2 preloaded\n\t"
  " # from the main loop.\n\t"
  " movaps %%xmm0, %%xmm4\n\t"
  " mulps %%xmm2, %%xmm0\n\t"
  " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
  " addps %%xmm0, %%xmm6\n\t"
  " mulps %%xmm4, %%xmm2\n\t"
  " addps %%xmm2, %%xmm7\n\t"
  /* build the 0,-0,0,-0 sign mask, staging 0x80000000 through *result */
  " # neg inversor\n\t"
  " movl 8(%%ebp), %%eax \n\t"
  " xorps %%xmm1, %%xmm1\n\t"
  " movl $0x80000000, (%%eax)\n\t"
  " movss (%%eax), %%xmm1\n\t"
  " shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
  /* horizontal reduction of the two accumulators to one complex value */
  " xorps %%xmm1, %%xmm6\n\t"
  " movaps %%xmm6, %%xmm2\n\t"
  " unpcklps %%xmm7, %%xmm6\n\t"
  " unpckhps %%xmm7, %%xmm2\n\t"
  " movaps %%xmm2, %%xmm3\n\t"
  " shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
  " shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
  " addps %%xmm2, %%xmm6\n\t"
  " # xmm6 = r1 i2 r3 i4\n\t"
  " #movl 8(%%ebp), %%eax # @result\n\t"
  " movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
  " addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
  " movlps %%xmm6, (%%eax) # store low 2x32 bits (complex) to memory\n\t"
  /* clobbered scratch registers */
  : "eax", "ecx", "edx"
  /* leftover byte count; presumably feeds the (missing) odd-sample guard */
  int getem = num_bytes % 16;
  /* scalar epilogue for an odd sample count (guard missing in this extract) */
  *result += (input[num_points - 1] * taps[num_points - 1]);
#ifdef LV_HAVE_SSE3

#include <pmmintrin.h>

/*
 * SSE3 (aligned) complex dot product: *result = sum_i input[i] * taps[i].
 * Same algorithm as the unaligned SSE3 version but with aligned loads;
 * input and taps must be 16-byte aligned.
 */
static inline void volk_32fc_x2_dot_prod_32fc_a_sse3(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  const unsigned int num_bytes = num_points*8;  /* 8 bytes per complex sample */
  unsigned int isodd = num_points & 1;          /* leftover sample for scalar tail */

  lv_32fc_t dotProduct;
  memset(&dotProduct, 0x0, 2*sizeof(float));

  unsigned int number = 0;
  /* 16 bytes per pass = two complex samples */
  const unsigned int halfPoints = num_bytes >> 4;

  __m128 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

  const lv_32fc_t* a = input;
  const lv_32fc_t* b = taps;

  dotProdVal = _mm_setzero_ps();

  for(;number < halfPoints; number++){

    x = _mm_load_ps((float*)a); /* ar,ai,br,bi */
    y = _mm_load_ps((float*)b); /* cr,ci,dr,di */

    yl = _mm_moveldup_ps(y); /* cr,cr,dr,dr */
    yh = _mm_movehdup_ps(y); /* ci,ci,di,di */

    tmp1 = _mm_mul_ps(x,yl); /* ar*cr, ai*cr, br*dr, bi*dr */

    x = _mm_shuffle_ps(x,x,0xB1); /* swap to ai,ar,bi,br */

    tmp2 = _mm_mul_ps(x,yh); /* ai*ci, ar*ci, bi*di, br*di */

    /* ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di */
    z = _mm_addsub_ps(tmp1,tmp2);

    dotProdVal = _mm_add_ps(dotProdVal, z); /* accumulate the two products */

    a += 2;
    b += 2;
  }

  /* aligned scratch for the aligned store below
     (mirrors the project's __VOLK_ATTR_ALIGNED(16)) */
  __attribute__((aligned(16))) lv_32fc_t dotProductVector[2];

  _mm_store_ps((float*)dotProductVector,dotProdVal);

  /* fold the two partial complex sums together */
  dotProduct += ( dotProductVector[0] + dotProductVector[1] );

  if(isodd) {
    dotProduct += input[num_points - 1] * taps[num_points - 1];
  }

  *result = dotProduct;
}

#endif /*LV_HAVE_SSE3*/
#ifdef LV_HAVE_SSE4_1

#include <smmintrin.h>

/*
 * SSE4.1 (aligned) complex dot product: *result = sum_i input[i] * taps[i].
 * Same algorithm as the unaligned SSE4.1 version but with aligned loads;
 * input and taps must be 16-byte aligned.
 */
static inline void volk_32fc_x2_dot_prod_32fc_a_sse4_1(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  unsigned int i = 0;
  const unsigned int qtr_points = num_points/4;  /* 4 complex samples per pass */
  const unsigned int isodd = num_points & 3;     /* 0..3 leftover samples */

  __m128 xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, real0, real1, im0, im1;
  float *p_input, *p_taps;
  __m64 *p_result;

  /* lane-0 sign mask used below to negate the accumulated sum(ai*bi), so
     real = sum(ar*br) - sum(ai*bi). Built with _mm_set_epi32 instead of
     the previous ill-formed over-wide integer literal. */
  const __m128 neg = _mm_castsi128_ps(_mm_set_epi32(0, 0, 0, (int)0x80000000));

  p_result = (__m64*)result;
  p_input = (float*)input;
  p_taps = (float*)taps;

  real0 = _mm_setzero_ps();
  real1 = _mm_setzero_ps();
  im0 = _mm_setzero_ps();
  im1 = _mm_setzero_ps();

  for(; i < qtr_points; ++i) {
    xmm0 = _mm_load_ps(p_input);
    xmm1 = _mm_load_ps(p_taps);

    p_input += 4;
    p_taps += 4;

    xmm2 = _mm_load_ps(p_input);
    xmm3 = _mm_load_ps(p_taps);

    p_input += 4;
    p_taps += 4;

    /* transpose the interleaved samples into real/imag vectors */
    xmm4 = _mm_unpackhi_ps(xmm0, xmm2);
    xmm5 = _mm_unpackhi_ps(xmm1, xmm3);
    xmm0 = _mm_unpacklo_ps(xmm0, xmm2);
    xmm2 = _mm_unpacklo_ps(xmm1, xmm3);

    /* imaginary vector from input */
    xmm1 = _mm_unpackhi_ps(xmm0, xmm4);
    /* real vector from input */
    xmm3 = _mm_unpacklo_ps(xmm0, xmm4);
    /* imaginary vector from taps */
    xmm0 = _mm_unpackhi_ps(xmm2, xmm5);
    /* real vector from taps */
    xmm2 = _mm_unpacklo_ps(xmm2, xmm5);

    /* sum(ar*br) and sum(ai*bi), each delivered into lane 0 */
    xmm4 = _mm_dp_ps(xmm3, xmm2, 0xf1);
    xmm5 = _mm_dp_ps(xmm1, xmm0, 0xf1);

    /* sum(ar*bi) and sum(ai*br), each delivered into lane 1 */
    xmm6 = _mm_dp_ps(xmm3, xmm0, 0xf2);
    xmm7 = _mm_dp_ps(xmm1, xmm2, 0xf2);

    real0 = _mm_add_ps(xmm4, real0);
    real1 = _mm_add_ps(xmm5, real1);
    im0 = _mm_add_ps(xmm6, im0);
    im1 = _mm_add_ps(xmm7, im1);
  }

  /* real part = sum(ar*br) - sum(ai*bi): flip the sign of real1 lane 0 */
  real1 = _mm_xor_ps(real1, neg);

  im0 = _mm_add_ps(im0, im1);
  real0 = _mm_add_ps(real0, real1);

  /* lane 0 now holds the real part, lane 1 the imaginary part */
  im0 = _mm_add_ps(im0, real0);

  _mm_storel_pi(p_result, im0);

  /* scalar tail for the leftover samples */
  for(i = num_points-isodd; i < num_points; i++) {
    *result += input[i] * taps[i];
  }
}

#endif /*LV_HAVE_SSE4_1*/
#ifdef LV_HAVE_NEON
#include <arm_neon.h>

/*
 * NEON complex dot product: *result = sum_i input[i] * taps[i].
 * vld2q deinterleaves four complex samples into a real vector (val[0])
 * and an imaginary vector (val[1]); the complex product is formed with
 * explicit multiplies, then a subtract/add.
 */
static inline void volk_32fc_x2_dot_prod_32fc_neon(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  unsigned int quarter_points = num_points / 4;
  unsigned int number;

  lv_32fc_t* a_ptr = (lv_32fc_t*) taps;
  lv_32fc_t* b_ptr = (lv_32fc_t*) input;

  /* val[0] holds real parts, val[1] holds imaginary parts */
  float32x4x2_t a_val, b_val, c_val, accumulator;
  float32x4x2_t tmp_real, tmp_imag;
  accumulator.val[0] = vdupq_n_f32(0);
  accumulator.val[1] = vdupq_n_f32(0);

  for(number = 0; number < quarter_points; ++number) {
    a_val = vld2q_f32((float*)a_ptr); /* a0r..a3r | a0i..a3i */
    b_val = vld2q_f32((float*)b_ptr);

    /* ar*br and ai*bi */
    tmp_real.val[0] = vmulq_f32(a_val.val[0], b_val.val[0]);
    tmp_real.val[1] = vmulq_f32(a_val.val[1], b_val.val[1]);

    /* ar*bi and ai*br */
    tmp_imag.val[0] = vmulq_f32(a_val.val[0], b_val.val[1]);
    tmp_imag.val[1] = vmulq_f32(a_val.val[1], b_val.val[0]);

    /* real = ar*br - ai*bi, imag = ar*bi + ai*br */
    c_val.val[0] = vsubq_f32(tmp_real.val[0], tmp_real.val[1]);
    c_val.val[1] = vaddq_f32(tmp_imag.val[0], tmp_imag.val[1]);

    accumulator.val[0] = vaddq_f32(accumulator.val[0], c_val.val[0]);
    accumulator.val[1] = vaddq_f32(accumulator.val[1], c_val.val[1]);

    a_ptr += 4;
    b_ptr += 4;
  }

  /* reinterleave and reduce the four partial complex sums */
  lv_32fc_t accum_result[4];
  vst2q_f32((float*)accum_result, accumulator);
  *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

  /* scalar tail for the leftover samples */
  for(number = quarter_points*4; number < num_points; ++number) {
    *result += (*a_ptr++) * (*b_ptr++);
  }
}

#endif /*LV_HAVE_NEON*/
#ifdef LV_HAVE_NEON
#include <arm_neon.h>

/*
 * NEON complex dot product using fused multiply-accumulate/subtract
 * (vmlaq/vmlsq) to form the complex products with fewer instructions
 * than the plain NEON version.
 */
static inline void volk_32fc_x2_dot_prod_32fc_neon_opttests(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  unsigned int quarter_points = num_points / 4;
  unsigned int number;

  lv_32fc_t* a_ptr = (lv_32fc_t*) taps;
  lv_32fc_t* b_ptr = (lv_32fc_t*) input;

  /* val[0] holds real parts, val[1] holds imaginary parts */
  float32x4x2_t a_val, b_val, accumulator;
  float32x4x2_t tmp_imag;
  accumulator.val[0] = vdupq_n_f32(0);
  accumulator.val[1] = vdupq_n_f32(0);

  for(number = 0; number < quarter_points; ++number) {
    a_val = vld2q_f32((float*)a_ptr);
    b_val = vld2q_f32((float*)b_ptr);

    /* first multiplies: ai*br and ar*br */
    tmp_imag.val[1] = vmulq_f32(a_val.val[1], b_val.val[0]);
    tmp_imag.val[0] = vmulq_f32(a_val.val[0], b_val.val[0]);

    /* fuse the second multiplies into accumulate/subtract:
       imag = ai*br + ar*bi, real = ar*br - ai*bi */
    tmp_imag.val[1] = vmlaq_f32(tmp_imag.val[1], a_val.val[0], b_val.val[1]);
    tmp_imag.val[0] = vmlsq_f32(tmp_imag.val[0], a_val.val[1], b_val.val[1]);

    accumulator.val[0] = vaddq_f32(accumulator.val[0], tmp_imag.val[0]);
    accumulator.val[1] = vaddq_f32(accumulator.val[1], tmp_imag.val[1]);

    a_ptr += 4;
    b_ptr += 4;
  }

  /* reinterleave and reduce the four partial complex sums */
  lv_32fc_t accum_result[4];
  vst2q_f32((float*)accum_result, accumulator);
  *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

  /* scalar tail for the leftover samples */
  for(number = quarter_points*4; number < num_points; ++number) {
    *result += (*a_ptr++) * (*b_ptr++);
  }
}

#endif /*LV_HAVE_NEON*/
#ifdef LV_HAVE_NEON

/*
 * NEON complex dot product accumulating directly with vmlaq/vmlsq into
 * two independent accumulator pairs, removing the per-iteration vadd of
 * the other NEON variants; the pairs are merged after the loop.
 */
static inline void volk_32fc_x2_dot_prod_32fc_neon_optfma(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  unsigned int quarter_points = num_points / 4;
  unsigned int number;

  lv_32fc_t* a_ptr = (lv_32fc_t*) taps;
  lv_32fc_t* b_ptr = (lv_32fc_t*) input;

  /* val[0] holds real parts, val[1] holds imaginary parts */
  float32x4x2_t a_val, b_val, accumulator1, accumulator2;
  accumulator1.val[0] = vdupq_n_f32(0);
  accumulator1.val[1] = vdupq_n_f32(0);
  accumulator2.val[0] = vdupq_n_f32(0);
  accumulator2.val[1] = vdupq_n_f32(0);

  for(number = 0; number < quarter_points; ++number) {
    a_val = vld2q_f32((float*)a_ptr);
    b_val = vld2q_f32((float*)b_ptr);

    /* two accumulator pairs break the data dependency between the
       multiply-accumulates of one complex product */
    accumulator1.val[0] = vmlaq_f32(accumulator1.val[0], a_val.val[0], b_val.val[0]);
    accumulator1.val[1] = vmlaq_f32(accumulator1.val[1], a_val.val[0], b_val.val[1]);
    accumulator2.val[0] = vmlsq_f32(accumulator2.val[0], a_val.val[1], b_val.val[1]);
    accumulator2.val[1] = vmlaq_f32(accumulator2.val[1], a_val.val[1], b_val.val[0]);

    a_ptr += 4;
    b_ptr += 4;
  }

  /* merge the accumulator pairs, then reduce to a scalar complex */
  accumulator1.val[0] = vaddq_f32(accumulator1.val[0], accumulator2.val[0]);
  accumulator1.val[1] = vaddq_f32(accumulator1.val[1], accumulator2.val[1]);

  lv_32fc_t accum_result[4];
  vst2q_f32((float*)accum_result, accumulator1);
  *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

  /* scalar tail for the leftover samples */
  for(number = quarter_points*4; number < num_points; ++number) {
    *result += (*a_ptr++) * (*b_ptr++);
  }
}

#endif /*LV_HAVE_NEON*/
#ifdef LV_HAVE_NEON

/*
 * NEON complex dot product, unrolled to 8 complex samples per iteration
 * using vld4q and eight accumulators (two float32x4x4_t), which are
 * pairwise reduced after the loop.
 */
static inline void volk_32fc_x2_dot_prod_32fc_neon_optfmaunroll(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  /* 8 complex samples per pass (4-way deinterleave x 2 sample groups) */
  unsigned int quarter_points = num_points / 8;
  unsigned int number;

  lv_32fc_t* a_ptr = (lv_32fc_t*) taps;
  lv_32fc_t* b_ptr = (lv_32fc_t*) input;

  /* vld4q splits 8 samples into val[0]/val[1] (reals/imags of the even
     group) and val[2]/val[3] (reals/imags of the odd group) */
  float32x4x4_t a_val, b_val, accumulator1, accumulator2;
  float32x4x2_t reduced_accumulator;
  accumulator1.val[0] = vdupq_n_f32(0);
  accumulator1.val[1] = vdupq_n_f32(0);
  accumulator1.val[2] = vdupq_n_f32(0);
  accumulator1.val[3] = vdupq_n_f32(0);
  accumulator2.val[0] = vdupq_n_f32(0);
  accumulator2.val[1] = vdupq_n_f32(0);
  accumulator2.val[2] = vdupq_n_f32(0);
  accumulator2.val[3] = vdupq_n_f32(0);

  for(number = 0; number < quarter_points; ++number) {
    a_val = vld4q_f32((float*)a_ptr);
    b_val = vld4q_f32((float*)b_ptr);

    /* even sample group: real/imag partial products */
    accumulator1.val[0] = vmlaq_f32(accumulator1.val[0], a_val.val[0], b_val.val[0]);
    accumulator1.val[1] = vmlaq_f32(accumulator1.val[1], a_val.val[0], b_val.val[1]);

    /* odd sample group: real/imag partial products */
    accumulator1.val[2] = vmlaq_f32(accumulator1.val[2], a_val.val[2], b_val.val[2]);
    accumulator1.val[3] = vmlaq_f32(accumulator1.val[3], a_val.val[2], b_val.val[3]);

    /* cross terms (subtract for real, add for imag), even group */
    accumulator2.val[0] = vmlsq_f32(accumulator2.val[0], a_val.val[1], b_val.val[1]);
    accumulator2.val[1] = vmlaq_f32(accumulator2.val[1], a_val.val[1], b_val.val[0]);

    /* cross terms, odd group */
    accumulator2.val[2] = vmlsq_f32(accumulator2.val[2], a_val.val[3], b_val.val[3]);
    accumulator2.val[3] = vmlaq_f32(accumulator2.val[3], a_val.val[3], b_val.val[2]);

    a_ptr += 8;
    b_ptr += 8;
  }

  /* pairwise reduce the eight accumulators down to one real/imag pair */
  accumulator1.val[0] = vaddq_f32(accumulator1.val[0], accumulator1.val[2]);
  accumulator1.val[1] = vaddq_f32(accumulator1.val[1], accumulator1.val[3]);
  accumulator2.val[0] = vaddq_f32(accumulator2.val[0], accumulator2.val[2]);
  accumulator2.val[1] = vaddq_f32(accumulator2.val[1], accumulator2.val[3]);
  reduced_accumulator.val[0] = vaddq_f32(accumulator1.val[0], accumulator2.val[0]);
  reduced_accumulator.val[1] = vaddq_f32(accumulator1.val[1], accumulator2.val[1]);

  /* reinterleave and reduce to a scalar complex */
  lv_32fc_t accum_result[4];
  vst2q_f32((float*)accum_result, reduced_accumulator);
  *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

  /* scalar tail for the leftover samples */
  for(number = quarter_points*8; number < num_points; ++number) {
    *result += (*a_ptr++) * (*b_ptr++);
  }
}

#endif /*LV_HAVE_NEON*/
#ifdef LV_HAVE_AVX
#include <immintrin.h>

/*
 * AVX (aligned) complex dot product: *result = sum_i input[i] * taps[i].
 * Same algorithm as the unaligned AVX version but with aligned loads;
 * input and taps must be 32-byte aligned.
 */
static inline void volk_32fc_x2_dot_prod_32fc_a_avx(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  unsigned int isodd = num_points & 3;  /* 0..3 leftover samples */
  unsigned int i = 0;
  lv_32fc_t dotProduct;
  memset(&dotProduct, 0x0, 2*sizeof(float));

  unsigned int number = 0;
  const unsigned int quarterPoints = num_points / 4;

  __m256 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

  const lv_32fc_t* a = input;
  const lv_32fc_t* b = taps;

  dotProdVal = _mm256_setzero_ps();

  for(;number < quarterPoints; number++){

    x = _mm256_load_ps((float*)a); /* ar,ai,br,bi ... */
    y = _mm256_load_ps((float*)b); /* cr,ci,dr,di ... */

    yl = _mm256_moveldup_ps(y); /* cr,cr,dr,dr ... */
    yh = _mm256_movehdup_ps(y); /* ci,ci,di,di ... */

    tmp1 = _mm256_mul_ps(x,yl); /* ar*cr, ai*cr, br*dr, bi*dr ... */

    x = _mm256_shuffle_ps(x,x,0xB1); /* swap to ai,ar,bi,br ... */

    tmp2 = _mm256_mul_ps(x,yh); /* ai*ci, ar*ci, bi*di, br*di ... */

    /* per-pair complex products: even lanes subtract, odd lanes add */
    z = _mm256_addsub_ps(tmp1,tmp2);

    dotProdVal = _mm256_add_ps(dotProdVal, z);

    a += 4;
    b += 4;
  }

  /* aligned scratch for the aligned store below
     (mirrors the project's __VOLK_ATTR_ALIGNED(32)) */
  __attribute__((aligned(32))) lv_32fc_t dotProductVector[4];

  _mm256_store_ps((float*)dotProductVector,dotProdVal);

  /* fold the four partial complex sums together */
  dotProduct += ( dotProductVector[0] + dotProductVector[1] + dotProductVector[2] + dotProductVector[3]);

  /* scalar tail for the leftover samples */
  for(i = num_points-isodd; i < num_points; i++) {
    dotProduct += input[i] * taps[i];
  }

  *result = dotProduct;
}

#endif /*LV_HAVE_AVX*/
#if LV_HAVE_AVX && LV_HAVE_FMA
#include <immintrin.h>

/*
 * AVX+FMA (aligned) complex dot product: *result = sum_i input[i]*taps[i].
 * Same lane layout as the AVX version, but the multiply and add/sub of the
 * real products are fused via _mm256_fmaddsub_ps. Inputs must be 32-byte
 * aligned.
 */
static inline void volk_32fc_x2_dot_prod_32fc_a_avx_fma(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  unsigned int isodd = num_points & 3;  /* 0..3 leftover samples */
  unsigned int i = 0;
  lv_32fc_t dotProduct;
  memset(&dotProduct, 0x0, 2*sizeof(float));

  unsigned int number = 0;
  const unsigned int quarterPoints = num_points / 4;

  __m256 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

  const lv_32fc_t* a = input;
  const lv_32fc_t* b = taps;

  dotProdVal = _mm256_setzero_ps();

  for(;number < quarterPoints; number++){

    x = _mm256_load_ps((float*)a); /* ar,ai,br,bi ... */
    y = _mm256_load_ps((float*)b); /* cr,ci,dr,di ... */

    yl = _mm256_moveldup_ps(y); /* cr,cr,dr,dr ... */
    yh = _mm256_movehdup_ps(y); /* ci,ci,di,di ... */

    /* keep the unswapped input for the fused multiply below */
    tmp1 = x;

    x = _mm256_shuffle_ps(x,x,0xB1); /* swap to ai,ar,bi,br ... */

    tmp2 = _mm256_mul_ps(x,yh); /* ai*ci, ar*ci, bi*di, br*di ... */

    /* tmp1*yl -/+ tmp2: even lanes = ar*cr - ai*ci, odd = ai*cr + ar*ci */
    z = _mm256_fmaddsub_ps(tmp1, yl,tmp2);

    dotProdVal = _mm256_add_ps(dotProdVal, z);

    a += 4;
    b += 4;
  }

  /* aligned scratch for the aligned store below
     (mirrors the project's __VOLK_ATTR_ALIGNED(32)) */
  __attribute__((aligned(32))) lv_32fc_t dotProductVector[4];

  _mm256_store_ps((float*)dotProductVector,dotProdVal);

  /* fold the four partial complex sums together */
  dotProduct += ( dotProductVector[0] + dotProductVector[1] + dotProductVector[2] + dotProductVector[3]);

  /* scalar tail for the leftover samples */
  for(i = num_points-isodd; i < num_points; i++) {
    dotProduct += input[i] * taps[i];
  }

  *result = dotProduct;
}

#endif /*LV_HAVE_AVX && LV_HAVE_FMA*/