#ifndef INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_u_H
#define INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_u_H

#include <volk/volk_common.h>
#include <volk/volk_complex.h>
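
/*
 * This kernel computes the conjugate dot product of two complex float
 * vectors:  *result = sum over i of input[i] * conj(taps[i]).
 *
 * Usage sketch (illustrative; callers normally go through the VOLK
 * dispatcher, which selects the fastest implementation below at runtime):
 *
 *   unsigned int N = 1024;
 *   lv_32fc_t* in = (lv_32fc_t*)volk_malloc(sizeof(lv_32fc_t) * N, volk_get_alignment());
 *   lv_32fc_t* tp = (lv_32fc_t*)volk_malloc(sizeof(lv_32fc_t) * N, volk_get_alignment());
 *   lv_32fc_t res;
 *   // ... fill in[] and tp[] ...
 *   volk_32fc_x2_conjugate_dot_prod_32fc(&res, in, tp, N);
 *   volk_free(in);
 *   volk_free(tp);
 */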
#ifdef LV_HAVE_GENERIC

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_generic(lv_32fc_t* result,
                                                                const lv_32fc_t* input,
                                                                const lv_32fc_t* taps,
                                                                unsigned int num_points)
{
    const unsigned int num_bytes = num_points * 8;

    float* res = (float*)result;
    float* in = (float*)input;
    float* tp = (float*)taps;
    unsigned int n_2_ccomplex_blocks = num_bytes >> 4;
    unsigned int isodd = (num_bytes >> 3) & 1;

    float sum0[2] = { 0, 0 };
    float sum1[2] = { 0, 0 };
    unsigned int i = 0;

    // Two complex samples per iteration, accumulated into independent sums.
    for (i = 0; i < n_2_ccomplex_blocks; ++i) {
        sum0[0] += in[0] * tp[0] + in[1] * tp[1];
        sum0[1] += (-in[0] * tp[1]) + in[1] * tp[0];
        sum1[0] += in[2] * tp[2] + in[3] * tp[3];
        sum1[1] += (-in[2] * tp[3]) + in[3] * tp[2];

        in += 4;
        tp += 4;
    }

    res[0] = sum0[0] + sum1[0];
    res[1] = sum0[1] + sum1[1];

    // If num_points is odd, fold in the final sample.
    for (i = 0; i < isodd; ++i) {
        *result += input[(num_bytes >> 3) - 1] * lv_conj(taps[(num_bytes >> 3) - 1]);
    }
}

#endif /*LV_HAVE_GENERIC*/
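
/*
 * Worked example of the identity the generic loop implements: for a single
 * pair x = 1+2j, t = 3+4j,
 *   x * conj(t) = (1*3 + 2*4) + j(-1*4 + 2*3) = 11 + 2j,
 * matching sum0[0] = in[0]*tp[0] + in[1]*tp[1] and
 * sum0[1] = -in[0]*tp[1] + in[1]*tp[0] above.
 */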
#ifdef LV_HAVE_AVX

#include <immintrin.h>

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_u_avx(lv_32fc_t* result,
                                                              const lv_32fc_t* input,
                                                              const lv_32fc_t* taps,
                                                              unsigned int num_points)
{
    // Partial sums of a*re(b) and a*im(b), four complex samples at a time.
    __m256 sum_a_mult_b_real = _mm256_setzero_ps();
    __m256 sum_a_mult_b_imag = _mm256_setzero_ps();

    for (long unsigned i = 0; i < (num_points & ~3u); i += 4) {
        // Unaligned load of 4 complex samples (8 floats) from each vector.
        __m256 a = _mm256_loadu_ps((const float*)&input[i]);
        __m256 b = _mm256_loadu_ps((const float*)&taps[i]);
        // Duplicate the real and imaginary parts of b across lane pairs.
        __m256 b_real = _mm256_moveldup_ps(b);
        __m256 b_imag = _mm256_movehdup_ps(b);

        sum_a_mult_b_real = _mm256_add_ps(sum_a_mult_b_real, _mm256_mul_ps(a, b_real));
        // addsub supplies the sign flip that conjugating taps introduces.
        sum_a_mult_b_imag = _mm256_addsub_ps(sum_a_mult_b_imag, _mm256_mul_ps(a, b_imag));
    }

    // Swap real/imag positions in the imaginary sum so both sums line up.
    sum_a_mult_b_imag = _mm256_permute_ps(sum_a_mult_b_imag, _MM_SHUFFLE(2, 3, 0, 1));

    __m256 sum = _mm256_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);

    // Horizontal reduction: fold the upper 128-bit lane onto the lower one,
    sum = _mm256_add_ps(sum, _mm256_permute2f128_ps(sum, sum, 0x01));
    // then fold the upper complex value onto the lower one.
    sum = _mm256_add_ps(sum, _mm256_permute_ps(sum, _MM_SHUFFLE(1, 0, 3, 2)));

    __m128 lower = _mm256_extractf128_ps(sum, 0);
    _mm_storel_pi((__m64*)result, lower);

    // Scalar tail for the last num_points % 4 samples.
    for (long unsigned i = num_points & ~3u; i < num_points; ++i) {
        *result += input[i] * lv_conj(taps[i]);
    }
}

#endif /*LV_HAVE_AVX*/
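
/*
 * Lane bookkeeping for the AVX loop above (samples x0..x3 against taps
 * t0..t3):
 *   a      = [x0r x0i x1r x1i | x2r x2i x3r x3i]
 *   b_real = [t0r t0r t1r t1r | t2r t2r t3r t3r]   (moveldup)
 *   b_imag = [t0i t0i t1i t1i | t2i t2i t3i t3i]   (movehdup)
 * addsub subtracts in even lanes and adds in odd lanes, so after the
 * permute and the final add, even lanes carry Re(x*conj(t)) partials and
 * odd lanes carry Im(x*conj(t)) partials.
 */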
#ifdef LV_HAVE_SSE3

#include <xmmintrin.h>
#include <pmmintrin.h>

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_u_sse3(lv_32fc_t* result,
                                                               const lv_32fc_t* input,
                                                               const lv_32fc_t* taps,
                                                               unsigned int num_points)
{
    // Partial sums of a*re(b) and a*im(b), two complex samples at a time.
    __m128 sum_a_mult_b_real = _mm_setzero_ps();
    __m128 sum_a_mult_b_imag = _mm_setzero_ps();

    for (long unsigned i = 0; i < (num_points & ~1u); i += 2) {
        // Unaligned load of 2 complex samples (4 floats) from each vector.
        __m128 a = _mm_loadu_ps((const float*)&input[i]);
        __m128 b = _mm_loadu_ps((const float*)&taps[i]);
        // Duplicate the real and imaginary parts of b across lane pairs.
        __m128 b_real = _mm_moveldup_ps(b);
        __m128 b_imag = _mm_movehdup_ps(b);

        sum_a_mult_b_real = _mm_add_ps(sum_a_mult_b_real, _mm_mul_ps(a, b_real));
        // addsub supplies the sign flip that conjugating taps introduces.
        sum_a_mult_b_imag = _mm_addsub_ps(sum_a_mult_b_imag, _mm_mul_ps(a, b_imag));
    }

    // Swap real/imag positions in the imaginary sum so both sums line up.
    sum_a_mult_b_imag = _mm_shuffle_ps(sum_a_mult_b_imag, sum_a_mult_b_imag,
                                       _MM_SHUFFLE(2, 3, 0, 1));

    __m128 sum = _mm_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);

    // Fold the upper complex value onto the lower one and store the result.
    sum = _mm_add_ps(sum, _mm_shuffle_ps(sum, sum, _MM_SHUFFLE(1, 0, 3, 2)));

    _mm_storel_pi((__m64*)result, sum);

    // Scalar tail if num_points is odd.
    if (num_points & 1u) {
        *result += input[num_points - 1] * lv_conj(taps[num_points - 1]);
    }
}

#endif /*LV_HAVE_SSE3*/
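
/*
 * The SSE3 variant above is the 128-bit version of the AVX approach: two
 * complex samples per iteration, the same moveldup/movehdup split of taps,
 * and a single fold to reduce the two complex partial sums.
 */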
#ifdef LV_HAVE_NEON
#include <arm_neon.h>

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_neon(lv_32fc_t* result,
                                                             const lv_32fc_t* input,
                                                             const lv_32fc_t* taps,
                                                             unsigned int num_points)
{
    unsigned int quarter_points = num_points / 4;
    unsigned int number;

    lv_32fc_t* a_ptr = (lv_32fc_t*)input;
    lv_32fc_t* b_ptr = (lv_32fc_t*)taps;

    // For the 2-lane structures, val[0] holds real parts, val[1] imaginary parts.
    float32x4x2_t a_val, b_val, accumulator;
    float32x4x2_t tmp_imag;
    accumulator.val[0] = vdupq_n_f32(0);
    accumulator.val[1] = vdupq_n_f32(0);

    for (number = 0; number < quarter_points; ++number) {
        a_val = vld2q_f32((float*)a_ptr); // a0r|a1r|a2r|a3r || a0i|a1i|a2i|a3i
        b_val = vld2q_f32((float*)b_ptr); // b0r|b1r|b2r|b3r || b0i|b1i|b2i|b3i

        // first multiply: both parts of a against the real parts of b
        tmp_imag.val[1] = vmulq_f32(a_val.val[1], b_val.val[0]);
        tmp_imag.val[0] = vmulq_f32(a_val.val[0], b_val.val[0]);

        // multiply-accumulate/subtract completes the conjugate products
        tmp_imag.val[1] = vmlsq_f32(tmp_imag.val[1], a_val.val[0], b_val.val[1]);
        tmp_imag.val[0] = vmlaq_f32(tmp_imag.val[0], a_val.val[1], b_val.val[1]);

        accumulator.val[0] = vaddq_f32(accumulator.val[0], tmp_imag.val[0]);
        accumulator.val[1] = vaddq_f32(accumulator.val[1], tmp_imag.val[1]);

        a_ptr += 4;
        b_ptr += 4;
    }

    lv_32fc_t accum_result[4];
    vst2q_f32((float*)accum_result, accumulator);
    *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

    // tail case for num_points not a multiple of 4
    for (number = quarter_points * 4; number < num_points; ++number) {
        *result += (*a_ptr++) * lv_conj(*b_ptr++);
    }
}
#endif /*LV_HAVE_NEON*/
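
/*
 * Note on the NEON path above: vld2q_f32 deinterleaves on load, so val[0]
 * gathers four real parts and val[1] four imaginary parts. With a = input
 * and b = taps, the fused ops compute per lane
 *   re = a_r*b_r + a_i*b_i   (vmulq + vmlaq)
 *   im = a_i*b_r - a_r*b_i   (vmulq + vmlsq)
 * which is exactly input * conj(taps).
 */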
#endif /*INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_u_H*/

#ifndef INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_a_H
#define INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_a_H

#include <volk/volk_common.h>
#include <volk/volk_complex.h>
#include <stdint.h>
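
/*
 * The _a_ implementations below assume buffers aligned to
 * volk_get_alignment(); the loads are the aligned
 * _mm256_load_ps/_mm_load_ps/movaps rather than the unaligned variants
 * used in the _u_ section above.
 */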
#ifdef LV_HAVE_AVX

#include <immintrin.h>

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_a_avx(lv_32fc_t* result,
                                                              const lv_32fc_t* input,
                                                              const lv_32fc_t* taps,
                                                              unsigned int num_points)
{
    // Partial sums of a*re(b) and a*im(b), four complex samples at a time.
    __m256 sum_a_mult_b_real = _mm256_setzero_ps();
    __m256 sum_a_mult_b_imag = _mm256_setzero_ps();

    for (long unsigned i = 0; i < (num_points & ~3u); i += 4) {
        // Aligned load of 4 complex samples (8 floats) from each vector.
        __m256 a = _mm256_load_ps((const float*)&input[i]);
        __m256 b = _mm256_load_ps((const float*)&taps[i]);
        // Duplicate the real and imaginary parts of b across lane pairs.
        __m256 b_real = _mm256_moveldup_ps(b);
        __m256 b_imag = _mm256_movehdup_ps(b);

        sum_a_mult_b_real = _mm256_add_ps(sum_a_mult_b_real, _mm256_mul_ps(a, b_real));
        // addsub supplies the sign flip that conjugating taps introduces.
        sum_a_mult_b_imag = _mm256_addsub_ps(sum_a_mult_b_imag, _mm256_mul_ps(a, b_imag));
    }

    // Swap real/imag positions in the imaginary sum so both sums line up.
    sum_a_mult_b_imag = _mm256_permute_ps(sum_a_mult_b_imag, _MM_SHUFFLE(2, 3, 0, 1));

    __m256 sum = _mm256_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);

    // Horizontal reduction: fold the upper 128-bit lane onto the lower one,
    sum = _mm256_add_ps(sum, _mm256_permute2f128_ps(sum, sum, 0x01));
    // then fold the upper complex value onto the lower one.
    sum = _mm256_add_ps(sum, _mm256_permute_ps(sum, _MM_SHUFFLE(1, 0, 3, 2)));

    __m128 lower = _mm256_extractf128_ps(sum, 0);
    _mm_storel_pi((__m64*)result, lower);

    // Scalar tail for the last num_points % 4 samples.
    for (long unsigned i = num_points & ~3u; i < num_points; ++i) {
        *result += input[i] * lv_conj(taps[i]);
    }
}

#endif /*LV_HAVE_AVX*/
#ifdef LV_HAVE_SSE3

#include <xmmintrin.h>
#include <pmmintrin.h>

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_a_sse3(lv_32fc_t* result,
                                                               const lv_32fc_t* input,
                                                               const lv_32fc_t* taps,
                                                               unsigned int num_points)
{
    // Partial sums of a*re(b) and a*im(b), two complex samples at a time.
    __m128 sum_a_mult_b_real = _mm_setzero_ps();
    __m128 sum_a_mult_b_imag = _mm_setzero_ps();

    for (long unsigned i = 0; i < (num_points & ~1u); i += 2) {
        // Aligned load of 2 complex samples (4 floats) from each vector.
        __m128 a = _mm_load_ps((const float*)&input[i]);
        __m128 b = _mm_load_ps((const float*)&taps[i]);
        // Duplicate the real and imaginary parts of b across lane pairs.
        __m128 b_real = _mm_moveldup_ps(b);
        __m128 b_imag = _mm_movehdup_ps(b);

        sum_a_mult_b_real = _mm_add_ps(sum_a_mult_b_real, _mm_mul_ps(a, b_real));
        // addsub supplies the sign flip that conjugating taps introduces.
        sum_a_mult_b_imag = _mm_addsub_ps(sum_a_mult_b_imag, _mm_mul_ps(a, b_imag));
    }

    // Swap real/imag positions in the imaginary sum so both sums line up.
    sum_a_mult_b_imag = _mm_shuffle_ps(sum_a_mult_b_imag, sum_a_mult_b_imag,
                                       _MM_SHUFFLE(2, 3, 0, 1));

    __m128 sum = _mm_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);

    // Fold the upper complex value onto the lower one and store the result.
    sum = _mm_add_ps(sum, _mm_shuffle_ps(sum, sum, _MM_SHUFFLE(1, 0, 3, 2)));

    _mm_storel_pi((__m64*)result, sum);

    // Scalar tail if num_points is odd.
    if (num_points & 1u) {
        *result += input[num_points - 1] * lv_conj(taps[num_points - 1]);
    }
}

#endif /*LV_HAVE_SSE3*/
#ifdef LV_HAVE_GENERIC

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_a_generic(lv_32fc_t* result,
                                                                  const lv_32fc_t* input,
                                                                  const lv_32fc_t* taps,
                                                                  unsigned int num_points)
{
    const unsigned int num_bytes = num_points * 8;

    float* res = (float*)result;
    float* in = (float*)input;
    float* tp = (float*)taps;
    unsigned int n_2_ccomplex_blocks = num_bytes >> 4;
    unsigned int isodd = (num_bytes >> 3) & 1;

    float sum0[2] = { 0, 0 };
    float sum1[2] = { 0, 0 };
    unsigned int i = 0;

    // Two complex samples per iteration, accumulated into independent sums.
    for (i = 0; i < n_2_ccomplex_blocks; ++i) {
        sum0[0] += in[0] * tp[0] + in[1] * tp[1];
        sum0[1] += (-in[0] * tp[1]) + in[1] * tp[0];
        sum1[0] += in[2] * tp[2] + in[3] * tp[3];
        sum1[1] += (-in[2] * tp[3]) + in[3] * tp[2];

        in += 4;
        tp += 4;
    }

    res[0] = sum0[0] + sum1[0];
    res[1] = sum0[1] + sum1[1];

    // If num_points is odd, fold in the final sample.
    for (i = 0; i < isodd; ++i) {
        *result += input[(num_bytes >> 3) - 1] * lv_conj(taps[(num_bytes >> 3) - 1]);
    }
}

#endif /*LV_HAVE_GENERIC*/
#if LV_HAVE_SSE && LV_HAVE_64

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_a_sse(lv_32fc_t* result,
                                                              const lv_32fc_t* input,
                                                              const lv_32fc_t* taps,
                                                              unsigned int num_points)
{
    const unsigned int num_bytes = num_points * 8;

    __VOLK_ATTR_ALIGNED(16)
    static const uint32_t conjugator[4] = { 0x00000000, 0x80000000, 0x00000000, 0x80000000 };

    __VOLK_ASM __VOLK_VOLATILE
        (
         "#  ccomplex_conjugate_dotprod_generic (float* result, const float *input,\n\t"
         "#                         const float *taps, unsigned num_bytes)\n\t"
         "#    float sum0 = 0;\n\t"
         "#    float sum1 = 0;\n\t"
         "#    float sum2 = 0;\n\t"
         "#    float sum3 = 0;\n\t"
         "#    do {\n\t"
         "#      sum0 += input[0] * taps[0] - input[1] * taps[1];\n\t"
         "#      sum1 += input[0] * taps[1] + input[1] * taps[0];\n\t"
         "#      sum2 += input[2] * taps[2] - input[3] * taps[3];\n\t"
         "#      sum3 += input[2] * taps[3] + input[3] * taps[2];\n\t"
         "#      input += 4;\n\t"
         "#      taps += 4;\n\t"
         "#    } while (--n_2_ccomplex_blocks != 0);\n\t"
         "#    result[0] = sum0 + sum2;\n\t"
         "#    result[1] = sum1 + sum3;\n\t"
         "# TODO: prefetch and better scheduling\n\t"
         "  xor %%r9, %%r9\n\t"
         "  xor %%r10, %%r10\n\t"
         "  movq %[conjugator], %%r9\n\t"
         "  movq %%rcx, %%rax\n\t"
         "  movaps 0(%%r9), %%xmm8\n\t"
         "  movq %%rcx, %%r8\n\t"
         "  movq %[rsi], %%r9\n\t"
         "  movq %[rdx], %%r10\n\t"
         "  xorps %%xmm6, %%xmm6 # zero accumulators\n\t"
         "  movaps 0(%%r9), %%xmm0\n\t"
         "  xorps %%xmm7, %%xmm7 # zero accumulators\n\t"
         "  movups 0(%%r10), %%xmm2\n\t"
         "  shr $5, %%rax # rax = n_2_ccomplex_blocks / 2\n\t"
         "  shr $4, %%r8\n\t"
         "  xorps %%xmm8, %%xmm2\n\t"
         "  jmp .%=L1_test\n\t"
         "  # 4 taps / loop\n\t"
         "  # something like ?? cycles / loop\n\t"
         ".%=Loop1:\n\t"
         "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
         "#  movaps (%%r9), %%xmmA\n\t"
         "#  movaps (%%r10), %%xmmB\n\t"
         "#  movaps %%xmmA, %%xmmZ\n\t"
         "#  shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
         "#  mulps %%xmmB, %%xmmA\n\t"
         "#  mulps %%xmmZ, %%xmmB\n\t"
         "#  # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
         "#  xorps %%xmmPN, %%xmmA\n\t"
         "#  movaps %%xmmA, %%xmmZ\n\t"
         "#  unpcklps %%xmmB, %%xmmA\n\t"
         "#  unpckhps %%xmmB, %%xmmZ\n\t"
         "#  movaps %%xmmZ, %%xmmY\n\t"
         "#  shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
         "#  shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
         "#  addps %%xmmZ, %%xmmA\n\t"
         "#  addps %%xmmA, %%xmmC\n\t"
         "# A=xmm0, B=xmm2, Z=xmm4\n\t"
         "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
         "  movaps 16(%%r9), %%xmm1\n\t"
         "  movaps %%xmm0, %%xmm4\n\t"
         "  mulps %%xmm2, %%xmm0\n\t"
         "  shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
         "  movaps 16(%%r10), %%xmm3\n\t"
         "  movaps %%xmm1, %%xmm5\n\t"
         "  xorps %%xmm8, %%xmm3\n\t"
         "  addps %%xmm0, %%xmm6\n\t"
         "  mulps %%xmm3, %%xmm1\n\t"
         "  shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
         "  addps %%xmm1, %%xmm6\n\t"
         "  mulps %%xmm4, %%xmm2\n\t"
         "  movaps 32(%%r9), %%xmm0\n\t"
         "  addps %%xmm2, %%xmm7\n\t"
         "  mulps %%xmm5, %%xmm3\n\t"
         "  add $32, %%r9\n\t"
         "  movaps 32(%%r10), %%xmm2\n\t"
         "  addps %%xmm3, %%xmm7\n\t"
         "  add $32, %%r10\n\t"
         "  xorps %%xmm8, %%xmm2\n\t"
         ".%=L1_test:\n\t"
         "  dec %%rax\n\t"
         "  jge .%=Loop1\n\t"
         "  # We've handled the bulk of multiplies up to here.\n\t"
         "  # Let's see if the original n_2_ccomplex_blocks was odd.\n\t"
         "  # If so, we've got 2 more taps to do.\n\t"
         "  and $1, %%r8\n\t"
         "  je .%=Leven\n\t"
         "  # The count was odd, do 2 more taps.\n\t"
         "  # Note that we've already got mm0/mm2 preloaded\n\t"
         "  # from the main loop.\n\t"
         "  movaps %%xmm0, %%xmm4\n\t"
         "  mulps %%xmm2, %%xmm0\n\t"
         "  shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
         "  addps %%xmm0, %%xmm6\n\t"
         "  mulps %%xmm4, %%xmm2\n\t"
         "  addps %%xmm2, %%xmm7\n\t"
         ".%=Leven:\n\t"
         "  # neg inversor\n\t"
         "  xorps %%xmm1, %%xmm1\n\t"
         "  mov $0x80000000, %%r9\n\t"
         "  movd %%r9, %%xmm1\n\t"
         "  shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
         "  # pfpnacc\n\t"
         "  xorps %%xmm1, %%xmm6\n\t"
         "  movaps %%xmm6, %%xmm2\n\t"
         "  unpcklps %%xmm7, %%xmm6\n\t"
         "  unpckhps %%xmm7, %%xmm2\n\t"
         "  movaps %%xmm2, %%xmm3\n\t"
         "  shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
         "  shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
         "  addps %%xmm2, %%xmm6\n\t"
         "  # xmm6 = r1 i2 r3 i4\n\t"
         "  movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
         "  addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
         "  movlps %%xmm6, (%[rdi]) # store low 2x32 bits (complex) to memory\n\t"
         :
         : [rsi] "r" (input), [rdx] "r" (taps), "c" (num_bytes), [rdi] "r" (result),
           [conjugator] "r" (conjugator)
         : "rax", "r8", "r9", "r10"
        );

    // The asm covers full 16-byte blocks; fold in a trailing odd sample here.
    int getem = num_bytes % 16;

    for (; getem > 0; getem -= 8) {
        *result += (input[(num_bytes >> 3) - 1] * lv_conj(taps[(num_bytes >> 3) - 1]));
    }
}

#endif /*LV_HAVE_SSE && LV_HAVE_64*/
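
/*
 * Both SSE asm kernels (above and below) conjugate taps by XORing them with
 * the conjugator mask {0x00000000, 0x80000000, ...}, flipping the sign bit
 * of every imaginary component; what remains is a plain complex dot product.
 * A minimal C sketch of that sign-flip step (illustrative only, not used by
 * this header; needs <stdint.h> and <string.h>):
 *
 *   static inline lv_32fc_t conj_via_signbit(lv_32fc_t t)
 *   {
 *       uint32_t u[2];
 *       memcpy(u, &t, sizeof(u)); // u[0] = re bits, u[1] = im bits
 *       u[1] ^= 0x80000000u;      // flip the imaginary sign bit
 *       memcpy(&t, u, sizeof(t));
 *       return t;                 // equals lv_conj(t)
 *   }
 */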
#if LV_HAVE_SSE && LV_HAVE_32

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_a_sse_32(lv_32fc_t* result,
                                                                 const lv_32fc_t* input,
                                                                 const lv_32fc_t* taps,
                                                                 unsigned int num_points)
{
    const unsigned int num_bytes = num_points * 8;

    __VOLK_ATTR_ALIGNED(16)
    static const uint32_t conjugator[4] = { 0x00000000, 0x80000000, 0x00000000, 0x80000000 };

    int bound = num_bytes >> 4;
    int leftovers = num_bytes % 16;

    __VOLK_ASM __VOLK_VOLATILE
        (
         "  #pushl %%ebp\n\t"
         "  #movl %%esp, %%ebp\n\t"
         "  #movl 12(%%ebp), %%eax # input\n\t"
         "  #movl 16(%%ebp), %%edx # taps\n\t"
         "  #movl 20(%%ebp), %%ecx # n_bytes\n\t"
         "  movaps 0(%[conjugator]), %%xmm1\n\t"
         "  xorps %%xmm6, %%xmm6 # zero accumulators\n\t"
         "  movaps 0(%[eax]), %%xmm0\n\t"
         "  xorps %%xmm7, %%xmm7 # zero accumulators\n\t"
         "  movaps 0(%[edx]), %%xmm2\n\t"
         "  movl %[ecx], (%[out])\n\t"
         "  shrl $5, %[ecx] # ecx = n_2_ccomplex_blocks / 2\n\t"
         "  xorps %%xmm1, %%xmm2\n\t"
         "  jmp .%=L1_test\n\t"
         "  # 4 taps / loop\n\t"
         "  # something like ?? cycles / loop\n\t"
         ".%=Loop1:\n\t"
         "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
         "#  movaps (%[eax]), %%xmmA\n\t"
         "#  movaps (%[edx]), %%xmmB\n\t"
         "#  movaps %%xmmA, %%xmmZ\n\t"
         "#  shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
         "#  mulps %%xmmB, %%xmmA\n\t"
         "#  mulps %%xmmZ, %%xmmB\n\t"
         "#  # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
         "#  xorps %%xmmPN, %%xmmA\n\t"
         "#  movaps %%xmmA, %%xmmZ\n\t"
         "#  unpcklps %%xmmB, %%xmmA\n\t"
         "#  unpckhps %%xmmB, %%xmmZ\n\t"
         "#  movaps %%xmmZ, %%xmmY\n\t"
         "#  shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
         "#  shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
         "#  addps %%xmmZ, %%xmmA\n\t"
         "#  addps %%xmmA, %%xmmC\n\t"
         "# A=xmm0, B=xmm2, Z=xmm4\n\t"
         "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
         "  movaps 16(%[edx]), %%xmm3\n\t"
         "  movaps %%xmm0, %%xmm4\n\t"
         "  xorps %%xmm1, %%xmm3\n\t"
         "  mulps %%xmm2, %%xmm0\n\t"
         "  movaps 16(%[eax]), %%xmm1\n\t"
         "  shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
         "  movaps %%xmm1, %%xmm5\n\t"
         "  addps %%xmm0, %%xmm6\n\t"
         "  mulps %%xmm3, %%xmm1\n\t"
         "  shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
         "  addps %%xmm1, %%xmm6\n\t"
         "  movaps 0(%[conjugator]), %%xmm1\n\t"
         "  mulps %%xmm4, %%xmm2\n\t"
         "  movaps 32(%[eax]), %%xmm0\n\t"
         "  addps %%xmm2, %%xmm7\n\t"
         "  mulps %%xmm5, %%xmm3\n\t"
         "  addl $32, %[eax]\n\t"
         "  movaps 32(%[edx]), %%xmm2\n\t"
         "  addps %%xmm3, %%xmm7\n\t"
         "  xorps %%xmm1, %%xmm2\n\t"
         "  addl $32, %[edx]\n\t"
         ".%=L1_test:\n\t"
         "  decl %[ecx]\n\t"
         "  jge .%=Loop1\n\t"
         "  # We've handled the bulk of multiplies up to here.\n\t"
         "  # Let's see if the original n_2_ccomplex_blocks was odd.\n\t"
         "  # If so, we've got 2 more taps to do.\n\t"
         "  movl 0(%[out]), %[ecx] # n_2_ccomplex_blocks\n\t"
         "  shrl $4, %[ecx]\n\t"
         "  andl $1, %[ecx]\n\t"
         "  je .%=Leven\n\t"
         "  # The count was odd, do 2 more taps.\n\t"
         "  # Note that we've already got mm0/mm2 preloaded\n\t"
         "  # from the main loop.\n\t"
         "  movaps %%xmm0, %%xmm4\n\t"
         "  mulps %%xmm2, %%xmm0\n\t"
         "  shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
         "  addps %%xmm0, %%xmm6\n\t"
         "  mulps %%xmm4, %%xmm2\n\t"
         "  addps %%xmm2, %%xmm7\n\t"
         ".%=Leven:\n\t"
         "  # neg inversor\n\t"
         "  #movl 8(%%ebp), %[eax]\n\t"
         "  xorps %%xmm1, %%xmm1\n\t"
         "  movl $0x80000000, (%[out])\n\t"
         "  movss (%[out]), %%xmm1\n\t"
         "  shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
         "  # pfpnacc\n\t"
         "  xorps %%xmm1, %%xmm6\n\t"
         "  movaps %%xmm6, %%xmm2\n\t"
         "  unpcklps %%xmm7, %%xmm6\n\t"
         "  unpckhps %%xmm7, %%xmm2\n\t"
         "  movaps %%xmm2, %%xmm3\n\t"
         "  shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
         "  shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
         "  addps %%xmm2, %%xmm6\n\t"
         "  # xmm6 = r1 i2 r3 i4\n\t"
         "  #movl 8(%%ebp), %[eax] # @result\n\t"
         "  movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
         "  addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
         "  movlps %%xmm6, (%[out]) # store low 2x32 bits (complex) to memory\n\t"
         :
         : [eax] "r" (input), [edx] "r" (taps), [ecx] "r" (num_bytes), [out] "r" (result),
           [conjugator] "r" (conjugator)
        );

    // The asm covers full 16-byte blocks; fold in a trailing odd sample here.
    for (; leftovers > 0; leftovers -= 8) {
        *result += (input[(bound << 1)] * lv_conj(taps[(bound << 1)]));
    }
}

#endif /*LV_HAVE_SSE && LV_HAVE_32*/

#endif /*INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_a_H*/