diff --git a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_s32fc_x2_rotator_16ic.h b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_s32fc_x2_rotator_16ic.h
index c86b46068..21b54779d 100644
--- a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_s32fc_x2_rotator_16ic.h
+++ b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_s32fc_x2_rotator_16ic.h
@@ -79,11 +79,11 @@ static inline void volk_gnsssdr_16ic_s32fc_x2_rotator_16ic_generic(lv_16sc_t* ou
 static inline void volk_gnsssdr_16ic_s32fc_x2_rotator_16ic_a_sse3(lv_16sc_t* outVector, const lv_16sc_t* inVector, const lv_32fc_t phase_inc, lv_32fc_t* phase, unsigned int num_points)
 {
     const unsigned int sse_iters = num_points / 4;
-    __m128 a,b, two_phase_acc_reg,two_phase_inc_reg;
-    __m128i c1,c2,result;
+    __m128 a, b, two_phase_acc_reg, two_phase_inc_reg;
+    __m128i c1, c2, result;
     __attribute__((aligned(16))) lv_32fc_t two_phase_inc[2];
-    two_phase_inc[0] = phase_inc*phase_inc;
-    two_phase_inc[1] = phase_inc*phase_inc;
+    two_phase_inc[0] = phase_inc * phase_inc;
+    two_phase_inc[1] = phase_inc * phase_inc;
     two_phase_inc_reg = _mm_load_ps((float*) two_phase_inc);
     __attribute__((aligned(16))) lv_32fc_t two_phase_acc[2];
     two_phase_acc[0] = (*phase);
@@ -95,6 +95,8 @@ static inline void volk_gnsssdr_16ic_s32fc_x2_rotator_16ic_a_sse3(lv_16sc_t* out
     lv_16sc_t* _out = outVector;
 
     __m128 yl, yh, tmp1, tmp2, tmp3;
+    lv_16sc_t tmp16;
+    lv_32fc_t tmp32;
 
     for(unsigned int number = 0; number < sse_iters; number++)
         {
@@ -105,7 +107,7 @@ static inline void volk_gnsssdr_16ic_s32fc_x2_rotator_16ic_a_sse3(lv_16sc_t* out
             tmp1 = _mm_mul_ps(a, yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr
             a = _mm_shuffle_ps(a, a, 0xB1); // Re-arrange x to be ai,ar,bi,br
             tmp2 = _mm_mul_ps(a, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di
-            b=_mm_addsub_ps(tmp1, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di
+            b = _mm_addsub_ps(tmp1, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di
             c1 = _mm_cvtps_epi32(b); // convert from 32fc to 32ic
 
             //complex 32fc multiplication two_phase_acc_reg=two_phase_acc_reg*two_phase_inc_reg
@@ -114,7 +116,7 @@ static inline void volk_gnsssdr_16ic_s32fc_x2_rotator_16ic_a_sse3(lv_16sc_t* out
             tmp1 = _mm_mul_ps(two_phase_inc_reg, yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr
             tmp3 = _mm_shuffle_ps(two_phase_inc_reg, two_phase_inc_reg, 0xB1); // Re-arrange x to be ai,ar,bi,br
             tmp2 = _mm_mul_ps(tmp3, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di
-            two_phase_acc_reg=_mm_addsub_ps(tmp1, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di
+            two_phase_acc_reg = _mm_addsub_ps(tmp1, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di
 
             //next two samples
             _in += 2;
@@ -125,7 +127,7 @@ static inline void volk_gnsssdr_16ic_s32fc_x2_rotator_16ic_a_sse3(lv_16sc_t* out
             tmp1 = _mm_mul_ps(a, yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr
             a = _mm_shuffle_ps(a, a, 0xB1); // Re-arrange x to be ai,ar,bi,br
             tmp2 = _mm_mul_ps(a, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di
-            b=_mm_addsub_ps(tmp1, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di
+            b = _mm_addsub_ps(tmp1, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di
             c2 = _mm_cvtps_epi32(b); // convert from 32fc to 32ic
 
             //complex 32fc multiplication two_phase_acc_reg=two_phase_acc_reg*two_phase_inc_reg
@@ -134,7 +136,7 @@ static inline void volk_gnsssdr_16ic_s32fc_x2_rotator_16ic_a_sse3(lv_16sc_t* out
             tmp1 = _mm_mul_ps(two_phase_inc_reg, yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr
             tmp3 = _mm_shuffle_ps(two_phase_inc_reg, two_phase_inc_reg, 0xB1); // Re-arrange x to be ai,ar,bi,br
             tmp2 = _mm_mul_ps(tmp3, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di
-            two_phase_acc_reg=_mm_addsub_ps(tmp1, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di
+            two_phase_acc_reg = _mm_addsub_ps(tmp1, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di
 
             // store four output samples
             result = _mm_packs_epi32(c1, c2);// convert from 32ic to 16ic
@@ -146,9 +148,8 @@ static inline void volk_gnsssdr_16ic_s32fc_x2_rotator_16ic_a_sse3(lv_16sc_t* out
         }
 
     _mm_storeu_ps((float*)two_phase_acc, two_phase_acc_reg);
-    (*phase) = lv_cmake(two_phase_acc[0], two_phase_acc[0]) * phase_inc;
-    lv_16sc_t tmp16;
-    lv_32fc_t tmp32;
+    (*phase) = lv_cmake(two_phase_acc[0], two_phase_acc[0]);
+
     for (unsigned int i = sse_iters * 4; i < num_points; ++i)
         {
             tmp16 = *_in++;
@@ -166,15 +167,15 @@ static inline void volk_gnsssdr_16ic_s32fc_x2_rotator_16ic_a_sse3(lv_16sc_t* out
 static inline void volk_gnsssdr_16ic_s32fc_x2_rotator_16ic_u_sse3(lv_16sc_t* outVector, const lv_16sc_t* inVector, const lv_32fc_t phase_inc, lv_32fc_t* phase, unsigned int num_points)
 {
     const unsigned int sse_iters = num_points / 4;
-    __m128 a,b, two_phase_acc_reg,two_phase_inc_reg;
-    __m128i c1,c2,result;
+    __m128 a, b, two_phase_acc_reg, two_phase_inc_reg;
+    __m128i c1, c2, result;
     __attribute__((aligned(16))) lv_32fc_t two_phase_inc[2];
-    two_phase_inc[0] = phase_inc*phase_inc;
-    two_phase_inc[1] = phase_inc*phase_inc;
+    two_phase_inc[0] = phase_inc * phase_inc;
+    two_phase_inc[1] = phase_inc * phase_inc;
     two_phase_inc_reg = _mm_load_ps((float*) two_phase_inc);
     __attribute__((aligned(16))) lv_32fc_t two_phase_acc[2];
     two_phase_acc[0] = (*phase);
-    two_phase_acc[1] = (*phase)*phase_inc;
+    two_phase_acc[1] = (*phase) * phase_inc;
     two_phase_acc_reg = _mm_load_ps((float*) two_phase_acc);
 
     const lv_16sc_t* _in = inVector;
@@ -182,6 +183,8 @@ static inline void volk_gnsssdr_16ic_s32fc_x2_rotator_16ic_u_sse3(lv_16sc_t* out
     lv_16sc_t* _out = outVector;
 
     __m128 yl, yh, tmp1, tmp2, tmp3;
+    lv_16sc_t tmp16;
+    lv_32fc_t tmp32;
 
     for(unsigned int number = 0; number < sse_iters; number++)
         {
@@ -192,7 +195,7 @@ static inline void volk_gnsssdr_16ic_s32fc_x2_rotator_16ic_u_sse3(lv_16sc_t* out
             tmp1 = _mm_mul_ps(a, yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr
             a = _mm_shuffle_ps(a, a, 0xB1); // Re-arrange x to be ai,ar,bi,br
             tmp2 = _mm_mul_ps(a, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di
-            b=_mm_addsub_ps(tmp1, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di
+            b = _mm_addsub_ps(tmp1, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di
             c1 = _mm_cvtps_epi32(b); // convert from 32fc to 32ic
 
             //complex 32fc multiplication two_phase_acc_reg=two_phase_acc_reg*two_phase_inc_reg
@@ -201,7 +204,7 @@ static inline void volk_gnsssdr_16ic_s32fc_x2_rotator_16ic_u_sse3(lv_16sc_t* out
             tmp1 = _mm_mul_ps(two_phase_inc_reg, yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr
             tmp3 = _mm_shuffle_ps(two_phase_inc_reg, two_phase_inc_reg, 0xB1); // Re-arrange x to be ai,ar,bi,br
             tmp2 = _mm_mul_ps(tmp3, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di
-            two_phase_acc_reg=_mm_addsub_ps(tmp1, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di
+            two_phase_acc_reg = _mm_addsub_ps(tmp1, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di
 
             //next two samples
             _in += 2;
@@ -212,7 +215,7 @@ static inline void volk_gnsssdr_16ic_s32fc_x2_rotator_16ic_u_sse3(lv_16sc_t* out
             tmp1 = _mm_mul_ps(a, yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr
             a = _mm_shuffle_ps(a, a, 0xB1); // Re-arrange x to be ai,ar,bi,br
             tmp2 = _mm_mul_ps(a, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di
-            b=_mm_addsub_ps(tmp1, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di
+            b = _mm_addsub_ps(tmp1, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di
             c2 = _mm_cvtps_epi32(b); // convert from 32fc to 32ic
 
             //complex 32fc multiplication two_phase_acc_reg=two_phase_acc_reg*two_phase_inc_reg
@@ -233,9 +236,8 @@ static inline void volk_gnsssdr_16ic_s32fc_x2_rotator_16ic_u_sse3(lv_16sc_t* out
         }
 
     _mm_storeu_ps((float*)two_phase_acc, two_phase_acc_reg);
-    (*phase) = lv_cmake(two_phase_acc[0], two_phase_acc[0]) * phase_inc;
-    lv_16sc_t tmp16;
-    lv_32fc_t tmp32;
+    (*phase) = lv_cmake(two_phase_acc[0], two_phase_acc[0]);
+
     for (unsigned int i = sse_iters * 4; i < num_points; ++i)
         {
             tmp16 = *_in++;
@@ -261,8 +263,8 @@ static inline void volk_gnsssdr_16ic_s32fc_x2_rotator_16ic_neon(lv_16sc_t* outVe
     lv_16sc_t* _out = outVector;
 
     lv_32fc_t ___phase4 = phase_inc * phase_inc * phase_inc * phase_inc;
-    float32_t __phase4_real[4] = { lv_creal(___phase4), lv_creal(___phase4), lv_creal(___phase4), lv_creal(___phase4) };
-    float32_t __phase4_imag[4] = { lv_cimag(___phase4), lv_cimag(___phase4), lv_cimag(___phase4), lv_cimag(___phase4) };
+    __VOLK_ATTR_ALIGNED(16) float32_t __phase4_real[4] = { lv_creal(___phase4), lv_creal(___phase4), lv_creal(___phase4), lv_creal(___phase4) };
+    __VOLK_ATTR_ALIGNED(16) float32_t __phase4_imag[4] = { lv_cimag(___phase4), lv_cimag(___phase4), lv_cimag(___phase4), lv_cimag(___phase4) };
     float32x4_t _phase4_real = vld1q_f32(__phase4_real);
     float32x4_t _phase4_imag = vld1q_f32(__phase4_imag);
 
@@ -271,8 +273,8 @@ static inline void volk_gnsssdr_16ic_s32fc_x2_rotator_16ic_neon(lv_16sc_t* outVe
     lv_32fc_t phase3 = phase2 * phase_inc;
     lv_32fc_t phase4 = phase3 * phase_inc;
 
-    float32_t __phase_real[4] = { lv_creal((*phase)), lv_creal(phase2), lv_creal(phase3), lv_creal(phase4) };
-    float32_t __phase_imag[4] = { lv_cimag((*phase)), lv_cimag(phase2), lv_cimag(phase3), lv_cimag(phase4) };
+    __VOLK_ATTR_ALIGNED(16) float32_t __phase_real[4] = { lv_creal((*phase)), lv_creal(phase2), lv_creal(phase3), lv_creal(phase4) };
+    __VOLK_ATTR_ALIGNED(16) float32_t __phase_imag[4] = { lv_cimag((*phase)), lv_cimag(phase2), lv_cimag(phase3), lv_cimag(phase4) };
     float32x4_t _phase_real = vld1q_f32(__phase_real);
     float32x4_t _phase_imag = vld1q_f32(__phase_imag);
 
@@ -288,7 +290,9 @@ static inline void volk_gnsssdr_16ic_s32fc_x2_rotator_16ic_neon(lv_16sc_t* outVe
     for(; i < neon_iters; ++i)
         {
             /* load 4 complex numbers (int 16 bits each component) */
-            tmp16 = vld2_s16((int16_t*)_in); _in += 4;
+            tmp16 = vld2_s16((int16_t*)_in);
+            __builtin_prefetch(_in + 8);
+            _in += 4;
 
             /* promote them to int 32 bits */
             tmp32i.val[0] = vmovl_s16(tmp16.val[0]);
@@ -339,7 +343,7 @@ static inline void volk_gnsssdr_16ic_s32fc_x2_rotator_16ic_neon(lv_16sc_t* outVe
             vst1q_f32((float32_t*)__phase_real, _phase_real);
             vst1q_f32((float32_t*)__phase_imag, _phase_imag);
 
-            (*phase) = lv_cmake((float32_t)__phase_real[3], (float32_t)__phase_imag[3]) * phase_inc;
+            (*phase) = lv_cmake((float32_t)__phase_real[0], (float32_t)__phase_imag[0]);
         }
     for(i = 0; i < neon_iters % 4; ++i)
         {
diff --git a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dot_prod_16ic.h b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dot_prod_16ic.h
index b48ab17bc..344d1c73e 100644
--- a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dot_prod_16ic.h
+++ b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dot_prod_16ic.h
@@ -77,6 +77,7 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_a_sse2(lv_16sc_t* out, con
     if (sse_iters > 0)
         {
             __m128i a,b,c, c_sr, mask_imag, mask_real, real, imag, imag1,imag2, b_sl, a_sl, realcacc, imagcacc, result;
+            __VOLK_ATTR_ALIGNED(16) lv_16sc_t dotProductVector[4];
 
             realcacc = _mm_setzero_si128();
             imagcacc = _mm_setzero_si128();
@@ -116,8 +117,6 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_a_sse2(lv_16sc_t* out, con
 
             result = _mm_or_si128 (realcacc, imagcacc);
 
-            __VOLK_ATTR_ALIGNED(16) lv_16sc_t dotProductVector[4];
-
             _mm_store_si128((__m128i*)dotProductVector,result); // Store the results back into the dot product vector
 
             for (int i = 0; i < 4; ++i)
@@ -154,6 +153,7 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_u_sse2(lv_16sc_t* out, con
     if (sse_iters > 0)
        {
            __m128i a,b,c, c_sr, mask_imag, mask_real, real, imag, imag1,imag2, b_sl, a_sl, realcacc, imagcacc, result;
+           __VOLK_ATTR_ALIGNED(16) lv_16sc_t dotProductVector[4];
 
            realcacc = _mm_setzero_si128();
            imagcacc = _mm_setzero_si128();
@@ -193,8 +193,6 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_u_sse2(lv_16sc_t* out, con
 
            result = _mm_or_si128 (realcacc, imagcacc);
 
-           __VOLK_ATTR_ALIGNED(16) lv_16sc_t dotProductVector[4];
-
            _mm_storeu_si128((__m128i*)dotProductVector,result); // Store the results back into the dot product vector
 
            for (int i = 0; i < 4; ++i)
@@ -227,7 +225,7 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_neon(lv_16sc_t* out, const
     // 2nd lane holds the imaginary part
     int16x4x2_t a_val, b_val, c_val, accumulator;
     int16x4x2_t tmp_real, tmp_imag;
-    lv_16sc_t accum_result[4];
+    __VOLK_ATTR_ALIGNED(16) lv_16sc_t accum_result[4];
    accumulator.val[0] = vdup_n_s16(0);
    accumulator.val[1] = vdup_n_s16(0);
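Review note. The substantive fix in the rotator hunks is dropping the trailing "* phase_inc" when writing the accumulated phase back to the caller. As I read the patch, the SIMD loop already leaves the phase accumulator pointing at the phase of the *next* sample, so the extra multiply advanced it one step too far per call. A minimal scalar model of what the kernel computes, mirroring the generic/tail loop in the file (the function name and the fc32 typedef are illustrative, not library API):

    #include <complex.h>

    typedef float complex fc32; /* stand-in for lv_32fc_t */

    static void rotator_scalar_model(fc32* out, const fc32* in, const fc32 phase_inc,
                                     fc32* phase, unsigned int num_points)
    {
        for (unsigned int i = 0; i < num_points; ++i)
            {
                out[i] = in[i] * (*phase); /* rotate the current sample */
                (*phase) *= phase_inc;     /* advance to the phase of the next sample */
            }
        /* On exit, *phase already holds the phase for sample num_points, so an
           extra "* phase_inc" here (as in the removed lines) would overshoot by
           one increment and desynchronize the carrier across kernel calls. */
    }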
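For reference, the mul/shuffle/addsub idiom that most hunks only re-space (the "ar*cr-ai*ci, ai*cr+ar*ci, ..." comments describe it) can be read in isolation. A self-contained sketch, assuming SSE3 and that yl/yh are built with the usual _mm_moveldup_ps/_mm_movehdup_ps duplication of the multiplier's real and imaginary parts (the kernels keep yl/yh in registers rather than calling a helper like this):

    #include <pmmintrin.h> /* SSE3: _mm_moveldup_ps, _mm_movehdup_ps, _mm_addsub_ps */

    /* Multiply two packed complex floats at once: x = (ar,ai,br,bi),
       y = (cr,ci,dr,di), i.e. the interleaved re/im layout the kernels use. */
    static inline __m128 cmul2_ps_sketch(__m128 x, __m128 y)
    {
        __m128 yl = _mm_moveldup_ps(y);         /* cr,cr,dr,dr */
        __m128 yh = _mm_movehdup_ps(y);         /* ci,ci,di,di */
        __m128 t1 = _mm_mul_ps(x, yl);          /* ar*cr, ai*cr, br*dr, bi*dr */
        __m128 xs = _mm_shuffle_ps(x, x, 0xB1); /* ai,ar,bi,br */
        __m128 t2 = _mm_mul_ps(xs, yh);         /* ai*ci, ar*ci, bi*di, br*di */
        return _mm_addsub_ps(t1, t2);           /* ar*cr-ai*ci, ai*cr+ar*ci,
                                                   br*dr-bi*di, bi*dr+br*di */
    }

_mm_addsub_ps subtracts in the even lanes and adds in the odd ones, which is exactly the real/imaginary split of a complex product, so the whole multiply costs two shuffles, two multiplies and one addsub.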
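In the dot-product kernels, hoisting dotProductVector to the top of the block (and aligning accum_result) is not just style: the aligned a_sse2 path stores with _mm_store_si128, which requires a 16-byte-aligned destination; an unaligned stack array there is undefined behaviour. On GCC-family compilers __VOLK_ATTR_ALIGNED(16) should expand to __attribute__((aligned(16))) (my reading of volk_common.h; treat as an assumption). A minimal sketch of the requirement:

    #include <emmintrin.h> /* SSE2 */
    #include <stdint.h>

    void aligned_store_sketch(void)
    {
        /* 4 complex int16 results = 16 bytes; int16_t[8] stands in for
           lv_16sc_t[4]. The alignment attribute is what makes the aligned
           store below legal. */
        __attribute__((aligned(16))) int16_t dot[8];
        _mm_store_si128((__m128i*)dot, _mm_setzero_si128());
    }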