diff --git a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dot_prod_16ic_xn.h b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dot_prod_16ic_xn.h
index 6a79ffbde..571b1c7e6 100644
--- a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dot_prod_16ic_xn.h
+++ b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dot_prod_16ic_xn.h
@@ -92,25 +92,20 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_a_sse2(lv_16sc_t* resul
         {
             __VOLK_ATTR_ALIGNED(16) lv_16sc_t dotProductVector[4];
 
-            //todo dyn mem reg
-
             __m128i* realcacc;
             __m128i* imagcacc;
 
             realcacc = (__m128i*)calloc(num_a_vectors, sizeof(__m128i)); //calloc also sets memory to 0
             imagcacc = (__m128i*)calloc(num_a_vectors, sizeof(__m128i)); //calloc also sets memory to 0
 
-            __m128i a, b, c, c_sr, mask_imag, mask_real, real, imag, imag1, imag2, b_sl, a_sl, results;
+            __m128i a, b, c, c_sr, mask_imag, mask_real, real, imag;
 
             mask_imag = _mm_set_epi8(255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0);
             mask_real = _mm_set_epi8(0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255);
 
             for(unsigned int number = 0; number < sse_iters; number++)
                 {
-                    //std::complex memory structure: real part -> reinterpret_cast(a)[2*i]
-                    //imaginery part -> reinterpret_cast(a)[2*i + 1]
-                    // a[127:0]=[a3.i,a3.r,a2.i,a2.r,a1.i,a1.r,a0.i,a0.r]
-
+                    // b[127:0]=[a3.i,a3.r,a2.i,a2.r,a1.i,a1.r,a0.i,a0.r]
                     b = _mm_load_si128((__m128i*)_in_common); //load (2 byte imag, 2 byte real) x 4 into 128 bits reg
                     for (int n_vec = 0; n_vec < num_a_vectors; n_vec++)
                         {
@@ -121,13 +116,13 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_a_sse2(lv_16sc_t* resul
                             c_sr = _mm_srli_si128(c, 2); // Shift a right by imm8 bytes while shifting in zeros, and store the results in dst.
                             real = _mm_subs_epi16(c, c_sr);
 
-                            b_sl = _mm_slli_si128(b, 2); // b3.r, b2.i ....
-                            a_sl = _mm_slli_si128(a, 2); // a3.r, a2.i ....
+                            c_sr = _mm_slli_si128(b, 2); // b3.r, b2.i ....
+                            c = _mm_mullo_epi16(a, c_sr); // a3.i*b3.r, ....
 
-                            imag1 = _mm_mullo_epi16(a, b_sl); // a3.i*b3.r, ....
-                            imag2 = _mm_mullo_epi16(b, a_sl); // b3.i*a3.r, ....
+                            c_sr = _mm_slli_si128(a, 2); // a3.r, a2.i ....
+                            imag = _mm_mullo_epi16(b, c_sr); // b3.i*a3.r, ....
 
-                            imag = _mm_adds_epi16(imag1, imag2);
+                            imag = _mm_adds_epi16(c, imag);
 
                             realcacc[n_vec] = _mm_adds_epi16(realcacc[n_vec], real);
                             imagcacc[n_vec] = _mm_adds_epi16(imagcacc[n_vec], imag);
@@ -141,9 +136,9 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_a_sse2(lv_16sc_t* resul
                     realcacc[n_vec] = _mm_and_si128(realcacc[n_vec], mask_real);
                     imagcacc[n_vec] = _mm_and_si128(imagcacc[n_vec], mask_imag);
 
-                    results = _mm_or_si128(realcacc[n_vec], imagcacc[n_vec]);
+                    a = _mm_or_si128(realcacc[n_vec], imagcacc[n_vec]);
 
-                    _mm_store_si128((__m128i*)dotProductVector, results); // Store the results back into the dot product vector
+                    _mm_store_si128((__m128i*)dotProductVector, a); // Store the results back into the dot product vector
                     dotProduct = lv_cmake(0,0);
                     for (int i = 0; i < 4; ++i)
                         {
@@ -166,7 +161,6 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_a_sse2(lv_16sc_t* resul
                             sat_adds16i(lv_cimag(_out[n_vec]), lv_cimag(tmp)));
                 }
         }
-
 }
 
 #endif /* LV_HAVE_SSE2 */
@@ -195,25 +189,20 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_u_sse2(lv_16sc_t* resul
         {
             __VOLK_ATTR_ALIGNED(16) lv_16sc_t dotProductVector[4];
 
-            //todo dyn mem reg
-
             __m128i* realcacc;
             __m128i* imagcacc;
 
             realcacc = (__m128i*)calloc(num_a_vectors, sizeof(__m128i)); //calloc also sets memory to 0
             imagcacc = (__m128i*)calloc(num_a_vectors, sizeof(__m128i)); //calloc also sets memory to 0
 
-            __m128i a,b,c, c_sr, mask_imag, mask_real, real, imag, imag1,imag2, b_sl, a_sl, results;
+            __m128i a, b, c, c_sr, mask_imag, mask_real, real, imag;
 
             mask_imag = _mm_set_epi8(255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0);
             mask_real = _mm_set_epi8(0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255);
 
             for(unsigned int number = 0; number < sse_iters; number++)
                 {
-                    //std::complex memory structure: real part -> reinterpret_cast(a)[2*i]
-                    //imaginery part -> reinterpret_cast(a)[2*i + 1]
-                    // a[127:0]=[a3.i,a3.r,a2.i,a2.r,a1.i,a1.r,a0.i,a0.r]
-
+                    // b[127:0]=[a3.i,a3.r,a2.i,a2.r,a1.i,a1.r,a0.i,a0.r]
                     b = _mm_loadu_si128((__m128i*)_in_common); //load (2 byte imag, 2 byte real) x 4 into 128 bits reg
                     for (int n_vec = 0; n_vec < num_a_vectors; n_vec++)
                         {
@@ -224,13 +213,13 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_u_sse2(lv_16sc_t* resul
                             c_sr = _mm_srli_si128(c, 2); // Shift a right by imm8 bytes while shifting in zeros, and store the results in dst.
                             real = _mm_subs_epi16(c, c_sr);
 
-                            b_sl = _mm_slli_si128(b, 2); // b3.r, b2.i ....
-                            a_sl = _mm_slli_si128(a, 2); // a3.r, a2.i ....
+                            c_sr = _mm_slli_si128(b, 2); // b3.r, b2.i ....
+                            c = _mm_mullo_epi16(a, c_sr); // a3.i*b3.r, ....
 
-                            imag1 = _mm_mullo_epi16(a, b_sl); // a3.i*b3.r, ....
-                            imag2 = _mm_mullo_epi16(b, a_sl); // b3.i*a3.r, ....
+                            c_sr = _mm_slli_si128(a, 2); // a3.r, a2.i ....
+                            imag = _mm_mullo_epi16(b, c_sr); // b3.i*a3.r, ....
 
-                            imag = _mm_adds_epi16(imag1, imag2);
+                            imag = _mm_adds_epi16(c, imag);
 
                             realcacc[n_vec] = _mm_adds_epi16(realcacc[n_vec], real);
                             imagcacc[n_vec] = _mm_adds_epi16(imagcacc[n_vec], imag);
@@ -244,9 +233,9 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_u_sse2(lv_16sc_t* resul
                     realcacc[n_vec] = _mm_and_si128(realcacc[n_vec], mask_real);
                     imagcacc[n_vec] = _mm_and_si128(imagcacc[n_vec], mask_imag);
 
-                    results = _mm_or_si128(realcacc[n_vec], imagcacc[n_vec]);
+                    a = _mm_or_si128(realcacc[n_vec], imagcacc[n_vec]);
 
-                    _mm_storeu_si128((__m128i*)dotProductVector, results); // Store the results back into the dot product vector
+                    _mm_store_si128((__m128i*)dotProductVector, a); // Store the results back into the dot product vector
                     dotProduct = lv_cmake(0,0);
                     for (int i = 0; i < 4; ++i)
                         {
@@ -269,7 +258,6 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_u_sse2(lv_16sc_t* resul
                             sat_adds16i(lv_cimag(_out[n_vec]), lv_cimag(tmp)));
                 }
         }
-
 }
 
 #endif /* LV_HAVE_SSE2 */
diff --git a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dotprodxnpuppet_16ic.h b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dotprodxnpuppet_16ic.h
index efd75a27f..85f7c3050 100644
--- a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dotprodxnpuppet_16ic.h
+++ b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dotprodxnpuppet_16ic.h
@@ -85,6 +85,9 @@ static inline void volk_gnsssdr_16ic_x2_dotprodxnpuppet_16ic_a_sse2(lv_16sc_t* r
 
 #endif // SSE2
 
+#define WORKAROUND 1
+#ifdef WORKAROUND
+
 #ifdef LV_HAVE_SSE2
 
 static inline void volk_gnsssdr_16ic_x2_dotprodxnpuppet_16ic_u_sse2(lv_16sc_t* result, const lv_16sc_t* local_code, const lv_16sc_t* in, unsigned int num_points)
@@ -105,7 +108,7 @@ static inline void volk_gnsssdr_16ic_x2_dotprodxnpuppet_16ic_u_sse2(lv_16sc_t* r
         }
     volk_gnsssdr_free(in_a);
 }
-
+#endif
 #endif // SSE2
 
 #ifdef LV_HAVE_NEON
diff --git a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn.h b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn.h
index 595319533..9e0218eeb 100644
--- a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn.h
+++ b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn.h
@@ -115,7 +115,7 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_a_sse3(lv_16sc_
             realcacc = (__m128i*)calloc(num_a_vectors, sizeof(__m128i)); //calloc also sets memory to 0
             imagcacc = (__m128i*)calloc(num_a_vectors, sizeof(__m128i)); //calloc also sets memory to 0
 
-            __m128i a, b, c, c_sr, mask_imag, mask_real, real, imag, imag1, imag2, b_sl, a_sl, results;
+            __m128i a, b, c, c_sr, mask_imag, mask_real, real, imag, imag1, imag2, b_sl, a_sl;
 
             mask_imag = _mm_set_epi8(255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0);
             mask_real = _mm_set_epi8(0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255);
@@ -200,8 +200,8 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_a_sse3(lv_16sc_
 
                             imag = _mm_adds_epi16(imag1, imag2);
 
-                            realcacc[n_vec] = _mm_adds_epi16 (realcacc[n_vec], real);
-                            imagcacc[n_vec] = _mm_adds_epi16 (imagcacc[n_vec], imag);
+                            realcacc[n_vec] = _mm_adds_epi16(realcacc[n_vec], real);
+                            imagcacc[n_vec] = _mm_adds_epi16(imagcacc[n_vec], imag);
                         }
                 }
 
@@ -210,9 +210,9 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_a_sse3(lv_16sc_
                     realcacc[n_vec] = _mm_and_si128(realcacc[n_vec], mask_real);
                     imagcacc[n_vec] = _mm_and_si128(imagcacc[n_vec], mask_imag);
 
-                    results = _mm_or_si128(realcacc[n_vec], imagcacc[n_vec]);
+                    a = _mm_or_si128(realcacc[n_vec], imagcacc[n_vec]);
 
-                    _mm_store_si128((__m128i*)dotProductVector, results); // Store the results back into the dot product vector
+                    _mm_store_si128((__m128i*)dotProductVector, a); // Store the results back into the dot product vector
                     dotProduct = lv_cmake(0,0);
                     for (int i = 0; i < 4; ++i)
                         {
@@ -280,7 +280,7 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_u_sse3(lv_16sc_
             realcacc = (__m128i*)calloc(num_a_vectors, sizeof(__m128i)); //calloc also sets memory to 0
             imagcacc = (__m128i*)calloc(num_a_vectors, sizeof(__m128i)); //calloc also sets memory to 0
 
-            __m128i a, b, c, c_sr, mask_imag, mask_real, real, imag, imag1, imag2, b_sl, a_sl, results;
+            __m128i a, b, c, c_sr, mask_imag, mask_real, real, imag, imag1, imag2, b_sl, a_sl;
 
             mask_imag = _mm_set_epi8(255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0);
             mask_real = _mm_set_epi8(0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255);
@@ -305,6 +305,7 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_u_sse3(lv_16sc_
 
                     // Phase rotation on operand in_common starts here:
                     pa = _mm_set_ps((float)(lv_cimag(_in_common[1])), (float)(lv_creal(_in_common[1])), (float)(lv_cimag(_in_common[0])), (float)(lv_creal(_in_common[0]))); // //load (2 byte imag, 2 byte real) x 2 into 128 bits reg
+                    __builtin_prefetch(_in_common + 8);
                     //complex 32fc multiplication b=a*two_phase_acc_reg
                     yl = _mm_moveldup_ps(two_phase_acc_reg); // Load yl with cr,cr,dr,dr
                     yh = _mm_movehdup_ps(two_phase_acc_reg); // Load yh with ci,ci,di,di
@@ -372,12 +373,12 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_u_sse3(lv_16sc_
 
             for (int n_vec = 0; n_vec < num_a_vectors; n_vec++)
                 {
-                    realcacc[n_vec] = _mm_and_si128 (realcacc[n_vec], mask_real);
-                    imagcacc[n_vec] = _mm_and_si128 (imagcacc[n_vec], mask_imag);
+                    realcacc[n_vec] = _mm_and_si128(realcacc[n_vec], mask_real);
+                    imagcacc[n_vec] = _mm_and_si128(imagcacc[n_vec], mask_imag);
 
-                    results = _mm_or_si128(realcacc[n_vec], imagcacc[n_vec]);
+                    a = _mm_or_si128(realcacc[n_vec], imagcacc[n_vec]);
 
-                    _mm_storeu_si128((__m128i*)dotProductVector, results); // Store the results back into the dot product vector
+                    _mm_storeu_si128((__m128i*)dotProductVector, a); // Store the results back into the dot product vector
                     dotProduct = lv_cmake(0,0);
                     for (int i = 0; i < 4; ++i)
                         {
@@ -528,7 +529,7 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_neon(lv_16sc_t*
                     for (int n_vec = 0; n_vec < num_a_vectors; n_vec++)
                         {
                             a_val = vld2_s16((int16_t*)&(_in_a[n_vec][number*4])); //load (2 byte imag, 2 byte real) x 4 into 128 bits reg
-                            __builtin_prefetch(&_in_a[n_vec][number*4] + 8);
+                            //__builtin_prefetch(&_in_a[n_vec][number*4] + 8);
 
                             // multiply the real*real and imag*imag to get real result
                             // a0r*b0r|a1r*b1r|a2r*b2r|a3r*b3r
diff --git a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_rotator_dotprodxnpuppet_16ic.h b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_rotator_dotprodxnpuppet_16ic.h
index 828bef488..a6af38b71 100644
--- a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_rotator_dotprodxnpuppet_16ic.h
+++ b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_rotator_dotprodxnpuppet_16ic.h
@@ -116,7 +116,7 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dotprodxnpuppet_16ic_u_sse3(lv_1
     for(unsigned int n = 0; n < num_a_vectors; n++)
         {
             in_a[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
-            memcpy(in_a[n], in, sizeof(lv_16sc_t) * num_points);
+            memcpy((lv_16sc_t*)in_a[n], (lv_16sc_t*)in, sizeof(lv_16sc_t) * num_points);
         }
 
     volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_u_sse3(result, local_code, phase_inc[0], phase, (const lv_16sc_t**) in_a, num_a_vectors, num_points);
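
A standalone sketch (not part of the patch) of the SSE2 complex-multiply idiom that the dot-product hunks above rework: the same _mm_mullo_epi16 / _mm_srli_si128 / _mm_slli_si128 sequence, with c and c_sr reused as scratch registers in place of the removed imag1, imag2, b_sl, a_sl and results temporaries. The sample values, the GCC/Clang alignment attribute and the main() harness are illustrative assumptions, not repository code.

#include <emmintrin.h>  /* SSE2 intrinsics, as in the kernels above */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* interleaved 16-bit complex samples: r0, i0, r1, i1, ... */
    __attribute__((aligned(16))) int16_t x[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    __attribute__((aligned(16))) int16_t y[8] = {2, -1, 1, 1, 0, 3, -2, 5};
    __attribute__((aligned(16))) int16_t out[8];

    const __m128i mask_imag = _mm_set_epi8(255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0);
    const __m128i mask_real = _mm_set_epi8(0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255);

    __m128i a = _mm_load_si128((__m128i*)x);    // a[127:0]=[a3.i,a3.r,a2.i,a2.r,a1.i,a1.r,a0.i,a0.r]
    __m128i b = _mm_load_si128((__m128i*)y);

    __m128i c = _mm_mullo_epi16(a, b);          // a.r*b.r in even lanes, a.i*b.i in odd lanes
    __m128i c_sr = _mm_srli_si128(c, 2);        // move each a.i*b.i next to its a.r*b.r
    __m128i real = _mm_subs_epi16(c, c_sr);     // real = a.r*b.r - a.i*b.i (even lanes, saturated)

    c_sr = _mm_slli_si128(b, 2);                // b.r moved into the odd lanes
    c = _mm_mullo_epi16(a, c_sr);               // a.i*b.r in odd lanes
    c_sr = _mm_slli_si128(a, 2);                // a.r moved into the odd lanes
    __m128i imag = _mm_mullo_epi16(b, c_sr);    // b.i*a.r in odd lanes
    imag = _mm_adds_epi16(c, imag);             // imag = a.i*b.r + a.r*b.i (odd lanes, saturated)

    // keep real parts in even lanes, imaginary parts in odd lanes, and merge
    __m128i res = _mm_or_si128(_mm_and_si128(real, mask_real), _mm_and_si128(imag, mask_imag));
    _mm_store_si128((__m128i*)out, res);

    for (int i = 0; i < 4; ++i)
        {
            printf("(%d, %d)\n", out[2 * i], out[2 * i + 1]);
        }
    return 0;
}

Built with any SSE2-capable GCC or Clang, it should print the four complex products (4, 3), (-1, 7), (-18, 15) and (-54, 19).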
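
For reference, a plain-C approximation (also not taken from the repository) of what each *_dot_prod_16ic_xn kernel accumulates: one saturated 16-bit complex dot product per local-code replica, mirroring the sat_adds16i tail loop visible in the hunks above. The c16 struct and sat_add16 helper are illustrative stand-ins for lv_16sc_t and sat_adds16i, and the SIMD paths also saturate the intermediate _mm_subs_epi16/_mm_adds_epi16 steps, so extreme inputs can round differently here.

#include <stdint.h>
#include <stdio.h>

typedef struct { int16_t r, i; } c16;   /* illustrative stand-in for lv_16sc_t */

static int16_t sat_add16(int a, int b)  /* like sat_adds16i: clamp the sum to the int16 range */
{
    int s = a + b;
    if (s > INT16_MAX) return INT16_MAX;
    if (s < INT16_MIN) return INT16_MIN;
    return (int16_t)s;
}

/* result[v] = saturated sum over n of in_common[n] * in_a[v][n] */
static void dot_prod_16ic_xn_ref(c16* result, const c16* in_common, const c16** in_a,
                                 int num_a_vectors, unsigned int num_points)
{
    for (int v = 0; v < num_a_vectors; v++)
        {
            result[v].r = 0;
            result[v].i = 0;
            for (unsigned int n = 0; n < num_points; n++)
                {
                    /* 16-bit complex product; as with _mm_mullo_epi16, only the low 16 bits survive */
                    int16_t pr = (int16_t)(in_common[n].r * in_a[v][n].r - in_common[n].i * in_a[v][n].i);
                    int16_t pi = (int16_t)(in_common[n].r * in_a[v][n].i + in_common[n].i * in_a[v][n].r);
                    result[v].r = sat_add16(result[v].r, pr);
                    result[v].i = sat_add16(result[v].i, pi);
                }
        }
}

int main(void)
{
    c16 x[4]    = {{1, 2}, {3, 4}, {5, 6}, {7, 8}};
    c16 code[4] = {{2, -1}, {1, 1}, {0, 3}, {-2, 5}};
    const c16* replicas[1] = {code};
    c16 out[1];
    dot_prod_16ic_xn_ref(out, x, replicas, 1, 4);
    printf("(%d, %d)\n", out[0].r, out[0].i);
    return 0;
}

With the same four samples as in the previous sketch and a single replica, the accumulated result is (-69, 44), the saturated sum of the four products.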