diff --git a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dot_prod_16ic_xn.h b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dot_prod_16ic_xn.h
index 614453f30..6a79ffbde 100644
--- a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dot_prod_16ic_xn.h
+++ b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dot_prod_16ic_xn.h
@@ -78,7 +78,7 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_generic(lv_16sc_t* resu
  \param[in] num_a_vectors Number of vectors to be multiplied by the reference vector and accumulated
  \param[in] num_points The Number of complex values to be multiplied together, accumulated and stored into result
  */
-static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_a_sse2(lv_16sc_t* out, const lv_16sc_t* in_common, const lv_16sc_t** in_a, int num_a_vectors, unsigned int num_points)
+static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_a_sse2(lv_16sc_t* result, const lv_16sc_t* in_common, const lv_16sc_t** in_a, int num_a_vectors, unsigned int num_points)
 {
     lv_16sc_t dotProduct = lv_cmake(0,0);
@@ -86,7 +86,7 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_a_sse2(lv_16sc_t* out,
     const lv_16sc_t** _in_a = in_a;
     const lv_16sc_t* _in_common = in_common;
-    lv_16sc_t* _out = out;
+    lv_16sc_t* _out = result;
 
     if (sse_iters > 0)
         {
@@ -100,7 +100,7 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_a_sse2(lv_16sc_t* out,
             realcacc = (__m128i*)calloc(num_a_vectors, sizeof(__m128i)); //calloc also sets memory to 0
             imagcacc = (__m128i*)calloc(num_a_vectors, sizeof(__m128i)); //calloc also sets memory to 0
 
-            __m128i a,b,c, c_sr, mask_imag, mask_real, real, imag, imag1,imag2, b_sl, a_sl, result;
+            __m128i a, b, c, c_sr, mask_imag, mask_real, real, imag, imag1, imag2, b_sl, a_sl, results;
 
             mask_imag = _mm_set_epi8(255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0);
             mask_real = _mm_set_epi8(0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255);
@@ -116,10 +116,10 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_a_sse2(lv_16sc_t* out,
                         {
                             a = _mm_load_si128((__m128i*)&(_in_a[n_vec][number*4])); //load (2 byte imag, 2 byte real) x 4 into 128 bits reg
-                            c = _mm_mullo_epi16 (a, b); // a3.i*b3.i, a3.r*b3.r, ....
+                            c = _mm_mullo_epi16(a, b); // a3.i*b3.i, a3.r*b3.r, ....
 
-                            c_sr = _mm_srli_si128 (c, 2); // Shift a right by imm8 bytes while shifting in zeros, and store the results in dst.
-                            real = _mm_subs_epi16 (c, c_sr);
+                            c_sr = _mm_srli_si128(c, 2); // Shift a right by imm8 bytes while shifting in zeros, and store the results in dst.
+                            real = _mm_subs_epi16(c, c_sr);
 
                             b_sl = _mm_slli_si128(b, 2); // b3.r, b2.i ....
                             a_sl = _mm_slli_si128(a, 2); // a3.r, a2.i ....
@@ -129,23 +129,23 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_a_sse2(lv_16sc_t* out,
                             imag = _mm_adds_epi16(imag1, imag2);
 
-                            realcacc[n_vec] = _mm_adds_epi16 (realcacc[n_vec], real);
-                            imagcacc[n_vec] = _mm_adds_epi16 (imagcacc[n_vec], imag);
+                            realcacc[n_vec] = _mm_adds_epi16(realcacc[n_vec], real);
+                            imagcacc[n_vec] = _mm_adds_epi16(imagcacc[n_vec], imag);
                         }
                     _in_common += 4;
                 }
 
-            for (int n_vec=0;n_vec<num_a_vectors;n_vec++)
@@ -203,7 +203,7 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_u_sse2(lv_16sc_t* out,
             realcacc = (__m128i*)calloc(num_a_vectors, sizeof(__m128i)); //calloc also sets memory to 0
             imagcacc = (__m128i*)calloc(num_a_vectors, sizeof(__m128i)); //calloc also sets memory to 0
 
-            __m128i a,b,c, c_sr, mask_imag, mask_real, real, imag, imag1,imag2, b_sl, a_sl, result;
+            __m128i a,b,c, c_sr, mask_imag, mask_real, real, imag, imag1,imag2, b_sl, a_sl, results;
 
             mask_imag = _mm_set_epi8(255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0);
             mask_real = _mm_set_epi8(0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255);
@@ -219,10 +219,10 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_u_sse2(lv_16sc_t* out,
                         {
                             a = _mm_loadu_si128((__m128i*)&(_in_a[n_vec][number*4])); //load (2 byte imag, 2 byte real) x 4 into 128 bits reg
-                            c = _mm_mullo_epi16 (a, b); // a3.i*b3.i, a3.r*b3.r, ....
+                            c = _mm_mullo_epi16(a, b); // a3.i*b3.i, a3.r*b3.r, ....
 
-                            c_sr = _mm_srli_si128 (c, 2); // Shift a right by imm8 bytes while shifting in zeros, and store the results in dst.
-                            real = _mm_subs_epi16 (c, c_sr);
+                            c_sr = _mm_srli_si128(c, 2); // Shift a right by imm8 bytes while shifting in zeros, and store the results in dst.
+                            real = _mm_subs_epi16(c, c_sr);
 
                             b_sl = _mm_slli_si128(b, 2); // b3.r, b2.i ....
                             a_sl = _mm_slli_si128(a, 2); // a3.r, a2.i ....
@@ -239,16 +239,16 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_u_sse2(lv_16sc_t* out,
                     _in_common += 4;
                 }
 
-            for (int n_vec=0;n_vec<num_a_vectors;n_vec++)
@@ -319,7 +319,7 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_neon(lv_16sc_t* out, co
                     for (int n_vec = 0; n_vec < num_a_vectors; n_vec++)
                         {
                             a_val = vld2_s16((int16_t*)&(_in_a[n_vec][number*4])); //load (2 byte imag, 2 byte real) x 4 into 128 bits reg
-                            //__builtin_prefetch(_in_a[n_vec] + 8);
+                            //__builtin_prefetch(&_in_a[n_vec][number*4] + 8);
 
                             // multiply the real*real and imag*imag to get real result
                             // a0r*b0r|a1r*b1r|a2r*b2r|a3r*b3r
diff --git a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn.h b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn.h
index 3f4512bfd..4e76413c7 100644
--- a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn.h
+++ b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn.h
@@ -93,7 +93,7 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_generic(lv_16sc
  \param[in] num_a_vectors Number of vectors to be multiplied by the reference vector and accumulated
  \param[in] num_points The Number of complex values to be multiplied together, accumulated and stored into result
  */
-static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_a_sse3(lv_16sc_t* out, const lv_16sc_t* in_common, const lv_32fc_t phase_inc, lv_32fc_t* phase, const lv_16sc_t** in_a, int num_a_vectors, unsigned int num_points)
+static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_a_sse3(lv_16sc_t* result, const lv_16sc_t* in_common, const lv_32fc_t phase_inc, lv_32fc_t* phase, const lv_16sc_t** in_a, int num_a_vectors, unsigned int num_points)
 {
     lv_16sc_t dotProduct = lv_cmake(0,0);
@@ -101,7 +101,7 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_a_sse3(lv_16sc_
     const lv_16sc_t** _in_a = in_a;
     const lv_16sc_t* _in_common = in_common;
-    lv_16sc_t* _out = out;
+    lv_16sc_t* _out = result;
 
     __VOLK_ATTR_ALIGNED(16) lv_16sc_t dotProductVector[4];
@@ -113,7 +113,7 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_a_sse3(lv_16sc_
             realcacc = (__m128i*)calloc(num_a_vectors, sizeof(__m128i)); //calloc also sets memory to 0
             imagcacc = (__m128i*)calloc(num_a_vectors, sizeof(__m128i)); //calloc also sets memory to 0
 
-            __m128i a, b, c, c_sr, mask_imag, mask_real, real, imag, imag1,imag2, b_sl, a_sl, result;
+            __m128i a, b, c, c_sr, mask_imag, mask_real, real, imag, imag1, imag2, b_sl, a_sl, results;
 
             mask_imag = _mm_set_epi8(255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0);
             mask_real = _mm_set_epi8(0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255);
@@ -208,9 +208,9 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_a_sse3(lv_16sc_
                     realcacc[n_vec] = _mm_and_si128(realcacc[n_vec], mask_real);
                     imagcacc[n_vec] = _mm_and_si128(imagcacc[n_vec], mask_imag);
 
-                    result = _mm_or_si128(realcacc[n_vec], imagcacc[n_vec]);
+                    results = _mm_or_si128(realcacc[n_vec], imagcacc[n_vec]);
 
-                    _mm_store_si128((__m128i*)dotProductVector, result); // Store the results back into the dot product vector
+                    _mm_store_si128((__m128i*)dotProductVector, results); // Store the results back into the dot product vector
                     dotProduct = lv_cmake(0,0);
                     for (int i = 0; i < 4; ++i)
                         {
@@ -255,7 +255,7 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_a_sse3(lv_16sc_
  \param[in] num_a_vectors Number of vectors to be multiplied by the reference vector and accumulated
  \param[in] num_points The Number of complex values to be multiplied together, accumulated and stored into result
  */
-static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_u_sse3(lv_16sc_t* out, const lv_16sc_t* in_common, const lv_32fc_t phase_inc, lv_32fc_t* phase, const lv_16sc_t** in_a, int num_a_vectors, unsigned int num_points)
+static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_u_sse3(lv_16sc_t* result, const lv_16sc_t* in_common, const lv_32fc_t phase_inc, lv_32fc_t* phase, const lv_16sc_t** in_a, int num_a_vectors, unsigned int num_points)
 {
     lv_16sc_t dotProduct = lv_cmake(0,0);
@@ -263,7 +263,7 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_u_sse3(lv_16sc_
     const lv_16sc_t** _in_a = in_a;
     const lv_16sc_t* _in_common = in_common;
-    lv_16sc_t* _out = out;
+    lv_16sc_t* _out = result;
 
     __VOLK_ATTR_ALIGNED(16) lv_16sc_t dotProductVector[4];
@@ -275,7 +275,7 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_u_sse3(lv_16sc_
             realcacc = (__m128i*)calloc(num_a_vectors, sizeof(__m128i)); //calloc also sets memory to 0
             imagcacc = (__m128i*)calloc(num_a_vectors, sizeof(__m128i)); //calloc also sets memory to 0
 
-            __m128i a, b, c, c_sr, mask_imag, mask_real, real, imag, imag1, imag2, b_sl, a_sl, result;
+            __m128i a, b, c, c_sr, mask_imag, mask_real, real, imag, imag1, imag2, b_sl, a_sl, results;
 
             mask_imag = _mm_set_epi8(255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0);
             mask_real = _mm_set_epi8(0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255);
@@ -370,9 +370,9 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_u_sse3(lv_16sc_
                     realcacc[n_vec] = _mm_and_si128 (realcacc[n_vec], mask_real);
                     imagcacc[n_vec] = _mm_and_si128 (imagcacc[n_vec], mask_imag);
 
-                    result = _mm_or_si128(realcacc[n_vec], imagcacc[n_vec]);
+                    results = _mm_or_si128(realcacc[n_vec], imagcacc[n_vec]);
 
-                    _mm_storeu_si128((__m128i*)dotProductVector, result); // Store the results back into the dot product vector
+                    _mm_storeu_si128((__m128i*)dotProductVector, results); // Store the results back into the dot product vector
                     dotProduct = lv_cmake(0,0);
                     for (int i = 0; i < 4; ++i)
                         {
@@ -417,13 +417,13 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_u_sse3(lv_16sc_
  \param[in] num_a_vectors Number of vectors to be multiplied by the reference vector and accumulated
  \param[in] num_points The Number of complex values to be multiplied together, accumulated and stored into result
  */
-static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_neon(lv_16sc_t* out, const lv_16sc_t* in_common, const lv_32fc_t phase_inc, lv_32fc_t* phase, const lv_16sc_t** in_a, int num_a_vectors, unsigned int num_points)
+static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_neon(lv_16sc_t* result, const lv_16sc_t* in_common, const lv_32fc_t phase_inc, lv_32fc_t* phase, const lv_16sc_t** in_a, int num_a_vectors, unsigned int num_points)
 {
     const unsigned int neon_iters = num_points / 4;
 
     const lv_16sc_t** _in_a = in_a;
     const lv_16sc_t* _in_common = in_common;
-    lv_16sc_t* _out = out;
+    lv_16sc_t* _out = result;
 
     lv_16sc_t tmp16_, tmp;
     lv_32fc_t tmp32_;
@@ -561,13 +561,13 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_neon(lv_16sc_t*
     for (unsigned int n = neon_iters * 4; n < num_points; n++)
         {
-            tmp16_ = *_in_common++;
+            tmp16_ = in_common[n];
             tmp32_ = lv_cmake((float32_t)lv_creal(tmp16_), (float32_t)lv_cimag(tmp16_)) * (*phase);
             tmp16_ = lv_cmake((int16_t)rintf(lv_creal(tmp32_)), (int16_t)rintf(lv_cimag(tmp32_)));
             (*phase) *= phase_inc;
             for (int n_vec = 0; n_vec < num_a_vectors; n_vec++)
                 {
-                    tmp = tmp16_ * _in_a[n_vec][n];
+                    tmp = tmp16_ * in_a[n_vec][n];
                     _out[n_vec] = lv_cmake(sat_adds16i(lv_creal(_out[n_vec]), lv_creal(tmp)), sat_adds16i(lv_cimag(_out[n_vec]), lv_cimag(tmp)));
                 }
         }
diff --git a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/lib/qa_utils.cc b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/lib/qa_utils.cc
index 37cb17c7e..e849ce15c 100644
--- a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/lib/qa_utils.cc
+++ b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/lib/qa_utils.cc
@@ -76,8 +76,10 @@ void load_random_data(void *data, volk_gnsssdr_type_t type, unsigned int n)
             else ((uint32_t *)data)[i] = (uint32_t) scaled_rand;
             break;
         case 2:
-            if(type.is_signed) ((int16_t *)data)[i] = (int16_t) scaled_rand % 1;
-            else ((uint16_t *)data)[i] = (uint16_t) scaled_rand % 1;
+            // 16 bits dot product saturates very fast even with moderate length vectors
+            // we produce here only 4 bits input range
+            if(type.is_signed) ((int16_t *)data)[i] = (int16_t)((int16_t) scaled_rand % 16);
+            else ((uint16_t *)data)[i] = (uint16_t) (int16_t)((int16_t) scaled_rand % 16);
             break;
         case 1:
             if(type.is_signed) ((int8_t *)data)[i] = (int8_t) scaled_rand;
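For reference, every SSE2/SSE3/NEON variant touched above computes the same thing: num_a_vectors short dot products of a common input vector against a set of local vectors, accumulated with 16-bit saturating arithmetic (the *_rotator_* kernels additionally multiply each in_common[n] by a running phasor *phase, advanced by phase_inc per sample, before the multiply-accumulate). The sketch below is only an illustration of that contract under stated assumptions, not the library's generic kernel: cplx16 and sat_add16 are hypothetical stand-ins for lv_16sc_t and sat_adds16i, and the exact truncation/saturation of the intermediate products in the SIMD paths is simplified here.

```c
#include <stdint.h>

/* Hypothetical stand-ins for lv_16sc_t and sat_adds16i(), for illustration only. */
typedef struct { int16_t r, i; } cplx16;

static int16_t sat_add16(int32_t a, int32_t b)
{
    const int32_t s = a + b;  /* cannot overflow: both inputs are in 16-bit range */
    if (s > INT16_MAX) return INT16_MAX;
    if (s < INT16_MIN) return INT16_MIN;
    return (int16_t)s;
}

/* result[v] accumulates the dot product of in_common against in_a[v]:
 * one complex multiply per sample, truncated to 16 bits, followed by a
 * saturating accumulation (the role of _mm_adds_epi16 / sat_adds16i above). */
static void dot_prod_16ic_xn_scalar(cplx16* result, const cplx16* in_common,
                                    const cplx16** in_a, int num_a_vectors,
                                    unsigned int num_points)
{
    for (int v = 0; v < num_a_vectors; v++)
        {
            result[v].r = 0;
            result[v].i = 0;
        }
    for (unsigned int n = 0; n < num_points; n++)
        {
            for (int v = 0; v < num_a_vectors; v++)
                {
                    const cplx16 a = in_a[v][n];
                    const cplx16 b = in_common[n];
                    const int16_t re = (int16_t)(a.r * b.r - a.i * b.i);
                    const int16_t im = (int16_t)(a.r * b.i + a.i * b.r);
                    result[v].r = sat_add16(result[v].r, re);
                    result[v].i = sat_add16(result[v].i, im);
                }
        }
}
```

A caller allocates result[num_a_vectors] and passes num_a_vectors row pointers through in_a, which matches the lv_16sc_t* result / const lv_16sc_t** in_a signatures in the patch.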
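The qa_utils.cc hunk is easier to see with worst-case numbers (back-of-the-envelope, not from the patch itself). The old code reduced every 16-bit test sample with % 1, i.e. to zero, so the 16-bit kernels were effectively tested only on all-zero inputs; full-range 16-bit samples would not help either, because a single complex product can already exceed +/-2^15 and implementations that saturate at different points in the computation stop agreeing bit-for-bit as soon as the accumulators clip. Restricting samples to a 4-bit range (|x| <= 15 via % 16) keeps each real or imaginary term |a.r*b.r - a.i*b.i| <= 2*15*15 = 450, so even a pathological all-worst-case input survives roughly 32767 / 450, about 72 accumulation steps, before saturating, and zero-mean random data goes much further, which is what the added comment about "4 bits input range" is getting at.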