diff --git a/README.md b/README.md
index 1fc01b826..9a9687691 100644
--- a/README.md
+++ b/README.md
@@ -409,7 +409,7 @@ Install Armadillo and dependencies:
 $ brew tap homebrew/science
 $ brew install cmake hdf5 arpack superlu
 $ brew install armadillo
-$ brew install glog gflags
+$ brew install glog gflags gnutls
 ~~~~~~

 #### Build GNSS-SDR
diff --git a/build/.gitignore b/build/.gitignore
index 8c58952d6..86d0cb272 100644
--- a/build/.gitignore
+++ b/build/.gitignore
@@ -1,12 +1,4 @@
-*~
-.*.swp
-docs/doxygen/Doxyfile
-docs/html
-docs/latex
-docs/GNSS-SDR_manual.pdf
-src/tests/data/output.dat
-thirdparty/
-.project
-.cproject
-/install
-/.DS_Store
+# Ignore everything in this directory
+*
+# Except this file
+!.gitignore
\ No newline at end of file
diff --git a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dot_prod_16ic_xn.h b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dot_prod_16ic_xn.h
index 614453f30..6a79ffbde 100644
--- a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dot_prod_16ic_xn.h
+++ b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dot_prod_16ic_xn.h
@@ -78,7 +78,7 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_generic(lv_16sc_t* resu
 \param[in] num_a_vectors Number of vectors to be multiplied by the reference vector and accumulated
 \param[in] num_points The Number of complex values to be multiplied together, accumulated and stored into result
 */
-static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_a_sse2(lv_16sc_t* out, const lv_16sc_t* in_common, const lv_16sc_t** in_a, int num_a_vectors, unsigned int num_points)
+static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_a_sse2(lv_16sc_t* result, const lv_16sc_t* in_common, const lv_16sc_t** in_a, int num_a_vectors, unsigned int num_points)
 {
     lv_16sc_t dotProduct = lv_cmake(0,0);

@@ -86,7 +86,7 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_a_sse2(lv_16sc_t* out,
     const lv_16sc_t** _in_a = in_a;
     const lv_16sc_t* _in_common = in_common;
-    lv_16sc_t* _out = out;
+    lv_16sc_t* _out = result;

     if (sse_iters > 0)
         {
@@ -100,7 +100,7 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_a_sse2(lv_16sc_t* out,
     realcacc = (__m128i*)calloc(num_a_vectors, sizeof(__m128i)); //calloc also sets memory to 0
     imagcacc = (__m128i*)calloc(num_a_vectors, sizeof(__m128i)); //calloc also sets memory to 0

-    __m128i a,b,c, c_sr, mask_imag, mask_real, real, imag, imag1,imag2, b_sl, a_sl, result;
+    __m128i a, b, c, c_sr, mask_imag, mask_real, real, imag, imag1, imag2, b_sl, a_sl, results;

     mask_imag = _mm_set_epi8(255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0);
     mask_real = _mm_set_epi8(0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255);
@@ -116,10 +116,10 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_a_sse2(lv_16sc_t* out,
         {
             a = _mm_load_si128((__m128i*)&(_in_a[n_vec][number*4])); //load (2 byte imag, 2 byte real) x 4 into 128 bits reg
-            c = _mm_mullo_epi16 (a, b); // a3.i*b3.i, a3.r*b3.r, ....
+            c = _mm_mullo_epi16(a, b); // a3.i*b3.i, a3.r*b3.r, ....

-            c_sr = _mm_srli_si128 (c, 2); // Shift a right by imm8 bytes while shifting in zeros, and store the results in dst.
-            real = _mm_subs_epi16 (c, c_sr);
+            c_sr = _mm_srli_si128(c, 2); // Shift a right by imm8 bytes while shifting in zeros, and store the results in dst.
+            real = _mm_subs_epi16(c, c_sr);

             b_sl = _mm_slli_si128(b, 2); // b3.r, b2.i ....
             a_sl = _mm_slli_si128(a, 2); // a3.r, a2.i ....
@@ -129,23 +129,23 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_a_sse2(lv_16sc_t* out,

             imag = _mm_adds_epi16(imag1, imag2);

-            realcacc[n_vec] = _mm_adds_epi16 (realcacc[n_vec], real);
-            imagcacc[n_vec] = _mm_adds_epi16 (imagcacc[n_vec], imag);
+            realcacc[n_vec] = _mm_adds_epi16(realcacc[n_vec], real);
+            imagcacc[n_vec] = _mm_adds_epi16(imagcacc[n_vec], imag);
                 }
             _in_common += 4;
         }

-    for (int n_vec=0;n_vec<num_a_vectors;n_vec++)
[...]
     if (sse_iters > 0)
         {
@@ -203,7 +203,7 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_u_sse2(lv_16sc_t* out,
     realcacc = (__m128i*)calloc(num_a_vectors, sizeof(__m128i)); //calloc also sets memory to 0
     imagcacc = (__m128i*)calloc(num_a_vectors, sizeof(__m128i)); //calloc also sets memory to 0

-    __m128i a,b,c, c_sr, mask_imag, mask_real, real, imag, imag1,imag2, b_sl, a_sl, result;
+    __m128i a,b,c, c_sr, mask_imag, mask_real, real, imag, imag1,imag2, b_sl, a_sl, results;

     mask_imag = _mm_set_epi8(255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0);
     mask_real = _mm_set_epi8(0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255);
@@ -219,10 +219,10 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_u_sse2(lv_16sc_t* out,
         {
             a = _mm_loadu_si128((__m128i*)&(_in_a[n_vec][number*4])); //load (2 byte imag, 2 byte real) x 4 into 128 bits reg
-            c = _mm_mullo_epi16 (a, b); // a3.i*b3.i, a3.r*b3.r, ....
+            c = _mm_mullo_epi16(a, b); // a3.i*b3.i, a3.r*b3.r, ....

-            c_sr = _mm_srli_si128 (c, 2); // Shift a right by imm8 bytes while shifting in zeros, and store the results in dst.
-            real = _mm_subs_epi16 (c, c_sr);
+            c_sr = _mm_srli_si128(c, 2); // Shift a right by imm8 bytes while shifting in zeros, and store the results in dst.
+            real = _mm_subs_epi16(c, c_sr);

             b_sl = _mm_slli_si128(b, 2); // b3.r, b2.i ....
             a_sl = _mm_slli_si128(a, 2); // a3.r, a2.i ....
@@ -239,16 +239,16 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_u_sse2(lv_16sc_t* out,
             _in_common += 4;
         }

-    for (int n_vec=0;n_vec<num_a_vectors;n_vec++)
[...]
     if (neon_iters > 0)
         {
@@ -319,7 +319,7 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_neon(lv_16sc_t* out, co
     for (int n_vec = 0; n_vec < num_a_vectors; n_vec++)
         {
             a_val = vld2_s16((int16_t*)&(_in_a[n_vec][number*4])); //load (2 byte imag, 2 byte real) x 4 into 128 bits reg
-            //__builtin_prefetch(_in_a[n_vec] + 8);
+            //__builtin_prefetch(&_in_a[n_vec][number*4] + 8);

             // multiply the real*real and imag*imag to get real result
             // a0r*b0r|a1r*b1r|a2r*b2r|a3r*b3r
diff --git a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dotprodxnpuppet_16ic.h b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dotprodxnpuppet_16ic.h
index 762fd4d8b..efd75a27f 100644
--- a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dotprodxnpuppet_16ic.h
+++ b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dotprodxnpuppet_16ic.h
@@ -49,9 +49,9 @@ static inline void volk_gnsssdr_16ic_x2_dotprodxnpuppet_16ic_generic(lv_16sc_t*
     for(unsigned int n = 0; n < num_a_vectors; n++)
     {
         in_a[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
-        memcpy(in_a[n], in, sizeof(lv_16sc_t) * num_points);
+        memcpy((lv_16sc_t*)in_a[n], (lv_16sc_t*)in, sizeof(lv_16sc_t) * num_points);
     }
-    //result = (lv_16sc_t*)calloc(num_points, sizeof(lv_16sc_t));
+
     volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_generic(result, local_code, (const lv_16sc_t**) in_a, num_a_vectors, num_points);

     for(unsigned int n = 0; n < num_a_vectors; n++)
@@ -73,7 +73,7 @@ static inline void volk_gnsssdr_16ic_x2_dotprodxnpuppet_16ic_a_sse2(lv_16sc_t* r
         in_a[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
         memcpy((lv_16sc_t*)in_a[n], (lv_16sc_t*)in, sizeof(lv_16sc_t) * num_points);
     }
-    //result = (lv_16sc_t*)calloc(num_points, sizeof(lv_16sc_t));
+
     volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_a_sse2(result, local_code, (const lv_16sc_t**) in_a, num_a_vectors, num_points);

     for(unsigned int n = 0; n < num_a_vectors; n++)
@@ -94,9 +94,9 @@ static inline void volk_gnsssdr_16ic_x2_dotprodxnpuppet_16ic_u_sse2(lv_16sc_t* r
     for(unsigned int n = 0; n < num_a_vectors; n++)
     {
         in_a[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t)*num_points, volk_gnsssdr_get_alignment());
-        memcpy(in_a[n], in, sizeof(lv_16sc_t)*num_points);
+        memcpy((lv_16sc_t*)in_a[n], (lv_16sc_t*)in, sizeof(lv_16sc_t)*num_points);
     }
-    //result = (lv_16sc_t*)calloc(num_points, sizeof(lv_16sc_t));
+
     volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_u_sse2(result, local_code, (const lv_16sc_t**) in_a, num_a_vectors, num_points);

     for(unsigned int n = 0; n < num_a_vectors; n++)
@@ -117,9 +117,9 @@ static inline void volk_gnsssdr_16ic_x2_dotprodxnpuppet_16ic_neon(lv_16sc_t* res
     for(unsigned int n = 0; n < num_a_vectors; n++)
     {
         in_a[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t)*num_points, volk_gnsssdr_get_alignment());
-        memcpy(in_a[n], in, sizeof(lv_16sc_t)*num_points);
+        memcpy((lv_16sc_t*)in_a[n], (lv_16sc_t*)in, sizeof(lv_16sc_t)*num_points);
     }
-    //result = (lv_16sc_t*)calloc(num_points, sizeof(lv_16sc_t));
+
     volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_neon(result, local_code, (const lv_16sc_t**) in_a, num_a_vectors, num_points);

     for(unsigned int n = 0; n < num_a_vectors; n++)
diff --git a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn.h b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn.h
index 91e6c523c..0b234c8f3 100644
--- a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn.h
+++ b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn.h
@@ -1,11 +1,14 @@
 /*!
  * \file volk_gnsssdr_16ic_x2_dot_prod_16ic_xn.h
- * \brief Volk protokernel: multiplies N 16 bits vectors by a common vector phase rotated and accumulates the results in N 16 bits short complex outputs.
+ * \brief Volk protokernel: multiplies N 16 bits vectors by a common vector
+ * phase rotated and accumulates the results in N 16 bits short complex outputs.
  * \authors <ul>
  *          <li> [...]
  *          </ul>
  *
- * Volk protokernel that multiplies N 16 bits vectors by a common vector, which is phase-rotated by phase offset and phase increment, and accumulates the results in N 16 bits short complex outputs.
+ * Volk protokernel that multiplies N 16 bits vectors by a common vector, which is
+ * phase-rotated by phase offset and phase increment, and accumulates the results
+ * in N 16 bits short complex outputs.
  * It is optimized to perform the N tap correlation process in GNSS receivers.
  *
  * -------------------------------------------------------------------------
@@ -90,7 +93,7 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_generic(lv_16sc
 \param[in] num_a_vectors Number of vectors to be multiplied by the reference vector and accumulated
 \param[in] num_points The Number of complex values to be multiplied together, accumulated and stored into result
 */
-static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_a_sse3(lv_16sc_t* out, const lv_16sc_t* in_common, const lv_32fc_t phase_inc, lv_32fc_t* phase, const lv_16sc_t** in_a, int num_a_vectors, unsigned int num_points)
+static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_a_sse3(lv_16sc_t* result, const lv_16sc_t* in_common, const lv_32fc_t phase_inc, lv_32fc_t* phase, const lv_16sc_t** in_a, int num_a_vectors, unsigned int num_points)
 {
     lv_16sc_t dotProduct = lv_cmake(0,0);
@@ -98,7 +101,7 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_a_sse3(lv_16sc_
     const lv_16sc_t** _in_a = in_a;
     const lv_16sc_t* _in_common = in_common;
-    lv_16sc_t* _out = out;
+    lv_16sc_t* _out = result;

     __VOLK_ATTR_ALIGNED(16) lv_16sc_t dotProductVector[4];
@@ -110,7 +113,7 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_a_sse3(lv_16sc_
     realcacc = (__m128i*)calloc(num_a_vectors, sizeof(__m128i)); //calloc also sets memory to 0
     imagcacc = (__m128i*)calloc(num_a_vectors, sizeof(__m128i)); //calloc also sets memory to 0

-    __m128i a, b, c, c_sr, mask_imag, mask_real, real, imag, imag1,imag2, b_sl, a_sl, result;
+    __m128i a, b, c, c_sr, mask_imag, mask_real, real, imag, imag1, imag2, b_sl, a_sl, results;

     mask_imag = _mm_set_epi8(255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0);
     mask_real = _mm_set_epi8(0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255);
@@ -205,9 +208,9 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_a_sse3(lv_16sc_
             realcacc[n_vec] = _mm_and_si128(realcacc[n_vec], mask_real);
             imagcacc[n_vec] = _mm_and_si128(imagcacc[n_vec], mask_imag);

-            result = _mm_or_si128(realcacc[n_vec], imagcacc[n_vec]);
+            results = _mm_or_si128(realcacc[n_vec], imagcacc[n_vec]);

-            _mm_store_si128((__m128i*)dotProductVector, result); // Store the results back into the dot product vector
+            _mm_store_si128((__m128i*)dotProductVector, results); // Store the results back into the dot product vector
             dotProduct = lv_cmake(0,0);
             for (int i = 0; i < 4; ++i)
                 {
@@ -252,7 +255,7 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_a_sse3(lv_16sc_
 \param[in] num_a_vectors Number of vectors to be multiplied by the reference vector and accumulated
 \param[in] num_points The Number of complex values to be multiplied together, accumulated and stored into result
 */
-static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_u_sse3(lv_16sc_t* out, const lv_16sc_t* in_common, const lv_32fc_t phase_inc, lv_32fc_t* phase, const lv_16sc_t** in_a, int num_a_vectors, unsigned int num_points)
+static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_u_sse3(lv_16sc_t* result, const lv_16sc_t* in_common, const lv_32fc_t phase_inc, lv_32fc_t* phase, const lv_16sc_t** in_a, int num_a_vectors, unsigned int num_points)
 {
     lv_16sc_t dotProduct = lv_cmake(0,0);
@@ -260,8 +263,8 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_u_sse3(lv_16sc_
     const lv_16sc_t** _in_a = in_a;
     const lv_16sc_t* _in_common = in_common;
-    lv_16sc_t* _out = out;
+    lv_16sc_t* _out = result;

     __VOLK_ATTR_ALIGNED(16) lv_16sc_t dotProductVector[4];

     //todo dyn mem reg
@@ -272,7 +275,8 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_u_sse3(lv_16sc_
     realcacc = (__m128i*)calloc(num_a_vectors, sizeof(__m128i)); //calloc also sets memory to 0
     imagcacc = (__m128i*)calloc(num_a_vectors, sizeof(__m128i)); //calloc also sets memory to 0

-    __m128i a, b, c, c_sr, mask_imag, mask_real, real, imag, imag1, imag2, b_sl, a_sl, result;
+    __m128i a, b, c, c_sr, mask_imag, mask_real, real, imag, imag1, imag2, b_sl, a_sl, results;
+
     mask_imag = _mm_set_epi8(255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0);
     mask_real = _mm_set_epi8(0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255);
@@ -367,9 +371,9 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_u_sse3(lv_16sc_
             realcacc[n_vec] = _mm_and_si128 (realcacc[n_vec], mask_real);
             imagcacc[n_vec] = _mm_and_si128 (imagcacc[n_vec], mask_imag);

-            result = _mm_or_si128(realcacc[n_vec], imagcacc[n_vec]);
+            results = _mm_or_si128(realcacc[n_vec], imagcacc[n_vec]);

-            _mm_storeu_si128((__m128i*)dotProductVector, result); // Store the results back into the dot product vector
+            _mm_storeu_si128((__m128i*)dotProductVector, results); // Store the results back into the dot product vector
             dotProduct = lv_cmake(0,0);
             for (int i = 0; i < 4; ++i)
                 {
@@ -414,13 +418,13 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_u_sse3(lv_16sc_
 \param[in] num_a_vectors Number of vectors to be multiplied by the reference vector and accumulated
 \param[in] num_points The Number of complex values to be multiplied together, accumulated and stored into result
 */
-static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_neon(lv_16sc_t* out, const lv_16sc_t* in_common, const lv_32fc_t phase_inc, lv_32fc_t* phase, const lv_16sc_t** in_a, int num_a_vectors, unsigned int num_points)
+static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_neon(lv_16sc_t* result, const lv_16sc_t* in_common, const lv_32fc_t phase_inc, lv_32fc_t* phase, const lv_16sc_t** in_a, int num_a_vectors, unsigned int num_points)
 {
     const unsigned int neon_iters = num_points / 4;
     const lv_16sc_t** _in_a = in_a;
     const lv_16sc_t* _in_common = in_common;
-    lv_16sc_t* _out = out;
+    lv_16sc_t* _out = result;

     lv_16sc_t tmp16_, tmp;
     lv_32fc_t tmp32_;
@@ -446,19 +450,18 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_neon(lv_16sc_t*
     float32x4_t _phase_real = vld1q_f32(__phase_real);
     float32x4_t _phase_imag = vld1q_f32(__phase_imag);

-    int16x4x2_t a_val, c_val;
+    int16x4x2_t a_val, b_val, c_val;
     __VOLK_ATTR_ALIGNED(16) lv_16sc_t dotProductVector[4];
     float32x4_t half = vdupq_n_f32(0.5f);
     int16x4x2_t tmp16;
     int32x4x2_t tmp32i;
-    float32x4x2_t tmp32f, tmp_real, tmp_imag;
+
+    float32x4x2_t tmp32f, tmp32_real, tmp32_imag;
     float32x4_t sign, PlusHalf, Round;

     int16x4x2_t* accumulator;
     accumulator = (int16x4x2_t*)calloc(num_a_vectors, sizeof(int16x4x2_t));

-    int16x4x2_t tmp_real16, tmp_imag16;
-
     for(int n_vec = 0; n_vec < num_a_vectors; n_vec++)
         {
             accumulator[n_vec].val[0] = vdup_n_s16(0);
@@ -481,13 +484,14 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_neon(lv_16sc_t*
             tmp32f.val[1] = vcvtq_f32_s32(tmp32i.val[1]);

             /* complex multiplication of four complex samples (float 32 bits each component) */
-            tmp_real.val[0] = vmulq_f32(tmp32f.val[0], _phase_real);
-            tmp_real.val[1] = vmulq_f32(tmp32f.val[1], _phase_imag);
-            tmp_imag.val[0] = vmulq_f32(tmp32f.val[0], _phase_imag);
-            tmp_imag.val[1] = vmulq_f32(tmp32f.val[1], _phase_real);
-            tmp32f.val[0] = vsubq_f32(tmp_real.val[0], tmp_real.val[1]);
-            tmp32f.val[1] = vaddq_f32(tmp_imag.val[0], tmp_imag.val[1]);
+            tmp32_real.val[0] = vmulq_f32(tmp32f.val[0], _phase_real);
+            tmp32_real.val[1] = vmulq_f32(tmp32f.val[1], _phase_imag);
+            tmp32_imag.val[0] = vmulq_f32(tmp32f.val[0], _phase_imag);
+            tmp32_imag.val[1] = vmulq_f32(tmp32f.val[1], _phase_real);
+
+            tmp32f.val[0] = vsubq_f32(tmp32_real.val[0], tmp32_real.val[1]);
+            tmp32f.val[1] = vaddq_f32(tmp32_imag.val[0], tmp32_imag.val[1]);

             /* downcast results to int32 */
             /* in __aarch64__ we can do that with vcvtaq_s32_f32(ret1); vcvtaq_s32_f32(ret2); */
@@ -506,32 +510,32 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_neon(lv_16sc_t*
             tmp16.val[1] = vqmovn_s32(tmp32i.val[1]);

             /* compute next four phases */
-            tmp_real.val[0] = vmulq_f32(_phase_real, _phase4_real);
-            tmp_real.val[1] = vmulq_f32(_phase_imag, _phase4_imag);
-            tmp_imag.val[0] = vmulq_f32(_phase_real, _phase4_imag);
-            tmp_imag.val[1] = vmulq_f32(_phase_imag, _phase4_real);
+            tmp32_real.val[0] = vmulq_f32(_phase_real, _phase4_real);
+            tmp32_real.val[1] = vmulq_f32(_phase_imag, _phase4_imag);
+            tmp32_imag.val[0] = vmulq_f32(_phase_real, _phase4_imag);
+            tmp32_imag.val[1] = vmulq_f32(_phase_imag, _phase4_real);

-            _phase_real = vsubq_f32(tmp_real.val[0], tmp_real.val[1]);
-            _phase_imag = vaddq_f32(tmp_imag.val[0], tmp_imag.val[1]);
+            _phase_real = vsubq_f32(tmp32_real.val[0], tmp32_real.val[1]);
+            _phase_imag = vaddq_f32(tmp32_imag.val[0], tmp32_imag.val[1]);

             for (int n_vec = 0; n_vec < num_a_vectors; n_vec++)
                 {
                     a_val = vld2_s16((int16_t*)&(_in_a[n_vec][number*4])); //load (2 byte imag, 2 byte real) x 4 into 128 bits reg
+                    __builtin_prefetch(&_in_a[n_vec][number*4] + 8);

                     // multiply the real*real and imag*imag to get real result
                     // a0r*b0r|a1r*b1r|a2r*b2r|a3r*b3r
-                    tmp_real16.val[0] = vmul_s16(a_val.val[0], tmp16.val[0]);
+                    b_val.val[0] = vmul_s16(a_val.val[0], tmp16.val[0]);
                     // a0i*b0i|a1i*b1i|a2i*b2i|a3i*b3i
-                    tmp_real16.val[1] = vmul_s16(a_val.val[1], tmp16.val[1]);
+                    b_val.val[1] = vmul_s16(a_val.val[1], tmp16.val[1]);
+                    c_val.val[0] = vsub_s16(b_val.val[0], b_val.val[1]);

                     // Multiply cross terms to get the imaginary result
                     // a0r*b0i|a1r*b1i|a2r*b2i|a3r*b3i
-                    tmp_imag16.val[0] = vmul_s16(a_val.val[0], tmp16.val[1]);
+                    b_val.val[0] = vmul_s16(a_val.val[0], tmp16.val[1]);
                     // a0i*b0r|a1i*b1r|a2i*b2r|a3i*b3r
-                    tmp_imag16.val[1] = vmul_s16(a_val.val[1], tmp16.val[0]);
-
-                    c_val.val[0] = vsub_s16(tmp_real16.val[0], tmp_real16.val[1]);
-                    c_val.val[1] = vadd_s16(tmp_imag16.val[0], tmp_imag16.val[1]);
+                    b_val.val[1] = vmul_s16(a_val.val[1], tmp16.val[0]);
+                    c_val.val[1] = vadd_s16(b_val.val[0], b_val.val[1]);

                     accumulator[n_vec].val[0] = vadd_s16(accumulator[n_vec].val[0], c_val.val[0]);
                     accumulator[n_vec].val[1] = vadd_s16(accumulator[n_vec].val[1], c_val.val[1]);
@@ -558,13 +562,13 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_neon(lv_16sc_t*
     for (unsigned int n = neon_iters * 4; n < num_points; n++)
        {
-            tmp16_ = *_in_common++;
+            tmp16_ = in_common[n];
            tmp32_ = lv_cmake((float32_t)lv_creal(tmp16_), (float32_t)lv_cimag(tmp16_)) * (*phase);
            tmp16_ = lv_cmake((int16_t)rintf(lv_creal(tmp32_)), (int16_t)rintf(lv_cimag(tmp32_)));
            (*phase) *= phase_inc;
            for (int n_vec = 0; n_vec < num_a_vectors; n_vec++)
                {
-                    tmp = tmp16_ * _in_a[n_vec][n];
+                    tmp = tmp16_ * in_a[n_vec][n];
                    _out[n_vec] = lv_cmake(sat_adds16i(lv_creal(_out[n_vec]), lv_creal(tmp)), sat_adds16i(lv_cimag(_out[n_vec]), lv_cimag(tmp)));
                }
        }
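Note on the volk_gnsssdr_16ic_x2_dot_prod_16ic_xn kernels touched above: every variant computes, for each of the num_a_vectors outputs, the complex dot product of one in_a vector against the common vector, accumulating with 16-bit saturation. In the SSE2 paths, _mm_mullo_epi16 produces the four re*re/im*im products at once; the 2-byte _mm_srli_si128 lines each im*im product up under its re*re partner so that _mm_subs_epi16 yields the real parts, while the cross terms for the imaginary parts come from multiplying against the 2-byte left-shifted copies (b_sl, a_sl), with the mask_real/mask_imag AND plus OR merging the two accumulators at the end. A minimal scalar sketch of what these kernels compute, assuming lv_16sc_t, lv_cmake, lv_creal, lv_cimag and sat_adds16i from the volk_gnsssdr headers as used by the generic kernels (a reference model, not the SIMD implementation; header paths are assumptions):

    #include <volk_gnsssdr/volk_gnsssdr_complex.h>  /* lv_16sc_t and helpers (assumed path) */
    #include <saturation_arithmetic.h>              /* sat_adds16i (assumed path) */

    /* Scalar reference: result[v] = sum over n of in_a[v][n] * in_common[n],
       with every accumulation saturated to the int16 range. */
    static inline void dot_prod_16ic_xn_scalar_ref(lv_16sc_t* result,
            const lv_16sc_t* in_common, const lv_16sc_t** in_a,
            int num_a_vectors, unsigned int num_points)
    {
        for (int v = 0; v < num_a_vectors; v++)
            {
                result[v] = lv_cmake(0, 0);
                for (unsigned int n = 0; n < num_points; n++)
                    {
                        lv_16sc_t tmp = in_common[n] * in_a[v][n]; /* complex 16-bit product */
                        result[v] = lv_cmake(sat_adds16i(lv_creal(result[v]), lv_creal(tmp)),
                                             sat_adds16i(lv_cimag(result[v]), lv_cimag(tmp)));
                    }
            }
    }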
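The rotator variants in volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn.h fuse the NCO into the same correlation: each common-vector sample is first rotated by a running phasor that advances by phase_inc per sample (the NEON body keeps four consecutive phasors in registers and advances all four at once through the _phase4_real/_phase4_imag complex multiply shown in the hunk at 506), then rounded back to int16 and multiply-accumulated exactly as above. A scalar sketch mirroring the kernel's own tail loop, under the same header assumptions as the previous sketch:

    #include <math.h>   /* rintf */
    #include <stdint.h> /* int16_t */

    /* Scalar reference for the rotator kernels: rotate, round, correlate. */
    static inline void rotator_dot_prod_16ic_xn_scalar_ref(lv_16sc_t* result,
            const lv_16sc_t* in_common, const lv_32fc_t phase_inc, lv_32fc_t* phase,
            const lv_16sc_t** in_a, int num_a_vectors, unsigned int num_points)
    {
        for (int v = 0; v < num_a_vectors; v++)
            {
                result[v] = lv_cmake(0, 0);
            }
        for (unsigned int n = 0; n < num_points; n++)
            {
                /* rotate the common sample by the current phase, round back to int16 */
                lv_32fc_t r32 = lv_cmake((float)lv_creal(in_common[n]), (float)lv_cimag(in_common[n])) * (*phase);
                lv_16sc_t r16 = lv_cmake((int16_t)rintf(lv_creal(r32)), (int16_t)rintf(lv_cimag(r32)));
                (*phase) *= phase_inc; /* advance the NCO by one sample */
                for (int v = 0; v < num_a_vectors; v++)
                    {
                        lv_16sc_t tmp = r16 * in_a[v][n];
                        result[v] = lv_cmake(sat_adds16i(lv_creal(result[v]), lv_creal(tmp)),
                                             sat_adds16i(lv_cimag(result[v]), lv_cimag(tmp)));
                    }
            }
    }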