From d987a04d4232c4b259974685745e7d4eeec7bf14 Mon Sep 17 00:00:00 2001
From: Carles Fernandez
Date: Tue, 22 Mar 2016 18:03:34 +0100
Subject: [PATCH] adding AVX2 protokernels

---
 .../volk_gnsssdr_16ic_x2_dot_prod_16ic.h      | 152 ++++++++++++++-
 .../volk_gnsssdr_16ic_x2_dot_prod_16ic_xn.h   | 184 ++++++++++++++++++
 ...olk_gnsssdr_16ic_x2_dotprodxnpuppet_16ic.h |  48 +++++
 .../volk_gnsssdr_16ic_x2_multiply_16ic.h      | 113 ++++++++++-
 4 files changed, 494 insertions(+), 3 deletions(-)

diff --git a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dot_prod_16ic.h b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dot_prod_16ic.h
index 4ec49d645..1dc13b49a 100644
--- a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dot_prod_16ic.h
+++ b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dot_prod_16ic.h
@@ -233,6 +233,156 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_u_sse2(lv_16sc_t* out, con
 #endif /* LV_HAVE_SSE2 */
 
 
+#ifdef LV_HAVE_AVX2
+#include <immintrin.h>
+
+static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_u_avx2(lv_16sc_t* out, const lv_16sc_t* in_a, const lv_16sc_t* in_b, unsigned int num_points)
+{
+    lv_16sc_t dotProduct = lv_cmake((int16_t)0, (int16_t)0);
+
+    const unsigned int avx_iters = num_points / 8;
+
+    const lv_16sc_t* _in_a = in_a;
+    const lv_16sc_t* _in_b = in_b;
+    lv_16sc_t* _out = out;
+    unsigned int i;
+
+    if (avx_iters > 0)
+        {
+            __m256i a, b, c, c_sr, mask_imag, mask_real, real, imag, imag1, imag2, b_sl, a_sl, realcacc, imagcacc, result;
+            __VOLK_ATTR_ALIGNED(32) lv_16sc_t dotProductVector[8];
+
+            realcacc = _mm256_setzero_si256();
+            imagcacc = _mm256_setzero_si256();
+
+            mask_imag = _mm256_set_epi8(255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0);
+            mask_real = _mm256_set_epi8(0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255);
+
+            for(unsigned int number = 0; number < avx_iters; number++)
+                {
+                    a = _mm256_loadu_si256((__m256i*)_in_a);
+                    __builtin_prefetch(_in_a + 16);
+                    b = _mm256_loadu_si256((__m256i*)_in_b);
+                    __builtin_prefetch(_in_b + 16);
+                    c = _mm256_mullo_epi16(a, b);
+
+                    c_sr = _mm256_srli_si256(c, 2); // Shift a right by imm8 bytes while shifting in zeros, and store the results in dst.
+                    real = _mm256_subs_epi16(c, c_sr);
+
+                    b_sl = _mm256_slli_si256(b, 2);
+                    a_sl = _mm256_slli_si256(a, 2);
+
+                    imag1 = _mm256_mullo_epi16(a, b_sl);
+                    imag2 = _mm256_mullo_epi16(b, a_sl);
+
+                    imag = _mm256_adds_epi16(imag1, imag2); // with saturation arithmetic!
+
+                    realcacc = _mm256_adds_epi16(realcacc, real);
+                    imagcacc = _mm256_adds_epi16(imagcacc, imag);
+
+                    _in_a += 8;
+                    _in_b += 8;
+                }
+
+            realcacc = _mm256_and_si256(realcacc, mask_real);
+            imagcacc = _mm256_and_si256(imagcacc, mask_imag);
+
+            result = _mm256_or_si256(realcacc, imagcacc);
+
+            _mm256_storeu_si256((__m256i*)dotProductVector, result); // Store the results back into the dot product vector
+
+            for (i = 0; i < 8; ++i)
+                {
+                    dotProduct = lv_cmake(sat_adds16i(lv_creal(dotProduct), lv_creal(dotProductVector[i])),
+                                          sat_adds16i(lv_cimag(dotProduct), lv_cimag(dotProductVector[i])));
+                }
+        }
+
+    for (i = 0; i < (num_points % 8); ++i)
+        {
+            lv_16sc_t tmp = (*_in_a++) * (*_in_b++);
+            dotProduct = lv_cmake(sat_adds16i(lv_creal(dotProduct), lv_creal(tmp)),
+                                  sat_adds16i(lv_cimag(dotProduct), lv_cimag(tmp)));
+        }
+
+    *_out = dotProduct;
+}
+#endif /* LV_HAVE_AVX2 */
+
+
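For context (not part of the patch): every variant of this kernel computes the same saturating complex dot product. A minimal scalar model follows; cplx16 and sat_add16 are illustrative stand-ins for lv_16sc_t and the library's sat_adds16i. Lane products wrap to 16 bits as with _mm256_mullo_epi16 while the accumulation saturates, although the saturation order differs from the 8-lane SIMD reduction, so extreme inputs may clamp at different points.

    #include <stdint.h>

    typedef struct { int16_t r, i; } cplx16;       /* stand-in for lv_16sc_t */

    static int16_t sat_add16(int16_t a, int16_t b) /* role of sat_adds16i */
    {
        int32_t s = (int32_t)a + (int32_t)b;
        if (s > INT16_MAX) s = INT16_MAX;
        if (s < INT16_MIN) s = INT16_MIN;
        return (int16_t)s;
    }

    static cplx16 dot_prod_16ic_model(const cplx16* a, const cplx16* b, unsigned int n)
    {
        cplx16 acc = {0, 0};
        for (unsigned int k = 0; k < n; k++)
            {
                /* per-sample products wrap, as in _mm256_mullo_epi16 */
                int16_t re = (int16_t)(a[k].r * b[k].r - a[k].i * b[k].i);
                int16_t im = (int16_t)(a[k].r * b[k].i + a[k].i * b[k].r);
                acc.r = sat_add16(acc.r, re);      /* accumulation saturates */
                acc.i = sat_add16(acc.i, im);
            }
        return acc;
    }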
+#ifdef LV_HAVE_AVX2
+#include <immintrin.h>
+
+static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_a_avx2(lv_16sc_t* out, const lv_16sc_t* in_a, const lv_16sc_t* in_b, unsigned int num_points)
+{
+    lv_16sc_t dotProduct = lv_cmake((int16_t)0, (int16_t)0);
+
+    const unsigned int avx_iters = num_points / 8;
+
+    const lv_16sc_t* _in_a = in_a;
+    const lv_16sc_t* _in_b = in_b;
+    lv_16sc_t* _out = out;
+    unsigned int i;
+
+    if (avx_iters > 0)
+        {
+            __m256i a, b, c, c_sr, mask_imag, mask_real, real, imag, imag1, imag2, b_sl, a_sl, realcacc, imagcacc, result;
+            __VOLK_ATTR_ALIGNED(32) lv_16sc_t dotProductVector[8];
+
+            realcacc = _mm256_setzero_si256();
+            imagcacc = _mm256_setzero_si256();
+
+            mask_imag = _mm256_set_epi8(255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0);
+            mask_real = _mm256_set_epi8(0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255);
+
+            for(unsigned int number = 0; number < avx_iters; number++)
+                {
+                    a = _mm256_load_si256((__m256i*)_in_a);
+                    __builtin_prefetch(_in_a + 16);
+                    b = _mm256_load_si256((__m256i*)_in_b);
+                    __builtin_prefetch(_in_b + 16);
+                    c = _mm256_mullo_epi16(a, b);
+
+                    c_sr = _mm256_srli_si256(c, 2); // Shift a right by imm8 bytes while shifting in zeros, and store the results in dst.
+                    real = _mm256_subs_epi16(c, c_sr);
+
+                    b_sl = _mm256_slli_si256(b, 2);
+                    a_sl = _mm256_slli_si256(a, 2);
+
+                    imag1 = _mm256_mullo_epi16(a, b_sl);
+                    imag2 = _mm256_mullo_epi16(b, a_sl);
+
+                    imag = _mm256_adds_epi16(imag1, imag2); // with saturation arithmetic!
+
+                    realcacc = _mm256_adds_epi16(realcacc, real);
+                    imagcacc = _mm256_adds_epi16(imagcacc, imag);
+
+                    _in_a += 8;
+                    _in_b += 8;
+                }
+
+            realcacc = _mm256_and_si256(realcacc, mask_real);
+            imagcacc = _mm256_and_si256(imagcacc, mask_imag);
+
+            result = _mm256_or_si256(realcacc, imagcacc);
+
+            _mm256_store_si256((__m256i*)dotProductVector, result); // Store the results back into the dot product vector
+
+            for (i = 0; i < 8; ++i)
+                {
+                    dotProduct = lv_cmake(sat_adds16i(lv_creal(dotProduct), lv_creal(dotProductVector[i])),
+                                          sat_adds16i(lv_cimag(dotProduct), lv_cimag(dotProductVector[i])));
+                }
+        }
+
+    for (i = 0; i < (num_points % 8); ++i)
+        {
+            lv_16sc_t tmp = (*_in_a++) * (*_in_b++);
+            dotProduct = lv_cmake(sat_adds16i(lv_creal(dotProduct), lv_creal(tmp)),
+                                  sat_adds16i(lv_cimag(dotProduct), lv_cimag(tmp)));
+        }
+
+    *_out = dotProduct;
+}
+#endif /* LV_HAVE_AVX2 */
+
+
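The two kernels above rely on a lane trick worth spelling out: each complex sample occupies a pair of 16-bit lanes {re, im}. _mm256_srli_si256 and _mm256_slli_si256 shift each 128-bit half independently, but a 2-byte shift never has to move data across a sample pair, so both halves behave uniformly; the lanes zeroed at the half boundaries are discarded by the masks. The standalone toy below replays one lane pair (illustrative code, not VOLK API):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        int16_t a[2] = {3, 4};  /* 3 + 4i, lanes {re, im} */
        int16_t b[2] = {2, 5};  /* 2 + 5i */

        /* c = mullo(a, b): {ar*br, ai*bi} */
        int16_t c[2] = {(int16_t)(a[0] * b[0]), (int16_t)(a[1] * b[1])};
        /* c_sr = srli(c, 2): ai*bi drops into the real lane */
        int16_t c_sr[2] = {c[1], 0};
        int16_t re = (int16_t)(c[0] - c_sr[0]);                   /* ar*br - ai*bi */

        /* b_sl/a_sl = slli(.., 2): real parts move up into the imag lane */
        int16_t b_sl[2] = {0, b[0]};
        int16_t a_sl[2] = {0, a[0]};
        int16_t im = (int16_t)(a[1] * b_sl[1] + b[1] * a_sl[1]);  /* ai*br + bi*ar */

        printf("(%d%+di)\n", re, im);  /* prints (-14+23i) */
        return 0;
    }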
 
 #ifdef LV_HAVE_NEON
 #include <arm_neon.h>
@@ -385,8 +535,8 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_neon_optvma(lv_16sc_t* out
 
             // use 2 accumulators to remove inter-instruction data dependencies
             accumulator1.val[0] = vmla_s16(accumulator1.val[0], a_val.val[0], b_val.val[0]);
-            accumulator1.val[1] = vmla_s16(accumulator1.val[1], a_val.val[0], b_val.val[1]);
             accumulator2.val[0] = vmls_s16(accumulator2.val[0], a_val.val[1], b_val.val[1]);
+            accumulator1.val[1] = vmla_s16(accumulator1.val[1], a_val.val[0], b_val.val[1]);
             accumulator2.val[1] = vmla_s16(accumulator2.val[1], a_val.val[1], b_val.val[0]);
 
             a_ptr += 4;
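The NEON hunk above changes no arithmetic; it only interleaves the writes to the two accumulator register sets so that consecutive multiply-accumulates do not target the same register back to back. The same idea in plain C (an illustrative sketch, not the kernel itself):

    #include <stdint.h>

    /* Two independent chains shorten the dependency path of a reduction;
     * the NEON code alternates accumulator1/accumulator2 for the same reason. */
    static int32_t sum_dual(const int16_t* x, unsigned int n)
    {
        int32_t acc0 = 0, acc1 = 0;
        unsigned int k = 0;
        for (; k + 1 < n; k += 2)
            {
                acc0 += x[k];      /* chain 0 */
                acc1 += x[k + 1];  /* chain 1, independent of chain 0 */
            }
        if (k < n) acc0 += x[k];   /* tail */
        return acc0 + acc1;        /* combine once at the end */
    }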
diff --git a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dot_prod_16ic_xn.h b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dot_prod_16ic_xn.h
index 341b9ebcb..0480a8532 100644
--- a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dot_prod_16ic_xn.h
+++ b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dot_prod_16ic_xn.h
@@ -292,6 +292,190 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_u_sse2(lv_16sc_t* resul
 #endif /* LV_HAVE_SSE2 */
 
 
+#ifdef LV_HAVE_AVX2
+#include <immintrin.h>
+
+static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_a_avx2(lv_16sc_t* result, const lv_16sc_t* in_common, const lv_16sc_t** in_a, int num_a_vectors, unsigned int num_points)
+{
+    lv_16sc_t dotProduct = lv_cmake(0, 0);
+
+    const unsigned int avx2_iters = num_points / 8;
+
+    const lv_16sc_t** _in_a = in_a;
+    const lv_16sc_t* _in_common = in_common;
+    lv_16sc_t* _out = result;
+
+    if (avx2_iters > 0)
+        {
+            __VOLK_ATTR_ALIGNED(32) lv_16sc_t dotProductVector[8];
+
+            __m256i* realcacc = (__m256i*)volk_gnsssdr_malloc(num_a_vectors * sizeof(__m256i), volk_gnsssdr_get_alignment());
+            __m256i* imagcacc = (__m256i*)volk_gnsssdr_malloc(num_a_vectors * sizeof(__m256i), volk_gnsssdr_get_alignment());
+
+            for (int n_vec = 0; n_vec < num_a_vectors; n_vec++)
+                {
+                    realcacc[n_vec] = _mm256_setzero_si256();
+                    imagcacc[n_vec] = _mm256_setzero_si256();
+                }
+
+            __m256i a, b, c, c_sr, mask_imag, mask_real, real, imag;
+
+            mask_imag = _mm256_set_epi8(255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0);
+            mask_real = _mm256_set_epi8(0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255);
+
+            for(unsigned int number = 0; number < avx2_iters; number++)
+                {
+                    b = _mm256_load_si256((__m256i*)_in_common);
+                    __builtin_prefetch(_in_common + 16);
+                    for (int n_vec = 0; n_vec < num_a_vectors; n_vec++)
+                        {
+                            a = _mm256_load_si256((__m256i*)&(_in_a[n_vec][number * 8]));
+
+                            c = _mm256_mullo_epi16(a, b);
+
+                            c_sr = _mm256_srli_si256(c, 2); // Shift a right by imm8 bytes while shifting in zeros, and store the results in dst.
+                            real = _mm256_subs_epi16(c, c_sr);
+
+                            c_sr = _mm256_slli_si256(b, 2); // b3.r, b2.i ....
+                            c = _mm256_mullo_epi16(a, c_sr); // a3.i*b3.r, ....
+
+                            c_sr = _mm256_slli_si256(a, 2); // a3.r, a2.i ....
+                            imag = _mm256_mullo_epi16(b, c_sr); // b3.i*a3.r, ....
+
+                            imag = _mm256_adds_epi16(c, imag);
+
+                            realcacc[n_vec] = _mm256_adds_epi16(realcacc[n_vec], real);
+                            imagcacc[n_vec] = _mm256_adds_epi16(imagcacc[n_vec], imag);
+                        }
+                    _in_common += 8;
+                }
+
+            for (int n_vec = 0; n_vec < num_a_vectors; n_vec++)
+                {
+                    realcacc[n_vec] = _mm256_and_si256(realcacc[n_vec], mask_real);
+                    imagcacc[n_vec] = _mm256_and_si256(imagcacc[n_vec], mask_imag);
+
+                    a = _mm256_or_si256(realcacc[n_vec], imagcacc[n_vec]);
+
+                    _mm256_store_si256((__m256i*)dotProductVector, a); // Store the results back into the dot product vector
+                    dotProduct = lv_cmake(0, 0);
+                    for (int i = 0; i < 8; ++i)
+                        {
+                            dotProduct = lv_cmake(sat_adds16i(lv_creal(dotProduct), lv_creal(dotProductVector[i])),
+                                                  sat_adds16i(lv_cimag(dotProduct), lv_cimag(dotProductVector[i])));
+                        }
+                    _out[n_vec] = dotProduct;
+                }
+            volk_gnsssdr_free(realcacc);
+            volk_gnsssdr_free(imagcacc);
+        }
+
+    for (int n_vec = 0; n_vec < num_a_vectors; n_vec++)
+        {
+            for(unsigned int n = avx2_iters * 8; n < num_points; n++)
+                {
+                    lv_16sc_t tmp = in_common[n] * in_a[n_vec][n];
+
+                    _out[n_vec] = lv_cmake(sat_adds16i(lv_creal(_out[n_vec]), lv_creal(tmp)),
+                                           sat_adds16i(lv_cimag(_out[n_vec]), lv_cimag(tmp)));
+                }
+        }
+}
+#endif /* LV_HAVE_AVX2 */
+
+
+#ifdef LV_HAVE_AVX2
+#include <immintrin.h>
+
+static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_u_avx2(lv_16sc_t* result, const lv_16sc_t* in_common, const lv_16sc_t** in_a, int num_a_vectors, unsigned int num_points)
+{
+    lv_16sc_t dotProduct = lv_cmake(0, 0);
+
+    const unsigned int avx2_iters = num_points / 8;
+
+    const lv_16sc_t** _in_a = in_a;
+    const lv_16sc_t* _in_common = in_common;
+    lv_16sc_t* _out = result;
+
+    if (avx2_iters > 0)
+        {
+            __VOLK_ATTR_ALIGNED(32) lv_16sc_t dotProductVector[8];
+
+            __m256i* realcacc = (__m256i*)volk_gnsssdr_malloc(num_a_vectors * sizeof(__m256i), volk_gnsssdr_get_alignment());
+            __m256i* imagcacc = (__m256i*)volk_gnsssdr_malloc(num_a_vectors * sizeof(__m256i), volk_gnsssdr_get_alignment());
+
+            for (int n_vec = 0; n_vec < num_a_vectors; n_vec++)
+                {
+                    realcacc[n_vec] = _mm256_setzero_si256();
+                    imagcacc[n_vec] = _mm256_setzero_si256();
+                }
+
+            __m256i a, b, c, c_sr, mask_imag, mask_real, real, imag;
+
+            mask_imag = _mm256_set_epi8(255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0);
+            mask_real = _mm256_set_epi8(0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255);
+
+            for(unsigned int number = 0; number < avx2_iters; number++)
+                {
+                    b = _mm256_loadu_si256((__m256i*)_in_common);
+                    __builtin_prefetch(_in_common + 16);
+                    for (int n_vec = 0; n_vec < num_a_vectors; n_vec++)
+                        {
+                            a = _mm256_loadu_si256((__m256i*)&(_in_a[n_vec][number * 8]));
+
+                            c = _mm256_mullo_epi16(a, b);
+
+                            c_sr = _mm256_srli_si256(c, 2); // Shift a right by imm8 bytes while shifting in zeros, and store the results in dst.
+                            real = _mm256_subs_epi16(c, c_sr);
+
+                            c_sr = _mm256_slli_si256(b, 2); // b3.r, b2.i ....
+                            c = _mm256_mullo_epi16(a, c_sr); // a3.i*b3.r, ....
+
+                            c_sr = _mm256_slli_si256(a, 2); // a3.r, a2.i ....
+                            imag = _mm256_mullo_epi16(b, c_sr); // b3.i*a3.r, ....
+
+                            imag = _mm256_adds_epi16(c, imag);
+
+                            realcacc[n_vec] = _mm256_adds_epi16(realcacc[n_vec], real);
+                            imagcacc[n_vec] = _mm256_adds_epi16(imagcacc[n_vec], imag);
+                        }
+                    _in_common += 8;
+                }
+
+            for (int n_vec = 0; n_vec < num_a_vectors; n_vec++)
+                {
+                    realcacc[n_vec] = _mm256_and_si256(realcacc[n_vec], mask_real);
+                    imagcacc[n_vec] = _mm256_and_si256(imagcacc[n_vec], mask_imag);
+
+                    a = _mm256_or_si256(realcacc[n_vec], imagcacc[n_vec]);
+
+                    _mm256_store_si256((__m256i*)dotProductVector, a); // Store the results back into the dot product vector
+                    dotProduct = lv_cmake(0, 0);
+                    for (int i = 0; i < 8; ++i)
+                        {
+                            dotProduct = lv_cmake(sat_adds16i(lv_creal(dotProduct), lv_creal(dotProductVector[i])),
+                                                  sat_adds16i(lv_cimag(dotProduct), lv_cimag(dotProductVector[i])));
+                        }
+                    _out[n_vec] = dotProduct;
+                }
+            volk_gnsssdr_free(realcacc);
+            volk_gnsssdr_free(imagcacc);
+        }
+
+    for (int n_vec = 0; n_vec < num_a_vectors; n_vec++)
+        {
+            for(unsigned int n = avx2_iters * 8; n < num_points; n++)
+                {
+                    lv_16sc_t tmp = in_common[n] * in_a[n_vec][n];
+
+                    _out[n_vec] = lv_cmake(sat_adds16i(lv_creal(_out[n_vec]), lv_creal(tmp)),
+                                           sat_adds16i(lv_cimag(_out[n_vec]), lv_cimag(tmp)));
+                }
+        }
+}
+#endif /* LV_HAVE_AVX2 */
+
+
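The _xn kernels correlate one common input against num_a_vectors replicas at once; note how the load of the common vector b is hoisted out of the per-replica inner loop. A scalar model of the computation follows (illustrative names; the real kernels saturate via _mm256_adds_epi16 and sat_adds16i, and their tail loop accumulates into result[n_vec], which the vector pass only initializes when num_points >= 8):

    #include <stdint.h>

    typedef struct { int16_t r, i; } cplx16;  /* stand-in for lv_16sc_t */

    static void dot_prod_xn_model(cplx16* result, const cplx16* common,
                                  const cplx16** a, int num_a_vectors, unsigned int n)
    {
        for (int v = 0; v < num_a_vectors; v++)
            {
                int16_t re = 0, im = 0;
                for (unsigned int k = 0; k < n; k++)
                    {
                        /* wrapping shown for brevity; the kernels saturate */
                        re = (int16_t)(re + common[k].r * a[v][k].r - common[k].i * a[v][k].i);
                        im = (int16_t)(im + common[k].r * a[v][k].i + common[k].i * a[v][k].r);
                    }
                result[v].r = re;
                result[v].i = im;
            }
    }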
 
 #ifdef LV_HAVE_NEON
 #include <arm_neon.h>
diff --git a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dotprodxnpuppet_16ic.h b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dotprodxnpuppet_16ic.h
index 8857f0c0d..e68c5ac9a 100644
--- a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dotprodxnpuppet_16ic.h
+++ b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_dotprodxnpuppet_16ic.h
@@ -134,6 +134,54 @@ static inline void volk_gnsssdr_16ic_x2_dotprodxnpuppet_16ic_u_sse2(lv_16sc_t* r
 #endif /* LV_HAVE_SSE2 */
 
 
+#ifdef LV_HAVE_AVX2
+
+static inline void volk_gnsssdr_16ic_x2_dotprodxnpuppet_16ic_a_avx2(lv_16sc_t* result, const lv_16sc_t* local_code, const lv_16sc_t* in, unsigned int num_points)
+{
+    int num_a_vectors = 3;
+    lv_16sc_t** in_a = (lv_16sc_t**)volk_gnsssdr_malloc(sizeof(lv_16sc_t*) * num_a_vectors, volk_gnsssdr_get_alignment());
+    for(int n = 0; n < num_a_vectors; n++)
+        {
+            in_a[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
+            memcpy((lv_16sc_t*)in_a[n], (lv_16sc_t*)in, sizeof(lv_16sc_t) * num_points);
+        }
+
+    volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_a_avx2(result, local_code, (const lv_16sc_t**)in_a, num_a_vectors, num_points);
+
+    for(int n = 0; n < num_a_vectors; n++)
+        {
+            volk_gnsssdr_free(in_a[n]);
+        }
+    volk_gnsssdr_free(in_a);
+}
+
+#endif /* LV_HAVE_AVX2 */
+
+
+#ifdef LV_HAVE_AVX2
+
+static inline void volk_gnsssdr_16ic_x2_dotprodxnpuppet_16ic_u_avx2(lv_16sc_t* result, const lv_16sc_t* local_code, const lv_16sc_t* in, unsigned int num_points)
+{
+    int num_a_vectors = 3;
+    lv_16sc_t** in_a = (lv_16sc_t**)volk_gnsssdr_malloc(sizeof(lv_16sc_t*) * num_a_vectors, volk_gnsssdr_get_alignment());
+    for(int n = 0; n < num_a_vectors; n++)
+        {
+            in_a[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
+            memcpy((lv_16sc_t*)in_a[n], (lv_16sc_t*)in, sizeof(lv_16sc_t) * num_points);
+        }
+
+    volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_u_avx2(result, local_code, (const lv_16sc_t**)in_a, num_a_vectors, num_points);
+
+    for(int n = 0; n < num_a_vectors; n++)
+        {
+            volk_gnsssdr_free(in_a[n]);
+        }
+    volk_gnsssdr_free(in_a);
+}
+
+#endif /* LV_HAVE_AVX2 */
+
+
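The puppets exist because the QA machinery only drives kernels with the fixed (out, in_a, in_b, num_points) shape; a puppet fabricates the missing arguments (here, three identical replicas) and forwards to the variable-arity kernel. The pattern in miniature (standalone sketch, not VOLK code):

    #include <stdlib.h>
    #include <string.h>

    typedef short sample;

    static void kernel_xn(sample* result, const sample* common,
                          const sample** replicas, int num_replicas, unsigned int n)
    {
        for (int v = 0; v < num_replicas; v++)
            {
                long acc = 0;
                for (unsigned int k = 0; k < n; k++)
                    {
                        acc += (long)common[k] * replicas[v][k];
                    }
                result[v] = (sample)acc;
            }
    }

    /* Fixed-shape wrapper: duplicates `in` so a harness can treat
     * kernel_xn like any two-input, one-output kernel. */
    static void puppet(sample* result, const sample* local_code,
                       const sample* in, unsigned int n)
    {
        const int num_replicas = 3;
        sample** in_a = (sample**)malloc(num_replicas * sizeof(sample*));
        for (int v = 0; v < num_replicas; v++)
            {
                in_a[v] = (sample*)malloc(n * sizeof(sample));
                memcpy(in_a[v], in, n * sizeof(sample));
            }
        kernel_xn(result, local_code, (const sample**)in_a, num_replicas, n);
        for (int v = 0; v < num_replicas; v++) free(in_a[v]);
        free(in_a);
    }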
 
 #ifdef LV_HAVE_NEON
 
 static inline void volk_gnsssdr_16ic_x2_dotprodxnpuppet_16ic_neon(lv_16sc_t* result, const lv_16sc_t* local_code, const lv_16sc_t* in, unsigned int num_points)
diff --git a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_multiply_16ic.h b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_multiply_16ic.h
index 848330453..50df6d4cf 100644
--- a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_multiply_16ic.h
+++ b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_multiply_16ic.h
@@ -81,7 +81,7 @@ static inline void volk_gnsssdr_16ic_x2_multiply_16ic_generic(lv_16sc_t* result,
 static inline void volk_gnsssdr_16ic_x2_multiply_16ic_a_sse2(lv_16sc_t* out, const lv_16sc_t* in_a, const lv_16sc_t* in_b, unsigned int num_points)
 {
     const unsigned int sse_iters = num_points / 4;
-    __m128i a,b,c, c_sr, mask_imag, mask_real, real, imag, imag1,imag2, b_sl, a_sl, result;
+    __m128i a, b, c, c_sr, mask_imag, mask_real, real, imag, imag1, imag2, b_sl, a_sl, result;
 
     mask_imag = _mm_set_epi8(255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0);
     mask_real = _mm_set_epi8(0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255);
@@ -134,7 +134,7 @@ static inline void volk_gnsssdr_16ic_x2_multiply_16ic_a_sse2(lv_16sc_t* out, con
 static inline void volk_gnsssdr_16ic_x2_multiply_16ic_u_sse2(lv_16sc_t* out, const lv_16sc_t* in_a, const lv_16sc_t* in_b, unsigned int num_points)
 {
     const unsigned int sse_iters = num_points / 4;
-    __m128i a,b,c, c_sr, mask_imag, mask_real, real, imag, imag1,imag2, b_sl, a_sl, result;
+    __m128i a, b, c, c_sr, mask_imag, mask_real, real, imag, imag1, imag2, b_sl, a_sl, result;
 
     mask_imag = _mm_set_epi8(255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0);
     mask_real = _mm_set_epi8(0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255);
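The AVX2 kernels added below reuse the dot product's shift-and-mask scheme, but with no accumulators: real and imaginary lanes are masked and recombined every iteration and stored straight back out. Their scalar equivalent (cplx16 is an illustrative stand-in for lv_16sc_t; per-sample products wrap, while the SIMD path saturates the cross-term add):

    #include <stdint.h>

    typedef struct { int16_t r, i; } cplx16;

    static void multiply_16ic_model(cplx16* out, const cplx16* a,
                                    const cplx16* b, unsigned int n)
    {
        for (unsigned int k = 0; k < n; k++)
            {
                out[k].r = (int16_t)(a[k].r * b[k].r - a[k].i * b[k].i);
                out[k].i = (int16_t)(a[k].r * b[k].i + a[k].i * b[k].r);
            }
    }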
@@ -181,6 +181,115 @@ static inline void volk_gnsssdr_16ic_x2_multiply_16ic_u_sse2(lv_16sc_t* out, con
 #endif /* LV_HAVE_SSE2 */
 
 
+#ifdef LV_HAVE_AVX2
+#include <immintrin.h>
+
+static inline void volk_gnsssdr_16ic_x2_multiply_16ic_u_avx2(lv_16sc_t* out, const lv_16sc_t* in_a, const lv_16sc_t* in_b, unsigned int num_points)
+{
+    unsigned int number = 0;
+    const unsigned int avx2_points = num_points / 8;
+
+    const lv_16sc_t* _in_a = in_a;
+    const lv_16sc_t* _in_b = in_b;
+    lv_16sc_t* _out = out;
+
+    __m256i a, b, c, c_sr, real, imag, imag1, imag2, b_sl, a_sl, result;
+
+    const __m256i mask_imag = _mm256_set_epi8(255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0);
+    const __m256i mask_real = _mm256_set_epi8(0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255);
+
+    for(; number < avx2_points; number++)
+        {
+            a = _mm256_loadu_si256((__m256i*)_in_a); // Load the ar + ai, br + bi as ar,ai,br,bi
+            b = _mm256_loadu_si256((__m256i*)_in_b); // Load the cr + ci, dr + di as cr,ci,dr,di
+            c = _mm256_mullo_epi16(a, b);
+
+            c_sr = _mm256_srli_si256(c, 2); // Shift a right by imm8 bytes while shifting in zeros, and store the results in dst.
+            real = _mm256_subs_epi16(c, c_sr);
+            real = _mm256_and_si256(real, mask_real); // a3.r*b3.r - a3.i*b3.i, 0, ...
+
+            b_sl = _mm256_slli_si256(b, 2); // b3.r, b2.i ....
+            a_sl = _mm256_slli_si256(a, 2); // a3.r, a2.i ....
+
+            imag1 = _mm256_mullo_epi16(a, b_sl); // a3.i*b3.r, ....
+            imag2 = _mm256_mullo_epi16(b, a_sl); // b3.i*a3.r, ....
+
+            imag = _mm256_adds_epi16(imag1, imag2);
+            imag = _mm256_and_si256(imag, mask_imag); // a3.i*b3.r + b3.i*a3.r, 0, ...
+
+            result = _mm256_or_si256(real, imag);
+
+            _mm256_storeu_si256((__m256i*)_out, result);
+
+            _in_a += 8;
+            _in_b += 8;
+            _out += 8;
+        }
+
+    number = avx2_points * 8;
+    for(; number < num_points; number++)
+        {
+            *_out++ = (*_in_a++) * (*_in_b++);
+        }
+}
+#endif /* LV_HAVE_AVX2 */
+
+
+#ifdef LV_HAVE_AVX2
+#include <immintrin.h>
+
+static inline void volk_gnsssdr_16ic_x2_multiply_16ic_a_avx2(lv_16sc_t* out, const lv_16sc_t* in_a, const lv_16sc_t* in_b, unsigned int num_points)
+{
+    unsigned int number = 0;
+    const unsigned int avx2_points = num_points / 8;
+
+    const lv_16sc_t* _in_a = in_a;
+    const lv_16sc_t* _in_b = in_b;
+    lv_16sc_t* _out = out;
+
+    __m256i a, b, c, c_sr, real, imag, imag1, imag2, b_sl, a_sl, result;
+
+    const __m256i mask_imag = _mm256_set_epi8(255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0);
+    const __m256i mask_real = _mm256_set_epi8(0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255);
+
+    for(; number < avx2_points; number++)
+        {
+            a = _mm256_load_si256((__m256i*)_in_a); // Load the ar + ai, br + bi as ar,ai,br,bi
+            b = _mm256_load_si256((__m256i*)_in_b); // Load the cr + ci, dr + di as cr,ci,dr,di
+            c = _mm256_mullo_epi16(a, b);
+
+            c_sr = _mm256_srli_si256(c, 2); // Shift a right by imm8 bytes while shifting in zeros, and store the results in dst.
+            real = _mm256_subs_epi16(c, c_sr);
+            real = _mm256_and_si256(real, mask_real); // a3.r*b3.r - a3.i*b3.i, 0, ...
+
+            b_sl = _mm256_slli_si256(b, 2); // b3.r, b2.i ....
+            a_sl = _mm256_slli_si256(a, 2); // a3.r, a2.i ....
+
+            imag1 = _mm256_mullo_epi16(a, b_sl); // a3.i*b3.r, ....
+            imag2 = _mm256_mullo_epi16(b, a_sl); // b3.i*a3.r, ....
+
+            imag = _mm256_adds_epi16(imag1, imag2);
+            imag = _mm256_and_si256(imag, mask_imag); // a3.i*b3.r + b3.i*a3.r, 0, ...
+
+            result = _mm256_or_si256(real, imag);
+
+            _mm256_store_si256((__m256i*)_out, result);
+
+            _in_a += 8;
+            _in_b += 8;
+            _out += 8;
+        }
+
+    number = avx2_points * 8;
+    for(; number < num_points; number++)
+        {
+            *_out++ = (*_in_a++) * (*_in_b++);
+        }
+}
+#endif /* LV_HAVE_AVX2 */
+
+
 #ifdef LV_HAVE_NEON
 #include <arm_neon.h>
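Applications are not expected to call these protokernels directly; they go through the generated dispatchers, which select the best machine at runtime. A usage sketch under that assumption (the header path and dispatcher names follow the usual volk_gnsssdr conventions; treat them as assumed, not verified against this tree):

    /* Hypothetical caller sketch: the unsuffixed names are the dispatchers,
     * which pick the u_/a_avx2 protokernels when the CPU supports them. */
    #include <volk_gnsssdr/volk_gnsssdr.h>

    int main(void)
    {
        const unsigned int n = 1000;
        lv_16sc_t* a = (lv_16sc_t*)volk_gnsssdr_malloc(n * sizeof(lv_16sc_t), volk_gnsssdr_get_alignment());
        lv_16sc_t* b = (lv_16sc_t*)volk_gnsssdr_malloc(n * sizeof(lv_16sc_t), volk_gnsssdr_get_alignment());
        lv_16sc_t* prod = (lv_16sc_t*)volk_gnsssdr_malloc(n * sizeof(lv_16sc_t), volk_gnsssdr_get_alignment());
        lv_16sc_t acc;

        /* ... fill a and b with samples ... */

        volk_gnsssdr_16ic_x2_multiply_16ic(prod, a, b, n);
        volk_gnsssdr_16ic_x2_dot_prod_16ic(&acc, a, b, n);

        volk_gnsssdr_free(prod);
        volk_gnsssdr_free(b);
        volk_gnsssdr_free(a);
        return 0;
    }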