mirror of https://github.com/gnss-sdr/gnss-sdr, synced 2024-12-14 12:10:34 +00:00

commit 3cce2e8340 (parent bbd1c2fe7c)

Make prefetching more portable

See https://github.com/gnuradio/volk/issues/97 and https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html#Other-Builtins
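GCC and Clang provide __builtin_prefetch, but MSVC and other compilers do not, so calling the builtin directly breaks non-GCC builds. This commit hides it behind two macros that expand to nothing where the builtin is unavailable; since prefetching is only a hint, dropping it changes performance, never results. A minimal sketch of the pattern, assuming a bare __GNUC__ test (the real header keys off its existing compiler-detection chain, as the hunks below show):

/* Portable prefetch macros: forward to the builtin on GCC/Clang,
 * compile to nothing elsewhere. */
#if defined(__GNUC__)
#define __VOLK_PREFETCH(addr) __builtin_prefetch(addr)
#define __VOLK_PREFETCH_LOCALITY(addr, rw, locality) __builtin_prefetch(addr, rw, locality)
#else
#define __VOLK_PREFETCH(addr)
#define __VOLK_PREFETCH_LOCALITY(addr, rw, locality)
#endif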
@@ -39,6 +39,8 @@
 # define __VOLK_ATTR_EXPORT
 # define __VOLK_ATTR_IMPORT
 # endif
+# define __VOLK_PREFETCH(addr) __builtin_prefetch(addr)
+# define __VOLK_PREFETCH_LOCALITY(addr, rw, locality) __builtin_prefetch(addr, rw, locality)
 #elif _MSC_VER
 # define __VOLK_ATTR_ALIGNED(x) __declspec(align(x))
 # define __VOLK_ATTR_UNUSED
@@ -46,6 +48,8 @@
 # define __VOLK_ATTR_DEPRECATED __declspec(deprecated)
 # define __VOLK_ATTR_EXPORT __declspec(dllexport)
 # define __VOLK_ATTR_IMPORT __declspec(dllimport)
+# define __VOLK_PREFETCH(addr)
+# define __VOLK_PREFETCH_LOCALITY(addr, rw, locality)
 #else
 # define __VOLK_ATTR_ALIGNED(x)
 # define __VOLK_ATTR_UNUSED
@@ -53,6 +57,8 @@
 # define __VOLK_ATTR_DEPRECATED
 # define __VOLK_ATTR_EXPORT
 # define __VOLK_ATTR_IMPORT
+# define __VOLK_PREFETCH(addr)
+# define __VOLK_PREFETCH_LOCALITY(addr, rw, locality)
 #endif
 
 ////////////////////////////////////////////////////////////////////////
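Per the GCC documentation linked above, the builtin's optional arguments are rw (0 = prefetch for read, the default; 1 = prefetch for write) and locality (0 = no temporal locality, evict soon, up to 3 = high temporal locality, the default). A hypothetical helper showing both macro flavours in context; the function name is illustrative and the include path is an assumption, not taken from this diff:

#include <stdint.h>
#include <volk_gnsssdr/volk_gnsssdr_common.h> /* assumed location of the macros */

/* Illustrative only: stream copy with a read prefetch on the input and a
 * write prefetch (locality 0: written once, evict soon) on the output. */
static inline void copy_with_prefetch(int16_t* out, const int16_t* in, unsigned int num_points)
{
    unsigned int i;
    for (i = 0; i < num_points; i++)
        {
            __VOLK_PREFETCH(in + i + 8);                 /* read, default locality 3 */
            __VOLK_PREFETCH_LOCALITY(out + i + 8, 1, 0); /* write, no reuse expected */
            out[i] = in[i];
        }
}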
@@ -197,7 +197,7 @@ static inline void volk_gnsssdr_16ic_convert_32fc_neon(lv_32fc_t* outputVector,
 for(i = 0; i < sse_iters; i++)
 {
 a16x4 = vld1_s16((const int16_t*)_in);
-__builtin_prefetch(_in + 4);
+__VOLK_PREFETCH(_in + 4);
 a32x4 = vmovl_s16(a16x4);
 f32x4 = vcvtq_f32_s32(a32x4);
 vst1q_f32((float32_t*)_out, f32x4);
@@ -181,7 +181,7 @@ static inline void volk_gnsssdr_16ic_s32fc_x2_rotator_16ic_a_sse3(lv_16sc_t* out
 //next two samples
 _in += 2;
 a = _mm_set_ps((float)(lv_cimag(_in[1])), (float)(lv_creal(_in[1])), (float)(lv_cimag(_in[0])), (float)(lv_creal(_in[0]))); // //load (2 byte imag, 2 byte real) x 2 into 128 bits reg
-__builtin_prefetch(_in + 8);
+__VOLK_PREFETCH(_in + 8);
 //complex 32fc multiplication b=a*two_phase_acc_reg
 yl = _mm_moveldup_ps(two_phase_acc_reg); // Load yl with cr,cr,dr,dr
 yh = _mm_movehdup_ps(two_phase_acc_reg); // Load yh with ci,ci,di,di
@@ -287,7 +287,7 @@ static inline void volk_gnsssdr_16ic_s32fc_x2_rotator_16ic_a_sse3_reload(lv_16sc
 //next two samples
 _in += 2;
 a = _mm_set_ps((float)(lv_cimag(_in[1])), (float)(lv_creal(_in[1])), (float)(lv_cimag(_in[0])), (float)(lv_creal(_in[0]))); // //load (2 byte imag, 2 byte real) x 2 into 128 bits reg
-__builtin_prefetch(_in + 8);
+__VOLK_PREFETCH(_in + 8);
 //complex 32fc multiplication b=a*two_phase_acc_reg
 yl = _mm_moveldup_ps(two_phase_acc_reg); // Load yl with cr,cr,dr,dr
 yh = _mm_movehdup_ps(two_phase_acc_reg); // Load yh with ci,ci,di,di
@@ -344,7 +344,7 @@ static inline void volk_gnsssdr_16ic_s32fc_x2_rotator_16ic_a_sse3_reload(lv_16sc
 //next two samples
 _in += 2;
 a = _mm_set_ps((float)(lv_cimag(_in[1])), (float)(lv_creal(_in[1])), (float)(lv_cimag(_in[0])), (float)(lv_creal(_in[0]))); // //load (2 byte imag, 2 byte real) x 2 into 128 bits reg
-__builtin_prefetch(_in + 8);
+__VOLK_PREFETCH(_in + 8);
 //complex 32fc multiplication b=a*two_phase_acc_reg
 yl = _mm_moveldup_ps(two_phase_acc_reg); // Load yl with cr,cr,dr,dr
 yh = _mm_movehdup_ps(two_phase_acc_reg); // Load yh with ci,ci,di,di
@@ -436,7 +436,7 @@ static inline void volk_gnsssdr_16ic_s32fc_x2_rotator_16ic_u_sse3(lv_16sc_t* out
 //next two samples
 _in += 2;
 a = _mm_set_ps((float)(lv_cimag(_in[1])), (float)(lv_creal(_in[1])), (float)(lv_cimag(_in[0])), (float)(lv_creal(_in[0]))); // //load (2 byte imag, 2 byte real) x 2 into 128 bits reg
-__builtin_prefetch(_in + 8);
+__VOLK_PREFETCH(_in + 8);
 //complex 32fc multiplication b=a*two_phase_acc_reg
 yl = _mm_moveldup_ps(two_phase_acc_reg); // Load yl with cr,cr,dr,dr
 yh = _mm_movehdup_ps(two_phase_acc_reg); // Load yh with ci,ci,di,di
@@ -541,7 +541,7 @@ static inline void volk_gnsssdr_16ic_s32fc_x2_rotator_16ic_u_sse3_reload(lv_16sc
 //next two samples
 _in += 2;
 a = _mm_set_ps((float)(lv_cimag(_in[1])), (float)(lv_creal(_in[1])), (float)(lv_cimag(_in[0])), (float)(lv_creal(_in[0]))); // //load (2 byte imag, 2 byte real) x 2 into 128 bits reg
-__builtin_prefetch(_in + 8);
+__VOLK_PREFETCH(_in + 8);
 //complex 32fc multiplication b=a*two_phase_acc_reg
 yl = _mm_moveldup_ps(two_phase_acc_reg); // Load yl with cr,cr,dr,dr
 yh = _mm_movehdup_ps(two_phase_acc_reg); // Load yh with ci,ci,di,di
@@ -598,7 +598,7 @@ static inline void volk_gnsssdr_16ic_s32fc_x2_rotator_16ic_u_sse3_reload(lv_16sc
 //next two samples
 _in += 2;
 a = _mm_set_ps((float)(lv_cimag(_in[1])), (float)(lv_creal(_in[1])), (float)(lv_cimag(_in[0])), (float)(lv_creal(_in[0]))); // //load (2 byte imag, 2 byte real) x 2 into 128 bits reg
-__builtin_prefetch(_in + 8);
+__VOLK_PREFETCH(_in + 8);
 //complex 32fc multiplication b=a*two_phase_acc_reg
 yl = _mm_moveldup_ps(two_phase_acc_reg); // Load yl with cr,cr,dr,dr
 yh = _mm_movehdup_ps(two_phase_acc_reg); // Load yh with ci,ci,di,di
@@ -686,7 +686,7 @@ static inline void volk_gnsssdr_16ic_s32fc_x2_rotator_16ic_neon(lv_16sc_t* outVe
 {
 /* load 4 complex numbers (int 16 bits each component) */
 tmp16 = vld2_s16((int16_t*)_in);
-__builtin_prefetch(_in + 8);
+__VOLK_PREFETCH(_in + 8);
 _in += 4;
 
 /* promote them to int 32 bits */
@@ -822,7 +822,7 @@ static inline void volk_gnsssdr_16ic_s32fc_x2_rotator_16ic_neon_reload(lv_16sc_t
 {
 /* load 4 complex numbers (int 16 bits each component) */
 tmp16 = vld2_s16((int16_t*)_in);
-__builtin_prefetch(_in + 8);
+__VOLK_PREFETCH(_in + 8);
 _in += 4;
 
 /* promote them to int 32 bits */
@@ -891,7 +891,7 @@ static inline void volk_gnsssdr_16ic_s32fc_x2_rotator_16ic_neon_reload(lv_16sc_t
 {
 /* load 4 complex numbers (int 16 bits each component) */
 tmp16 = vld2_s16((int16_t*)_in);
-__builtin_prefetch(_in + 8);
+__VOLK_PREFETCH(_in + 8);
 _in += 4;
 
 /* promote them to int 32 bits */
@@ -108,9 +108,9 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_a_sse2(lv_16sc_t* out, con
 {
 // a[127:0]=[a3.i,a3.r,a2.i,a2.r,a1.i,a1.r,a0.i,a0.r]
 a = _mm_load_si128((__m128i*)_in_a); //load (2 byte imag, 2 byte real) x 4 into 128 bits reg
-__builtin_prefetch(_in_a + 8);
+__VOLK_PREFETCH(_in_a + 8);
 b = _mm_load_si128((__m128i*)_in_b);
-__builtin_prefetch(_in_b + 8);
+__VOLK_PREFETCH(_in_b + 8);
 c = _mm_mullo_epi16(a, b); // a3.i*b3.i, a3.r*b3.r, ....
 
 c_sr = _mm_srli_si128(c, 2); // Shift a right by imm8 bytes while shifting in zeros, and store the results in dst.
@@ -188,9 +188,9 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_u_sse2(lv_16sc_t* out, con
 //imaginery part -> reinterpret_cast<cv T*>(a)[2*i + 1]
 // a[127:0]=[a3.i,a3.r,a2.i,a2.r,a1.i,a1.r,a0.i,a0.r]
 a = _mm_loadu_si128((__m128i*)_in_a); //load (2 byte imag, 2 byte real) x 4 into 128 bits reg
-__builtin_prefetch(_in_a + 8);
+__VOLK_PREFETCH(_in_a + 8);
 b = _mm_loadu_si128((__m128i*)_in_b);
-__builtin_prefetch(_in_b + 8);
+__VOLK_PREFETCH(_in_b + 8);
 c = _mm_mullo_epi16(a, b); // a3.i*b3.i, a3.r*b3.r, ....
 
 c_sr = _mm_srli_si128(c, 2); // Shift a right by imm8 bytes while shifting in zeros, and store the results in dst.
@@ -264,9 +264,9 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_u_axv2(lv_16sc_t* out, con
 for(number = 0; number < avx_iters; number++)
 {
 a = _mm256_loadu_si256((__m256i*)_in_a);
-__builtin_prefetch(_in_a + 16);
+__VOLK_PREFETCH(_in_a + 16);
 b = _mm256_loadu_si256((__m256i*)_in_b);
-__builtin_prefetch(_in_b + 16);
+__VOLK_PREFETCH(_in_b + 16);
 c = _mm256_mullo_epi16(a, b);
 
 c_sr = _mm256_srli_si256(c, 2); // Shift a right by imm8 bytes while shifting in zeros, and store the results in dst.
@@ -341,9 +341,9 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_a_axv2(lv_16sc_t* out, con
 for(number = 0; number < avx_iters; number++)
 {
 a = _mm256_load_si256((__m256i*)_in_a);
-__builtin_prefetch(_in_a + 16);
+__VOLK_PREFETCH(_in_a + 16);
 b = _mm256_load_si256((__m256i*)_in_b);
-__builtin_prefetch(_in_b + 16);
+__VOLK_PREFETCH(_in_b + 16);
 c = _mm256_mullo_epi16(a, b);
 
 c_sr = _mm256_srli_si256(c, 2); // Shift a right by imm8 bytes while shifting in zeros, and store the results in dst.
@@ -416,8 +416,8 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_neon(lv_16sc_t* out, const
 {
 a_val = vld2_s16((int16_t*)a_ptr); // a0r|a1r|a2r|a3r || a0i|a1i|a2i|a3i
 b_val = vld2_s16((int16_t*)b_ptr); // b0r|b1r|b2r|b3r || b0i|b1i|b2i|b3i
-__builtin_prefetch(a_ptr + 8);
-__builtin_prefetch(b_ptr + 8);
+__VOLK_PREFETCH(a_ptr + 8);
+__VOLK_PREFETCH(b_ptr + 8);
 
 // multiply the real*real and imag*imag to get real result
 // a0r*b0r|a1r*b1r|a2r*b2r|a3r*b3r
@@ -482,8 +482,8 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_neon_vma(lv_16sc_t* out, c
 {
 a_val = vld2_s16((int16_t*)a_ptr); // a0r|a1r|a2r|a3r || a0i|a1i|a2i|a3i
 b_val = vld2_s16((int16_t*)b_ptr); // b0r|b1r|b2r|b3r || b0i|b1i|b2i|b3i
-__builtin_prefetch(a_ptr + 8);
-__builtin_prefetch(b_ptr + 8);
+__VOLK_PREFETCH(a_ptr + 8);
+__VOLK_PREFETCH(b_ptr + 8);
 
 tmp.val[0] = vmul_s16(a_val.val[0], b_val.val[0]);
 tmp.val[1] = vmul_s16(a_val.val[1], b_val.val[0]);
@@ -536,8 +536,8 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_neon_optvma(lv_16sc_t* out
 {
 a_val = vld2_s16((int16_t*)a_ptr); // a0r|a1r|a2r|a3r || a0i|a1i|a2i|a3i
 b_val = vld2_s16((int16_t*)b_ptr); // b0r|b1r|b2r|b3r || b0i|b1i|b2i|b3i
-__builtin_prefetch(a_ptr + 8);
-__builtin_prefetch(b_ptr + 8);
+__VOLK_PREFETCH(a_ptr + 8);
+__VOLK_PREFETCH(b_ptr + 8);
 
 // use 2 accumulators to remove inter-instruction data dependencies
 accumulator1.val[0] = vmla_s16(accumulator1.val[0], a_val.val[0], b_val.val[0]);
@@ -145,7 +145,7 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_a_sse2(lv_16sc_t* resul
 {
 // b[127:0]=[a3.i,a3.r,a2.i,a2.r,a1.i,a1.r,a0.i,a0.r]
 b = _mm_load_si128((__m128i*)_in_common); //load (2 byte imag, 2 byte real) x 4 into 128 bits reg
-__builtin_prefetch(_in_common + 8);
+__VOLK_PREFETCH(_in_common + 8);
 for (n_vec = 0; n_vec < num_a_vectors; n_vec++)
 {
 a = _mm_load_si128((__m128i*)&(_in_a[n_vec][index*4])); //load (2 byte imag, 2 byte real) x 4 into 128 bits reg
@@ -239,7 +239,7 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_u_sse2(lv_16sc_t* resul
 {
 // b[127:0]=[a3.i,a3.r,a2.i,a2.r,a1.i,a1.r,a0.i,a0.r]
 b = _mm_loadu_si128((__m128i*)_in_common); //load (2 byte imag, 2 byte real) x 4 into 128 bits reg
-__builtin_prefetch(_in_common + 8);
+__VOLK_PREFETCH(_in_common + 8);
 for (n_vec = 0; n_vec < num_a_vectors; n_vec++)
 {
 a = _mm_loadu_si128((__m128i*)&(_in_a[n_vec][index*4])); //load (2 byte imag, 2 byte real) x 4 into 128 bits reg
@@ -332,7 +332,7 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_a_avx2(lv_16sc_t* resul
 for(index = 0; index < sse_iters; index++)
 {
 b = _mm256_load_si256((__m256i*)_in_common);
-__builtin_prefetch(_in_common + 16);
+__VOLK_PREFETCH(_in_common + 16);
 for (n_vec = 0; n_vec < num_a_vectors; n_vec++)
 {
 a = _mm256_load_si256((__m256i*)&(_in_a[n_vec][index*8]));
@@ -426,7 +426,7 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_u_avx2(lv_16sc_t* resul
 for(index = 0; index < sse_iters; index++)
 {
 b = _mm256_loadu_si256((__m256i*)_in_common);
-__builtin_prefetch(_in_common + 16);
+__VOLK_PREFETCH(_in_common + 16);
 for (n_vec = 0; n_vec < num_a_vectors; n_vec++)
 {
 a = _mm256_loadu_si256((__m256i*)&(_in_a[n_vec][index*8]));
@@ -518,11 +518,11 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_neon(lv_16sc_t* result,
 for(index = 0; index < neon_iters; index++)
 {
 b_val = vld2_s16((int16_t*)_in_common); //load (2 byte imag, 2 byte real) x 4 into 128 bits reg
-__builtin_prefetch(_in_common + 8);
+__VOLK_PREFETCH(_in_common + 8);
 for (n_vec = 0; n_vec < num_a_vectors; n_vec++)
 {
 a_val = vld2_s16((int16_t*)&(_in_a[n_vec][index*4])); //load (2 byte imag, 2 byte real) x 4 into 128 bits reg
-//__builtin_prefetch(&_in_a[n_vec][index*4] + 8);
+//__VOLK_PREFETCH(&_in_a[n_vec][index*4] + 8);
 
 // multiply the real*real and imag*imag to get real result
 // a0r*b0r|a1r*b1r|a2r*b2r|a3r*b3r
@@ -604,7 +604,7 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_neon_vma(lv_16sc_t* res
 for(index = 0; index < neon_iters; index++)
 {
 b_val = vld2_s16((int16_t*)_in_common); //load (2 byte imag, 2 byte real) x 4 into 128 bits reg
-__builtin_prefetch(_in_common + 8);
+__VOLK_PREFETCH(_in_common + 8);
 for (n_vec = 0; n_vec < num_a_vectors; n_vec++)
 {
 a_val = vld2_s16((int16_t*)&(_in_a[n_vec][index*4]));
@@ -684,7 +684,7 @@ static inline void volk_gnsssdr_16ic_x2_dot_prod_16ic_xn_neon_optvma(lv_16sc_t*
 for(index = 0; index < neon_iters; index++)
 {
 b_val = vld2_s16((int16_t*)_in_common); //load (2 byte imag, 2 byte real) x 4 into 128 bits reg
-__builtin_prefetch(_in_common + 8);
+__VOLK_PREFETCH(_in_common + 8);
 for (n_vec = 0; n_vec < num_a_vectors; n_vec++)
 {
 a_val = vld2_s16((int16_t*)&(_in_a[n_vec][index*4]));
@@ -309,8 +309,8 @@ static inline void volk_gnsssdr_16ic_x2_multiply_16ic_neon(lv_16sc_t* out, const
 {
 a_val = vld2_s16((int16_t*)a_ptr); // a0r|a1r|a2r|a3r || a0i|a1i|a2i|a3i
 b_val = vld2_s16((int16_t*)b_ptr); // b0r|b1r|b2r|b3r || b0i|b1i|b2i|b3i
-__builtin_prefetch(a_ptr + 4);
-__builtin_prefetch(b_ptr + 4);
+__VOLK_PREFETCH(a_ptr + 4);
+__VOLK_PREFETCH(b_ptr + 4);
 
 // multiply the real*real and imag*imag to get real result
 // a0r*b0r|a1r*b1r|a2r*b2r|a3r*b3r
@@ -248,7 +248,7 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_a_sse3(lv_16sc_
 //next two samples
 _in_common += 2;
 pa = _mm_set_ps((float)(lv_cimag(_in_common[1])), (float)(lv_creal(_in_common[1])), (float)(lv_cimag(_in_common[0])), (float)(lv_creal(_in_common[0]))); // //load (2 byte imag, 2 byte real) x 2 into 128 bits reg
-__builtin_prefetch(_in_common + 8);
+__VOLK_PREFETCH(_in_common + 8);
 //complex 32fc multiplication b=a*two_phase_acc_reg
 yl = _mm_moveldup_ps(two_phase_acc_reg); // Load yl with cr,cr,dr,dr
 yh = _mm_movehdup_ps(two_phase_acc_reg); // Load yh with ci,ci,di,di
@@ -428,7 +428,7 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_a_sse3_reload(l
 //next two samples
 _in_common += 2;
 pa = _mm_set_ps((float)(lv_cimag(_in_common[1])), (float)(lv_creal(_in_common[1])), (float)(lv_cimag(_in_common[0])), (float)(lv_creal(_in_common[0]))); // //load (2 byte imag, 2 byte real) x 2 into 128 bits reg
-__builtin_prefetch(_in_common + 8);
+__VOLK_PREFETCH(_in_common + 8);
 //complex 32fc multiplication b=a*two_phase_acc_reg
 yl = _mm_moveldup_ps(two_phase_acc_reg); // Load yl with cr,cr,dr,dr
 yh = _mm_movehdup_ps(two_phase_acc_reg); // Load yh with ci,ci,di,di
@@ -504,7 +504,7 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_a_sse3_reload(l
 //next two samples
 _in_common += 2;
 pa = _mm_set_ps((float)(lv_cimag(_in_common[1])), (float)(lv_creal(_in_common[1])), (float)(lv_cimag(_in_common[0])), (float)(lv_creal(_in_common[0]))); // //load (2 byte imag, 2 byte real) x 2 into 128 bits reg
-__builtin_prefetch(_in_common + 8);
+__VOLK_PREFETCH(_in_common + 8);
 //complex 32fc multiplication b=a*two_phase_acc_reg
 yl = _mm_moveldup_ps(two_phase_acc_reg); // Load yl with cr,cr,dr,dr
 yh = _mm_movehdup_ps(two_phase_acc_reg); // Load yh with ci,ci,di,di
@@ -651,7 +651,7 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_u_sse3(lv_16sc_
 {
 // Phase rotation on operand in_common starts here:
 pa = _mm_set_ps((float)(lv_cimag(_in_common[1])), (float)(lv_creal(_in_common[1])), (float)(lv_cimag(_in_common[0])), (float)(lv_creal(_in_common[0]))); // //load (2 byte imag, 2 byte real) x 2 into 128 bits reg
-__builtin_prefetch(_in_common + 8);
+__VOLK_PREFETCH(_in_common + 8);
 //complex 32fc multiplication b=a*two_phase_acc_reg
 yl = _mm_moveldup_ps(two_phase_acc_reg); // Load yl with cr,cr,dr,dr
 yh = _mm_movehdup_ps(two_phase_acc_reg); // Load yh with ci,ci,di,di
@@ -672,7 +672,7 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_u_sse3(lv_16sc_
 //next two samples
 _in_common += 2;
 pa = _mm_set_ps((float)(lv_cimag(_in_common[1])), (float)(lv_creal(_in_common[1])), (float)(lv_cimag(_in_common[0])), (float)(lv_creal(_in_common[0]))); // //load (2 byte imag, 2 byte real) x 2 into 128 bits reg
-__builtin_prefetch(_in_common + 8);
+__VOLK_PREFETCH(_in_common + 8);
 //complex 32fc multiplication b=a*two_phase_acc_reg
 yl = _mm_moveldup_ps(two_phase_acc_reg); // Load yl with cr,cr,dr,dr
 yh = _mm_movehdup_ps(two_phase_acc_reg); // Load yh with ci,ci,di,di
@@ -877,7 +877,7 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_a_avx2(lv_16sc_
 //next two samples
 _in_common += 2;
 a = _mm_set_ps((float)(lv_cimag(_in_common[1])), (float)(lv_creal(_in_common[1])), (float)(lv_cimag(_in_common[0])), (float)(lv_creal(_in_common[0]))); // //load (2 byte imag, 2 byte real) x 2 into 128 bits reg
-__builtin_prefetch(_in_common + 16);
+__VOLK_PREFETCH(_in_common + 16);
 //complex 32fc multiplication b=a*two_phase_acc_reg
 yl = _mm_moveldup_ps(two_phase_acc_reg); // Load yl with cr,cr,dr,dr
 yh = _mm_movehdup_ps(two_phase_acc_reg); // Load yh with ci,ci,di,di
@@ -1087,7 +1087,7 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_a_avx2_reload(l
 //next two samples
 _in_common += 2;
 a = _mm_set_ps((float)(lv_cimag(_in_common[1])), (float)(lv_creal(_in_common[1])), (float)(lv_cimag(_in_common[0])), (float)(lv_creal(_in_common[0]))); // //load (2 byte imag, 2 byte real) x 2 into 128 bits reg
-__builtin_prefetch(_in_common + 16);
+__VOLK_PREFETCH(_in_common + 16);
 //complex 32fc multiplication b=a*two_phase_acc_reg
 yl = _mm_moveldup_ps(two_phase_acc_reg); // Load yl with cr,cr,dr,dr
 yh = _mm_movehdup_ps(two_phase_acc_reg); // Load yh with ci,ci,di,di
@@ -1203,7 +1203,7 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_a_avx2_reload(l
 //next two samples
 _in_common += 2;
 a = _mm_set_ps((float)(lv_cimag(_in_common[1])), (float)(lv_creal(_in_common[1])), (float)(lv_cimag(_in_common[0])), (float)(lv_creal(_in_common[0]))); // //load (2 byte imag, 2 byte real) x 2 into 128 bits reg
-__builtin_prefetch(_in_common + 16);
+__VOLK_PREFETCH(_in_common + 16);
 //complex 32fc multiplication b=a*two_phase_acc_reg
 yl = _mm_moveldup_ps(two_phase_acc_reg); // Load yl with cr,cr,dr,dr
 yh = _mm_movehdup_ps(two_phase_acc_reg); // Load yh with ci,ci,di,di
@@ -1350,7 +1350,7 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_neon(lv_16sc_t*
 {
 /* load 4 complex numbers (int 16 bits each component) */
 tmp16 = vld2_s16((int16_t*)_in_common);
-__builtin_prefetch(_in_common + 8);
+__VOLK_PREFETCH(_in_common + 8);
 _in_common += 4;
 
 /* promote them to int 32 bits */
@@ -1398,7 +1398,7 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_neon(lv_16sc_t*
 for (n_vec = 0; n_vec < num_a_vectors; n_vec++)
 {
 a_val = vld2_s16((int16_t*)&(_in_a[n_vec][number*4])); //load (2 byte imag, 2 byte real) x 4 into 128 bits reg
-//__builtin_prefetch(&_in_a[n_vec][number*4] + 8);
+//__VOLK_PREFETCH(&_in_a[n_vec][number*4] + 8);
 
 // multiply the real*real and imag*imag to get real result
 // a0r*b0r|a1r*b1r|a2r*b2r|a3r*b3r
@@ -1533,7 +1533,7 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_neon_vma(lv_16s
 {
 /* load 4 complex numbers (int 16 bits each component) */
 tmp16 = vld2_s16((int16_t*)_in_common);
-__builtin_prefetch(_in_common + 8);
+__VOLK_PREFETCH(_in_common + 8);
 _in_common += 4;
 
 /* promote them to int 32 bits */
@@ -1726,7 +1726,7 @@ static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_neon_optvma(lv_
 {
 /* load 4 complex numbers (int 16 bits each component) */
 b_val = vld2_s16((int16_t*)_in_common);
-__builtin_prefetch(_in_common + 8);
+__VOLK_PREFETCH(_in_common + 8);
 _in_common += 4;
 
 /* promote them to int 32 bits */
@@ -395,8 +395,8 @@ static inline void volk_gnsssdr_16ic_xn_resampler_16ic_xn_a_avx(lv_16sc_t** resu
 indexn = n0;
 for(n = 0; n < avx_iters; n++)
 {
-__builtin_prefetch(&_result[current_correlator_tap][8 * n + 7], 1, 0);
-__builtin_prefetch(&local_code_chip_index[8], 1, 3);
+__VOLK_PREFETCH_LOCALITY(&_result[current_correlator_tap][8 * n + 7], 1, 0);
+__VOLK_PREFETCH_LOCALITY(&local_code_chip_index[8], 1, 3);
 aux = _mm256_mul_ps(code_phase_step_chips_reg, indexn);
 aux = _mm256_add_ps(aux, aux2);
 // floor
@@ -472,8 +472,8 @@ static inline void volk_gnsssdr_16ic_xn_resampler_16ic_xn_u_avx(lv_16sc_t** resu
 indexn = n0;
 for(n = 0; n < avx_iters; n++)
 {
-__builtin_prefetch(&_result[current_correlator_tap][8 * n + 7], 1, 0);
-__builtin_prefetch(&local_code_chip_index[8], 1, 3);
+__VOLK_PREFETCH_LOCALITY(&_result[current_correlator_tap][8 * n + 7], 1, 0);
+__VOLK_PREFETCH_LOCALITY(&local_code_chip_index[8], 1, 3);
 aux = _mm256_mul_ps(code_phase_step_chips_reg, indexn);
 aux = _mm256_add_ps(aux, aux2);
 // floor
@@ -554,8 +554,8 @@ static inline void volk_gnsssdr_16ic_xn_resampler_16ic_xn_neon(lv_16sc_t** resul
 indexn = n0;
 for(n = 0; n < neon_iters; n++)
 {
-__builtin_prefetch(&_result[current_correlator_tap][4 * n + 3], 1, 0);
-__builtin_prefetch(&local_code_chip_index[4]);
+__VOLK_PREFETCH_LOCALITY(&_result[current_correlator_tap][4 * n + 3], 1, 0);
+__VOLK_PREFETCH(&local_code_chip_index[4]);
 aux = vmulq_f32(code_phase_step_chips_reg, indexn);
 aux = vaddq_f32(aux, aux2);
 
@@ -588,7 +588,7 @@ static inline void volk_gnsssdr_16ic_xn_resampler_16ic_xn_neon(lv_16sc_t** resul
 }
 for(n = neon_iters * 4; n < num_points; n++)
 {
-__builtin_prefetch(&_result[current_correlator_tap][n], 1, 0);
+__VOLK_PREFETCH_LOCALITY(&_result[current_correlator_tap][n], 1, 0);
 // resample code for current tap
 local_code_chip_index_ = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
 //Take into account that in multitap correlators, the shifts can be negative!
@@ -100,7 +100,7 @@ static inline void volk_gnsssdr_32f_sincos_32fc_u_sse4_1(lv_32fc_t* out, const f
 for(;number < quarterPoints; number++)
 {
 aVal = _mm_loadu_ps(aPtr);
-__builtin_prefetch(aPtr + 8);
+__VOLK_PREFETCH(aPtr + 8);
 s = _mm_sub_ps(aVal, _mm_and_ps(_mm_mul_ps(aVal, ftwos), _mm_cmplt_ps(aVal, fzeroes)));
 q = _mm_cvtps_epi32(_mm_floor_ps(_mm_mul_ps(s, m4pi)));
 r = _mm_add_epi32(q, _mm_and_si128(q, ones));
@@ -194,7 +194,7 @@ static inline void volk_gnsssdr_32f_sincos_32fc_a_sse4_1(lv_32fc_t* out, const f
 for(;number < quarterPoints; number++)
 {
 aVal = _mm_load_ps(aPtr);
-__builtin_prefetch(aPtr + 8);
+__VOLK_PREFETCH(aPtr + 8);
 s = _mm_sub_ps(aVal, _mm_and_ps(_mm_mul_ps(aVal, ftwos), _mm_cmplt_ps(aVal, fzeroes)));
 q = _mm_cvtps_epi32(_mm_floor_ps(_mm_mul_ps(s, m4pi)));
 r = _mm_add_epi32(q, _mm_and_si128(q, ones));
@@ -292,7 +292,7 @@ static inline void volk_gnsssdr_32f_sincos_32fc_a_sse2(lv_32fc_t* out, const flo
 for(;number < sse_iters; number++)
 {
 x = _mm_load_ps(aPtr);
-__builtin_prefetch(aPtr + 8);
+__VOLK_PREFETCH(aPtr + 8);
 
 sign_bit_sin = x;
 /* take the absolute value */
@@ -445,7 +445,7 @@ static inline void volk_gnsssdr_32f_sincos_32fc_u_sse2(lv_32fc_t* out, const flo
 for(;number < sse_iters; number++)
 {
 x = _mm_loadu_ps(aPtr);
-__builtin_prefetch(aPtr + 8);
+__VOLK_PREFETCH(aPtr + 8);
 
 sign_bit_sin = x;
 /* take the absolute value */
@@ -640,7 +640,7 @@ static inline void volk_gnsssdr_32f_sincos_32fc_neon(lv_32fc_t* out, const float
 for(;number < neon_iters; number++)
 {
 x = vld1q_f32(aPtr);
-__builtin_prefetch(aPtr + 8);
+__VOLK_PREFETCH(aPtr + 8);
 
 sign_mask_sin = vcltq_f32(x, vdupq_n_f32(0));
 x = vabsq_f32(x);
@@ -84,7 +84,7 @@ static inline void volk_gnsssdr_32fc_convert_16ic_u_sse2(lv_16sc_t* outputVector
 {
 inputVal1 = _mm_loadu_ps((float*)inputVectorPtr); inputVectorPtr += 4;
 inputVal2 = _mm_loadu_ps((float*)inputVectorPtr); inputVectorPtr += 4;
-__builtin_prefetch(inputVectorPtr + 8);
+__VOLK_PREFETCH(inputVectorPtr + 8);
 
 // Clip
 ret1 = _mm_max_ps(_mm_min_ps(inputVal1, vmax_val), vmin_val);
@@ -137,7 +137,7 @@ static inline void volk_gnsssdr_32fc_convert_16ic_u_sse(lv_16sc_t* outputVector,
 {
 inputVal1 = _mm_loadu_ps((float*)inputVectorPtr); inputVectorPtr += 4;
 inputVal2 = _mm_loadu_ps((float*)inputVectorPtr); inputVectorPtr += 4;
-__builtin_prefetch(inputVectorPtr + 8);
+__VOLK_PREFETCH(inputVectorPtr + 8);
 
 // Clip
 ret1 = _mm_max_ps(_mm_min_ps(inputVal1, vmax_val), vmin_val);
@@ -190,7 +190,7 @@ static inline void volk_gnsssdr_32fc_convert_16ic_a_sse2(lv_16sc_t* outputVector
 {
 inputVal1 = _mm_load_ps((float*)inputVectorPtr); inputVectorPtr += 4;
 inputVal2 = _mm_load_ps((float*)inputVectorPtr); inputVectorPtr += 4;
-__builtin_prefetch(inputVectorPtr + 8);
+__VOLK_PREFETCH(inputVectorPtr + 8);
 
 // Clip
 ret1 = _mm_max_ps(_mm_min_ps(inputVal1, vmax_val), vmin_val);
@@ -241,7 +241,7 @@ static inline void volk_gnsssdr_32fc_convert_16ic_a_sse(lv_16sc_t* outputVector,
 {
 inputVal1 = _mm_load_ps((float*)inputVectorPtr); inputVectorPtr += 4;
 inputVal2 = _mm_load_ps((float*)inputVectorPtr); inputVectorPtr += 4;
-__builtin_prefetch(inputVectorPtr + 8);
+__VOLK_PREFETCH(inputVectorPtr + 8);
 
 // Clip
 ret1 = _mm_max_ps(_mm_min_ps(inputVal1, vmax_val), vmin_val);
@@ -296,7 +296,7 @@ static inline void volk_gnsssdr_32fc_convert_16ic_neon(lv_16sc_t* outputVector,
 {
 a = vld1q_f32((const float32_t*)(inputVectorPtr)); inputVectorPtr += 4;
 b = vld1q_f32((const float32_t*)(inputVectorPtr)); inputVectorPtr += 4;
-__builtin_prefetch(inputVectorPtr + 8);
+__VOLK_PREFETCH(inputVectorPtr + 8);
 
 ret1 = vmaxq_f32(vminq_f32(a, max_val), min_val);
 ret2 = vmaxq_f32(vminq_f32(b, max_val), min_val);
@@ -207,7 +207,7 @@ static inline void volk_gnsssdr_32fc_x2_rotator_dot_prod_32fc_xn_u_sse3(lv_32fc_
 {
 // Phase rotation on operand in_common starts here:
 a = _mm_loadu_ps((float*)_in_common);
-// __builtin_prefetch(_in_common + 4);
+// __VOLK_PREFETCH(_in_common + 4);
 yl = _mm_moveldup_ps(two_phase_acc_reg); // Load yl with cr,cr,dr,dr
 yh = _mm_movehdup_ps(two_phase_acc_reg);
 tmp1 = _mm_mul_ps(a, yl);
@@ -316,7 +316,7 @@ static inline void volk_gnsssdr_32fc_x2_rotator_dot_prod_32fc_xn_a_sse3(lv_32fc_
 {
 // Phase rotation on operand in_common starts here:
 a = _mm_load_ps((float*)_in_common);
-// __builtin_prefetch(_in_common + 4);
+// __VOLK_PREFETCH(_in_common + 4);
 yl = _mm_moveldup_ps(two_phase_acc_reg); // Load yl with cr,cr,dr,dr
 yh = _mm_movehdup_ps(two_phase_acc_reg);
 tmp1 = _mm_mul_ps(a, yl);
@@ -435,7 +435,7 @@ static inline void volk_gnsssdr_32fc_x2_rotator_dot_prod_32fc_xn_u_avx(lv_32fc_t
 {
 // Phase rotation on operand in_common starts here:
 a = _mm256_loadu_ps((float*)_in_common);
-__builtin_prefetch(_in_common + 16);
+__VOLK_PREFETCH(_in_common + 16);
 yl = _mm256_moveldup_ps(four_phase_acc_reg); // Load yl with cr,cr,dr,dr
 yh = _mm256_movehdup_ps(four_phase_acc_reg);
 tmp1 = _mm256_mul_ps(a, yl);
@@ -562,7 +562,7 @@ static inline void volk_gnsssdr_32fc_x2_rotator_dot_prod_32fc_xn_a_avx(lv_32fc_t
 {
 // Phase rotation on operand in_common starts here:
 a = _mm256_load_ps((float*)_in_common);
-__builtin_prefetch(_in_common + 16);
+__VOLK_PREFETCH(_in_common + 16);
 yl = _mm256_moveldup_ps(four_phase_acc_reg); // Load yl with cr,cr,dr,dr
 yh = _mm256_movehdup_ps(four_phase_acc_reg);
 tmp1 = _mm256_mul_ps(a, yl);
@@ -697,7 +697,7 @@ static inline void volk_gnsssdr_32fc_x2_rotator_dot_prod_32fc_xn_neon(lv_32fc_t*
 {
 /* load 4 complex numbers (float 32 bits each component) */
 b_val = vld2q_f32((float32_t*)_in_common);
-__builtin_prefetch(_in_common + 8);
+__VOLK_PREFETCH(_in_common + 8);
 _in_common += 4;
 
 /* complex multiplication of four complex samples (float 32 bits each component) */
@@ -394,8 +394,8 @@ static inline void volk_gnsssdr_32fc_xn_resampler_32fc_xn_a_avx(lv_32fc_t** resu
 indexn = n0;
 for(n = 0; n < avx_iters; n++)
 {
-__builtin_prefetch(&_result[current_correlator_tap][8 * n + 7], 1, 0);
-__builtin_prefetch(&local_code_chip_index[8], 1, 3);
+__VOLK_PREFETCH_LOCALITY(&_result[current_correlator_tap][8 * n + 7], 1, 0);
+__VOLK_PREFETCH_LOCALITY(&local_code_chip_index[8], 1, 3);
 aux = _mm256_mul_ps(code_phase_step_chips_reg, indexn);
 aux = _mm256_add_ps(aux, aux2);
 // floor
@@ -471,8 +471,8 @@ static inline void volk_gnsssdr_32fc_xn_resampler_32fc_xn_u_avx(lv_32fc_t** resu
 indexn = n0;
 for(n = 0; n < avx_iters; n++)
 {
-__builtin_prefetch(&_result[current_correlator_tap][8 * n + 7], 1, 0);
-__builtin_prefetch(&local_code_chip_index[8], 1, 3);
+__VOLK_PREFETCH_LOCALITY(&_result[current_correlator_tap][8 * n + 7], 1, 0);
+__VOLK_PREFETCH_LOCALITY(&local_code_chip_index[8], 1, 3);
 aux = _mm256_mul_ps(code_phase_step_chips_reg, indexn);
 aux = _mm256_add_ps(aux, aux2);
 // floor
@@ -555,8 +555,8 @@ static inline void volk_gnsssdr_32fc_xn_resampler_32fc_xn_neon(lv_32fc_t** resul
 indexn = n0;
 for(n = 0; n < neon_iters; n++)
 {
-__builtin_prefetch(&_result[current_correlator_tap][4 * n + 3], 1, 0);
-__builtin_prefetch(&local_code_chip_index[4]);
+__VOLK_PREFETCH_LOCALITY(&_result[current_correlator_tap][4 * n + 3], 1, 0);
+__VOLK_PREFETCH(&local_code_chip_index[4]);
 aux = vmulq_f32(code_phase_step_chips_reg, indexn);
 aux = vaddq_f32(aux, aux2);
 
@@ -589,7 +589,7 @@ static inline void volk_gnsssdr_32fc_xn_resampler_32fc_xn_neon(lv_32fc_t** resul
 }
 for(n = neon_iters * 4; n < num_points; n++)
 {
-__builtin_prefetch(&_result[current_correlator_tap][n], 1, 0);
+__VOLK_PREFETCH_LOCALITY(&_result[current_correlator_tap][n], 1, 0);
 // resample code for current tap
 local_code_chip_index_ = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
 //Take into account that in multitap correlators, the shifts can be negative!
@@ -303,7 +303,7 @@ static inline void volk_gnsssdr_8ic_conjugate_8ic_neon(lv_8sc_t* cVector, const
 for (i = 0; i < sse_iters; ++i)
 {
 a_val = vld2_s8((const int8_t*)a);
-__builtin_prefetch(a + 16);
+__VOLK_PREFETCH(a + 16);
 a_val.val[1] = vneg_s8(a_val.val[1]);
 vst2_s8((int8_t*)c, a_val);
 a += 8;
@@ -457,8 +457,8 @@ static inline void volk_gnsssdr_8ic_x2_dot_prod_8ic_neon(lv_8sc_t* result, const
 {
 a_val = vld2_s8((const int8_t*)a);
 b_val = vld2_s8((const int8_t*)b);
-__builtin_prefetch(a + 16);
-__builtin_prefetch(b + 16);
+__VOLK_PREFETCH(a + 16);
+__VOLK_PREFETCH(b + 16);
 
 // multiply the real*real and imag*imag to get real result
 tmp_real.val[0] = vmul_s8(a_val.val[0], b_val.val[0]);