
Fixing resamplers

Under some circumstances (e.g. a negative Doppler shift) the resamplers could compute a negative code chip index and cause a
segmentation fault. This is now fixed in all protokernels.
Carles Fernandez 2016-04-12 01:05:47 +02:00
parent fc6fdc277c
commit 414eaddb42
2 changed files with 82 additions and 50 deletions
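The crux of the fix is visible in the generic protokernels below: in C the % operator preserves the sign of a negative dividend, so a negative chip index (which can occur with negative code phase shifts) must first be pushed into the non-negative range, otherwise local_code[] is read out of bounds. A minimal standalone sketch of that wrap, assuming an illustrative 1023-chip code length and hypothetical names (not taken from the patch):

/* Sketch of the negative-index wrap; the real kernels operate on
 * local_code_chip_index and code_length_chips. */
#include <stdio.h>
#include <stdlib.h> /* abs */

static int wrap_code_index(int index, int code_length)
{
    /* In C, -3 % 1023 == -3, so a plain modulo leaves the index negative and
       the subsequent local_code[index] read runs off the start of the buffer.
       Adding enough whole code periods makes the index non-negative first. */
    if (index < 0) index += code_length * (abs(index) / code_length + 1);
    return index % code_length;
}

int main(void)
{
    printf("%d\n", wrap_code_index(-3, 1023));    /* 1020 */
    printf("%d\n", wrap_code_index(-2047, 1023)); /* 1022 */
    printf("%d\n", wrap_code_index(5, 1023));     /* 5 */
    return 0;
}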

volk_gnsssdr_16ic_xn_resampler2_16ic_xn.h

@@ -64,6 +64,7 @@
 #define INCLUDED_volk_gnsssdr_16ic_xn_resampler2_16ic_xn_H
 #include <math.h>
+#include <stdlib.h>
 #include <volk_gnsssdr/volk_gnsssdr_common.h>
 #include <volk_gnsssdr/volk_gnsssdr_complex.h>
@@ -80,7 +81,7 @@ static inline void volk_gnsssdr_16ic_xn_resampler2_16ic_xn_generic(lv_16sc_t** r
             // resample code for current tap
             local_code_chip_index = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
             //Take into account that in multitap correlators, the shifts can be negative!
-            if (local_code_chip_index < 0) local_code_chip_index += code_length_chips;
+            if (local_code_chip_index < 0) local_code_chip_index += (int)code_length_chips * (abs(local_code_chip_index) / code_length_chips + 1);
             local_code_chip_index = local_code_chip_index % code_length_chips;
             result[current_correlator_tap][n] = local_code[local_code_chip_index];
         }
@@ -144,7 +145,7 @@ static inline void volk_gnsssdr_16ic_xn_resampler2_16ic_xn_a_sse4_1(lv_16sc_t**
             // resample code for current tap
             local_code_chip_index_ = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
             //Take into account that in multitap correlators, the shifts can be negative!
-            if (local_code_chip_index_ < 0) local_code_chip_index_ += code_length_chips;
+            if (local_code_chip_index_ < 0) local_code_chip_index_ += (int)code_length_chips * (abs(local_code_chip_index_) / code_length_chips + 1);
             local_code_chip_index_ = local_code_chip_index_ % code_length_chips;
             _result[current_correlator_tap][n] = local_code[local_code_chip_index_];
         }
@@ -208,7 +209,7 @@ static inline void volk_gnsssdr_16ic_xn_resampler2_16ic_xn_u_sse4_1(lv_16sc_t**
             // resample code for current tap
             local_code_chip_index_ = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
             //Take into account that in multitap correlators, the shifts can be negative!
-            if (local_code_chip_index_ < 0) local_code_chip_index_ += code_length_chips;
+            if (local_code_chip_index_ < 0) local_code_chip_index_ += (int)code_length_chips * (abs(local_code_chip_index_) / code_length_chips + 1);
             local_code_chip_index_ = local_code_chip_index_ % code_length_chips;
             _result[current_correlator_tap][n] = local_code[local_code_chip_index_];
         }
@@ -276,7 +277,7 @@ static inline void volk_gnsssdr_16ic_xn_resampler2_16ic_xn_a_sse3(lv_16sc_t** re
             // resample code for current tap
             local_code_chip_index_ = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
             //Take into account that in multitap correlators, the shifts can be negative!
-            if (local_code_chip_index_ < 0) local_code_chip_index_ += code_length_chips;
+            if (local_code_chip_index_ < 0) local_code_chip_index_ += (int)code_length_chips * (abs(local_code_chip_index_) / code_length_chips + 1);
             local_code_chip_index_ = local_code_chip_index_ % code_length_chips;
             _result[current_correlator_tap][n] = local_code[local_code_chip_index_];
         }
@@ -344,7 +345,7 @@ static inline void volk_gnsssdr_16ic_xn_resampler2_16ic_xn_u_sse3(lv_16sc_t** re
             // resample code for current tap
             local_code_chip_index_ = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
             //Take into account that in multitap correlators, the shifts can be negative!
-            if (local_code_chip_index_ < 0) local_code_chip_index_ += code_length_chips;
+            if (local_code_chip_index_ < 0) local_code_chip_index_ += (int)code_length_chips * (abs(local_code_chip_index_) / code_length_chips + 1);
             local_code_chip_index_ = local_code_chip_index_ % code_length_chips;
             _result[current_correlator_tap][n] = local_code[local_code_chip_index_];
         }
@@ -382,14 +383,13 @@ static inline void volk_gnsssdr_16ic_xn_resampler2_16ic_xn_a_avx(lv_16sc_t** res
         indexn = n0;
         for(unsigned int n = 0; n < avx_iters; n++)
             {
+                __builtin_prefetch(&_result[current_correlator_tap][8 * n + 7], 1, 0);
+                __builtin_prefetch(&local_code_chip_index[8], 1, 3);
                 aux = _mm256_mul_ps(code_phase_step_chips_reg, indexn);
                 aux = _mm256_add_ps(aux, aux2);
                 // floor
                 aux = _mm256_floor_ps(aux);
-                negatives = _mm256_cmp_ps(aux, zeros, 0x01);
-                aux3 = _mm256_and_ps(code_length_chips_reg_f, negatives);
-                aux = _mm256_add_ps(aux, aux3);
                 // fmod
                 c = _mm256_div_ps(aux, code_length_chips_reg_f);
                 i = _mm256_cvttps_epi32(c);
@@ -397,6 +397,13 @@ static inline void volk_gnsssdr_16ic_xn_resampler2_16ic_xn_a_avx(lv_16sc_t** res
                 base = _mm256_mul_ps(cTrunc, code_length_chips_reg_f);
                 local_code_chip_index_reg = _mm256_cvttps_epi32(_mm256_sub_ps(aux, base));
+                // no negatives
+                c = _mm256_cvtepi32_ps(local_code_chip_index_reg);
+                negatives = _mm256_cmp_ps(c, zeros, 0x01 );
+                aux3 = _mm256_and_ps(code_length_chips_reg_f, negatives);
+                aux = _mm256_add_ps(c, aux3);
+                local_code_chip_index_reg = _mm256_cvttps_epi32(aux);
                 _mm256_store_si256((__m256i*)local_code_chip_index, local_code_chip_index_reg);
                 for(unsigned int k = 0; k < 8; ++k)
                     {
@@ -413,7 +420,7 @@ static inline void volk_gnsssdr_16ic_xn_resampler2_16ic_xn_a_avx(lv_16sc_t** res
             // resample code for current tap
             local_code_chip_index_ = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
             //Take into account that in multitap correlators, the shifts can be negative!
-            if (local_code_chip_index_ < 0) local_code_chip_index_ += code_length_chips;
+            if (local_code_chip_index_ < 0) local_code_chip_index_ += (int)code_length_chips * (abs(local_code_chip_index_) / code_length_chips + 1);
             local_code_chip_index_ = local_code_chip_index_ % code_length_chips;
             _result[current_correlator_tap][n] = local_code[local_code_chip_index_];
         }
@@ -451,14 +458,13 @@ static inline void volk_gnsssdr_16ic_xn_resampler2_16ic_xn_u_avx(lv_16sc_t** res
         indexn = n0;
         for(unsigned int n = 0; n < avx_iters; n++)
             {
+                __builtin_prefetch(&_result[current_correlator_tap][8 * n + 7], 1, 0);
+                __builtin_prefetch(&local_code_chip_index[8], 1, 3);
                 aux = _mm256_mul_ps(code_phase_step_chips_reg, indexn);
                 aux = _mm256_add_ps(aux, aux2);
                 // floor
                 aux = _mm256_floor_ps(aux);
-                negatives = _mm256_cmp_ps(aux, zeros, 0x01);
-                aux3 = _mm256_and_ps(code_length_chips_reg_f, negatives);
-                aux = _mm256_add_ps(aux, aux3);
                 // fmod
                 c = _mm256_div_ps(aux, code_length_chips_reg_f);
                 i = _mm256_cvttps_epi32(c);
@@ -466,6 +472,13 @@ static inline void volk_gnsssdr_16ic_xn_resampler2_16ic_xn_u_avx(lv_16sc_t** res
                 base = _mm256_mul_ps(cTrunc, code_length_chips_reg_f);
                 local_code_chip_index_reg = _mm256_cvttps_epi32(_mm256_sub_ps(aux, base));
+                // no negatives
+                c = _mm256_cvtepi32_ps(local_code_chip_index_reg);
+                negatives = _mm256_cmp_ps(c, zeros, 0x01 );
+                aux3 = _mm256_and_ps(code_length_chips_reg_f, negatives);
+                aux = _mm256_add_ps(c, aux3);
+                local_code_chip_index_reg = _mm256_cvttps_epi32(aux);
                 _mm256_store_si256((__m256i*)local_code_chip_index, local_code_chip_index_reg);
                 for(unsigned int k = 0; k < 8; ++k)
                     {
@@ -482,7 +495,7 @@ static inline void volk_gnsssdr_16ic_xn_resampler2_16ic_xn_u_avx(lv_16sc_t** res
             // resample code for current tap
             local_code_chip_index_ = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
             //Take into account that in multitap correlators, the shifts can be negative!
-            if (local_code_chip_index_ < 0) local_code_chip_index_ += code_length_chips;
+            if (local_code_chip_index_ < 0) local_code_chip_index_ += (int)code_length_chips * (abs(local_code_chip_index_) / code_length_chips + 1);
             local_code_chip_index_ = local_code_chip_index_ % code_length_chips;
             _result[current_correlator_tap][n] = local_code[local_code_chip_index_];
         }
@@ -505,37 +518,39 @@ static inline void volk_gnsssdr_16ic_xn_resampler2_16ic_xn_neon(lv_16sc_t** resu
     __VOLK_ATTR_ALIGNED(16) int32_t local_code_chip_index[4];
     int32_t local_code_chip_index_;
     const int32x4_t zeros = vdupq_n_s32(0);
     const float32x4_t code_length_chips_reg_f = vdupq_n_f32((float)code_length_chips);
     const int32x4_t code_length_chips_reg_i = vdupq_n_s32((int32_t)code_length_chips);
     int32x4_t local_code_chip_index_reg, aux_i, negatives, i;
     float32x4_t aux, aux2, shifts_chips_reg, fi, c, j, cTrunc, base, indexn, reciprocal;
     __VOLK_ATTR_ALIGNED(16) const float vec[4] = { 0.0f, 1.0f, 2.0f, 3.0f };
     uint32x4_t igx;
+    reciprocal = vrecpeq_f32(code_length_chips_reg_f);
+    reciprocal = vmulq_f32(vrecpsq_f32(code_length_chips_reg_f, reciprocal), reciprocal);
+    reciprocal = vmulq_f32(vrecpsq_f32(code_length_chips_reg_f, reciprocal), reciprocal); // this refinement is required!
+    float32x4_t n0 = vld1q_f32((float*)vec);
     for (int current_correlator_tap = 0; current_correlator_tap < num_out_vectors; current_correlator_tap++)
         {
             shifts_chips_reg = vdupq_n_f32((float)shifts_chips[current_correlator_tap]);
             aux2 = vsubq_f32(shifts_chips_reg, rem_code_phase_chips_reg);
-            indexn = vld1q_f32((float*)vec);
+            indexn = n0;
             for(unsigned int n = 0; n < neon_iters; n++)
                 {
                     __builtin_prefetch(&_result[current_correlator_tap][4 * n + 3], 1, 0);
                     __builtin_prefetch(&local_code_chip_index[4]);
                     aux = vmulq_f32(code_phase_step_chips_reg, indexn);
                     aux = vaddq_f32(aux, aux2);
                     //floor
                     i = vcvtq_s32_f32(aux);
                     fi = vcvtq_f32_s32(i);
                     igx = vcgtq_f32(fi, aux);
-                    j = vreinterpretq_f32_s32(vandq_s32(vreinterpretq_s32_u32(igx), ones));
+                    j = vcvtq_f32_s32(vandq_s32(vreinterpretq_s32_u32(igx), ones));
                     aux = vsubq_f32(fi, j);
                     // fmod
-                    reciprocal = vrecpeq_f32(code_length_chips_reg_f);
-                    reciprocal = vmulq_f32(vrecpsq_f32(code_length_chips_reg_f, reciprocal), reciprocal);
-                    reciprocal = vmulq_f32(vrecpsq_f32(code_length_chips_reg_f, reciprocal), reciprocal); // this refinement is required!
                     c = vmulq_f32(aux, reciprocal);
                     i = vcvtq_s32_f32(c);
                     cTrunc = vcvtq_f32_s32(i);
@@ -547,7 +562,8 @@ static inline void volk_gnsssdr_16ic_xn_resampler2_16ic_xn_neon(lv_16sc_t** resu
                     aux_i = vandq_s32(code_length_chips_reg_i, negatives);
                     local_code_chip_index_reg = vaddq_s32(local_code_chip_index_reg, aux_i);
-                    vst1q_s32((int*)local_code_chip_index, local_code_chip_index_reg);
+                    vst1q_s32((int32_t*)local_code_chip_index, local_code_chip_index_reg);
                     for(unsigned int k = 0; k < 4; ++k)
                         {
                             _result[current_correlator_tap][n * 4 + k] = local_code[local_code_chip_index[k]];
@@ -558,10 +574,10 @@ static inline void volk_gnsssdr_16ic_xn_resampler2_16ic_xn_neon(lv_16sc_t** resu
         {
            __builtin_prefetch(&_result[current_correlator_tap][n], 1, 0);
            // resample code for current tap
-           local_code_chip_index_ = (int32_t)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
-           local_code_chip_index_ = local_code_chip_index_ % code_length_chips;
+           local_code_chip_index_ = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
            //Take into account that in multitap correlators, the shifts can be negative!
-           if (local_code_chip_index_ < 0) local_code_chip_index_ += code_length_chips;
+           if (local_code_chip_index_ < 0) local_code_chip_index_ += (int)code_length_chips * (abs(local_code_chip_index_) / code_length_chips + 1);
+           local_code_chip_index_ = local_code_chip_index_ % code_length_chips;
            _result[current_correlator_tap][n] = local_code[local_code_chip_index_];
         }
     }
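The AVX hunks above move the negative-index correction to after the fmod, where it belongs: the remainder of the truncated division can itself come out negative, and only then should one code length be added. A standalone sketch of that "no negatives" step, assuming a 1023-chip code and illustrative input values (compile with -mavx); this is not the kernel itself:

/* Sketch of the vector-side wrap; _CMP_LT_OS is the named form of the 0x01
 * predicate used in the kernels. */
#include <immintrin.h>
#include <stdio.h>

int main(void)
{
    const float code_length = 1023.0f;
    const __m256 code_length_reg = _mm256_set1_ps(code_length);
    const __m256 zeros = _mm256_setzero_ps();

    /* Example code-phase values, some negative (as with a negative Doppler). */
    __m256 aux = _mm256_setr_ps(-3.7f, -1.2f, 0.4f, 5.9f, 1022.1f, -2047.5f, 10.0f, 2046.2f);
    aux = _mm256_floor_ps(aux);

    /* fmod(aux, code_length) via truncated division. */
    __m256 c = _mm256_div_ps(aux, code_length_reg);
    __m256i i = _mm256_cvttps_epi32(c);
    __m256 cTrunc = _mm256_cvtepi32_ps(i);
    __m256 base = _mm256_mul_ps(cTrunc, code_length_reg);
    __m256i index_reg = _mm256_cvttps_epi32(_mm256_sub_ps(aux, base));

    /* "no negatives": where the remainder is < 0, add one code length. */
    c = _mm256_cvtepi32_ps(index_reg);
    __m256 negatives = _mm256_cmp_ps(c, zeros, _CMP_LT_OS);
    __m256 aux3 = _mm256_and_ps(code_length_reg, negatives);
    index_reg = _mm256_cvttps_epi32(_mm256_add_ps(c, aux3));

    int idx[8];
    _mm256_storeu_si256((__m256i*)idx, index_reg);
    for (int k = 0; k < 8; k++) printf("%d ", idx[k]);
    printf("\n");  /* 1019 1021 0 5 1022 1021 10 0 -- all inside [0, 1022] */
    return 0;
}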

volk_gnsssdr_32fc_xn_resampler_32fc_xn.h

@@ -64,6 +64,7 @@
 #define INCLUDED_volk_gnsssdr_32fc_xn_resampler_32fc_xn_H
 #include <math.h>
+#include <stdlib.h> /* abs */
 #include <volk_gnsssdr/volk_gnsssdr_common.h>
 #include <volk_gnsssdr/volk_gnsssdr_complex.h>
@@ -80,7 +81,7 @@ static inline void volk_gnsssdr_32fc_xn_resampler_32fc_xn_generic(lv_32fc_t** re
             // resample code for current tap
             local_code_chip_index = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
             //Take into account that in multitap correlators, the shifts can be negative!
-            if (local_code_chip_index < 0) local_code_chip_index += code_length_chips;
+            if (local_code_chip_index < 0) local_code_chip_index += (int)code_length_chips * (abs(local_code_chip_index) / code_length_chips + 1);
             local_code_chip_index = local_code_chip_index % code_length_chips;
             result[current_correlator_tap][n] = local_code[local_code_chip_index];
         }
@@ -97,8 +98,8 @@ static inline void volk_gnsssdr_32fc_xn_resampler_32fc_xn_a_sse3(lv_32fc_t** res
     lv_32fc_t** _result = result;
     const unsigned int quarterPoints = num_points / 4;
-    const __m128 ones = _mm_set1_ps(1.);
-    const __m128 fours = _mm_set1_ps(4.);
+    const __m128 ones = _mm_set1_ps(1.0f);
+    const __m128 fours = _mm_set1_ps(4.0f);
     const __m128 rem_code_phase_chips_reg = _mm_set_ps1(rem_code_phase_chips);
     const __m128 code_phase_step_chips_reg = _mm_set_ps1(code_phase_step_chips);
@@ -115,7 +116,7 @@ static inline void volk_gnsssdr_32fc_xn_resampler_32fc_xn_a_sse3(lv_32fc_t** res
         {
             shifts_chips_reg = _mm_set_ps1((float)shifts_chips[current_correlator_tap]);
             aux2 = _mm_sub_ps(shifts_chips_reg, rem_code_phase_chips_reg);
-            __m128 indexn = _mm_set_ps(3., 2., 1., 0.);
+            __m128 indexn = _mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f);
             for(unsigned int n = 0; n < quarterPoints; n++)
                 {
                     aux = _mm_mul_ps(code_phase_step_chips_reg, indexn);
@@ -126,10 +127,9 @@ static inline void volk_gnsssdr_32fc_xn_resampler_32fc_xn_a_sse3(lv_32fc_t** res
                     igx = _mm_cmpgt_ps(fi, aux);
                     j = _mm_and_ps(igx, ones);
                     aux = _mm_sub_ps(fi, j);
                     // fmod
                     c = _mm_div_ps(aux, code_length_chips_reg_f);
-                    i = _mm_cvtps_epi32(c);
+                    i = _mm_cvttps_epi32(c);
                     cTrunc = _mm_cvtepi32_ps(i);
                     base = _mm_mul_ps(cTrunc, code_length_chips_reg_f);
                     local_code_chip_index_reg = _mm_cvtps_epi32(_mm_sub_ps(aux, base));
@@ -149,7 +149,7 @@ static inline void volk_gnsssdr_32fc_xn_resampler_32fc_xn_a_sse3(lv_32fc_t** res
             // resample code for current tap
             local_code_chip_index_ = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
             //Take into account that in multitap correlators, the shifts can be negative!
-            if (local_code_chip_index_ < 0) local_code_chip_index_ += code_length_chips;
+            if (local_code_chip_index_ < 0) local_code_chip_index_ += (int)code_length_chips * (abs(local_code_chip_index_) / code_length_chips + 1) ;
             local_code_chip_index_ = local_code_chip_index_ % code_length_chips;
             _result[current_correlator_tap][n] = local_code[local_code_chip_index_];
         }
@@ -217,7 +217,7 @@ static inline void volk_gnsssdr_32fc_xn_resampler_32fc_xn_u_sse3(lv_32fc_t** res
             // resample code for current tap
             local_code_chip_index_ = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
             //Take into account that in multitap correlators, the shifts can be negative!
-            if (local_code_chip_index_ < 0) local_code_chip_index_ += code_length_chips;
+            if (local_code_chip_index_ < 0) local_code_chip_index_ += (int)code_length_chips * (abs(local_code_chip_index_) / code_length_chips + 1) ;
             local_code_chip_index_ = local_code_chip_index_ % code_length_chips;
             _result[current_correlator_tap][n] = local_code[local_code_chip_index_];
         }
@@ -280,7 +280,7 @@ static inline void volk_gnsssdr_32fc_xn_resampler_32fc_xn_a_sse4_1(lv_32fc_t** r
             // resample code for current tap
             local_code_chip_index_ = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
             //Take into account that in multitap correlators, the shifts can be negative!
-            if (local_code_chip_index_ < 0) local_code_chip_index_ += code_length_chips;
+            if (local_code_chip_index_ < 0) local_code_chip_index_ += (int)code_length_chips * (abs(local_code_chip_index_) / code_length_chips + 1) ;
             local_code_chip_index_ = local_code_chip_index_ % code_length_chips;
             _result[current_correlator_tap][n] = local_code[local_code_chip_index_];
         }
@@ -344,7 +344,7 @@ static inline void volk_gnsssdr_32fc_xn_resampler_32fc_xn_u_sse4_1(lv_32fc_t** r
             // resample code for current tap
             local_code_chip_index_ = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
             //Take into account that in multitap correlators, the shifts can be negative!
-            if (local_code_chip_index_ < 0) local_code_chip_index_ += code_length_chips;
+            if (local_code_chip_index_ < 0) local_code_chip_index_ += (int)code_length_chips * (abs(local_code_chip_index_) / code_length_chips + 1) ;
             local_code_chip_index_ = local_code_chip_index_ % code_length_chips;
             _result[current_correlator_tap][n] = local_code[local_code_chip_index_];
         }
@@ -382,14 +382,13 @@ static inline void volk_gnsssdr_32fc_xn_resampler_32fc_xn_a_avx(lv_32fc_t** resu
         indexn = n0;
         for(unsigned int n = 0; n < avx_iters; n++)
             {
+                __builtin_prefetch(&_result[current_correlator_tap][8 * n + 7], 1, 0);
+                __builtin_prefetch(&local_code_chip_index[8], 1, 3);
                 aux = _mm256_mul_ps(code_phase_step_chips_reg, indexn);
                 aux = _mm256_add_ps(aux, aux2);
                 // floor
                 aux = _mm256_floor_ps(aux);
-                negatives = _mm256_cmp_ps(aux, zeros, 0x01);
-                aux3 = _mm256_and_ps(code_length_chips_reg_f, negatives);
-                aux = _mm256_add_ps(aux, aux3);
                 // fmod
                 c = _mm256_div_ps(aux, code_length_chips_reg_f);
                 i = _mm256_cvttps_epi32(c);
@@ -397,6 +396,13 @@ static inline void volk_gnsssdr_32fc_xn_resampler_32fc_xn_a_avx(lv_32fc_t** resu
                 base = _mm256_mul_ps(cTrunc, code_length_chips_reg_f);
                 local_code_chip_index_reg = _mm256_cvttps_epi32(_mm256_sub_ps(aux, base));
+                // no negatives
+                c = _mm256_cvtepi32_ps(local_code_chip_index_reg);
+                negatives = _mm256_cmp_ps(c, zeros, 0x01 );
+                aux3 = _mm256_and_ps(code_length_chips_reg_f, negatives);
+                aux = _mm256_add_ps(c, aux3);
+                local_code_chip_index_reg = _mm256_cvttps_epi32(aux);
                 _mm256_store_si256((__m256i*)local_code_chip_index, local_code_chip_index_reg);
                 for(unsigned int k = 0; k < 8; ++k)
                     {
@@ -413,7 +419,7 @@ static inline void volk_gnsssdr_32fc_xn_resampler_32fc_xn_a_avx(lv_32fc_t** resu
             // resample code for current tap
             local_code_chip_index_ = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
             //Take into account that in multitap correlators, the shifts can be negative!
-            if (local_code_chip_index_ < 0) local_code_chip_index_ += code_length_chips;
+            if (local_code_chip_index_ < 0) local_code_chip_index_ += (int)code_length_chips * (abs(local_code_chip_index_) / code_length_chips + 1) ;
             local_code_chip_index_ = local_code_chip_index_ % code_length_chips;
             _result[current_correlator_tap][n] = local_code[local_code_chip_index_];
         }
@@ -451,14 +457,13 @@ static inline void volk_gnsssdr_32fc_xn_resampler_32fc_xn_u_avx(lv_32fc_t** resu
         indexn = n0;
         for(unsigned int n = 0; n < avx_iters; n++)
             {
+                __builtin_prefetch(&_result[current_correlator_tap][8 * n + 7], 1, 0);
+                __builtin_prefetch(&local_code_chip_index[8], 1, 3);
                 aux = _mm256_mul_ps(code_phase_step_chips_reg, indexn);
                 aux = _mm256_add_ps(aux, aux2);
                 // floor
                 aux = _mm256_floor_ps(aux);
-                negatives = _mm256_cmp_ps(aux, zeros, 0x01);
-                aux3 = _mm256_and_ps(code_length_chips_reg_f, negatives);
-                aux = _mm256_add_ps(aux, aux3);
                 // fmod
                 c = _mm256_div_ps(aux, code_length_chips_reg_f);
                 i = _mm256_cvttps_epi32(c);
@@ -466,6 +471,13 @@ static inline void volk_gnsssdr_32fc_xn_resampler_32fc_xn_u_avx(lv_32fc_t** resu
                 base = _mm256_mul_ps(cTrunc, code_length_chips_reg_f);
                 local_code_chip_index_reg = _mm256_cvttps_epi32(_mm256_sub_ps(aux, base));
+                // no negatives
+                c = _mm256_cvtepi32_ps(local_code_chip_index_reg);
+                negatives = _mm256_cmp_ps(c, zeros, 0x01 );
+                aux3 = _mm256_and_ps(code_length_chips_reg_f, negatives);
+                aux = _mm256_add_ps(c, aux3);
+                local_code_chip_index_reg = _mm256_cvttps_epi32(aux);
                 _mm256_store_si256((__m256i*)local_code_chip_index, local_code_chip_index_reg);
                 for(unsigned int k = 0; k < 8; ++k)
                     {
@@ -482,7 +494,7 @@ static inline void volk_gnsssdr_32fc_xn_resampler_32fc_xn_u_avx(lv_32fc_t** resu
             // resample code for current tap
             local_code_chip_index_ = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
             //Take into account that in multitap correlators, the shifts can be negative!
-            if (local_code_chip_index_ < 0) local_code_chip_index_ += code_length_chips;
+            if (local_code_chip_index_ < 0) local_code_chip_index_ += (int)code_length_chips * (abs(local_code_chip_index_) / code_length_chips + 1) ;
             local_code_chip_index_ = local_code_chip_index_ % code_length_chips;
             _result[current_correlator_tap][n] = local_code[local_code_chip_index_];
         }
@@ -514,27 +526,31 @@ static inline void volk_gnsssdr_32fc_xn_resampler_32fc_xn_neon(lv_32fc_t** resul
     float32x4_t aux, aux2, shifts_chips_reg, fi, c, j, cTrunc, base, indexn, reciprocal;
     __VOLK_ATTR_ALIGNED(16) const float vec[4] = { 0.0f, 1.0f, 2.0f, 3.0f };
     uint32x4_t igx;
+    reciprocal = vrecpeq_f32(code_length_chips_reg_f);
+    reciprocal = vmulq_f32(vrecpsq_f32(code_length_chips_reg_f, reciprocal), reciprocal);
+    reciprocal = vmulq_f32(vrecpsq_f32(code_length_chips_reg_f, reciprocal), reciprocal); // this refinement is required!
+    float32x4_t n0 = vld1q_f32((float*)vec);
     for (int current_correlator_tap = 0; current_correlator_tap < num_out_vectors; current_correlator_tap++)
         {
             shifts_chips_reg = vdupq_n_f32((float)shifts_chips[current_correlator_tap]);
             aux2 = vsubq_f32(shifts_chips_reg, rem_code_phase_chips_reg);
-            indexn = vld1q_f32((float*)vec);
+            indexn = n0;
             for(unsigned int n = 0; n < neon_iters; n++)
                 {
                     __builtin_prefetch(&_result[current_correlator_tap][4 * n + 3], 1, 0);
                     __builtin_prefetch(&local_code_chip_index[4]);
                     aux = vmulq_f32(code_phase_step_chips_reg, indexn);
                     aux = vaddq_f32(aux, aux2);
                     //floor
                     i = vcvtq_s32_f32(aux);
                     fi = vcvtq_f32_s32(i);
                     igx = vcgtq_f32(fi, aux);
                     j = vcvtq_f32_s32(vandq_s32(vreinterpretq_s32_u32(igx), ones));
                     aux = vsubq_f32(fi, j);
                     // fmod
-                    reciprocal = vrecpeq_f32(code_length_chips_reg_f);
-                    reciprocal = vmulq_f32(vrecpsq_f32(code_length_chips_reg_f, reciprocal), reciprocal);
-                    reciprocal = vmulq_f32(vrecpsq_f32(code_length_chips_reg_f, reciprocal), reciprocal); // this refinement is required!
                     c = vmulq_f32(aux, reciprocal);
                     i = vcvtq_s32_f32(c);
                     cTrunc = vcvtq_f32_s32(i);
@@ -560,7 +576,7 @@ static inline void volk_gnsssdr_32fc_xn_resampler_32fc_xn_neon(lv_32fc_t** resul
             // resample code for current tap
             local_code_chip_index_ = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
             //Take into account that in multitap correlators, the shifts can be negative!
-            if (local_code_chip_index_ < 0) local_code_chip_index_ += code_length_chips;
+            if (local_code_chip_index_ < 0) local_code_chip_index_ += (int)code_length_chips * (abs(local_code_chip_index_) / code_length_chips + 1);
             local_code_chip_index_ = local_code_chip_index_ % code_length_chips;
             _result[current_correlator_tap][n] = local_code[local_code_chip_index_];
         }
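In the NEON kernels the reciprocal of the code length is now computed once before the correlator-tap loop instead of once per iteration; the fmod is then a multiply by that reciprocal, a truncation, and a subtraction. A sketch of that step under the same illustrative 1023-chip assumption (values are made up; requires an ARM toolchain with NEON):

/* Sketch of the NEON fmod-by-reciprocal used above. vrecpeq_f32 alone is only
 * a rough estimate, hence the two vrecpsq_f32 Newton-Raphson refinements
 * ("this refinement is required!" in the kernel). */
#include <arm_neon.h>
#include <stdio.h>

int main(void)
{
    const float code_length = 1023.0f;
    const float32x4_t code_length_reg = vdupq_n_f32(code_length);

    /* 1 / code_length, refined twice; loop-invariant, so hoisted out. */
    float32x4_t reciprocal = vrecpeq_f32(code_length_reg);
    reciprocal = vmulq_f32(vrecpsq_f32(code_length_reg, reciprocal), reciprocal);
    reciprocal = vmulq_f32(vrecpsq_f32(code_length_reg, reciprocal), reciprocal);

    /* fmod(aux, code_length) = aux - trunc(aux * (1/code_length)) * code_length */
    const float vals[4] = { 5.0f, 1500.0f, 2500.0f, 4000.0f };
    float32x4_t aux = vld1q_f32(vals);
    float32x4_t c = vmulq_f32(aux, reciprocal);
    int32x4_t i = vcvtq_s32_f32(c);        /* truncate toward zero */
    float32x4_t cTrunc = vcvtq_f32_s32(i);
    float32x4_t base = vmulq_f32(cTrunc, code_length_reg);
    float32x4_t rem = vsubq_f32(aux, base);

    float out[4];
    vst1q_f32(out, rem);
    for (int k = 0; k < 4; k++) printf("%f ", out[k]);
    printf("\n");  /* approximately 5, 477, 454, 931 */
    return 0;
}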