Mirror of https://github.com/gnss-sdr/gnss-sdr, synced 2024-12-15 04:30:33 +00:00

Add aligned kernel for AVX

This commit is contained in:
parent 80c79125b4
commit 27eece55da
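The new _a_avx kernel added below mirrors the existing unaligned _u_avx kernel; the functional difference is that the common input is fetched with the aligned _mm256_load_ps instead of _mm256_loadu_ps, so in_common must start on a 32-byte boundary (for example a buffer obtained from volk_gnsssdr_malloc with volk_gnsssdr_get_alignment, which is the pattern the puppet wrapper at the end of this diff follows). A minimal sketch of that distinction, assuming an AVX-capable build; the helper names load_common_unaligned and load_common_aligned are illustrative only and are not part of the commit:

/* Sketch only: the aligned vs. unaligned load that distinguishes the
 * _a_avx kernel from the _u_avx kernel. Helper names are hypothetical. */
#include <immintrin.h>

static inline __m256 load_common_unaligned(const float* aPtr)
{
    return _mm256_loadu_ps(aPtr); /* _u_avx path: any address is accepted */
}

static inline __m256 load_common_aligned(const float* aPtr)
{
    return _mm256_load_ps(aPtr); /* _a_avx path: aPtr must be 32-byte aligned */
}

Callers typically guarantee that alignment by allocating in_common with volk_gnsssdr_malloc(size, volk_gnsssdr_get_alignment()).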
@@ -176,9 +176,10 @@ static inline void volk_gnsssdr_32fc_32f_rotator_dot_prod_32fc_xn_u_avx(lv_32fc_
    const float* aPtr = (float*)in_common;
    const float* bPtr[ num_a_vectors];
    for( vec_ind = 0; vec_ind < num_a_vectors; ++vec_ind ){
        bPtr[vec_ind] = in_a[vec_ind];
    }
    for( vec_ind = 0; vec_ind < num_a_vectors; ++vec_ind )
    {
        bPtr[vec_ind] = in_a[vec_ind];
    }

    lv_32fc_t _phase = (*phase);
    lv_32fc_t wo;
@@ -193,20 +194,22 @@ static inline void volk_gnsssdr_32fc_32f_rotator_dot_prod_32fc_xn_u_avx(lv_32fc_
    __m256 dotProdVal2[num_a_vectors];
    __m256 dotProdVal3[num_a_vectors];

    for( vec_ind = 0; vec_ind < num_a_vectors; vec_ind++ ){
        dotProdVal0[vec_ind] = _mm256_setzero_ps();
        dotProdVal1[vec_ind] = _mm256_setzero_ps();
        dotProdVal2[vec_ind] = _mm256_setzero_ps();
        dotProdVal3[vec_ind] = _mm256_setzero_ps();
    }
    for( vec_ind = 0; vec_ind < num_a_vectors; vec_ind++ )
    {
        dotProdVal0[vec_ind] = _mm256_setzero_ps();
        dotProdVal1[vec_ind] = _mm256_setzero_ps();
        dotProdVal2[vec_ind] = _mm256_setzero_ps();
        dotProdVal3[vec_ind] = _mm256_setzero_ps();
    }

    // Set up the complex rotator
    __m256 z0, z1, z2, z3;
    __attribute__((aligned(32))) lv_32fc_t phase_vec[16];
    for( vec_ind = 0; vec_ind < 16; ++vec_ind ){
        phase_vec[vec_ind] = _phase;
        _phase *= phase_inc;
    }
    __VOLK_ATTR_ALIGNED(32) lv_32fc_t phase_vec[16];
    for( vec_ind = 0; vec_ind < 16; ++vec_ind )
    {
        phase_vec[vec_ind] = _phase;
        _phase *= phase_inc;
    }

    z0 = _mm256_load_ps( (float *)phase_vec );
    z1 = _mm256_load_ps( (float *)(phase_vec + 4) );
@@ -215,104 +218,267 @@ static inline void volk_gnsssdr_32fc_32f_rotator_dot_prod_32fc_xn_u_avx(lv_32fc_

    lv_32fc_t dz = phase_inc; dz *= dz; dz *= dz; dz *= dz; dz *= dz; // dz = phase_inc^16;

    for( vec_ind = 0; vec_ind < 4; ++vec_ind ){
        phase_vec[vec_ind] = dz;
    }
    for( vec_ind = 0; vec_ind < 4; ++vec_ind )
    {
        phase_vec[vec_ind] = dz;
    }

    __m256 dz_reg = _mm256_load_ps( (float *)phase_vec );
    dz_reg = _mm256_complexnormalise_ps( dz_reg );

    for(;number < sixteenthPoints; number++){

        a0Val = _mm256_loadu_ps(aPtr);
        a1Val = _mm256_loadu_ps(aPtr+8);
        a2Val = _mm256_loadu_ps(aPtr+16);
        a3Val = _mm256_loadu_ps(aPtr+24);

        a0Val = _mm256_complexmul_ps( a0Val, z0 );
        a1Val = _mm256_complexmul_ps( a1Val, z1 );
        a2Val = _mm256_complexmul_ps( a2Val, z2 );
        a3Val = _mm256_complexmul_ps( a3Val, z3 );

        z0 = _mm256_complexmul_ps( z0, dz_reg );
        z1 = _mm256_complexmul_ps( z1, dz_reg );
        z2 = _mm256_complexmul_ps( z2, dz_reg );
        z3 = _mm256_complexmul_ps( z3, dz_reg );

        for( vec_ind = 0; vec_ind < num_a_vectors; ++vec_ind ){
            x0Val[vec_ind] = _mm256_loadu_ps(bPtr[vec_ind]); // t0|t1|t2|t3|t4|t5|t6|t7
            x1Val[vec_ind] = _mm256_loadu_ps(bPtr[vec_ind]+8);
            x0loVal[vec_ind] = _mm256_unpacklo_ps(x0Val[vec_ind], x0Val[vec_ind]); // t0|t0|t1|t1|t4|t4|t5|t5
            x0hiVal[vec_ind] = _mm256_unpackhi_ps(x0Val[vec_ind], x0Val[vec_ind]); // t2|t2|t3|t3|t6|t6|t7|t7
            x1loVal[vec_ind] = _mm256_unpacklo_ps(x1Val[vec_ind], x1Val[vec_ind]);
            x1hiVal[vec_ind] = _mm256_unpackhi_ps(x1Val[vec_ind], x1Val[vec_ind]);

            // TODO: it may be possible to rearrange swizzling to better pipeline data
            b0Val[vec_ind] = _mm256_permute2f128_ps(x0loVal[vec_ind], x0hiVal[vec_ind], 0x20); // t0|t0|t1|t1|t2|t2|t3|t3
            b1Val[vec_ind] = _mm256_permute2f128_ps(x0loVal[vec_ind], x0hiVal[vec_ind], 0x31); // t4|t4|t5|t5|t6|t6|t7|t7
            b2Val[vec_ind] = _mm256_permute2f128_ps(x1loVal[vec_ind], x1hiVal[vec_ind], 0x20);
            b3Val[vec_ind] = _mm256_permute2f128_ps(x1loVal[vec_ind], x1hiVal[vec_ind], 0x31);

            c0Val[vec_ind] = _mm256_mul_ps(a0Val, b0Val[vec_ind]);
            c1Val[vec_ind] = _mm256_mul_ps(a1Val, b1Val[vec_ind]);
            c2Val[vec_ind] = _mm256_mul_ps(a2Val, b2Val[vec_ind]);
            c3Val[vec_ind] = _mm256_mul_ps(a3Val, b3Val[vec_ind]);

            dotProdVal0[vec_ind] = _mm256_add_ps(c0Val[vec_ind], dotProdVal0[vec_ind]);
            dotProdVal1[vec_ind] = _mm256_add_ps(c1Val[vec_ind], dotProdVal1[vec_ind]);
            dotProdVal2[vec_ind] = _mm256_add_ps(c2Val[vec_ind], dotProdVal2[vec_ind]);
            dotProdVal3[vec_ind] = _mm256_add_ps(c3Val[vec_ind], dotProdVal3[vec_ind]);

            bPtr[vec_ind] += 16;
        }

        // Force the rotators back onto the unit circle
        if ((number % 64) == 0)
    for(;number < sixteenthPoints; number++)
    {
            z0 = _mm256_complexnormalise_ps( z0 );
            z1 = _mm256_complexnormalise_ps( z1 );
            z2 = _mm256_complexnormalise_ps( z2 );
            z3 = _mm256_complexnormalise_ps( z3 );
        }
        a0Val = _mm256_loadu_ps(aPtr);
        a1Val = _mm256_loadu_ps(aPtr+8);
        a2Val = _mm256_loadu_ps(aPtr+16);
        a3Val = _mm256_loadu_ps(aPtr+24);

        aPtr += 32;
    }
        a0Val = _mm256_complexmul_ps( a0Val, z0 );
        a1Val = _mm256_complexmul_ps( a1Val, z1 );
        a2Val = _mm256_complexmul_ps( a2Val, z2 );
        a3Val = _mm256_complexmul_ps( a3Val, z3 );

        z0 = _mm256_complexmul_ps( z0, dz_reg );
        z1 = _mm256_complexmul_ps( z1, dz_reg );
        z2 = _mm256_complexmul_ps( z2, dz_reg );
        z3 = _mm256_complexmul_ps( z3, dz_reg );

        for( vec_ind = 0; vec_ind < num_a_vectors; ++vec_ind )
        {
            x0Val[vec_ind] = _mm256_loadu_ps(bPtr[vec_ind]); // t0|t1|t2|t3|t4|t5|t6|t7
            x1Val[vec_ind] = _mm256_loadu_ps(bPtr[vec_ind]+8);
            x0loVal[vec_ind] = _mm256_unpacklo_ps(x0Val[vec_ind], x0Val[vec_ind]); // t0|t0|t1|t1|t4|t4|t5|t5
            x0hiVal[vec_ind] = _mm256_unpackhi_ps(x0Val[vec_ind], x0Val[vec_ind]); // t2|t2|t3|t3|t6|t6|t7|t7
            x1loVal[vec_ind] = _mm256_unpacklo_ps(x1Val[vec_ind], x1Val[vec_ind]);
            x1hiVal[vec_ind] = _mm256_unpackhi_ps(x1Val[vec_ind], x1Val[vec_ind]);

            // TODO: it may be possible to rearrange swizzling to better pipeline data
            b0Val[vec_ind] = _mm256_permute2f128_ps(x0loVal[vec_ind], x0hiVal[vec_ind], 0x20); // t0|t0|t1|t1|t2|t2|t3|t3
            b1Val[vec_ind] = _mm256_permute2f128_ps(x0loVal[vec_ind], x0hiVal[vec_ind], 0x31); // t4|t4|t5|t5|t6|t6|t7|t7
            b2Val[vec_ind] = _mm256_permute2f128_ps(x1loVal[vec_ind], x1hiVal[vec_ind], 0x20);
            b3Val[vec_ind] = _mm256_permute2f128_ps(x1loVal[vec_ind], x1hiVal[vec_ind], 0x31);

            c0Val[vec_ind] = _mm256_mul_ps(a0Val, b0Val[vec_ind]);
            c1Val[vec_ind] = _mm256_mul_ps(a1Val, b1Val[vec_ind]);
            c2Val[vec_ind] = _mm256_mul_ps(a2Val, b2Val[vec_ind]);
            c3Val[vec_ind] = _mm256_mul_ps(a3Val, b3Val[vec_ind]);

            dotProdVal0[vec_ind] = _mm256_add_ps(c0Val[vec_ind], dotProdVal0[vec_ind]);
            dotProdVal1[vec_ind] = _mm256_add_ps(c1Val[vec_ind], dotProdVal1[vec_ind]);
            dotProdVal2[vec_ind] = _mm256_add_ps(c2Val[vec_ind], dotProdVal2[vec_ind]);
            dotProdVal3[vec_ind] = _mm256_add_ps(c3Val[vec_ind], dotProdVal3[vec_ind]);

            bPtr[vec_ind] += 16;
        }

        // Force the rotators back onto the unit circle
        if ((number % 64) == 0)
        {
            z0 = _mm256_complexnormalise_ps( z0 );
            z1 = _mm256_complexnormalise_ps( z1 );
            z2 = _mm256_complexnormalise_ps( z2 );
            z3 = _mm256_complexnormalise_ps( z3 );
        }

        aPtr += 32;
    }
    __VOLK_ATTR_ALIGNED(32) lv_32fc_t dotProductVector[4];

    for( vec_ind = 0; vec_ind < num_a_vectors; ++vec_ind ){
        dotProdVal0[vec_ind] = _mm256_add_ps(dotProdVal0[vec_ind], dotProdVal1[vec_ind]);
        dotProdVal0[vec_ind] = _mm256_add_ps(dotProdVal0[vec_ind], dotProdVal2[vec_ind]);
        dotProdVal0[vec_ind] = _mm256_add_ps(dotProdVal0[vec_ind], dotProdVal3[vec_ind]);
    for( vec_ind = 0; vec_ind < num_a_vectors; ++vec_ind )
    {
        dotProdVal0[vec_ind] = _mm256_add_ps(dotProdVal0[vec_ind], dotProdVal1[vec_ind]);
        dotProdVal0[vec_ind] = _mm256_add_ps(dotProdVal0[vec_ind], dotProdVal2[vec_ind]);
        dotProdVal0[vec_ind] = _mm256_add_ps(dotProdVal0[vec_ind], dotProdVal3[vec_ind]);

        _mm256_store_ps((float *)dotProductVector,dotProdVal0[vec_ind]); // Store the results back into the dot product vector
        _mm256_store_ps((float *)dotProductVector, dotProdVal0[vec_ind]); // Store the results back into the dot product vector

        result[ vec_ind ] = lv_cmake( 0, 0 );
        for( i = 0; i < 4; ++i ){
            result[vec_ind] += dotProductVector[i];
        result[ vec_ind ] = lv_cmake( 0, 0 );
        for( i = 0; i < 4; ++i )
        {
            result[vec_ind] += dotProductVector[i];
        }
    }
    }
    z0 = _mm256_complexnormalise_ps( z0 );
    _mm256_store_ps((float*)phase_vec, z0);
    _phase = phase_vec[0];
    _phase = phase_vec[0];
    _mm256_zeroupper();

    number = sixteenthPoints*16;
    for(;number < num_points; number++){
        wo = (*aPtr++)*_phase;
        _phase *= phase_inc;
    for(;number < num_points; number++)
    {
        wo = (*aPtr++)*_phase;
        _phase *= phase_inc;

        for( vec_ind = 0; vec_ind < num_a_vectors; ++vec_ind ){
            result[vec_ind] += wo * in_a[vec_ind][number];
        for( vec_ind = 0; vec_ind < num_a_vectors; ++vec_ind )
        {
            result[vec_ind] += wo * in_a[vec_ind][number];
        }
    }
    }

    *phase = _phase;

}

#endif /* LV_HAVE_AVX */


#ifdef LV_HAVE_AVX
#include <immintrin.h>
#include <volk_gnsssdr/volk_gnsssdr_avx_intrinsics.h>
static inline void volk_gnsssdr_32fc_32f_rotator_dot_prod_32fc_xn_a_avx(lv_32fc_t* result, const lv_32fc_t* in_common, const lv_32fc_t phase_inc, lv_32fc_t* phase, const float** in_a, int num_a_vectors, unsigned int num_points)
{
    unsigned int number = 0;
    unsigned int vec_ind = 0;
    unsigned int i = 0;
    const unsigned int sixteenthPoints = num_points / 16;

    const float* aPtr = (float*)in_common;
    const float* bPtr[ num_a_vectors];
    for( vec_ind = 0; vec_ind < num_a_vectors; ++vec_ind )
    {
        bPtr[vec_ind] = in_a[vec_ind];
    }

    lv_32fc_t _phase = (*phase);
    lv_32fc_t wo;

    __m256 a0Val, a1Val, a2Val, a3Val;
    __m256 b0Val[num_a_vectors], b1Val[num_a_vectors], b2Val[num_a_vectors], b3Val[num_a_vectors];
    __m256 x0Val[num_a_vectors], x1Val[num_a_vectors], x0loVal[num_a_vectors], x0hiVal[num_a_vectors], x1loVal[num_a_vectors], x1hiVal[num_a_vectors];
    __m256 c0Val[num_a_vectors], c1Val[num_a_vectors], c2Val[num_a_vectors], c3Val[num_a_vectors];

    __m256 dotProdVal0[num_a_vectors];
    __m256 dotProdVal1[num_a_vectors];
    __m256 dotProdVal2[num_a_vectors];
    __m256 dotProdVal3[num_a_vectors];

    for( vec_ind = 0; vec_ind < num_a_vectors; vec_ind++ )
    {
        dotProdVal0[vec_ind] = _mm256_setzero_ps();
        dotProdVal1[vec_ind] = _mm256_setzero_ps();
        dotProdVal2[vec_ind] = _mm256_setzero_ps();
        dotProdVal3[vec_ind] = _mm256_setzero_ps();
    }

    // Set up the complex rotator
    __m256 z0, z1, z2, z3;
    __VOLK_ATTR_ALIGNED(32) lv_32fc_t phase_vec[16];
    for( vec_ind = 0; vec_ind < 16; ++vec_ind )
    {
        phase_vec[vec_ind] = _phase;
        _phase *= phase_inc;
    }

    z0 = _mm256_load_ps( (float *)phase_vec );
    z1 = _mm256_load_ps( (float *)(phase_vec + 4) );
    z2 = _mm256_load_ps( (float *)(phase_vec + 8) );
    z3 = _mm256_load_ps( (float *)(phase_vec + 12) );
    lv_32fc_t dz = phase_inc; dz *= dz; dz *= dz; dz *= dz; dz *= dz; // dz = phase_inc^16;

    for( vec_ind = 0; vec_ind < 4; ++vec_ind )
    {
        phase_vec[vec_ind] = dz;
    }

    __m256 dz_reg = _mm256_load_ps( (float *)phase_vec );
    dz_reg = _mm256_complexnormalise_ps( dz_reg );

    for(;number < sixteenthPoints; number++)
    {

        a0Val = _mm256_load_ps(aPtr);
        a1Val = _mm256_load_ps(aPtr+8);
        a2Val = _mm256_load_ps(aPtr+16);
        a3Val = _mm256_load_ps(aPtr+24);

        a0Val = _mm256_complexmul_ps( a0Val, z0 );
        a1Val = _mm256_complexmul_ps( a1Val, z1 );
        a2Val = _mm256_complexmul_ps( a2Val, z2 );
        a3Val = _mm256_complexmul_ps( a3Val, z3 );

        z0 = _mm256_complexmul_ps( z0, dz_reg );
        z1 = _mm256_complexmul_ps( z1, dz_reg );
        z2 = _mm256_complexmul_ps( z2, dz_reg );
        z3 = _mm256_complexmul_ps( z3, dz_reg );

        for( vec_ind = 0; vec_ind < num_a_vectors; ++vec_ind )
        {
            x0Val[vec_ind] = _mm256_loadu_ps(bPtr[vec_ind]); // t0|t1|t2|t3|t4|t5|t6|t7
            x1Val[vec_ind] = _mm256_loadu_ps(bPtr[vec_ind]+8);
            x0loVal[vec_ind] = _mm256_unpacklo_ps(x0Val[vec_ind], x0Val[vec_ind]); // t0|t0|t1|t1|t4|t4|t5|t5
            x0hiVal[vec_ind] = _mm256_unpackhi_ps(x0Val[vec_ind], x0Val[vec_ind]); // t2|t2|t3|t3|t6|t6|t7|t7
            x1loVal[vec_ind] = _mm256_unpacklo_ps(x1Val[vec_ind], x1Val[vec_ind]);
            x1hiVal[vec_ind] = _mm256_unpackhi_ps(x1Val[vec_ind], x1Val[vec_ind]);

            // TODO: it may be possible to rearrange swizzling to better pipeline data
            b0Val[vec_ind] = _mm256_permute2f128_ps(x0loVal[vec_ind], x0hiVal[vec_ind], 0x20); // t0|t0|t1|t1|t2|t2|t3|t3
            b1Val[vec_ind] = _mm256_permute2f128_ps(x0loVal[vec_ind], x0hiVal[vec_ind], 0x31); // t4|t4|t5|t5|t6|t6|t7|t7
            b2Val[vec_ind] = _mm256_permute2f128_ps(x1loVal[vec_ind], x1hiVal[vec_ind], 0x20);
            b3Val[vec_ind] = _mm256_permute2f128_ps(x1loVal[vec_ind], x1hiVal[vec_ind], 0x31);

            c0Val[vec_ind] = _mm256_mul_ps(a0Val, b0Val[vec_ind]);
            c1Val[vec_ind] = _mm256_mul_ps(a1Val, b1Val[vec_ind]);
            c2Val[vec_ind] = _mm256_mul_ps(a2Val, b2Val[vec_ind]);
            c3Val[vec_ind] = _mm256_mul_ps(a3Val, b3Val[vec_ind]);

            dotProdVal0[vec_ind] = _mm256_add_ps(c0Val[vec_ind], dotProdVal0[vec_ind]);
            dotProdVal1[vec_ind] = _mm256_add_ps(c1Val[vec_ind], dotProdVal1[vec_ind]);
            dotProdVal2[vec_ind] = _mm256_add_ps(c2Val[vec_ind], dotProdVal2[vec_ind]);
            dotProdVal3[vec_ind] = _mm256_add_ps(c3Val[vec_ind], dotProdVal3[vec_ind]);

            bPtr[vec_ind] += 16;
        }

        // Force the rotators back onto the unit circle
        if ((number % 64) == 0)
        {
            z0 = _mm256_complexnormalise_ps( z0 );
            z1 = _mm256_complexnormalise_ps( z1 );
            z2 = _mm256_complexnormalise_ps( z2 );
            z3 = _mm256_complexnormalise_ps( z3 );
        }

        aPtr += 32;
    }
    __VOLK_ATTR_ALIGNED(32) lv_32fc_t dotProductVector[4];

    for( vec_ind = 0; vec_ind < num_a_vectors; ++vec_ind )
    {
        dotProdVal0[vec_ind] = _mm256_add_ps(dotProdVal0[vec_ind], dotProdVal1[vec_ind]);
        dotProdVal0[vec_ind] = _mm256_add_ps(dotProdVal0[vec_ind], dotProdVal2[vec_ind]);
        dotProdVal0[vec_ind] = _mm256_add_ps(dotProdVal0[vec_ind], dotProdVal3[vec_ind]);

        _mm256_store_ps((float *)dotProductVector, dotProdVal0[vec_ind]); // Store the results back into the dot product vector

        result[ vec_ind ] = lv_cmake( 0, 0 );
        for( i = 0; i < 4; ++i )
        {
            result[vec_ind] += dotProductVector[i];
        }
    }

    z0 = _mm256_complexnormalise_ps( z0 );
    _mm256_store_ps((float*)phase_vec, z0);
    _phase = phase_vec[0];
    _mm256_zeroupper();

    number = sixteenthPoints*16;
    for(;number < num_points; number++)
    {
        wo = (*aPtr++)*_phase;
        _phase *= phase_inc;

        for( vec_ind = 0; vec_ind < num_a_vectors; ++vec_ind )
        {
            result[vec_ind] += wo * in_a[vec_ind][number];
        }
    }

    *phase = _phase;
}


#endif /* LV_HAVE_AVX */

#endif /* INCLUDED_volk_gnsssdr_32fc_32f_rotator_dot_prod_32fc_xn_H */
@@ -128,5 +128,35 @@ static inline void volk_gnsssdr_32fc_32f_rotator_dotprodxnpuppet_32fc_u_avx(lv_3

#endif // AVX


#ifdef LV_HAVE_AVX
static inline void volk_gnsssdr_32fc_32f_rotator_dotprodxnpuppet_32fc_a_avx(lv_32fc_t* result, const lv_32fc_t* local_code, const float* in, unsigned int num_points)
{
    // phases must be normalized. Phase rotator expects a complex exponential input!
    float rem_carrier_phase_in_rad = 0.25;
    float phase_step_rad = 0.1;
    lv_32fc_t phase[1];
    phase[0] = lv_cmake(cos(rem_carrier_phase_in_rad), sin(rem_carrier_phase_in_rad));
    lv_32fc_t phase_inc[1];
    phase_inc[0] = lv_cmake(cos(phase_step_rad), sin(phase_step_rad));
    unsigned int n;
    int num_a_vectors = 3;
    float ** in_a = (float **)volk_gnsssdr_malloc(sizeof(float *) * num_a_vectors, volk_gnsssdr_get_alignment());
    for(n = 0; n < num_a_vectors; n++)
    {
        in_a[n] = (float *)volk_gnsssdr_malloc(sizeof(float ) * num_points, volk_gnsssdr_get_alignment());
        memcpy((float*)in_a[n], (float*)in, sizeof(float) * num_points);
    }
    volk_gnsssdr_32fc_32f_rotator_dot_prod_32fc_xn_a_avx(result, local_code, phase_inc[0], phase, (const float**) in_a, num_a_vectors, num_points);

    for(n = 0; n < num_a_vectors; n++)
    {
        volk_gnsssdr_free(in_a[n]);
    }
    volk_gnsssdr_free(in_a);
}

#endif // AVX

#endif // INCLUDED_volk_gnsssdr_32fc_32f_rotator_dotprodxnpuppet_32fc_H