
More consistent naming

Now volk_gnsssdr_16ic_xn_resampler_16ic implements the same resampler
as volk_gnsssdr_32fc_xn_resampler_32fc. The old one, which is faster
in SSE implementations at the expense of some constraints on its inputs
(to be documented), is now named
volk_gnsssdr_16ic_xn_resampler_fast_16ic
Carles Fernandez 2016-04-28 20:12:27 +02:00
parent 4c4f6c1def
commit 25e5c744d7
10 changed files with 1190 additions and 1190 deletions
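Before the per-file diffs, a minimal sketch contrasting the two multi-vector call shapes after this rename. The generic-implementation signatures are taken from the diffs below; all parameter values are illustrative, and the header name for the renamed (non-fast) kernel is assumed from the naming scheme:

#include "volk_gnsssdr/volk_gnsssdr_16ic_xn_resampler_fast_16ic_xn.h"
#include "volk_gnsssdr/volk_gnsssdr_16ic_xn_resampler_16ic_xn.h" /* assumed header name after the rename */
#include <volk_gnsssdr/volk_gnsssdr_malloc.h>
#include <volk_gnsssdr/volk_gnsssdr_complex.h>

static void example_both_apis(const lv_16sc_t* local_code, unsigned int num_points)
{
    int num_out_vectors = 3;
    lv_16sc_t** result = (lv_16sc_t**)volk_gnsssdr_malloc(sizeof(lv_16sc_t*) * num_out_vectors, volk_gnsssdr_get_alignment());
    for (int n = 0; n < num_out_vectors; n++)
        {
            result[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
        }
    float rem_code_phase[3] = { -0.234f, -0.234f, -0.234f }; /* fast API: one remnant phase per output vector */
    float shifts_chips[3] = { -0.5f, 0.0f, 0.5f };           /* new API: per-tap shifts plus a single remnant phase */

    /* the old, faster, constrained-input kernel, now carrying the _fast_ infix */
    volk_gnsssdr_16ic_xn_resampler_fast_16ic_xn_generic(result, local_code, rem_code_phase, 0.1f, 2046, num_out_vectors, num_points);

    /* the renamed kernel, same resampler as volk_gnsssdr_32fc_xn_resampler_32fc */
    volk_gnsssdr_16ic_xn_resampler_16ic_xn_generic(result, local_code, -0.234f, 0.1f, shifts_chips, 2046, num_out_vectors, num_points);

    for (int n = 0; n < num_out_vectors; n++)
        {
            volk_gnsssdr_free(result[n]);
        }
    volk_gnsssdr_free(result);
}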

View File

@ -1,5 +1,5 @@
/*!
* \file volk_gnsssdr_16ic_resampler_16ic.h
* \file volk_gnsssdr_16ic_resampler_fast_16ic.h
* \brief VOLK_GNSSSDR kernel: resamples a 16 bits complex vector.
* \authors <ul>
* <li> Javier Arribas, 2015. jarribas(at)cttc.es
@ -34,7 +34,7 @@
*/
/*!
* \page volk_gnsssdr_16ic_resampler_16ic
* \page volk_gnsssdr_16ic_resampler_fast_16ic
*
* \b Overview
*
@ -42,7 +42,7 @@
*
* <b>Dispatcher Prototype</b>
* \code
* void volk_gnsssdr_16ic_resampler_16ic(lv_16sc_t* result, const lv_16sc_t* local_code, float rem_code_phase_chips, float code_phase_step_chips, int code_length_chips, unsigned int num_output_samples)
* void volk_gnsssdr_16ic_resampler_fast_16ic(lv_16sc_t* result, const lv_16sc_t* local_code, float rem_code_phase_chips, float code_phase_step_chips, int code_length_chips, unsigned int num_output_samples)
* \endcode
*
* \b Inputs
@ -57,8 +57,8 @@
*
*/
#ifndef INCLUDED_volk_gnsssdr_16ic_resampler_16ic_H
#define INCLUDED_volk_gnsssdr_16ic_resampler_16ic_H
#ifndef INCLUDED_volk_gnsssdr_16ic_resampler_fast_16ic_H
#define INCLUDED_volk_gnsssdr_16ic_resampler_fast_16ic_H
#include <math.h>
#include <volk_gnsssdr/volk_gnsssdr_common.h>
@ -72,7 +72,7 @@
// return (r > 0.0) ? (r + 0.5) : (r - 0.5);
//}
static inline void volk_gnsssdr_16ic_resampler_16ic_generic(lv_16sc_t* result, const lv_16sc_t* local_code, float rem_code_phase_chips, float code_phase_step_chips, int code_length_chips, unsigned int num_output_samples)
static inline void volk_gnsssdr_16ic_resampler_fast_16ic_generic(lv_16sc_t* result, const lv_16sc_t* local_code, float rem_code_phase_chips, float code_phase_step_chips, int code_length_chips, unsigned int num_output_samples)
{
int local_code_chip_index;
//fesetround(FE_TONEAREST);
@ -92,7 +92,7 @@ static inline void volk_gnsssdr_16ic_resampler_16ic_generic(lv_16sc_t* result, c
#ifdef LV_HAVE_SSE2
#include <emmintrin.h>
static inline void volk_gnsssdr_16ic_resampler_16ic_a_sse2(lv_16sc_t* result, const lv_16sc_t* local_code, float rem_code_phase_chips, float code_phase_step_chips, int code_length_chips, unsigned int num_output_samples)//, int* scratch_buffer, float* scratch_buffer_float)
static inline void volk_gnsssdr_16ic_resampler_fast_16ic_a_sse2(lv_16sc_t* result, const lv_16sc_t* local_code, float rem_code_phase_chips, float code_phase_step_chips, int code_length_chips, unsigned int num_output_samples)//, int* scratch_buffer, float* scratch_buffer_float)
{
_MM_SET_ROUNDING_MODE (_MM_ROUND_NEAREST);//_MM_ROUND_NEAREST, _MM_ROUND_DOWN, _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO
unsigned int number;
@ -175,7 +175,7 @@ static inline void volk_gnsssdr_16ic_resampler_16ic_a_sse2(lv_16sc_t* result, co
#ifdef LV_HAVE_SSE2
#include <emmintrin.h>
static inline void volk_gnsssdr_16ic_resampler_16ic_u_sse2(lv_16sc_t* result, const lv_16sc_t* local_code, float rem_code_phase_chips, float code_phase_step_chips, int code_length_chips, unsigned int num_output_samples)//, int* scratch_buffer, float* scratch_buffer_float)
static inline void volk_gnsssdr_16ic_resampler_fast_16ic_u_sse2(lv_16sc_t* result, const lv_16sc_t* local_code, float rem_code_phase_chips, float code_phase_step_chips, int code_length_chips, unsigned int num_output_samples)//, int* scratch_buffer, float* scratch_buffer_float)
{
_MM_SET_ROUNDING_MODE (_MM_ROUND_NEAREST);//_MM_ROUND_NEAREST, _MM_ROUND_DOWN, _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO
unsigned int number;
@ -257,7 +257,7 @@ static inline void volk_gnsssdr_16ic_resampler_16ic_u_sse2(lv_16sc_t* result, co
#ifdef LV_HAVE_NEON
#include <arm_neon.h>
static inline void volk_gnsssdr_16ic_resampler_16ic_neon(lv_16sc_t* result, const lv_16sc_t* local_code, float rem_code_phase_chips, float code_phase_step_chips, int code_length_chips, unsigned int num_output_samples)//, int* scratch_buffer, float* scratch_buffer_float)
static inline void volk_gnsssdr_16ic_resampler_fast_16ic_neon(lv_16sc_t* result, const lv_16sc_t* local_code, float rem_code_phase_chips, float code_phase_step_chips, int code_length_chips, unsigned int num_output_samples)//, int* scratch_buffer, float* scratch_buffer_float)
{
unsigned int number;
const unsigned int quarterPoints = num_output_samples / 4;
@ -337,4 +337,4 @@ static inline void volk_gnsssdr_16ic_resampler_16ic_neon(lv_16sc_t* result, cons
#endif /* LV_HAVE_NEON */
#endif /*INCLUDED_volk_gnsssdr_16ic_resampler_16ic_H*/
#endif /*INCLUDED_volk_gnsssdr_16ic_resampler_fast_16ic_H*/
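A minimal usage sketch for the renamed single-vector kernel above, following the dispatcher prototype given in its documentation block (the parameter values are illustrative, not prescribed by the source):

#include <volk_gnsssdr/volk_gnsssdr.h>
#include <volk_gnsssdr/volk_gnsssdr_malloc.h>

/* resample one period of a 1023-chip local code at 0.1 chips/sample */
unsigned int num_samples = 1000;
lv_16sc_t* resampled = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_samples, volk_gnsssdr_get_alignment());
volk_gnsssdr_16ic_resampler_fast_16ic(resampled, local_code, -0.123f, 0.1f, 1023, num_samples);
volk_gnsssdr_free(resampled);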

View File

@ -1,5 +1,5 @@
/*!
* \file volk_gnsssdr_16ic_resamplerpuppet_16ic.h
* \file volk_gnsssdr_16ic_resamplerfastpuppet_16ic.h
* \brief VOLK_GNSSSDR puppet for the 16-bit complex vector resampler kernel.
* \authors <ul>
* <li> Carles Fernandez Prades 2016 cfernandez at cttc dot cat
@ -32,56 +32,56 @@
* -------------------------------------------------------------------------
*/
#ifndef INCLUDED_volk_gnsssdr_16ic_resamplerpuppet_16ic_H
#define INCLUDED_volk_gnsssdr_16ic_resamplerpuppet_16ic_H
#ifndef INCLUDED_volk_gnsssdr_16ic_resamplerfastpuppet_16ic_H
#define INCLUDED_volk_gnsssdr_16ic_resamplerfastpuppet_16ic_H
#include "volk_gnsssdr/volk_gnsssdr_16ic_resampler_16ic.h"
#include "volk_gnsssdr/volk_gnsssdr_16ic_resampler_fast_16ic.h"
#ifdef LV_HAVE_GENERIC
static inline void volk_gnsssdr_16ic_resamplerpuppet_16ic_generic(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
static inline void volk_gnsssdr_16ic_resamplerfastpuppet_16ic_generic(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
{
float rem_code_phase_chips = -0.123;
float code_phase_step_chips = 0.1;
int code_length_chips = 1023;
volk_gnsssdr_16ic_resampler_16ic_generic(result, local_code, rem_code_phase_chips, code_phase_step_chips, code_length_chips, num_points);
volk_gnsssdr_16ic_resampler_fast_16ic_generic(result, local_code, rem_code_phase_chips, code_phase_step_chips, code_length_chips, num_points);
}
#endif /* LV_HAVE_GENERIC */
#ifdef LV_HAVE_SSE2
static inline void volk_gnsssdr_16ic_resamplerpuppet_16ic_a_sse2(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
static inline void volk_gnsssdr_16ic_resamplerfastpuppet_16ic_a_sse2(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
{
float rem_code_phase_chips = -0.123;
float code_phase_step_chips = 0.1;
int code_length_chips = 1023;
volk_gnsssdr_16ic_resampler_16ic_a_sse2(result, local_code, rem_code_phase_chips, code_phase_step_chips, code_length_chips, num_points );
volk_gnsssdr_16ic_resampler_fast_16ic_a_sse2(result, local_code, rem_code_phase_chips, code_phase_step_chips, code_length_chips, num_points );
}
#endif /* LV_HAVE_SSE2 */
#ifdef LV_HAVE_SSE2
static inline void volk_gnsssdr_16ic_resamplerpuppet_16ic_u_sse2(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
static inline void volk_gnsssdr_16ic_resamplerfastpuppet_16ic_u_sse2(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
{
float rem_code_phase_chips = -0.123;
float code_phase_step_chips = 0.1;
int code_length_chips = 1023;
volk_gnsssdr_16ic_resampler_16ic_u_sse2(result, local_code, rem_code_phase_chips, code_phase_step_chips, code_length_chips, num_points );
volk_gnsssdr_16ic_resampler_fast_16ic_u_sse2(result, local_code, rem_code_phase_chips, code_phase_step_chips, code_length_chips, num_points );
}
#endif /* LV_HAVE_SSE2 */
#ifdef LV_HAVE_NEON
static inline void volk_gnsssdr_16ic_resamplerpuppet_16ic_neon(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
static inline void volk_gnsssdr_16ic_resamplerfastpuppet_16ic_neon(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
{
float rem_code_phase_chips = -0.123;
float code_phase_step_chips = 0.1;
int code_length_chips = 1023;
volk_gnsssdr_16ic_resampler_16ic_neon(result, local_code, rem_code_phase_chips, code_phase_step_chips, code_length_chips, num_points );
volk_gnsssdr_16ic_resampler_fast_16ic_neon(result, local_code, rem_code_phase_chips, code_phase_step_chips, code_length_chips, num_points );
}
#endif /* LV_HAVE_NEON */
#endif // INCLUDED_volk_gnsssdr_16ic_resamplerpuppet_16ic_H
#endif // INCLUDED_volk_gnsssdr_16ic_resamplerfastpuppet_16ic_H
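The puppet above exists so that the test system can drive every kernel through one uniform signature, with the kernel-specific parameters fixed inside. A hypothetical illustration of that contract (the typedef name is ours, not from the source; the shared shape itself is visible in every puppet in this commit):

typedef void (*resampler_puppet_fn)(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points);
resampler_puppet_fn fn = volk_gnsssdr_16ic_resamplerfastpuppet_16ic_generic;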

View File

@ -0,0 +1,153 @@
/*!
* \file volk_gnsssdr_16ic_resamplerfastxnpuppet_16ic.h
* \brief VOLK_GNSSSDR puppet for the multiple 16-bit complex vector resampler kernel.
* \authors <ul>
* <li> Carles Fernandez Prades 2016 cfernandez at cttc dot cat
* </ul>
*
* VOLK_GNSSSDR puppet for integrating the multiple resampler into the test system
*
* -------------------------------------------------------------------------
*
* Copyright (C) 2010-2015 (see AUTHORS file for a list of contributors)
*
* GNSS-SDR is a software defined Global Navigation
* Satellite Systems receiver
*
* This file is part of GNSS-SDR.
*
* GNSS-SDR is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GNSS-SDR is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNSS-SDR. If not, see <http://www.gnu.org/licenses/>.
*
* -------------------------------------------------------------------------
*/
#ifndef INCLUDED_volk_gnsssdr_16ic_resamplerfastxnpuppet_16ic_H
#define INCLUDED_volk_gnsssdr_16ic_resamplerfastxnpuppet_16ic_H
#include "volk_gnsssdr/volk_gnsssdr_16ic_xn_resampler_fast_16ic_xn.h"
#include <volk_gnsssdr/volk_gnsssdr_malloc.h>
#include <volk_gnsssdr/volk_gnsssdr_complex.h>
#include <volk_gnsssdr/volk_gnsssdr.h>
#include <string.h>
#ifdef LV_HAVE_GENERIC
static inline void volk_gnsssdr_16ic_resamplerfastxnpuppet_16ic_generic(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
{
float code_phase_step_chips = 0.1;
int code_length_chips = 2046;
int num_out_vectors = 3;
float* rem_code_phase_chips = (float*)volk_gnsssdr_malloc(sizeof(float) * num_out_vectors, volk_gnsssdr_get_alignment());
lv_16sc_t** result_aux = (lv_16sc_t**)volk_gnsssdr_malloc(sizeof(lv_16sc_t*) * num_out_vectors, volk_gnsssdr_get_alignment());
for(unsigned int n = 0; n < num_out_vectors; n++)
{
rem_code_phase_chips[n] = -0.234;
result_aux[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
}
volk_gnsssdr_16ic_xn_resampler_fast_16ic_xn_generic(result_aux, local_code, rem_code_phase_chips, code_phase_step_chips, code_length_chips, num_out_vectors, num_points);
memcpy((lv_16sc_t*)result, (lv_16sc_t*)result_aux[0], sizeof(lv_16sc_t) * num_points);
volk_gnsssdr_free(rem_code_phase_chips);
for(unsigned int n = 0; n < num_out_vectors; n++)
{
volk_gnsssdr_free(result_aux[n]);
}
volk_gnsssdr_free(result_aux);
}
#endif /* LV_HAVE_GENERIC */
#ifdef LV_HAVE_SSE2
static inline void volk_gnsssdr_16ic_resamplerfastxnpuppet_16ic_a_sse2(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
{
float code_phase_step_chips = 0.1;
int code_length_chips = 2046;
int num_out_vectors = 3;
float * rem_code_phase_chips = (float*)volk_gnsssdr_malloc(sizeof(float) * num_out_vectors, volk_gnsssdr_get_alignment());
lv_16sc_t** result_aux = (lv_16sc_t**)volk_gnsssdr_malloc(sizeof(lv_16sc_t*) * num_out_vectors, volk_gnsssdr_get_alignment());
for(unsigned int n = 0; n < num_out_vectors; n++)
{
rem_code_phase_chips[n] = -0.234;
result_aux[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
}
volk_gnsssdr_16ic_xn_resampler_fast_16ic_xn_a_sse2(result_aux, local_code, rem_code_phase_chips, code_phase_step_chips, code_length_chips, num_out_vectors, num_points);
memcpy(result, result_aux[0], sizeof(lv_16sc_t) * num_points);
volk_gnsssdr_free(rem_code_phase_chips);
for(unsigned int n = 0; n < num_out_vectors; n++)
{
volk_gnsssdr_free(result_aux[n]);
}
volk_gnsssdr_free(result_aux);
}
#endif
#ifdef LV_HAVE_SSE2
static inline void volk_gnsssdr_16ic_resamplerfastxnpuppet_16ic_u_sse2(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
{
float code_phase_step_chips = 0.1;
int code_length_chips = 2046;
int num_out_vectors = 3;
float * rem_code_phase_chips = (float*)volk_gnsssdr_malloc(sizeof(float) * num_out_vectors, volk_gnsssdr_get_alignment());
lv_16sc_t** result_aux = (lv_16sc_t**)volk_gnsssdr_malloc(sizeof(lv_16sc_t*) * num_out_vectors, volk_gnsssdr_get_alignment());
for(unsigned int n = 0; n < num_out_vectors; n++)
{
rem_code_phase_chips[n] = -0.234;
result_aux[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
}
volk_gnsssdr_16ic_xn_resampler_fast_16ic_xn_u_sse2(result_aux, local_code, rem_code_phase_chips, code_phase_step_chips, code_length_chips, num_out_vectors, num_points);
memcpy(result, result_aux[0], sizeof(lv_16sc_t) * num_points);
volk_gnsssdr_free(rem_code_phase_chips);
for(unsigned int n = 0; n < num_out_vectors; n++)
{
volk_gnsssdr_free(result_aux[n]);
}
volk_gnsssdr_free(result_aux);
}
#endif
#ifdef LV_HAVE_NEON
static inline void volk_gnsssdr_16ic_resamplerfastxnpuppet_16ic_neon(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
{
float code_phase_step_chips = 0.1;
int code_length_chips = 2046;
int num_out_vectors = 3;
float * rem_code_phase_chips = (float*)volk_gnsssdr_malloc(sizeof(float) * num_out_vectors, volk_gnsssdr_get_alignment());
lv_16sc_t** result_aux = (lv_16sc_t**)volk_gnsssdr_malloc(sizeof(lv_16sc_t*) * num_out_vectors, volk_gnsssdr_get_alignment());
for(unsigned int n = 0; n < num_out_vectors; n++)
{
rem_code_phase_chips[n] = -0.234;
result_aux[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
}
volk_gnsssdr_16ic_xn_resampler_fast_16ic_xn_neon(result_aux, local_code, rem_code_phase_chips, code_phase_step_chips, code_length_chips, num_out_vectors, num_points);
memcpy(result, result_aux[0], sizeof(lv_16sc_t) * num_points);
volk_gnsssdr_free(rem_code_phase_chips);
for(unsigned int n = 0; n < num_out_vectors; n++)
{
volk_gnsssdr_free(result_aux[n]);
}
volk_gnsssdr_free(result_aux);
}
#endif
#endif // INCLUDED_volk_gnsssdr_16ic_resamplerfastxnpuppet_16ic_H
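One plausible way to use the multi-vector fast kernel exercised above for Early, Prompt and Late replicas. This is an assumption on our part: the fast API carries no shifts_chips argument, so any tap spacing has to be folded into the per-vector remnant phases:

/* sketch: ±0.5-chip spacing folded into rem_code_phase_chips (assumed usage) */
float rem_code_phase_chips[3] = { 0.266f, -0.234f, -0.734f }; /* Early, Prompt, Late around -0.234 */
volk_gnsssdr_16ic_xn_resampler_fast_16ic_xn_generic(result, local_code, rem_code_phase_chips, 0.1f, 2046, 3, num_points);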

View File

@ -1,282 +0,0 @@
/*!
* \file volk_gnsssdr_16ic_resamplerxnpuppet_16ic.h
* \brief VOLK_GNSSSDR puppet for the multiple 16-bit complex vector resampler kernel.
* \authors <ul>
* <li> Carles Fernandez Prades 2016 cfernandez at cttc dot cat
* </ul>
*
* VOLK_GNSSSDR puppet for integrating the multiple resampler into the test system
*
* -------------------------------------------------------------------------
*
* Copyright (C) 2010-2015 (see AUTHORS file for a list of contributors)
*
* GNSS-SDR is a software defined Global Navigation
* Satellite Systems receiver
*
* This file is part of GNSS-SDR.
*
* GNSS-SDR is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GNSS-SDR is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNSS-SDR. If not, see <http://www.gnu.org/licenses/>.
*
* -------------------------------------------------------------------------
*/
#ifndef INCLUDED_volk_gnsssdr_16ic_resamplerxnpuppet2_16ic_H
#define INCLUDED_volk_gnsssdr_16ic_resamplerxnpuppet2_16ic_H
#include "volk_gnsssdr/volk_gnsssdr_16ic_xn_resampler2_16ic_xn.h"
#include <volk_gnsssdr/volk_gnsssdr_malloc.h>
#include <volk_gnsssdr/volk_gnsssdr_complex.h>
#include <volk_gnsssdr/volk_gnsssdr.h>
#include <string.h>
#ifdef LV_HAVE_GENERIC
static inline void volk_gnsssdr_16ic_resamplerxnpuppet2_16ic_generic(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
{
float code_phase_step_chips = -0.6;
int code_length_chips = 2046;
int num_out_vectors = 3;
float rem_code_phase_chips = -0.234;
float shifts_chips[3] = { -0.1, 0.0, 0.1 };
lv_16sc_t** result_aux = (lv_16sc_t**)volk_gnsssdr_malloc(sizeof(lv_16sc_t*) * num_out_vectors, volk_gnsssdr_get_alignment());
for(unsigned int n = 0; n < num_out_vectors; n++)
{
result_aux[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
}
volk_gnsssdr_16ic_xn_resampler2_16ic_xn_generic(result_aux, local_code, rem_code_phase_chips, code_phase_step_chips, shifts_chips, code_length_chips, num_out_vectors, num_points);
memcpy((lv_16sc_t*)result, (lv_16sc_t*)result_aux[0], sizeof(lv_16sc_t) * num_points);
for(unsigned int n = 0; n < num_out_vectors; n++)
{
volk_gnsssdr_free(result_aux[n]);
}
volk_gnsssdr_free(result_aux);
}
#endif /* LV_HAVE_GENERIC */
#ifdef LV_HAVE_SSE3
static inline void volk_gnsssdr_16ic_resamplerxnpuppet2_16ic_a_sse3(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
{
float code_phase_step_chips = -0.6;
int code_length_chips = 2046;
int num_out_vectors = 3;
float rem_code_phase_chips = -0.234;
float shifts_chips[3] = { -0.1, 0.0, 0.1 };
lv_16sc_t** result_aux = (lv_16sc_t**)volk_gnsssdr_malloc(sizeof(lv_16sc_t*) * num_out_vectors, volk_gnsssdr_get_alignment());
for(unsigned int n = 0; n < num_out_vectors; n++)
{
result_aux[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
}
volk_gnsssdr_16ic_xn_resampler2_16ic_xn_a_sse3(result_aux, local_code, rem_code_phase_chips, code_phase_step_chips, shifts_chips, code_length_chips, num_out_vectors, num_points);
memcpy((lv_16sc_t*)result, (lv_16sc_t*)result_aux[0], sizeof(lv_16sc_t) * num_points);
for(unsigned int n = 0; n < num_out_vectors; n++)
{
volk_gnsssdr_free(result_aux[n]);
}
volk_gnsssdr_free(result_aux);
}
#endif
#ifdef LV_HAVE_SSE3
static inline void volk_gnsssdr_16ic_resamplerxnpuppet2_16ic_u_sse3(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
{
float code_phase_step_chips = -0.6;
int code_length_chips = 2046;
int num_out_vectors = 3;
float rem_code_phase_chips = -0.234;
float shifts_chips[3] = { -0.1, 0.0, 0.1 };
lv_16sc_t** result_aux = (lv_16sc_t**)volk_gnsssdr_malloc(sizeof(lv_16sc_t*) * num_out_vectors, volk_gnsssdr_get_alignment());
for(unsigned int n = 0; n < num_out_vectors; n++)
{
result_aux[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
}
volk_gnsssdr_16ic_xn_resampler2_16ic_xn_u_sse3(result_aux, local_code, rem_code_phase_chips, code_phase_step_chips, shifts_chips, code_length_chips, num_out_vectors, num_points);
memcpy((lv_16sc_t*)result, (lv_16sc_t*)result_aux[0], sizeof(lv_16sc_t) * num_points);
for(unsigned int n = 0; n < num_out_vectors; n++)
{
volk_gnsssdr_free(result_aux[n]);
}
volk_gnsssdr_free(result_aux);
}
#endif
#ifdef LV_HAVE_SSE4_1
static inline void volk_gnsssdr_16ic_resamplerxnpuppet2_16ic_u_sse4_1(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
{
float code_phase_step_chips = -0.6;
int code_length_chips = 2046;
int num_out_vectors = 3;
float rem_code_phase_chips = -0.234;
float shifts_chips[3] = { -0.1, 0.0, 0.1 };
lv_16sc_t** result_aux = (lv_16sc_t**)volk_gnsssdr_malloc(sizeof(lv_16sc_t*) * num_out_vectors, volk_gnsssdr_get_alignment());
for(unsigned int n = 0; n < num_out_vectors; n++)
{
result_aux[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
}
volk_gnsssdr_16ic_xn_resampler2_16ic_xn_u_sse4_1(result_aux, local_code, rem_code_phase_chips, code_phase_step_chips, shifts_chips, code_length_chips, num_out_vectors, num_points);
memcpy((lv_16sc_t*)result, (lv_16sc_t*)result_aux[0], sizeof(lv_16sc_t) * num_points);
for(unsigned int n = 0; n < num_out_vectors; n++)
{
volk_gnsssdr_free(result_aux[n]);
}
volk_gnsssdr_free(result_aux);
}
#endif
#ifdef LV_HAVE_SSE4_1
static inline void volk_gnsssdr_16ic_resamplerxnpuppet2_16ic_a_sse4_1(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
{
float code_phase_step_chips = -0.6;
int code_length_chips = 2046;
int num_out_vectors = 3;
float rem_code_phase_chips = -0.234;
float shifts_chips[3] = { -0.1, 0.0, 0.1 };
lv_16sc_t** result_aux = (lv_16sc_t**)volk_gnsssdr_malloc(sizeof(lv_16sc_t*) * num_out_vectors, volk_gnsssdr_get_alignment());
for(unsigned int n = 0; n < num_out_vectors; n++)
{
result_aux[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
}
volk_gnsssdr_16ic_xn_resampler2_16ic_xn_a_sse4_1(result_aux, local_code, rem_code_phase_chips, code_phase_step_chips, shifts_chips, code_length_chips, num_out_vectors, num_points);
memcpy((lv_16sc_t*)result, (lv_16sc_t*)result_aux[0], sizeof(lv_16sc_t) * num_points);
for(unsigned int n = 0; n < num_out_vectors; n++)
{
volk_gnsssdr_free(result_aux[n]);
}
volk_gnsssdr_free(result_aux);
}
#endif
#ifdef LV_HAVE_AVX
static inline void volk_gnsssdr_16ic_resamplerxnpuppet2_16ic_u_avx(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
{
float code_phase_step_chips = -0.6;
int code_length_chips = 2046;
int num_out_vectors = 3;
float rem_code_phase_chips = -0.234;
float shifts_chips[3] = { -0.1, 0.0, 0.1 };
lv_16sc_t** result_aux = (lv_16sc_t**)volk_gnsssdr_malloc(sizeof(lv_16sc_t*) * num_out_vectors, volk_gnsssdr_get_alignment());
for(unsigned int n = 0; n < num_out_vectors; n++)
{
result_aux[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
}
volk_gnsssdr_16ic_xn_resampler2_16ic_xn_u_avx(result_aux, local_code, rem_code_phase_chips, code_phase_step_chips, shifts_chips, code_length_chips, num_out_vectors, num_points);
memcpy((lv_16sc_t*)result, (lv_16sc_t*)result_aux[0], sizeof(lv_16sc_t) * num_points);
for(unsigned int n = 0; n < num_out_vectors; n++)
{
volk_gnsssdr_free(result_aux[n]);
}
volk_gnsssdr_free(result_aux);
}
#endif
#ifdef LV_HAVE_AVX
static inline void volk_gnsssdr_16ic_resamplerxnpuppet2_16ic_a_avx(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
{
float code_phase_step_chips = -0.6;
int code_length_chips = 2046;
int num_out_vectors = 3;
float rem_code_phase_chips = -0.234;
float shifts_chips[3] = { -0.1, 0.0, 0.1 };
lv_16sc_t** result_aux = (lv_16sc_t**)volk_gnsssdr_malloc(sizeof(lv_16sc_t*) * num_out_vectors, volk_gnsssdr_get_alignment());
for(unsigned int n = 0; n < num_out_vectors; n++)
{
result_aux[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
}
volk_gnsssdr_16ic_xn_resampler2_16ic_xn_a_avx(result_aux, local_code, rem_code_phase_chips, code_phase_step_chips, shifts_chips, code_length_chips, num_out_vectors, num_points);
memcpy((lv_16sc_t*)result, (lv_16sc_t*)result_aux[0], sizeof(lv_16sc_t) * num_points);
for(unsigned int n = 0; n < num_out_vectors; n++)
{
volk_gnsssdr_free(result_aux[n]);
}
volk_gnsssdr_free(result_aux);
}
#endif
#ifdef LV_HAVE_NEON
static inline void volk_gnsssdr_16ic_resamplerxnpuppet2_16ic_neon(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
{
float code_phase_step_chips = -0.6;
int code_length_chips = 2046;
int num_out_vectors = 3;
float rem_code_phase_chips = -0.234;
float shifts_chips[3] = { -0.1, 0.0, 0.1 };
lv_16sc_t** result_aux = (lv_16sc_t**)volk_gnsssdr_malloc(sizeof(lv_16sc_t*) * num_out_vectors, volk_gnsssdr_get_alignment());
for(unsigned int n = 0; n < num_out_vectors; n++)
{
result_aux[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
}
volk_gnsssdr_16ic_xn_resampler2_16ic_xn_neon(result_aux, local_code, rem_code_phase_chips, code_phase_step_chips, shifts_chips, code_length_chips, num_out_vectors, num_points);
memcpy((lv_16sc_t*)result, (lv_16sc_t*)result_aux[0], sizeof(lv_16sc_t) * num_points);
for(unsigned int n = 0; n < num_out_vectors; n++)
{
volk_gnsssdr_free(result_aux[n]);
}
volk_gnsssdr_free(result_aux);
}
#endif
#endif // INCLUDED_volk_gnsssdr_16ic_resamplerpuppet_16ic_H

View File

@ -44,21 +44,23 @@
#ifdef LV_HAVE_GENERIC
static inline void volk_gnsssdr_16ic_resamplerxnpuppet_16ic_generic(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
{
float code_phase_step_chips = 0.1;
float code_phase_step_chips = -0.6;
int code_length_chips = 2046;
int num_out_vectors = 3;
float* rem_code_phase_chips = (float*)volk_gnsssdr_malloc(sizeof(float) * num_out_vectors, volk_gnsssdr_get_alignment());
float rem_code_phase_chips = -0.234;
float shifts_chips[3] = { -0.1, 0.0, 0.1 };
lv_16sc_t** result_aux = (lv_16sc_t**)volk_gnsssdr_malloc(sizeof(lv_16sc_t*) * num_out_vectors, volk_gnsssdr_get_alignment());
for(unsigned int n = 0; n < num_out_vectors; n++)
{
rem_code_phase_chips[n] = -0.234;
result_aux[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
}
volk_gnsssdr_16ic_xn_resampler_16ic_xn_generic(result_aux, local_code, rem_code_phase_chips, code_phase_step_chips, code_length_chips, num_out_vectors, num_points);
volk_gnsssdr_16ic_xn_resampler_16ic_xn_generic(result_aux, local_code, rem_code_phase_chips, code_phase_step_chips, shifts_chips, code_length_chips, num_out_vectors, num_points);
memcpy((lv_16sc_t*)result, (lv_16sc_t*)result_aux[0], sizeof(lv_16sc_t) * num_points);
volk_gnsssdr_free(rem_code_phase_chips);
for(unsigned int n = 0; n < num_out_vectors; n++)
{
volk_gnsssdr_free(result_aux[n]);
@ -67,25 +69,57 @@ static inline void volk_gnsssdr_16ic_resamplerxnpuppet_16ic_generic(lv_16sc_t* r
}
#endif /* LV_HAVE_GENERIC */
#ifdef LV_HAVE_SSE2
static inline void volk_gnsssdr_16ic_resamplerxnpuppet_16ic_a_sse2(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
#ifdef LV_HAVE_SSE3
static inline void volk_gnsssdr_16ic_resamplerxnpuppet_16ic_a_sse3(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
{
float code_phase_step_chips = 0.1;
float code_phase_step_chips = -0.6;
int code_length_chips = 2046;
int num_out_vectors = 3;
float * rem_code_phase_chips = (float*)volk_gnsssdr_malloc(sizeof(float) * num_out_vectors, volk_gnsssdr_get_alignment());
float rem_code_phase_chips = -0.234;
float shifts_chips[3] = { -0.1, 0.0, 0.1 };
lv_16sc_t** result_aux = (lv_16sc_t**)volk_gnsssdr_malloc(sizeof(lv_16sc_t*) * num_out_vectors, volk_gnsssdr_get_alignment());
for(unsigned int n = 0; n < num_out_vectors; n++)
{
rem_code_phase_chips[n] = -0.234;
result_aux[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
result_aux[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
}
volk_gnsssdr_16ic_xn_resampler_16ic_xn_a_sse2(result_aux, local_code, rem_code_phase_chips, code_phase_step_chips, code_length_chips, num_out_vectors, num_points);
memcpy(result, result_aux[0], sizeof(lv_16sc_t) * num_points);
volk_gnsssdr_free(rem_code_phase_chips);
volk_gnsssdr_16ic_xn_resampler_16ic_xn_a_sse3(result_aux, local_code, rem_code_phase_chips, code_phase_step_chips, shifts_chips, code_length_chips, num_out_vectors, num_points);
memcpy((lv_16sc_t*)result, (lv_16sc_t*)result_aux[0], sizeof(lv_16sc_t) * num_points);
for(unsigned int n = 0; n < num_out_vectors; n++)
{
volk_gnsssdr_free(result_aux[n]);
}
volk_gnsssdr_free(result_aux);
}
#endif
#ifdef LV_HAVE_SSE3
static inline void volk_gnsssdr_16ic_resamplerxnpuppet_16ic_u_sse3(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
{
float code_phase_step_chips = -0.6;
int code_length_chips = 2046;
int num_out_vectors = 3;
float rem_code_phase_chips = -0.234;
float shifts_chips[3] = { -0.1, 0.0, 0.1 };
lv_16sc_t** result_aux = (lv_16sc_t**)volk_gnsssdr_malloc(sizeof(lv_16sc_t*) * num_out_vectors, volk_gnsssdr_get_alignment());
for(unsigned int n = 0; n < num_out_vectors; n++)
{
result_aux[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
}
volk_gnsssdr_16ic_xn_resampler_16ic_xn_u_sse3(result_aux, local_code, rem_code_phase_chips, code_phase_step_chips, shifts_chips, code_length_chips, num_out_vectors, num_points);
memcpy((lv_16sc_t*)result, (lv_16sc_t*)result_aux[0], sizeof(lv_16sc_t) * num_points);
for(unsigned int n = 0; n < num_out_vectors; n++)
{
volk_gnsssdr_free(result_aux[n]);
@ -96,23 +130,116 @@ static inline void volk_gnsssdr_16ic_resamplerxnpuppet_16ic_a_sse2(lv_16sc_t* re
#endif
#ifdef LV_HAVE_SSE2
static inline void volk_gnsssdr_16ic_resamplerxnpuppet_16ic_u_sse2(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
#ifdef LV_HAVE_SSE4_1
static inline void volk_gnsssdr_16ic_resamplerxnpuppet_16ic_u_sse4_1(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
{
float code_phase_step_chips = 0.1;
float code_phase_step_chips = -0.6;
int code_length_chips = 2046;
int num_out_vectors = 3;
float * rem_code_phase_chips = (float*)volk_gnsssdr_malloc(sizeof(float) * num_out_vectors, volk_gnsssdr_get_alignment());
float rem_code_phase_chips = -0.234;
float shifts_chips[3] = { -0.1, 0.0, 0.1 };
lv_16sc_t** result_aux = (lv_16sc_t**)volk_gnsssdr_malloc(sizeof(lv_16sc_t*) * num_out_vectors, volk_gnsssdr_get_alignment());
for(unsigned int n = 0; n < num_out_vectors; n++)
{
rem_code_phase_chips[n] = -0.234;
result_aux[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
result_aux[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
}
volk_gnsssdr_16ic_xn_resampler_16ic_xn_u_sse2(result_aux, local_code, rem_code_phase_chips, code_phase_step_chips, code_length_chips, num_out_vectors, num_points);
memcpy(result, result_aux[0], sizeof(lv_16sc_t) * num_points);
volk_gnsssdr_free(rem_code_phase_chips);
volk_gnsssdr_16ic_xn_resampler_16ic_xn_u_sse4_1(result_aux, local_code, rem_code_phase_chips, code_phase_step_chips, shifts_chips, code_length_chips, num_out_vectors, num_points);
memcpy((lv_16sc_t*)result, (lv_16sc_t*)result_aux[0], sizeof(lv_16sc_t) * num_points);
for(unsigned int n = 0; n < num_out_vectors; n++)
{
volk_gnsssdr_free(result_aux[n]);
}
volk_gnsssdr_free(result_aux);
}
#endif
#ifdef LV_HAVE_SSE4_1
static inline void volk_gnsssdr_16ic_resamplerxnpuppet_16ic_a_sse4_1(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
{
float code_phase_step_chips = -0.6;
int code_length_chips = 2046;
int num_out_vectors = 3;
float rem_code_phase_chips = -0.234;
float shifts_chips[3] = { -0.1, 0.0, 0.1 };
lv_16sc_t** result_aux = (lv_16sc_t**)volk_gnsssdr_malloc(sizeof(lv_16sc_t*) * num_out_vectors, volk_gnsssdr_get_alignment());
for(unsigned int n = 0; n < num_out_vectors; n++)
{
result_aux[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
}
volk_gnsssdr_16ic_xn_resampler_16ic_xn_a_sse4_1(result_aux, local_code, rem_code_phase_chips, code_phase_step_chips, shifts_chips, code_length_chips, num_out_vectors, num_points);
memcpy((lv_16sc_t*)result, (lv_16sc_t*)result_aux[0], sizeof(lv_16sc_t) * num_points);
for(unsigned int n = 0; n < num_out_vectors; n++)
{
volk_gnsssdr_free(result_aux[n]);
}
volk_gnsssdr_free(result_aux);
}
#endif
#ifdef LV_HAVE_AVX
static inline void volk_gnsssdr_16ic_resamplerxnpuppet_16ic_u_avx(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
{
float code_phase_step_chips = -0.6;
int code_length_chips = 2046;
int num_out_vectors = 3;
float rem_code_phase_chips = -0.234;
float shifts_chips[3] = { -0.1, 0.0, 0.1 };
lv_16sc_t** result_aux = (lv_16sc_t**)volk_gnsssdr_malloc(sizeof(lv_16sc_t*) * num_out_vectors, volk_gnsssdr_get_alignment());
for(unsigned int n = 0; n < num_out_vectors; n++)
{
result_aux[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
}
volk_gnsssdr_16ic_xn_resampler_16ic_xn_u_avx(result_aux, local_code, rem_code_phase_chips, code_phase_step_chips, shifts_chips, code_length_chips, num_out_vectors, num_points);
memcpy((lv_16sc_t*)result, (lv_16sc_t*)result_aux[0], sizeof(lv_16sc_t) * num_points);
for(unsigned int n = 0; n < num_out_vectors; n++)
{
volk_gnsssdr_free(result_aux[n]);
}
volk_gnsssdr_free(result_aux);
}
#endif
#ifdef LV_HAVE_AVX
static inline void volk_gnsssdr_16ic_resamplerxnpuppet_16ic_a_avx(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
{
float code_phase_step_chips = -0.6;
int code_length_chips = 2046;
int num_out_vectors = 3;
float rem_code_phase_chips = -0.234;
float shifts_chips[3] = { -0.1, 0.0, 0.1 };
lv_16sc_t** result_aux = (lv_16sc_t**)volk_gnsssdr_malloc(sizeof(lv_16sc_t*) * num_out_vectors, volk_gnsssdr_get_alignment());
for(unsigned int n = 0; n < num_out_vectors; n++)
{
result_aux[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
}
volk_gnsssdr_16ic_xn_resampler_16ic_xn_a_avx(result_aux, local_code, rem_code_phase_chips, code_phase_step_chips, shifts_chips, code_length_chips, num_out_vectors, num_points);
memcpy((lv_16sc_t*)result, (lv_16sc_t*)result_aux[0], sizeof(lv_16sc_t) * num_points);
for(unsigned int n = 0; n < num_out_vectors; n++)
{
volk_gnsssdr_free(result_aux[n]);
@ -126,20 +253,23 @@ static inline void volk_gnsssdr_16ic_resamplerxnpuppet_16ic_u_sse2(lv_16sc_t* re
#ifdef LV_HAVE_NEON
static inline void volk_gnsssdr_16ic_resamplerxnpuppet_16ic_neon(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
{
float code_phase_step_chips = 0.1;
float code_phase_step_chips = -0.6;
int code_length_chips = 2046;
int num_out_vectors = 3;
float * rem_code_phase_chips = (float*)volk_gnsssdr_malloc(sizeof(float) * num_out_vectors, volk_gnsssdr_get_alignment());
float rem_code_phase_chips = -0.234;
float shifts_chips[3] = { -0.1, 0.0, 0.1 };
lv_16sc_t** result_aux = (lv_16sc_t**)volk_gnsssdr_malloc(sizeof(lv_16sc_t*) * num_out_vectors, volk_gnsssdr_get_alignment());
for(unsigned int n = 0; n < num_out_vectors; n++)
{
rem_code_phase_chips[n] = -0.234;
result_aux[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
result_aux[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
}
volk_gnsssdr_16ic_xn_resampler_16ic_xn_neon(result_aux, local_code, rem_code_phase_chips, code_phase_step_chips, code_length_chips, num_out_vectors, num_points);
memcpy(result, result_aux[0], sizeof(lv_16sc_t) * num_points);
volk_gnsssdr_free(rem_code_phase_chips);
volk_gnsssdr_16ic_xn_resampler_16ic_xn_neon(result_aux, local_code, rem_code_phase_chips, code_phase_step_chips, shifts_chips, code_length_chips, num_out_vectors, num_points);
memcpy((lv_16sc_t*)result, (lv_16sc_t*)result_aux[0], sizeof(lv_16sc_t) * num_points);
for(unsigned int n = 0; n < num_out_vectors; n++)
{
volk_gnsssdr_free(result_aux[n]);
@ -149,5 +279,4 @@ static inline void volk_gnsssdr_16ic_resamplerxnpuppet_16ic_neon(lv_16sc_t* resu
#endif
#endif // INCLUDED_volk_gnsssdr_16ic_resamplerpuppet_16ic_H

View File

@ -1,591 +0,0 @@
/*!
* \file volk_gnsssdr_16ic_xn_resampler2_16ic_xn.h
* \brief VOLK_GNSSSDR kernel: Resamples N 16 bits integer short complex vectors using zero hold resample algorithm.
* \authors <ul>
* <li> Javier Arribas, 2015. jarribas(at)cttc.es
* </ul>
*
* VOLK_GNSSSDR kernel that resamples N 16 bits integer short complex vectors using zero hold resample algorithm.
* It resamples a single GNSS local code signal replica into N vectors fractional-resampled and fractional-delayed
* (i.e. it creates the Early, Prompt, and Late code replicas)
*
* -------------------------------------------------------------------------
*
* Copyright (C) 2010-2015 (see AUTHORS file for a list of contributors)
*
* GNSS-SDR is a software defined Global Navigation
* Satellite Systems receiver
*
* This file is part of GNSS-SDR.
*
* GNSS-SDR is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GNSS-SDR is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNSS-SDR. If not, see <http://www.gnu.org/licenses/>.
*
* -------------------------------------------------------------------------
*/
/*!
* \page volk_gnsssdr_16ic_xn_resampler2_16ic_xn
*
* \b Overview
*
* Resamples a complex vector (16-bit integer each component), providing \p num_out_vectors outputs.
*
* <b>Dispatcher Prototype</b>
* \code
* void volk_gnsssdr_16ic_xn_resampler2_16ic_xn(lv_16sc_t** result, const lv_16sc_t* local_code, float* rem_code_phase_chips, float code_phase_step_chips, float* shifts_chips, unsigned int code_length_chips, int num_out_vectors, unsigned int num_points)
* \endcode
*
* \b Inputs
* \li local_code: One of the vectors to be multiplied.
* \li rem_code_phase_chips: Remnant code phase [chips].
* \li code_phase_step_chips: Phase increment per sample [chips/sample].
* \li shifts_chips: Vector of floats that defines the spacing (in chips) between the replicas of \p local_code
* \li code_length_chips: Code length in chips.
* \li num_out_vectors: Number of output vectors.
* \li num_points: The number of data values to be in the resampled vector.
*
* \b Outputs
* \li result: Pointer to a vector of pointers where the results will be stored.
*
*/
#ifndef INCLUDED_volk_gnsssdr_16ic_xn_resampler2_16ic_xn_H
#define INCLUDED_volk_gnsssdr_16ic_xn_resampler2_16ic_xn_H
#include <math.h>
#include <stdlib.h>
#include <volk_gnsssdr/volk_gnsssdr_common.h>
#include <volk_gnsssdr/volk_gnsssdr_complex.h>
#ifdef LV_HAVE_GENERIC
static inline void volk_gnsssdr_16ic_xn_resampler2_16ic_xn_generic(lv_16sc_t** result, const lv_16sc_t* local_code, float rem_code_phase_chips, float code_phase_step_chips, float* shifts_chips, unsigned int code_length_chips, int num_out_vectors, unsigned int num_points)
{
int local_code_chip_index;
for (int current_correlator_tap = 0; current_correlator_tap < num_out_vectors; current_correlator_tap++)
{
for (int n = 0; n < num_points; n++)
{
// resample code for current tap
local_code_chip_index = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
//Take into account that in multitap correlators, the shifts can be negative!
if (local_code_chip_index < 0) local_code_chip_index += (int)code_length_chips * (abs(local_code_chip_index) / code_length_chips + 1);
local_code_chip_index = local_code_chip_index % code_length_chips;
result[current_correlator_tap][n] = local_code[local_code_chip_index];
}
}
}
#endif /*LV_HAVE_GENERIC*/
#ifdef LV_HAVE_SSE4_1
#include <smmintrin.h>
static inline void volk_gnsssdr_16ic_xn_resampler2_16ic_xn_a_sse4_1(lv_16sc_t** result, const lv_16sc_t* local_code, float rem_code_phase_chips, float code_phase_step_chips, float* shifts_chips, unsigned int code_length_chips, int num_out_vectors, unsigned int num_points)
{
lv_16sc_t** _result = result;
const unsigned int quarterPoints = num_points / 4;
const __m128 fours = _mm_set1_ps(4.0f);
const __m128 rem_code_phase_chips_reg = _mm_set_ps1(rem_code_phase_chips);
const __m128 code_phase_step_chips_reg = _mm_set_ps1(code_phase_step_chips);
__VOLK_ATTR_ALIGNED(16) int local_code_chip_index[4];
int local_code_chip_index_;
const __m128i zeros = _mm_setzero_si128();
const __m128 code_length_chips_reg_f = _mm_set_ps1((float)code_length_chips);
const __m128i code_length_chips_reg_i = _mm_set1_epi32((int)code_length_chips);
__m128i local_code_chip_index_reg, aux_i, negatives, i;
__m128 aux, aux2, shifts_chips_reg, c, cTrunc, base;
for (int current_correlator_tap = 0; current_correlator_tap < num_out_vectors; current_correlator_tap++)
{
shifts_chips_reg = _mm_set_ps1((float)shifts_chips[current_correlator_tap]);
aux2 = _mm_sub_ps(shifts_chips_reg, rem_code_phase_chips_reg);
__m128 indexn = _mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f);
for(unsigned int n = 0; n < quarterPoints; n++)
{
aux = _mm_mul_ps(code_phase_step_chips_reg, indexn);
aux = _mm_add_ps(aux, aux2);
// floor
aux = _mm_floor_ps(aux);
// fmod
c = _mm_div_ps(aux, code_length_chips_reg_f);
i = _mm_cvttps_epi32(c);
cTrunc = _mm_cvtepi32_ps(i);
base = _mm_mul_ps(cTrunc, code_length_chips_reg_f);
local_code_chip_index_reg = _mm_cvtps_epi32(_mm_sub_ps(aux, base));
negatives = _mm_cmplt_epi32(local_code_chip_index_reg, zeros);
aux_i = _mm_and_si128(code_length_chips_reg_i, negatives);
local_code_chip_index_reg = _mm_add_epi32(local_code_chip_index_reg, aux_i);
_mm_store_si128((__m128i*)local_code_chip_index, local_code_chip_index_reg);
for(unsigned int k = 0; k < 4; ++k)
{
_result[current_correlator_tap][n * 4 + k] = local_code[local_code_chip_index[k]];
}
indexn = _mm_add_ps(indexn, fours);
}
for(unsigned int n = quarterPoints * 4; n < num_points; n++)
{
// resample code for current tap
local_code_chip_index_ = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
//Take into account that in multitap correlators, the shifts can be negative!
if (local_code_chip_index_ < 0) local_code_chip_index_ += (int)code_length_chips * (abs(local_code_chip_index_) / code_length_chips + 1);
local_code_chip_index_ = local_code_chip_index_ % code_length_chips;
_result[current_correlator_tap][n] = local_code[local_code_chip_index_];
}
}
}
#endif
#ifdef LV_HAVE_SSE4_1
#include <smmintrin.h>
static inline void volk_gnsssdr_16ic_xn_resampler2_16ic_xn_u_sse4_1(lv_16sc_t** result, const lv_16sc_t* local_code, float rem_code_phase_chips, float code_phase_step_chips, float* shifts_chips, unsigned int code_length_chips, int num_out_vectors, unsigned int num_points)
{
lv_16sc_t** _result = result;
const unsigned int quarterPoints = num_points / 4;
const __m128 fours = _mm_set1_ps(4.0f);
const __m128 rem_code_phase_chips_reg = _mm_set_ps1(rem_code_phase_chips);
const __m128 code_phase_step_chips_reg = _mm_set_ps1(code_phase_step_chips);
__VOLK_ATTR_ALIGNED(16) int local_code_chip_index[4];
int local_code_chip_index_;
const __m128i zeros = _mm_setzero_si128();
const __m128 code_length_chips_reg_f = _mm_set_ps1((float)code_length_chips);
const __m128i code_length_chips_reg_i = _mm_set1_epi32((int)code_length_chips);
__m128i local_code_chip_index_reg, aux_i, negatives, i;
__m128 aux, aux2, shifts_chips_reg, c, cTrunc, base;
for (int current_correlator_tap = 0; current_correlator_tap < num_out_vectors; current_correlator_tap++)
{
shifts_chips_reg = _mm_set_ps1((float)shifts_chips[current_correlator_tap]);
aux2 = _mm_sub_ps(shifts_chips_reg, rem_code_phase_chips_reg);
__m128 indexn = _mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f);
for(unsigned int n = 0; n < quarterPoints; n++)
{
aux = _mm_mul_ps(code_phase_step_chips_reg, indexn);
aux = _mm_add_ps(aux, aux2);
// floor
aux = _mm_floor_ps(aux);
// fmod
c = _mm_div_ps(aux, code_length_chips_reg_f);
i = _mm_cvttps_epi32(c);
cTrunc = _mm_cvtepi32_ps(i);
base = _mm_mul_ps(cTrunc, code_length_chips_reg_f);
local_code_chip_index_reg = _mm_cvtps_epi32(_mm_sub_ps(aux, base));
negatives = _mm_cmplt_epi32(local_code_chip_index_reg, zeros);
aux_i = _mm_and_si128(code_length_chips_reg_i, negatives);
local_code_chip_index_reg = _mm_add_epi32(local_code_chip_index_reg, aux_i);
_mm_store_si128((__m128i*)local_code_chip_index, local_code_chip_index_reg);
for(unsigned int k = 0; k < 4; ++k)
{
_result[current_correlator_tap][n * 4 + k] = local_code[local_code_chip_index[k]];
}
indexn = _mm_add_ps(indexn, fours);
}
for(unsigned int n = quarterPoints * 4; n < num_points; n++)
{
// resample code for current tap
local_code_chip_index_ = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
//Take into account that in multitap correlators, the shifts can be negative!
if (local_code_chip_index_ < 0) local_code_chip_index_ += (int)code_length_chips * (abs(local_code_chip_index_) / code_length_chips + 1);
local_code_chip_index_ = local_code_chip_index_ % code_length_chips;
_result[current_correlator_tap][n] = local_code[local_code_chip_index_];
}
}
}
#endif
#ifdef LV_HAVE_SSE3
#include <pmmintrin.h>
static inline void volk_gnsssdr_16ic_xn_resampler2_16ic_xn_a_sse3(lv_16sc_t** result, const lv_16sc_t* local_code, float rem_code_phase_chips, float code_phase_step_chips, float* shifts_chips, unsigned int code_length_chips, int num_out_vectors, unsigned int num_points)
{
lv_16sc_t** _result = result;
const unsigned int quarterPoints = num_points / 4;
const __m128 ones = _mm_set1_ps(1.0f);
const __m128 fours = _mm_set1_ps(4.0f);
const __m128 rem_code_phase_chips_reg = _mm_set_ps1(rem_code_phase_chips);
const __m128 code_phase_step_chips_reg = _mm_set_ps1(code_phase_step_chips);
__VOLK_ATTR_ALIGNED(16) int local_code_chip_index[4];
int local_code_chip_index_;
const __m128i zeros = _mm_setzero_si128();
const __m128 code_length_chips_reg_f = _mm_set_ps1((float)code_length_chips);
const __m128i code_length_chips_reg_i = _mm_set1_epi32((int)code_length_chips);
__m128i local_code_chip_index_reg, aux_i, negatives, i;
__m128 aux, aux2, shifts_chips_reg, fi, igx, j, c, cTrunc, base;
for (int current_correlator_tap = 0; current_correlator_tap < num_out_vectors; current_correlator_tap++)
{
shifts_chips_reg = _mm_set_ps1((float)shifts_chips[current_correlator_tap]);
aux2 = _mm_sub_ps(shifts_chips_reg, rem_code_phase_chips_reg);
__m128 indexn = _mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f);
for(unsigned int n = 0; n < quarterPoints; n++)
{
aux = _mm_mul_ps(code_phase_step_chips_reg, indexn);
aux = _mm_add_ps(aux, aux2);
// floor
i = _mm_cvttps_epi32(aux);
fi = _mm_cvtepi32_ps(i);
igx = _mm_cmpgt_ps(fi, aux);
j = _mm_and_ps(igx, ones);
aux = _mm_sub_ps(fi, j);
// fmod
c = _mm_div_ps(aux, code_length_chips_reg_f);
i = _mm_cvttps_epi32(c);
cTrunc = _mm_cvtepi32_ps(i);
base = _mm_mul_ps(cTrunc, code_length_chips_reg_f);
local_code_chip_index_reg = _mm_cvtps_epi32(_mm_sub_ps(aux, base));
negatives = _mm_cmplt_epi32(local_code_chip_index_reg, zeros);
aux_i = _mm_and_si128(code_length_chips_reg_i, negatives);
local_code_chip_index_reg = _mm_add_epi32(local_code_chip_index_reg, aux_i);
_mm_store_si128((__m128i*)local_code_chip_index, local_code_chip_index_reg);
for(unsigned int k = 0; k < 4; ++k)
{
_result[current_correlator_tap][n * 4 + k] = local_code[local_code_chip_index[k]];
}
indexn = _mm_add_ps(indexn, fours);
}
for(unsigned int n = quarterPoints * 4; n < num_points; n++)
{
// resample code for current tap
local_code_chip_index_ = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
//Take into account that in multitap correlators, the shifts can be negative!
if (local_code_chip_index_ < 0) local_code_chip_index_ += (int)code_length_chips * (abs(local_code_chip_index_) / code_length_chips + 1);
local_code_chip_index_ = local_code_chip_index_ % code_length_chips;
_result[current_correlator_tap][n] = local_code[local_code_chip_index_];
}
}
}
#endif
#ifdef LV_HAVE_SSE3
#include <pmmintrin.h>
static inline void volk_gnsssdr_16ic_xn_resampler2_16ic_xn_u_sse3(lv_16sc_t** result, const lv_16sc_t* local_code, float rem_code_phase_chips, float code_phase_step_chips, float* shifts_chips, unsigned int code_length_chips, int num_out_vectors, unsigned int num_points)
{
lv_16sc_t** _result = result;
const unsigned int quarterPoints = num_points / 4;
const __m128 ones = _mm_set1_ps(1.0f);
const __m128 fours = _mm_set1_ps(4.0f);
const __m128 rem_code_phase_chips_reg = _mm_set_ps1(rem_code_phase_chips);
const __m128 code_phase_step_chips_reg = _mm_set_ps1(code_phase_step_chips);
__VOLK_ATTR_ALIGNED(16) int local_code_chip_index[4];
int local_code_chip_index_;
const __m128i zeros = _mm_setzero_si128();
const __m128 code_length_chips_reg_f = _mm_set_ps1((float)code_length_chips);
const __m128i code_length_chips_reg_i = _mm_set1_epi32((int)code_length_chips);
__m128i local_code_chip_index_reg, aux_i, negatives, i;
__m128 aux, aux2, shifts_chips_reg, fi, igx, j, c, cTrunc, base;
for (int current_correlator_tap = 0; current_correlator_tap < num_out_vectors; current_correlator_tap++)
{
shifts_chips_reg = _mm_set_ps1((float)shifts_chips[current_correlator_tap]);
aux2 = _mm_sub_ps(shifts_chips_reg, rem_code_phase_chips_reg);
__m128 indexn = _mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f);
for(unsigned int n = 0; n < quarterPoints; n++)
{
aux = _mm_mul_ps(code_phase_step_chips_reg, indexn);
aux = _mm_add_ps(aux, aux2);
// floor
i = _mm_cvttps_epi32(aux);
fi = _mm_cvtepi32_ps(i);
igx = _mm_cmpgt_ps(fi, aux);
j = _mm_and_ps(igx, ones);
aux = _mm_sub_ps(fi, j);
// fmod
c = _mm_div_ps(aux, code_length_chips_reg_f);
i = _mm_cvttps_epi32(c);
cTrunc = _mm_cvtepi32_ps(i);
base = _mm_mul_ps(cTrunc, code_length_chips_reg_f);
local_code_chip_index_reg = _mm_cvtps_epi32(_mm_sub_ps(aux, base));
negatives = _mm_cmplt_epi32(local_code_chip_index_reg, zeros);
aux_i = _mm_and_si128(code_length_chips_reg_i, negatives);
local_code_chip_index_reg = _mm_add_epi32(local_code_chip_index_reg, aux_i);
_mm_store_si128((__m128i*)local_code_chip_index, local_code_chip_index_reg);
for(unsigned int k = 0; k < 4; ++k)
{
_result[current_correlator_tap][n * 4 + k] = local_code[local_code_chip_index[k]];
}
indexn = _mm_add_ps(indexn, fours);
}
for(unsigned int n = quarterPoints * 4; n < num_points; n++)
{
// resample code for current tap
local_code_chip_index_ = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
//Take into account that in multitap correlators, the shifts can be negative!
if (local_code_chip_index_ < 0) local_code_chip_index_ += (int)code_length_chips * (abs(local_code_chip_index_) / code_length_chips + 1);
local_code_chip_index_ = local_code_chip_index_ % code_length_chips;
_result[current_correlator_tap][n] = local_code[local_code_chip_index_];
}
}
}
#endif
#ifdef LV_HAVE_AVX
#include <immintrin.h>
static inline void volk_gnsssdr_16ic_xn_resampler2_16ic_xn_a_avx(lv_16sc_t** result, const lv_16sc_t* local_code, float rem_code_phase_chips, float code_phase_step_chips, float* shifts_chips, unsigned int code_length_chips, int num_out_vectors, unsigned int num_points)
{
lv_16sc_t** _result = result;
const unsigned int avx_iters = num_points / 8;
const __m256 eights = _mm256_set1_ps(8.0f);
const __m256 rem_code_phase_chips_reg = _mm256_set1_ps(rem_code_phase_chips);
const __m256 code_phase_step_chips_reg = _mm256_set1_ps(code_phase_step_chips);
__VOLK_ATTR_ALIGNED(32) int local_code_chip_index[8];
int local_code_chip_index_;
const __m256 zeros = _mm256_setzero_ps();
const __m256 code_length_chips_reg_f = _mm256_set1_ps((float)code_length_chips);
const __m256 n0 = _mm256_set_ps(7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f, 0.0f);
__m256i local_code_chip_index_reg, i;
__m256 aux, aux2, aux3, shifts_chips_reg, c, cTrunc, base, negatives, indexn;
for (int current_correlator_tap = 0; current_correlator_tap < num_out_vectors; current_correlator_tap++)
{
shifts_chips_reg = _mm256_set1_ps((float)shifts_chips[current_correlator_tap]);
aux2 = _mm256_sub_ps(shifts_chips_reg, rem_code_phase_chips_reg);
indexn = n0;
for(unsigned int n = 0; n < avx_iters; n++)
{
__builtin_prefetch(&_result[current_correlator_tap][8 * n + 7], 1, 0);
__builtin_prefetch(&local_code_chip_index[8], 1, 3);
aux = _mm256_mul_ps(code_phase_step_chips_reg, indexn);
aux = _mm256_add_ps(aux, aux2);
// floor
aux = _mm256_floor_ps(aux);
// fmod
c = _mm256_div_ps(aux, code_length_chips_reg_f);
i = _mm256_cvttps_epi32(c);
cTrunc = _mm256_cvtepi32_ps(i);
base = _mm256_mul_ps(cTrunc, code_length_chips_reg_f);
local_code_chip_index_reg = _mm256_cvttps_epi32(_mm256_sub_ps(aux, base));
// no negatives
c = _mm256_cvtepi32_ps(local_code_chip_index_reg);
negatives = _mm256_cmp_ps(c, zeros, 0x01 );
aux3 = _mm256_and_ps(code_length_chips_reg_f, negatives);
aux = _mm256_add_ps(c, aux3);
local_code_chip_index_reg = _mm256_cvttps_epi32(aux);
_mm256_store_si256((__m256i*)local_code_chip_index, local_code_chip_index_reg);
for(unsigned int k = 0; k < 8; ++k)
{
_result[current_correlator_tap][n * 8 + k] = local_code[local_code_chip_index[k]];
}
indexn = _mm256_add_ps(indexn, eights);
}
}
_mm256_zeroupper();
for (int current_correlator_tap = 0; current_correlator_tap < num_out_vectors; current_correlator_tap++)
{
for(unsigned int n = avx_iters * 8; n < num_points; n++)
{
// resample code for current tap
local_code_chip_index_ = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
//Take into account that in multitap correlators, the shifts can be negative!
if (local_code_chip_index_ < 0) local_code_chip_index_ += (int)code_length_chips * (abs(local_code_chip_index_) / code_length_chips + 1);
local_code_chip_index_ = local_code_chip_index_ % code_length_chips;
_result[current_correlator_tap][n] = local_code[local_code_chip_index_];
}
}
}
#endif
#ifdef LV_HAVE_AVX
#include <immintrin.h>
static inline void volk_gnsssdr_16ic_xn_resampler2_16ic_xn_u_avx(lv_16sc_t** result, const lv_16sc_t* local_code, float rem_code_phase_chips, float code_phase_step_chips, float* shifts_chips, unsigned int code_length_chips, int num_out_vectors, unsigned int num_points)
{
lv_16sc_t** _result = result;
const unsigned int avx_iters = num_points / 8;
const __m256 eights = _mm256_set1_ps(8.0f);
const __m256 rem_code_phase_chips_reg = _mm256_set1_ps(rem_code_phase_chips);
const __m256 code_phase_step_chips_reg = _mm256_set1_ps(code_phase_step_chips);
__VOLK_ATTR_ALIGNED(32) int local_code_chip_index[8];
int local_code_chip_index_;
const __m256 zeros = _mm256_setzero_ps();
const __m256 code_length_chips_reg_f = _mm256_set1_ps((float)code_length_chips);
const __m256 n0 = _mm256_set_ps(7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f, 0.0f);
__m256i local_code_chip_index_reg, i;
__m256 aux, aux2, aux3, shifts_chips_reg, c, cTrunc, base, negatives, indexn;
for (int current_correlator_tap = 0; current_correlator_tap < num_out_vectors; current_correlator_tap++)
{
shifts_chips_reg = _mm256_set1_ps((float)shifts_chips[current_correlator_tap]);
aux2 = _mm256_sub_ps(shifts_chips_reg, rem_code_phase_chips_reg);
indexn = n0;
for(unsigned int n = 0; n < avx_iters; n++)
{
__builtin_prefetch(&_result[current_correlator_tap][8 * n + 7], 1, 0);
__builtin_prefetch(&local_code_chip_index[8], 1, 3);
aux = _mm256_mul_ps(code_phase_step_chips_reg, indexn);
aux = _mm256_add_ps(aux, aux2);
// floor
aux = _mm256_floor_ps(aux);
// fmod
c = _mm256_div_ps(aux, code_length_chips_reg_f);
i = _mm256_cvttps_epi32(c);
cTrunc = _mm256_cvtepi32_ps(i);
base = _mm256_mul_ps(cTrunc, code_length_chips_reg_f);
local_code_chip_index_reg = _mm256_cvttps_epi32(_mm256_sub_ps(aux, base));
// no negatives
c = _mm256_cvtepi32_ps(local_code_chip_index_reg);
negatives = _mm256_cmp_ps(c, zeros, 0x01 );
aux3 = _mm256_and_ps(code_length_chips_reg_f, negatives);
aux = _mm256_add_ps(c, aux3);
local_code_chip_index_reg = _mm256_cvttps_epi32(aux);
_mm256_store_si256((__m256i*)local_code_chip_index, local_code_chip_index_reg);
for(unsigned int k = 0; k < 8; ++k)
{
_result[current_correlator_tap][n * 8 + k] = local_code[local_code_chip_index[k]];
}
indexn = _mm256_add_ps(indexn, eights);
}
}
_mm256_zeroupper();
for (int current_correlator_tap = 0; current_correlator_tap < num_out_vectors; current_correlator_tap++)
{
for(unsigned int n = avx_iters * 8; n < num_points; n++)
{
// resample code for current tap
local_code_chip_index_ = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
//Take into account that in multitap correlators, the shifts can be negative!
if (local_code_chip_index_ < 0) local_code_chip_index_ += (int)code_length_chips * (abs(local_code_chip_index_) / code_length_chips + 1);
local_code_chip_index_ = local_code_chip_index_ % code_length_chips;
_result[current_correlator_tap][n] = local_code[local_code_chip_index_];
}
}
}
#endif
#ifdef LV_HAVE_NEON
#include <arm_neon.h>
static inline void volk_gnsssdr_16ic_xn_resampler2_16ic_xn_neon(lv_16sc_t** result, const lv_16sc_t* local_code, float rem_code_phase_chips, float code_phase_step_chips, float* shifts_chips, unsigned int code_length_chips, int num_out_vectors, unsigned int num_points)
{
lv_16sc_t** _result = result;
const unsigned int neon_iters = num_points / 4;
const int32x4_t ones = vdupq_n_s32(1);
const float32x4_t fours = vdupq_n_f32(4.0f);
const float32x4_t rem_code_phase_chips_reg = vdupq_n_f32(rem_code_phase_chips);
const float32x4_t code_phase_step_chips_reg = vdupq_n_f32(code_phase_step_chips);
__VOLK_ATTR_ALIGNED(16) int32_t local_code_chip_index[4];
int32_t local_code_chip_index_;
const int32x4_t zeros = vdupq_n_s32(0);
const float32x4_t code_length_chips_reg_f = vdupq_n_f32((float)code_length_chips);
const int32x4_t code_length_chips_reg_i = vdupq_n_s32((int32_t)code_length_chips);
int32x4_t local_code_chip_index_reg, aux_i, negatives, i;
float32x4_t aux, aux2, shifts_chips_reg, fi, c, j, cTrunc, base, indexn, reciprocal;
__VOLK_ATTR_ALIGNED(16) const float vec[4] = { 0.0f, 1.0f, 2.0f, 3.0f };
uint32x4_t igx;
reciprocal = vrecpeq_f32(code_length_chips_reg_f);
reciprocal = vmulq_f32(vrecpsq_f32(code_length_chips_reg_f, reciprocal), reciprocal);
reciprocal = vmulq_f32(vrecpsq_f32(code_length_chips_reg_f, reciprocal), reciprocal); // this refinement is required!
float32x4_t n0 = vld1q_f32((float*)vec);
for (int current_correlator_tap = 0; current_correlator_tap < num_out_vectors; current_correlator_tap++)
{
shifts_chips_reg = vdupq_n_f32((float)shifts_chips[current_correlator_tap]);
aux2 = vsubq_f32(shifts_chips_reg, rem_code_phase_chips_reg);
indexn = n0;
for(unsigned int n = 0; n < neon_iters; n++)
{
__builtin_prefetch(&_result[current_correlator_tap][4 * n + 3], 1, 0);
__builtin_prefetch(&local_code_chip_index[4]);
aux = vmulq_f32(code_phase_step_chips_reg, indexn);
aux = vaddq_f32(aux, aux2);
//floor
i = vcvtq_s32_f32(aux);
fi = vcvtq_f32_s32(i);
igx = vcgtq_f32(fi, aux);
j = vcvtq_f32_s32(vandq_s32(vreinterpretq_s32_u32(igx), ones));
aux = vsubq_f32(fi, j);
// fmod
c = vmulq_f32(aux, reciprocal);
i = vcvtq_s32_f32(c);
cTrunc = vcvtq_f32_s32(i);
base = vmulq_f32(cTrunc, code_length_chips_reg_f);
aux = vsubq_f32(aux, base);
local_code_chip_index_reg = vcvtq_s32_f32(aux);
negatives = vreinterpretq_s32_u32(vcltq_s32(local_code_chip_index_reg, zeros));
aux_i = vandq_s32(code_length_chips_reg_i, negatives);
local_code_chip_index_reg = vaddq_s32(local_code_chip_index_reg, aux_i);
vst1q_s32((int32_t*)local_code_chip_index, local_code_chip_index_reg);
for(unsigned int k = 0; k < 4; ++k)
{
_result[current_correlator_tap][n * 4 + k] = local_code[local_code_chip_index[k]];
}
indexn = vaddq_f32(indexn, fours);
}
for(unsigned int n = neon_iters * 4; n < num_points; n++)
{
__builtin_prefetch(&_result[current_correlator_tap][n], 1, 0);
// resample code for current tap
local_code_chip_index_ = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
//Take into account that in multitap correlators, the shifts can be negative!
if (local_code_chip_index_ < 0) local_code_chip_index_ += (int)code_length_chips * (abs(local_code_chip_index_) / code_length_chips + 1);
local_code_chip_index_ = local_code_chip_index_ % code_length_chips;
_result[current_correlator_tap][n] = local_code[local_code_chip_index_];
}
}
}
#endif
#endif /*INCLUDED_volk_gnsssdr_16ic_xn_resampler_16ic_xn_H*/
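All of the SIMD branches above implement the same per-sample index computation; a minimal scalar sketch (illustrative helper name, not part of the kernel API) of what each lane computes:

/* Chip index delivered at output sample n for a tap shifted by shift_chips:
floor() selects the chip, then the index is wrapped into [0, code_length_chips),
even when the shift makes it a large negative value. */
static int resample_index(float code_phase_step_chips, float shift_chips, float rem_code_phase_chips, int code_length_chips, int n)
{
int idx = (int)floor(code_phase_step_chips * (float)n + shift_chips - rem_code_phase_chips);
if (idx < 0) idx += code_length_chips * (abs(idx) / code_length_chips + 1);
return idx % code_length_chips;
}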

View File

@ -5,8 +5,8 @@
* <li> Javier Arribas, 2015. jarribas(at)cttc.es
* </ul>
*
* VOLK_GNSSSDR kernel that resamples N 16 bits integer short complex vectors using zero hold resample algorithm.
* It resamples a single GNSS local code signal replica into N vectors fractional-resampled and fractional-delayed
* (i.e. it creates the Early, Prompt, and Late code replicas)
*
* -------------------------------------------------------------------------
@ -43,16 +43,17 @@
*
* <b>Dispatcher Prototype</b>
* \code
* void volk_gnsssdr_16ic_xn_resampler_16ic_xn(lv_16sc_t** result, const lv_16sc_t* local_code, float rem_code_phase_chips, float code_phase_step_chips, float* shifts_chips, unsigned int code_length_chips, int num_out_vectors, unsigned int num_points)
* \endcode
*
* \b Inputs
* \li local_code: One of the vectors to be multiplied.
* \li rem_code_phase_chips: Remnant code phase [chips].
* \li code_phase_step_chips: Phase increment per sample [chips/sample].
* \li shifts_chips: Vector of floats that defines the spacing (in chips) between the replicas of \p local_code
* \li code_length_chips: Code length in chips.
* \li num_out_vectors: Number of output vectors.
* \li num_points: The number of data values to be in the resampled vector.
*
* \b Outputs
* \li result: Pointer to a vector of pointers where the results will be stored.
@ -63,26 +64,26 @@
#define INCLUDED_volk_gnsssdr_16ic_xn_resampler_16ic_xn_H
#include <math.h>
#include <stdlib.h>
#include <volk_gnsssdr/volk_gnsssdr_common.h>
#include <volk_gnsssdr/volk_gnsssdr_complex.h>
#ifdef LV_HAVE_GENERIC
static inline void volk_gnsssdr_16ic_xn_resampler_16ic_xn_generic(lv_16sc_t** result, const lv_16sc_t* local_code, float rem_code_phase_chips, float code_phase_step_chips, float* shifts_chips, unsigned int code_length_chips, int num_out_vectors, unsigned int num_points)
{
int local_code_chip_index;
//fesetround(FE_TONEAREST);
for (int current_correlator_tap = 0; current_correlator_tap < num_out_vectors; current_correlator_tap++)
{
for (int n = 0; n < num_points; n++)
{
// resample code for current tap
local_code_chip_index = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
//Take into account that in multitap correlators, the shifts can be negative!
if (local_code_chip_index < 0) local_code_chip_index += (int)code_length_chips * (abs(local_code_chip_index) / code_length_chips + 1);
local_code_chip_index = local_code_chip_index % code_length_chips;
result[current_correlator_tap][n] = local_code[local_code_chip_index];
}
}
}
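For reference, a hedged usage sketch of the new interface for a three-tap (Early, Prompt, Late) correlator; the sizes and phase values are made up for the example, local_code is assumed to hold one full code replica, and volk_gnsssdr_malloc / volk_gnsssdr_get_alignment / volk_gnsssdr_free are the library's aligned-memory helpers:

int num_out_vectors = 3;
unsigned int num_points = 4000;
float shifts_chips[3] = { -0.5f, 0.0f, 0.5f }; // Early, Prompt, Late spacing in chips
lv_16sc_t* result[3];
for (int v = 0; v < num_out_vectors; v++)
{
result[v] = (lv_16sc_t*)volk_gnsssdr_malloc(num_points * sizeof(lv_16sc_t), volk_gnsssdr_get_alignment());
}
volk_gnsssdr_16ic_xn_resampler_16ic_xn_generic(result, local_code, 0.25f, 0.511f, shifts_chips, 1023, num_out_vectors, num_points);
for (int v = 0; v < num_out_vectors; v++)
{
volk_gnsssdr_free(result[v]);
}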
@ -90,287 +91,501 @@ static inline void volk_gnsssdr_16ic_xn_resampler_16ic_xn_generic(lv_16sc_t** re
#endif /*LV_HAVE_GENERIC*/
#ifdef LV_HAVE_SSE4_1
#include <smmintrin.h>
static inline void volk_gnsssdr_16ic_xn_resampler_16ic_xn_a_sse4_1(lv_16sc_t** result, const lv_16sc_t* local_code, float rem_code_phase_chips, float code_phase_step_chips, float* shifts_chips, unsigned int code_length_chips, int num_out_vectors, unsigned int num_points)
{
lv_16sc_t** _result = result;
const unsigned int quarterPoints = num_points / 4;
const __m128 fours = _mm_set1_ps(4.0f);
const __m128 rem_code_phase_chips_reg = _mm_set_ps1(rem_code_phase_chips);
const __m128 code_phase_step_chips_reg = _mm_set_ps1(code_phase_step_chips);
__VOLK_ATTR_ALIGNED(16) int local_code_chip_index[4];
int local_code_chip_index_;
const __m128i zeros = _mm_setzero_si128();
const __m128 code_length_chips_reg_f = _mm_set_ps1((float)code_length_chips);
const __m128i code_length_chips_reg_i = _mm_set1_epi32((int)code_length_chips);
__m128i local_code_chip_index_reg, aux_i, negatives, i;
__m128 aux, aux2, shifts_chips_reg, c, cTrunc, base;
for (int current_correlator_tap = 0; current_correlator_tap < num_out_vectors; current_correlator_tap++)
{
shifts_chips_reg = _mm_set_ps1((float)shifts_chips[current_correlator_tap]);
aux2 = _mm_sub_ps(shifts_chips_reg, rem_code_phase_chips_reg);
__m128 indexn = _mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f);
for(unsigned int n = 0; n < quarterPoints; n++)
{
aux = _mm_mul_ps(code_phase_step_chips_reg, indexn);
aux = _mm_add_ps(aux, aux2);
// floor
aux = _mm_floor_ps(aux);
// fmod
c = _mm_div_ps(aux, code_length_chips_reg_f);
i = _mm_cvttps_epi32(c);
cTrunc = _mm_cvtepi32_ps(i);
base = _mm_mul_ps(cTrunc, code_length_chips_reg_f);
local_code_chip_index_reg = _mm_cvtps_epi32(_mm_sub_ps(aux, base));
negatives = _mm_cmplt_epi32(local_code_chip_index_reg, zeros);
aux_i = _mm_and_si128(code_length_chips_reg_i, negatives);
local_code_chip_index_reg = _mm_add_epi32(local_code_chip_index_reg, aux_i);
_mm_store_si128((__m128i*)local_code_chip_index, local_code_chip_index_reg);
for(unsigned int k = 0; k < 4; ++k)
{
_result[current_correlator_tap][n * 4 + k] = local_code[local_code_chip_index[k]];
}
indexn = _mm_add_ps(indexn, fours);
}
for(unsigned int n = quarterPoints * 4; n < num_points; n++)
{
// resample code for current tap
local_code_chip_index_ = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
//Take into account that in multitap correlators, the shifts can be negative!
if (local_code_chip_index_ < 0) local_code_chip_index_ += (int)code_length_chips * (abs(local_code_chip_index_) / code_length_chips + 1);
local_code_chip_index_ = local_code_chip_index_ % code_length_chips;
_result[current_correlator_tap][n] = local_code[local_code_chip_index_];
}
}
}
#endif
#ifdef LV_HAVE_SSE4_1
#include <smmintrin.h>
static inline void volk_gnsssdr_16ic_xn_resampler_16ic_xn_u_sse4_1(lv_16sc_t** result, const lv_16sc_t* local_code, float rem_code_phase_chips, float code_phase_step_chips, float* shifts_chips, unsigned int code_length_chips, int num_out_vectors, unsigned int num_points)
{
lv_16sc_t** _result = result;
const unsigned int quarterPoints = num_points / 4;
const __m128 fours = _mm_set1_ps(4.0f);
const __m128 rem_code_phase_chips_reg = _mm_set_ps1(rem_code_phase_chips);
const __m128 code_phase_step_chips_reg = _mm_set_ps1(code_phase_step_chips);
__VOLK_ATTR_ALIGNED(16) int local_code_chip_index[4];
int local_code_chip_index_;
const __m128i zeros = _mm_setzero_si128();
const __m128 code_length_chips_reg_f = _mm_set_ps1((float)code_length_chips);
const __m128i code_length_chips_reg_i = _mm_set1_epi32((int)code_length_chips);
__m128i local_code_chip_index_reg, aux_i, negatives, i;
__m128 aux, aux2, shifts_chips_reg, c, cTrunc, base;
for (int current_correlator_tap = 0; current_correlator_tap < num_out_vectors; current_correlator_tap++)
{
shifts_chips_reg = _mm_set_ps1((float)shifts_chips[current_correlator_tap]);
aux2 = _mm_sub_ps(shifts_chips_reg, rem_code_phase_chips_reg);
__m128 indexn = _mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f);
for(unsigned int n = 0; n < quarterPoints; n++)
{
aux = _mm_mul_ps(code_phase_step_chips_reg, indexn);
aux = _mm_add_ps(aux, aux2);
// floor
aux = _mm_floor_ps(aux);
// fmod
c = _mm_div_ps(aux, code_length_chips_reg_f);
i = _mm_cvttps_epi32(c);
cTrunc = _mm_cvtepi32_ps(i);
base = _mm_mul_ps(cTrunc, code_length_chips_reg_f);
local_code_chip_index_reg = _mm_cvtps_epi32(_mm_sub_ps(aux, base));
negatives = _mm_cmplt_epi32(local_code_chip_index_reg, zeros);
aux_i = _mm_and_si128(code_length_chips_reg_i, negatives);
local_code_chip_index_reg = _mm_add_epi32(local_code_chip_index_reg, aux_i);
_mm_store_si128((__m128i*)local_code_chip_index, local_code_chip_index_reg);
for(unsigned int k = 0; k < 4; ++k)
{
_result[current_correlator_tap][n * 4 + k] = local_code[local_code_chip_index[k]];
}
indexn = _mm_add_ps(indexn, fours);
}
for(unsigned int n = quarterPoints * 4; n < num_points; n++)
{
// resample code for current tap
local_code_chip_index_ = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
//Take into account that in multitap correlators, the shifts can be negative!
if (local_code_chip_index_ < 0) local_code_chip_index_ += (int)code_length_chips * (abs(local_code_chip_index_) / code_length_chips + 1);
local_code_chip_index_ = local_code_chip_index_ % code_length_chips;
_result[current_correlator_tap][n] = local_code[local_code_chip_index_];
}
}
}
#endif
#ifdef LV_HAVE_SSE3
#include <pmmintrin.h>
static inline void volk_gnsssdr_16ic_xn_resampler_16ic_xn_a_sse3(lv_16sc_t** result, const lv_16sc_t* local_code, float rem_code_phase_chips, float code_phase_step_chips, float* shifts_chips, unsigned int code_length_chips, int num_out_vectors, unsigned int num_points)
{
lv_16sc_t** _result = result;
const unsigned int quarterPoints = num_points / 4;
const __m128 ones = _mm_set1_ps(1.0f);
const __m128 fours = _mm_set1_ps(4.0f);
const __m128 rem_code_phase_chips_reg = _mm_set_ps1(rem_code_phase_chips);
const __m128 code_phase_step_chips_reg = _mm_set_ps1(code_phase_step_chips);
__VOLK_ATTR_ALIGNED(16) int local_code_chip_index[4];
int local_code_chip_index_;
const __m128i zeros = _mm_setzero_si128();
const __m128 code_length_chips_reg_f = _mm_set_ps1((float)code_length_chips);
const __m128i code_length_chips_reg_i = _mm_set1_epi32((int)code_length_chips);
__m128i local_code_chip_index_reg, aux_i, negatives, i;
__m128 aux, aux2, shifts_chips_reg, fi, igx, j, c, cTrunc, base;
for (int current_correlator_tap = 0; current_correlator_tap < num_out_vectors; current_correlator_tap++)
{
shifts_chips_reg = _mm_set_ps1((float)shifts_chips[current_correlator_tap]);
aux2 = _mm_sub_ps(shifts_chips_reg, rem_code_phase_chips_reg);
__m128 indexn = _mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f);
for(unsigned int n = 0; n < quarterPoints; n++)
{
aux = _mm_mul_ps(code_phase_step_chips_reg, indexn);
aux = _mm_add_ps(aux, aux2);
// floor
i = _mm_cvttps_epi32(aux);
fi = _mm_cvtepi32_ps(i);
igx = _mm_cmpgt_ps(fi, aux);
j = _mm_and_ps(igx, ones);
aux = _mm_sub_ps(fi, j);
// fmod
c = _mm_div_ps(aux, code_length_chips_reg_f);
i = _mm_cvttps_epi32(c);
cTrunc = _mm_cvtepi32_ps(i);
base = _mm_mul_ps(cTrunc, code_length_chips_reg_f);
local_code_chip_index_reg = _mm_cvtps_epi32(_mm_sub_ps(aux, base));
negatives = _mm_cmplt_epi32(local_code_chip_index_reg, zeros);
aux_i = _mm_and_si128(code_length_chips_reg_i, negatives);
local_code_chip_index_reg = _mm_add_epi32(local_code_chip_index_reg, aux_i);
_mm_store_si128((__m128i*)local_code_chip_index, local_code_chip_index_reg);
for(unsigned int k = 0; k < 4; ++k)
{
_result[current_correlator_tap][n * 4 + k] = local_code[local_code_chip_index[k]];
}
indexn = _mm_add_ps(indexn, fours);
}
for(unsigned int n = quarterPoints * 4; n < num_points; n++)
{
// resample code for current tap
local_code_chip_index_ = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
//Take into account that in multitap correlators, the shifts can be negative!
if (local_code_chip_index_ < 0) local_code_chip_index_ += (int)code_length_chips * (abs(local_code_chip_index_) / code_length_chips + 1);
local_code_chip_index_ = local_code_chip_index_ % code_length_chips;
_result[current_correlator_tap][n] = local_code[local_code_chip_index_];
}
}
}
#endif
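SSE3 lacks _mm_floor_ps, so the kernel above emulates floor() in three steps: truncate toward zero, compare, and subtract 1 in the lanes where truncation rounded up (negative, non-integer inputs). The same steps as a scalar model (illustrative helper name, not part of the kernel):

static float floor_via_trunc(float x)
{
float fi = (float)(int)x; // i = _mm_cvttps_epi32(aux); fi = _mm_cvtepi32_ps(i)
float j = (fi > x) ? 1.0f : 0.0f; // igx = _mm_cmpgt_ps(fi, aux); j = _mm_and_ps(igx, ones)
return fi - j; // aux = _mm_sub_ps(fi, j)
}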
#ifdef LV_HAVE_SSE3
#include <pmmintrin.h>
static inline void volk_gnsssdr_16ic_xn_resampler_16ic_xn_u_sse3(lv_16sc_t** result, const lv_16sc_t* local_code, float rem_code_phase_chips, float code_phase_step_chips, float* shifts_chips, unsigned int code_length_chips, int num_out_vectors, unsigned int num_points)
{
lv_16sc_t** _result = result;
const unsigned int quarterPoints = num_points / 4;
const __m128 ones = _mm_set1_ps(1.0f);
const __m128 fours = _mm_set1_ps(4.0f);
const __m128 rem_code_phase_chips_reg = _mm_set_ps1(rem_code_phase_chips);
const __m128 code_phase_step_chips_reg = _mm_set_ps1(code_phase_step_chips);
__VOLK_ATTR_ALIGNED(16) int local_code_chip_index[4];
int local_code_chip_index_;
const __m128i zeros = _mm_setzero_si128();
const __m128 code_length_chips_reg_f = _mm_set_ps1((float)code_length_chips);
const __m128i code_length_chips_reg_i = _mm_set1_epi32((int)code_length_chips);
__m128i local_code_chip_index_reg, aux_i, negatives, i;
__m128 aux, aux2, shifts_chips_reg, fi, igx, j, c, cTrunc, base;
for (int current_correlator_tap = 0; current_correlator_tap < num_out_vectors; current_correlator_tap++)
{
shifts_chips_reg = _mm_set_ps1((float)shifts_chips[current_correlator_tap]);
aux2 = _mm_sub_ps(shifts_chips_reg, rem_code_phase_chips_reg);
__m128 indexn = _mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f);
for(unsigned int n = 0; n < quarterPoints; n++)
{
aux = _mm_mul_ps(code_phase_step_chips_reg, indexn);
aux = _mm_add_ps(aux, aux2);
// floor
i = _mm_cvttps_epi32(aux);
fi = _mm_cvtepi32_ps(i);
igx = _mm_cmpgt_ps(fi, aux);
j = _mm_and_ps(igx, ones);
aux = _mm_sub_ps(fi, j);
// fmod
c = _mm_div_ps(aux, code_length_chips_reg_f);
i = _mm_cvttps_epi32(c);
cTrunc = _mm_cvtepi32_ps(i);
base = _mm_mul_ps(cTrunc, code_length_chips_reg_f);
local_code_chip_index_reg = _mm_cvtps_epi32(_mm_sub_ps(aux, base));
negatives = _mm_cmplt_epi32(local_code_chip_index_reg, zeros);
aux_i = _mm_and_si128(code_length_chips_reg_i, negatives);
local_code_chip_index_reg = _mm_add_epi32(local_code_chip_index_reg, aux_i);
_mm_store_si128((__m128i*)local_code_chip_index, local_code_chip_index_reg);
for(unsigned int k = 0; k < 4; ++k)
{
_result[current_correlator_tap][n * 4 + k] = local_code[local_code_chip_index[k]];
}
indexn = _mm_add_ps(indexn, fours);
}
for(unsigned int n = quarterPoints * 4; n < num_points; n++)
{
// resample code for current tap
local_code_chip_index_ = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
//Take into account that in multitap correlators, the shifts can be negative!
if (local_code_chip_index_ < 0) local_code_chip_index_ += (int)code_length_chips * (abs(local_code_chip_index_) / code_length_chips + 1);
local_code_chip_index_ = local_code_chip_index_ % code_length_chips;
_result[current_correlator_tap][n] = local_code[local_code_chip_index_];
}
}
}
#endif
#ifdef LV_HAVE_AVX
#include <immintrin.h>
static inline void volk_gnsssdr_16ic_xn_resampler_16ic_xn_a_avx(lv_16sc_t** result, const lv_16sc_t* local_code, float rem_code_phase_chips, float code_phase_step_chips, float* shifts_chips, unsigned int code_length_chips, int num_out_vectors, unsigned int num_points)
{
lv_16sc_t** _result = result;
const unsigned int avx_iters = num_points / 8;
const __m256 eights = _mm256_set1_ps(8.0f);
const __m256 rem_code_phase_chips_reg = _mm256_set1_ps(rem_code_phase_chips);
const __m256 code_phase_step_chips_reg = _mm256_set1_ps(code_phase_step_chips);
__VOLK_ATTR_ALIGNED(32) int local_code_chip_index[8];
int local_code_chip_index_;
const __m256 zeros = _mm256_setzero_ps();
const __m256 code_length_chips_reg_f = _mm256_set1_ps((float)code_length_chips);
const __m256 n0 = _mm256_set_ps(7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f, 0.0f);
__m256i local_code_chip_index_reg, i;
__m256 aux, aux2, aux3, shifts_chips_reg, c, cTrunc, base, negatives, indexn;
for (int current_correlator_tap = 0; current_correlator_tap < num_out_vectors; current_correlator_tap++)
{
shifts_chips_reg = _mm256_set1_ps((float)shifts_chips[current_correlator_tap]);
aux2 = _mm256_sub_ps(shifts_chips_reg, rem_code_phase_chips_reg);
indexn = n0;
for(unsigned int n = 0; n < avx_iters; n++)
{
__builtin_prefetch(&_result[current_correlator_tap][8 * n + 7], 1, 0);
__builtin_prefetch(&local_code_chip_index[8], 1, 3);
aux = _mm256_mul_ps(code_phase_step_chips_reg, indexn);
aux = _mm256_add_ps(aux, aux2);
// floor
aux = _mm256_floor_ps(aux);
// fmod
c = _mm256_div_ps(aux, code_length_chips_reg_f);
i = _mm256_cvttps_epi32(c);
cTrunc = _mm256_cvtepi32_ps(i);
base = _mm256_mul_ps(cTrunc, code_length_chips_reg_f);
local_code_chip_index_reg = _mm256_cvttps_epi32(_mm256_sub_ps(aux, base));
// no negatives
c = _mm256_cvtepi32_ps(local_code_chip_index_reg);
negatives = _mm256_cmp_ps(c, zeros, 0x01 );
aux3 = _mm256_and_ps(code_length_chips_reg_f, negatives);
aux = _mm256_add_ps(c, aux3);
local_code_chip_index_reg = _mm256_cvttps_epi32(aux);
_mm256_store_si256((__m256i*)local_code_chip_index, local_code_chip_index_reg);
for(unsigned int k = 0; k < 8; ++k)
{
_result[current_correlator_tap][n * 8 + k] = local_code[local_code_chip_index[k]];
}
indexn = _mm256_add_ps(indexn, eights);
}
}
_mm256_zeroupper();
for (int current_correlator_tap = 0; current_correlator_tap < num_out_vectors; current_correlator_tap++)
{
for(unsigned int n = avx_iters * 8; n < num_points; n++)
{
// resample code for current tap
local_code_chip_index_ = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
//Take into account that in multitap correlators, the shifts can be negative!
if (local_code_chip_index_ < 0) local_code_chip_index_ += (int)code_length_chips * (abs(local_code_chip_index_) / code_length_chips + 1);
local_code_chip_index_ = local_code_chip_index_ % code_length_chips;
_result[current_correlator_tap][n] = local_code[local_code_chip_index_];
}
}
}
#endif
#ifdef LV_HAVE_AVX
#include <immintrin.h>
static inline void volk_gnsssdr_16ic_xn_resampler_16ic_xn_u_avx(lv_16sc_t** result, const lv_16sc_t* local_code, float rem_code_phase_chips, float code_phase_step_chips, float* shifts_chips, unsigned int code_length_chips, int num_out_vectors, unsigned int num_points)
{
lv_16sc_t** _result = result;
const unsigned int avx_iters = num_points / 8;
const __m256 eights = _mm256_set1_ps(8.0f);
const __m256 rem_code_phase_chips_reg = _mm256_set1_ps(rem_code_phase_chips);
const __m256 code_phase_step_chips_reg = _mm256_set1_ps(code_phase_step_chips);
__VOLK_ATTR_ALIGNED(32) int local_code_chip_index[8];
int local_code_chip_index_;
const __m256 zeros = _mm256_setzero_ps();
const __m256 code_length_chips_reg_f = _mm256_set1_ps((float)code_length_chips);
const __m256 n0 = _mm256_set_ps(7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f, 0.0f);
__m256i local_code_chip_index_reg, i;
__m256 aux, aux2, aux3, shifts_chips_reg, c, cTrunc, base, negatives, indexn;
for (int current_correlator_tap = 0; current_correlator_tap < num_out_vectors; current_correlator_tap++)
{
shifts_chips_reg = _mm256_set1_ps((float)shifts_chips[current_correlator_tap]);
aux2 = _mm256_sub_ps(shifts_chips_reg, rem_code_phase_chips_reg);
indexn = n0;
for(unsigned int n = 0; n < avx_iters; n++)
{
__builtin_prefetch(&_result[current_correlator_tap][8 * n + 7], 1, 0);
__builtin_prefetch(&local_code_chip_index[8], 1, 3);
aux = _mm256_mul_ps(code_phase_step_chips_reg, indexn);
aux = _mm256_add_ps(aux, aux2);
// floor
aux = _mm256_floor_ps(aux);
// fmod
c = _mm256_div_ps(aux, code_length_chips_reg_f);
i = _mm256_cvttps_epi32(c);
cTrunc = _mm256_cvtepi32_ps(i);
base = _mm256_mul_ps(cTrunc, code_length_chips_reg_f);
local_code_chip_index_reg = _mm256_cvttps_epi32(_mm256_sub_ps(aux, base));
// no negatives
c = _mm256_cvtepi32_ps(local_code_chip_index_reg);
negatives = _mm256_cmp_ps(c, zeros, 0x01 );
aux3 = _mm256_and_ps(code_length_chips_reg_f, negatives);
aux = _mm256_add_ps(c, aux3);
local_code_chip_index_reg = _mm256_cvttps_epi32(aux);
_mm256_store_si256((__m256i*)local_code_chip_index, local_code_chip_index_reg);
for(unsigned int k = 0; k < 8; ++k)
{
_result[current_correlator_tap][n * 8 + k] = local_code[local_code_chip_index[k]];
}
indexn = _mm256_add_ps(indexn, eights);
}
}
_mm256_zeroupper();
for (int current_correlator_tap = 0; current_correlator_tap < num_out_vectors; current_correlator_tap++)
{
for(unsigned int n = avx_iters * 8; n < num_points; n++)
{
// resample code for current tap
local_code_chip_index_ = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
//Take into account that in multitap correlators, the shifts can be negative!
if (local_code_chip_index_ < 0) local_code_chip_index_ += (int)code_length_chips * (abs(local_code_chip_index_) / code_length_chips + 1);
local_code_chip_index_ = local_code_chip_index_ % code_length_chips;
_result[current_correlator_tap][n] = local_code[local_code_chip_index_];
}
}
}
#endif
#ifdef LV_HAVE_NEON
#include <arm_neon.h>
static inline void volk_gnsssdr_16ic_xn_resampler_16ic_xn_neon(lv_16sc_t** result, const lv_16sc_t* local_code, float rem_code_phase_chips, float code_phase_step_chips, float* shifts_chips, unsigned int code_length_chips, int num_out_vectors, unsigned int num_points)
{
lv_16sc_t** _result = result;
const unsigned int neon_iters = num_points / 4;
const int32x4_t ones = vdupq_n_s32(1);
const float32x4_t fours = vdupq_n_f32(4.0f);
const float32x4_t rem_code_phase_chips_reg = vdupq_n_f32(rem_code_phase_chips);
const float32x4_t code_phase_step_chips_reg = vdupq_n_f32(code_phase_step_chips);
__VOLK_ATTR_ALIGNED(16) int32_t local_code_chip_index[4];
int32_t local_code_chip_index_;
const int32x4_t zeros = vdupq_n_s32(0);
const float32x4_t code_length_chips_reg_f = vdupq_n_f32((float)code_length_chips);
const int32x4_t code_length_chips_reg_i = vdupq_n_s32((int32_t)code_length_chips);
int32x4_t local_code_chip_index_reg, aux_i, negatives, i;
float32x4_t aux, aux2, shifts_chips_reg, fi, c, j, cTrunc, base, indexn, reciprocal;
__VOLK_ATTR_ALIGNED(16) const float vec[4] = { 0.0f, 1.0f, 2.0f, 3.0f };
uint32x4_t igx;
reciprocal = vrecpeq_f32(code_length_chips_reg_f);
reciprocal = vmulq_f32(vrecpsq_f32(code_length_chips_reg_f, reciprocal), reciprocal);
reciprocal = vmulq_f32(vrecpsq_f32(code_length_chips_reg_f, reciprocal), reciprocal); // this refinement is required!
float32x4_t n0 = vld1q_f32((float*)vec);
for (int current_correlator_tap = 0; current_correlator_tap < num_out_vectors; current_correlator_tap++)
{
shifts_chips_reg = vdupq_n_f32((float)shifts_chips[current_correlator_tap]);
aux2 = vsubq_f32(shifts_chips_reg, rem_code_phase_chips_reg);
indexn = n0;
for(unsigned int n = 0; n < neon_iters; n++)
{
__builtin_prefetch(&_result[current_correlator_tap][4 * n + 3], 1, 0);
__builtin_prefetch(&local_code_chip_index[4]);
aux = vmulq_f32(code_phase_step_chips_reg, indexn);
aux = vaddq_f32(aux, aux2);
// floor
i = vcvtq_s32_f32(aux);
fi = vcvtq_f32_s32(i);
igx = vcgtq_f32(fi, aux);
j = vcvtq_f32_s32(vandq_s32(vreinterpretq_s32_u32(igx), ones));
aux = vsubq_f32(fi, j);
// fmod
c = vmulq_f32(aux, reciprocal);
i = vcvtq_s32_f32(c);
cTrunc = vcvtq_f32_s32(i);
base = vmulq_f32(cTrunc, code_length_chips_reg_f);
aux = vsubq_f32(aux, base);
local_code_chip_index_reg = vcvtq_s32_f32(aux);
negatives = vreinterpretq_s32_u32(vcltq_s32(local_code_chip_index_reg, zeros));
aux_i = vandq_s32(code_length_chips_reg_i, negatives);
local_code_chip_index_reg = vaddq_s32(local_code_chip_index_reg, aux_i);
vst1q_s32((int32_t*)local_code_chip_index, local_code_chip_index_reg);
for(unsigned int k = 0; k < 4; ++k)
{
_result[current_correlator_tap][n * 4 + k] = local_code[local_code_chip_index[k]];
}
indexn = vaddq_f32(indexn, fours);
}
for(unsigned int n = neon_iters * 4; n < num_points; n++)
{
__builtin_prefetch(&_result[current_correlator_tap][n], 1, 0);
// resample code for current tap
local_code_chip_index_ = (int)floor(code_phase_step_chips * (float)n + shifts_chips[current_correlator_tap] - rem_code_phase_chips);
//Take into account that in multitap correlators, the shifts can be negative!
if (local_code_chip_index_ < 0) local_code_chip_index_ += (int)code_length_chips * (abs(local_code_chip_index_) / code_length_chips + 1);
local_code_chip_index_ = local_code_chip_index_ % code_length_chips;
_result[current_correlator_tap][n] = local_code[local_code_chip_index_];
}
}
}
#endif
#endif /*INCLUDED_volk_gnsssdr_16ic_xn_resampler_16ic_xn_H*/
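A note on the NEON fmod step above: vrecpeq_f32 returns only a coarse estimate of 1/code_length_chips, and each vrecpsq_f32 + vmulq_f32 pair applies one Newton-Raphson step r = r * (2 - d * r); the in-code comment marks the second step as required so that truncating aux * reciprocal matches trunc(aux / code_length_chips). A scalar model (hypothetical helper; the seed argument stands in for the hardware estimate):

static float refined_reciprocal(float d, float coarse_estimate)
{
float r = coarse_estimate; // stand-in for vrecpeq_f32 (only a few valid bits)
r = r * (2.0f - d * r); // vrecpsq_f32(d, r) computes 2 - d*r; vmulq_f32 applies it
r = r * (2.0f - d * r); // second refinement, per the "this refinement is required!" comment
return r;
}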

View File

@ -0,0 +1,376 @@
/*!
* \file volk_gnsssdr_16ic_xn_resampler_fast_16ic_xn.h
* \brief VOLK_GNSSSDR kernel: Resamples N 16 bits integer short complex vectors using zero hold resample algorithm.
* \authors <ul>
* <li> Javier Arribas, 2015. jarribas(at)cttc.es
* </ul>
*
* VOLK_GNSSSDR kernel that resamples N 16 bits integer short complex vectors using zero hold resample algorithm.
* It is optimized to resample a single GNSS local code signal replica into N vectors fractional-resampled and fractional-delayed
* (i.e. it creates the Early, Prompt, and Late code replicas)
*
* -------------------------------------------------------------------------
*
* Copyright (C) 2010-2015 (see AUTHORS file for a list of contributors)
*
* GNSS-SDR is a software defined Global Navigation
* Satellite Systems receiver
*
* This file is part of GNSS-SDR.
*
* GNSS-SDR is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GNSS-SDR is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNSS-SDR. If not, see <http://www.gnu.org/licenses/>.
*
* -------------------------------------------------------------------------
*/
/*!
* \page volk_gnsssdr_16ic_xn_resampler_fast_16ic_xn
*
* \b Overview
*
* Resamples a complex vector (16-bit integer each component), providing \p num_out_vectors outputs.
*
* <b>Dispatcher Prototype</b>
* \code
* void volk_gnsssdr_16ic_xn_resampler_fast_16ic_xn(lv_16sc_t** result, const lv_16sc_t* local_code, float* rem_code_phase_chips, float code_phase_step_chips, unsigned int code_length_chips, int num_out_vectors, unsigned int num_output_samples)
* \endcode
*
* \b Inputs
* \li local_code: One of the vectors to be multiplied.
* \li rem_code_phase_chips: Remnant code phase [chips].
* \li code_phase_step_chips: Phase increment per sample [chips/sample].
* \li code_length_chips: Code length in chips.
* \li num_out_vectors: Number of output vectors.
* \li num_output_samples: The number of data values to be in the resampled vector.
*
* \b Outputs
* \li result: Pointer to a vector of pointers where the results will be stored.
*
*/
#ifndef INCLUDED_volk_gnsssdr_16ic_xn_resampler_fast_16ic_xn_H
#define INCLUDED_volk_gnsssdr_16ic_xn_resampler_fast_16ic_xn_H
#include <math.h>
#include <volk_gnsssdr/volk_gnsssdr_common.h>
#include <volk_gnsssdr/volk_gnsssdr_complex.h>
#ifdef LV_HAVE_GENERIC
static inline void volk_gnsssdr_16ic_xn_resampler_fast_16ic_xn_generic(lv_16sc_t** result, const lv_16sc_t* local_code, float* rem_code_phase_chips, float code_phase_step_chips, unsigned int code_length_chips, int num_out_vectors, unsigned int num_output_samples)
{
int local_code_chip_index;
//fesetround(FE_TONEAREST);
for (int current_vector = 0; current_vector < num_out_vectors; current_vector++)
{
for (unsigned int n = 0; n < num_output_samples; n++)
{
// resample code for current tap
local_code_chip_index = round(code_phase_step_chips * (float)(n) + rem_code_phase_chips[current_vector] - 0.5f);
if (local_code_chip_index < 0.0) local_code_chip_index += code_length_chips;
if (local_code_chip_index > (code_length_chips - 1)) local_code_chip_index -= code_length_chips;
//std::cout<<"g["<<n<<"]="<<code_phase_step_chips*static_cast<float>(n) + rem_code_phase_chips-0.5f<<","<<local_code_chip_index<<" ";
result[current_vector][n] = local_code[local_code_chip_index];
}
}
}
#endif /*LV_HAVE_GENERIC*/
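Unlike the floor-based kernels above, this fast variant rounds to nearest (via the -0.5 chip offset) and corrects at most one code-period wrap in each direction; that single-wrap assumption is one source of the input constraints mentioned for the _fast_ kernels. A scalar model of the generic branch (illustrative name; math.h is already included by this header):

static int fast_resample_index(float code_phase_step_chips, float rem_code_phase_chips, int code_length_chips, int n)
{
int idx = (int)round(code_phase_step_chips * (float)n + rem_code_phase_chips - 0.5f);
if (idx < 0) idx += code_length_chips; // single wrap only
if (idx > code_length_chips - 1) idx -= code_length_chips; // single wrap only
return idx;
}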
#ifdef LV_HAVE_SSE2
#include <emmintrin.h>
static inline void volk_gnsssdr_16ic_xn_resampler_fast_16ic_xn_a_sse2(lv_16sc_t** result, const lv_16sc_t* local_code, float* rem_code_phase_chips ,float code_phase_step_chips, unsigned int code_length_chips, int num_out_vectors, unsigned int num_output_samples)
{
_MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);//_MM_ROUND_NEAREST, _MM_ROUND_DOWN, _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO
unsigned int number;
const unsigned int quarterPoints = num_output_samples / 4;
lv_16sc_t** _result = result;
__VOLK_ATTR_ALIGNED(16) int local_code_chip_index[4];
float tmp_rem_code_phase_chips;
__m128 _rem_code_phase,_code_phase_step_chips;
__m128i _code_length_chips,_code_length_chips_minus1;
__m128 _code_phase_out,_code_phase_out_with_offset;
_code_phase_step_chips = _mm_load1_ps(&code_phase_step_chips); //load float to all four float values in m128 register
__VOLK_ATTR_ALIGNED(16) int four_times_code_length_chips_minus1[4];
four_times_code_length_chips_minus1[0] = code_length_chips - 1;
four_times_code_length_chips_minus1[1] = code_length_chips - 1;
four_times_code_length_chips_minus1[2] = code_length_chips - 1;
four_times_code_length_chips_minus1[3] = code_length_chips - 1;
__VOLK_ATTR_ALIGNED(16) int four_times_code_length_chips[4];
four_times_code_length_chips[0] = code_length_chips;
four_times_code_length_chips[1] = code_length_chips;
four_times_code_length_chips[2] = code_length_chips;
four_times_code_length_chips[3] = code_length_chips;
_code_length_chips = _mm_load_si128((__m128i*)&four_times_code_length_chips); //load the code length into all four values of the m128i register
_code_length_chips_minus1 = _mm_load_si128((__m128i*)&four_times_code_length_chips_minus1); //load code length - 1 into all four values of the m128i register
__m128i negative_indexes, overflow_indexes,_code_phase_out_int, _code_phase_out_int_neg,_code_phase_out_int_over;
__m128i zero = _mm_setzero_si128();
__VOLK_ATTR_ALIGNED(16) float init_idx_float[4] = { 0.0f, 1.0f, 2.0f, 3.0f };
__m128 _4output_index = _mm_load_ps(init_idx_float);
__VOLK_ATTR_ALIGNED(16) float init_4constant_float[4] = { 4.0f, 4.0f, 4.0f, 4.0f };
__m128 _4constant_float = _mm_load_ps(init_4constant_float);
int current_vector = 0;
int sample_idx = 0;
for(number = 0; number < quarterPoints; number++)
{
//common to all outputs
_code_phase_out = _mm_mul_ps(_code_phase_step_chips, _4output_index); //compute the code phase point with the phase step
//output vector dependent (different code phase offset)
for(current_vector = 0; current_vector < num_out_vectors; current_vector++)
{
tmp_rem_code_phase_chips = rem_code_phase_chips[current_vector] - 0.5f; // adjust offset to perform correct rounding (chip transition at 0)
_rem_code_phase = _mm_load1_ps(&tmp_rem_code_phase_chips); //load float to all four float values in m128 register
_code_phase_out_with_offset = _mm_add_ps(_code_phase_out, _rem_code_phase); //add the phase offset
_code_phase_out_int = _mm_cvtps_epi32(_code_phase_out_with_offset); //convert to integer
negative_indexes = _mm_cmplt_epi32(_code_phase_out_int, zero); //test for negative values
_code_phase_out_int_neg = _mm_add_epi32(_code_phase_out_int, _code_length_chips); //the negative values branch
_code_phase_out_int_neg = _mm_xor_si128(_code_phase_out_int, _mm_and_si128( negative_indexes, _mm_xor_si128( _code_phase_out_int_neg, _code_phase_out_int )));
overflow_indexes = _mm_cmpgt_epi32(_code_phase_out_int_neg, _code_length_chips_minus1); //test for overflow values
_code_phase_out_int_over = _mm_sub_epi32(_code_phase_out_int_neg, _code_length_chips); //the negative values branch
_code_phase_out_int_over = _mm_xor_si128(_code_phase_out_int_neg, _mm_and_si128( overflow_indexes, _mm_xor_si128( _code_phase_out_int_over, _code_phase_out_int_neg )));
_mm_store_si128((__m128i*)local_code_chip_index, _code_phase_out_int_over); // Store the results back
//todo: optimize the local code lookup table with intrinsics, if possible
_result[current_vector][sample_idx] = local_code[local_code_chip_index[0]];
_result[current_vector][sample_idx + 1] = local_code[local_code_chip_index[1]];
_result[current_vector][sample_idx + 2] = local_code[local_code_chip_index[2]];
_result[current_vector][sample_idx + 3] = local_code[local_code_chip_index[3]];
}
_4output_index = _mm_add_ps(_4output_index, _4constant_float);
sample_idx += 4;
}
for(number = quarterPoints * 4; number < num_output_samples; number++)
{
for(current_vector = 0; current_vector < num_out_vectors; current_vector++)
{
local_code_chip_index[0] = (int)(code_phase_step_chips * (float)(number) + rem_code_phase_chips[current_vector]);
if (local_code_chip_index[0] < 0.0) local_code_chip_index[0] += code_length_chips - 1;
if (local_code_chip_index[0] > (code_length_chips - 1)) local_code_chip_index[0] -= code_length_chips;
_result[current_vector][number] = local_code[local_code_chip_index[0]];
}
}
}
#endif /* LV_HAVE_SSE2 */
#ifdef LV_HAVE_SSE2
#include <emmintrin.h>
static inline void volk_gnsssdr_16ic_xn_resampler_fast_16ic_xn_u_sse2(lv_16sc_t** result, const lv_16sc_t* local_code, float* rem_code_phase_chips ,float code_phase_step_chips, unsigned int code_length_chips, int num_out_vectors, unsigned int num_output_samples)
{
_MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);//_MM_ROUND_NEAREST, _MM_ROUND_DOWN, _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO
unsigned int number;
const unsigned int quarterPoints = num_output_samples / 4;
lv_16sc_t** _result = result;
__VOLK_ATTR_ALIGNED(16) int local_code_chip_index[4];
float tmp_rem_code_phase_chips;
__m128 _rem_code_phase,_code_phase_step_chips;
__m128i _code_length_chips,_code_length_chips_minus1;
__m128 _code_phase_out,_code_phase_out_with_offset;
_code_phase_step_chips = _mm_load1_ps(&code_phase_step_chips); //load float to all four float values in m128 register
__VOLK_ATTR_ALIGNED(16) int four_times_code_length_chips_minus1[4];
four_times_code_length_chips_minus1[0] = code_length_chips - 1;
four_times_code_length_chips_minus1[1] = code_length_chips - 1;
four_times_code_length_chips_minus1[2] = code_length_chips - 1;
four_times_code_length_chips_minus1[3] = code_length_chips - 1;
__VOLK_ATTR_ALIGNED(16) int four_times_code_length_chips[4];
four_times_code_length_chips[0] = code_length_chips;
four_times_code_length_chips[1] = code_length_chips;
four_times_code_length_chips[2] = code_length_chips;
four_times_code_length_chips[3] = code_length_chips;
_code_length_chips = _mm_loadu_si128((__m128i*)&four_times_code_length_chips); //load the code length into all four values of the m128i register
_code_length_chips_minus1 = _mm_loadu_si128((__m128i*)&four_times_code_length_chips_minus1); //load code length - 1 into all four values of the m128i register
__m128i negative_indexes, overflow_indexes,_code_phase_out_int, _code_phase_out_int_neg,_code_phase_out_int_over;
__m128i zero = _mm_setzero_si128();
__VOLK_ATTR_ALIGNED(16) float init_idx_float[4] = { 0.0f, 1.0f, 2.0f, 3.0f };
__m128 _4output_index = _mm_loadu_ps(init_idx_float);
__VOLK_ATTR_ALIGNED(16) float init_4constant_float[4] = { 4.0f, 4.0f, 4.0f, 4.0f };
__m128 _4constant_float = _mm_loadu_ps(init_4constant_float);
int current_vector = 0;
int sample_idx = 0;
for(number = 0; number < quarterPoints; number++)
{
//common to all outputs
_code_phase_out = _mm_mul_ps(_code_phase_step_chips, _4output_index); //compute the code phase point with the phase step
//output vector dependent (different code phase offset)
for(current_vector = 0; current_vector < num_out_vectors; current_vector++)
{
tmp_rem_code_phase_chips = rem_code_phase_chips[current_vector] - 0.5f; // adjust offset to perform correct rounding (chip transition at 0)
_rem_code_phase = _mm_load1_ps(&tmp_rem_code_phase_chips); //load float to all four float values in m128 register
_code_phase_out_with_offset = _mm_add_ps(_code_phase_out, _rem_code_phase); //add the phase offset
_code_phase_out_int = _mm_cvtps_epi32(_code_phase_out_with_offset); //convert to integer
negative_indexes = _mm_cmplt_epi32(_code_phase_out_int, zero); //test for negative values
_code_phase_out_int_neg = _mm_add_epi32(_code_phase_out_int, _code_length_chips); //the negative values branch
_code_phase_out_int_neg = _mm_xor_si128(_code_phase_out_int, _mm_and_si128( negative_indexes, _mm_xor_si128( _code_phase_out_int_neg, _code_phase_out_int )));
overflow_indexes = _mm_cmpgt_epi32(_code_phase_out_int_neg, _code_length_chips_minus1); //test for overflow values
_code_phase_out_int_over = _mm_sub_epi32(_code_phase_out_int_neg, _code_length_chips); //the negative values branch
_code_phase_out_int_over = _mm_xor_si128(_code_phase_out_int_neg, _mm_and_si128( overflow_indexes, _mm_xor_si128( _code_phase_out_int_over, _code_phase_out_int_neg )));
_mm_storeu_si128((__m128i*)local_code_chip_index, _code_phase_out_int_over); // Store the results back
//todo: optimize the local code lookup table with intrinsics, if possible
_result[current_vector][sample_idx] = local_code[local_code_chip_index[0]];
_result[current_vector][sample_idx + 1] = local_code[local_code_chip_index[1]];
_result[current_vector][sample_idx + 2] = local_code[local_code_chip_index[2]];
_result[current_vector][sample_idx + 3] = local_code[local_code_chip_index[3]];
}
_4output_index = _mm_add_ps(_4output_index, _4constant_float);
sample_idx += 4;
}
for(number = quarterPoints * 4; number < num_output_samples; number++)
{
for(current_vector = 0; current_vector < num_out_vectors; current_vector++)
{
local_code_chip_index[0] = (int)(code_phase_step_chips * (float)(number) + rem_code_phase_chips[current_vector]);
if (local_code_chip_index[0] < 0.0) local_code_chip_index[0] += code_length_chips - 1;
if (local_code_chip_index[0] > (code_length_chips - 1)) local_code_chip_index[0] -= code_length_chips;
_result[current_vector][number] = local_code[local_code_chip_index[0]];
}
}
}
#endif /* LV_HAVE_SSE2 */
#ifdef LV_HAVE_NEON
#include <arm_neon.h>
static inline void volk_gnsssdr_16ic_xn_resampler_fast_16ic_xn_neon(lv_16sc_t** result, const lv_16sc_t* local_code, float* rem_code_phase_chips ,float code_phase_step_chips, unsigned int code_length_chips, int num_out_vectors, unsigned int num_output_samples)
{
unsigned int number;
const unsigned int quarterPoints = num_output_samples / 4;
float32x4_t half = vdupq_n_f32(0.5f);
lv_16sc_t** _result = result;
__VOLK_ATTR_ALIGNED(16) int local_code_chip_index[4];
float tmp_rem_code_phase_chips;
float32x4_t _rem_code_phase, _code_phase_step_chips;
int32x4_t _code_length_chips, _code_length_chips_minus1;
float32x4_t _code_phase_out, _code_phase_out_with_offset;
float32x4_t sign, PlusHalf, Round;
_code_phase_step_chips = vld1q_dup_f32(&code_phase_step_chips); //load float to all four float values in float32x4_t register
__VOLK_ATTR_ALIGNED(16) int four_times_code_length_chips_minus1[4];
four_times_code_length_chips_minus1[0] = code_length_chips - 1;
four_times_code_length_chips_minus1[1] = code_length_chips - 1;
four_times_code_length_chips_minus1[2] = code_length_chips - 1;
four_times_code_length_chips_minus1[3] = code_length_chips - 1;
__VOLK_ATTR_ALIGNED(16) int four_times_code_length_chips[4];
four_times_code_length_chips[0] = code_length_chips;
four_times_code_length_chips[1] = code_length_chips;
four_times_code_length_chips[2] = code_length_chips;
four_times_code_length_chips[3] = code_length_chips;
_code_length_chips = vld1q_s32((int32_t*)&four_times_code_length_chips); //load the four int32 values into the int32x4_t register
_code_length_chips_minus1 = vld1q_s32((int32_t*)&four_times_code_length_chips_minus1); //load the four int32 values into the int32x4_t register
int32x4_t _code_phase_out_int, _code_phase_out_int_neg, _code_phase_out_int_over;
uint32x4_t negative_indexes, overflow_indexes;
int32x4_t zero = vmovq_n_s32(0);
__VOLK_ATTR_ALIGNED(16) float init_idx_float[4] = { 0.0f, 1.0f, 2.0f, 3.0f };
float32x4_t _4output_index = vld1q_f32(init_idx_float);
__VOLK_ATTR_ALIGNED(16) float init_4constant_float[4] = { 4.0f, 4.0f, 4.0f, 4.0f };
float32x4_t _4constant_float = vld1q_f32(init_4constant_float);
int current_vector = 0;
int sample_idx = 0;
for(number = 0; number < quarterPoints; number++)
{
//common to all outputs
_code_phase_out = vmulq_f32(_code_phase_step_chips, _4output_index); //compute the code phase point with the phase step
//output-vector dependent (different code phase offset)
for(current_vector = 0; current_vector < num_out_vectors; current_vector++)
{
tmp_rem_code_phase_chips = rem_code_phase_chips[current_vector] - 0.5f; // adjust offset to perform correct rounding (chip transition at 0)
_rem_code_phase = vld1q_dup_f32(&tmp_rem_code_phase_chips); //load float to all four float values in float32x4_t register
_code_phase_out_with_offset = vaddq_f32(_code_phase_out, _rem_code_phase); //add the phase offset
//convert to integer, emulating the round-to-nearest conversion of _mm_cvtps_epi32 in the SSE2 branch (vcvtq_s32_f32 truncates toward zero)
sign = vcvtq_f32_u32((vshrq_n_u32(vreinterpretq_u32_f32(_code_phase_out_with_offset), 31)));
PlusHalf = vaddq_f32(_code_phase_out_with_offset, half);
Round = vsubq_f32(PlusHalf, sign);
_code_phase_out_int = vcvtq_s32_f32(Round);
negative_indexes = vcltq_s32(_code_phase_out_int, zero); //test for negative values
_code_phase_out_int_neg = vaddq_s32(_code_phase_out_int, _code_length_chips); //the negative values branch
_code_phase_out_int_neg = veorq_s32(_code_phase_out_int, vandq_s32(vreinterpretq_s32_u32(negative_indexes), veorq_s32(_code_phase_out_int_neg, _code_phase_out_int)));
overflow_indexes = vcgtq_s32(_code_phase_out_int_neg, _code_length_chips_minus1); //test for overflow values
_code_phase_out_int_over = vsubq_s32(_code_phase_out_int_neg, _code_length_chips); //the overflow values branch
_code_phase_out_int_over = veorq_s32(_code_phase_out_int_neg, vandq_s32(vreinterpretq_s32_u32(overflow_indexes), veorq_s32(_code_phase_out_int_over, _code_phase_out_int_neg)));
vst1q_s32((int32_t*)local_code_chip_index, _code_phase_out_int_over); // Store the results back
//todo: optimize the local code lookup table with intrinsics, if possible
_result[current_vector][sample_idx] = local_code[local_code_chip_index[0]];
_result[current_vector][sample_idx + 1] = local_code[local_code_chip_index[1]];
_result[current_vector][sample_idx + 2] = local_code[local_code_chip_index[2]];
_result[current_vector][sample_idx + 3] = local_code[local_code_chip_index[3]];
}
_4output_index = vaddq_f32(_4output_index, _4constant_float);
sample_idx += 4;
}
for(number = quarterPoints * 4; number < num_output_samples; number++)
{
for(current_vector = 0; current_vector < num_out_vectors; current_vector++)
{
local_code_chip_index[0] = (int)(code_phase_step_chips * (float)(number) + rem_code_phase_chips[current_vector]);
if (local_code_chip_index[0] < 0) local_code_chip_index[0] += code_length_chips; //wrap negative indexes, as the SIMD branch does
if (local_code_chip_index[0] > ((int)code_length_chips - 1)) local_code_chip_index[0] -= code_length_chips;
_result[current_vector][number] = local_code[local_code_chip_index[0]];
}
}
}
#endif /* LV_HAVE_NEON */
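/*
 * Illustrative scalar sketch (not part of the kernel) of the rounding trick the
 * NEON branch uses in place of _mm_cvtps_epi32: vcvtq_s32_f32 truncates toward
 * zero, so adding 0.5 and subtracting the sign bit (1.0 for negative inputs,
 * 0.0 otherwise) before converting yields round-half-away-from-zero.
 */
static inline int round_to_nearest_sketch(float x)
{
    const float sign = (x < 0.0f) ? 1.0f : 0.0f; /* scalar analogue of vshrq_n_u32(..., 31) on the sign bit */
    return (int)(x + 0.5f - sign); /* truncation now rounds to the nearest integer */
}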
#endif /*INCLUDED_volk_gnsssdr_16ic_xn_resampler_fast_16ic_xn_H*/
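/*
 * A hedged usage sketch for the multi-vector kernel above; the buffer size,
 * the number of correlators, the step and the phase offsets are illustrative
 * assumptions, not values taken from the library. volk_gnsssdr_malloc(),
 * volk_gnsssdr_free() and volk_gnsssdr_get_alignment() are the allocation
 * helpers shipped with VOLK_GNSSSDR.
 */
#include <volk_gnsssdr/volk_gnsssdr.h>

static void resampler_fast_xn_usage_sketch(const lv_16sc_t* local_code, unsigned int code_length_chips)
{
    const int num_out_vectors = 3; /* e.g. early, prompt and late replicas */
    const unsigned int num_output_samples = 4000; /* arbitrary for this sketch */
    float rem_code_phase_chips[3] = { -0.25f, 0.0f, 0.25f };
    lv_16sc_t* result[3];
    int n;
    for (n = 0; n < num_out_vectors; n++)
    {
        result[n] = (lv_16sc_t*)volk_gnsssdr_malloc(num_output_samples * sizeof(lv_16sc_t), volk_gnsssdr_get_alignment());
    }
    /* resample the local code once for all correlators, each with its own phase offset */
    volk_gnsssdr_16ic_xn_resampler_fast_16ic_xn(result, local_code, rem_code_phase_chips, 0.9998f, code_length_chips, num_out_vectors, num_output_samples);
    /* ... correlate result[n] against the received samples ... */
    for (n = 0; n < num_out_vectors; n++)
    {
        volk_gnsssdr_free(result[n]);
    }
}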

View File

@ -85,9 +85,9 @@ std::vector<volk_gnsssdr_test_case_t> init_test_list(volk_gnsssdr_test_params_t
(VOLK_INIT_TEST(volk_gnsssdr_16ic_convert_32fc, test_params_more_iters))
(VOLK_INIT_PUPP(volk_gnsssdr_s32f_sincospuppet_32fc, volk_gnsssdr_s32f_sincos_32fc, test_params_inacc2))
(VOLK_INIT_PUPP(volk_gnsssdr_16ic_rotatorpuppet_16ic, volk_gnsssdr_16ic_s32fc_x2_rotator_16ic, test_params_int1))
(VOLK_INIT_PUPP(volk_gnsssdr_16ic_resamplerpuppet_16ic, volk_gnsssdr_16ic_resampler_16ic, test_params))
(VOLK_INIT_PUPP(volk_gnsssdr_16ic_resamplerfastpuppet_16ic, volk_gnsssdr_16ic_resampler_fast_16ic, test_params))
(VOLK_INIT_PUPP(volk_gnsssdr_16ic_resamplerfastxnpuppet_16ic, volk_gnsssdr_16ic_xn_resampler_fast_16ic_xn, test_params))
(VOLK_INIT_PUPP(volk_gnsssdr_16ic_resamplerxnpuppet_16ic, volk_gnsssdr_16ic_xn_resampler_16ic_xn, test_params))
(VOLK_INIT_PUPP(volk_gnsssdr_16ic_resamplerxnpuppet2_16ic, volk_gnsssdr_16ic_xn_resampler2_16ic_xn, test_params))
(VOLK_INIT_PUPP(volk_gnsssdr_32fc_resamplerxnpuppet_32fc, volk_gnsssdr_32fc_xn_resampler_32fc_xn, test_params))
(VOLK_INIT_PUPP(volk_gnsssdr_16ic_x2_dotprodxnpuppet_16ic, volk_gnsssdr_16ic_x2_dot_prod_16ic_xn, test_params))
(VOLK_INIT_PUPP(volk_gnsssdr_16ic_x2_rotator_dotprodxnpuppet_16ic, volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn, test_params_int16))
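/*
 * VOLK_INIT_PUPP registers a "puppet": a thin kernel whose signature the QA
 * harness can drive directly, wrapping a kernel it cannot call on its own
 * (here, the multi-vector resamplers). The sketch below is illustrative only;
 * the wrapper name, the fixed offsets, the generic-kernel call and the
 * three-replica setup are assumptions, not the repository's actual puppet
 * code, and <string.h> plus the VOLK_GNSSSDR headers are assumed included.
 */
static inline void resamplerfastxnpuppet_sketch_generic(lv_16sc_t* result, const lv_16sc_t* local_code, unsigned int num_points)
{
    float rem_code_phase_chips[3] = { -0.123f, 0.0f, 0.123f }; /* arbitrary per-replica offsets */
    const int num_out_vectors = 3;
    lv_16sc_t* result_aux[3];
    int n;
    for (n = 0; n < num_out_vectors; n++)
    {
        result_aux[n] = (lv_16sc_t*)volk_gnsssdr_malloc(num_points * sizeof(lv_16sc_t), volk_gnsssdr_get_alignment());
    }
    /* run the kernel under test with fixed parameters ... */
    volk_gnsssdr_16ic_xn_resampler_fast_16ic_xn_generic(result_aux, local_code, rem_code_phase_chips, 0.25f, num_points, num_out_vectors, num_points);
    /* ... and hand one replica back in the signature the harness expects */
    memcpy(result, result_aux[0], sizeof(lv_16sc_t) * num_points);
    for (n = 0; n < num_out_vectors; n++)
    {
        volk_gnsssdr_free(result_aux[n]);
    }
}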

View File

@ -85,7 +85,7 @@ void cpu_multicorrelator_16sc::update_local_code(int correlator_length_samples,
d_tmp_code_phases_chips[n] = d_shifts_chips[n] - rem_code_phase_chips;
}
volk_gnsssdr_16ic_xn_resampler_16ic_xn(d_local_codes_resampled,
volk_gnsssdr_16ic_xn_resampler_fast_16ic_xn(d_local_codes_resampled,
d_local_code_in,
d_tmp_code_phases_chips,
code_phase_step_chips,