#ifndef INCLUDE_VOLK_VOLK_NEON_INTRINSICS_H_
#define INCLUDE_VOLK_VOLK_NEON_INTRINSICS_H_

#include <arm_neon.h>

/* Magnitude squared for complex */
static inline float32x4_t _vmagnitudesquaredq_f32(float32x4x2_t cmplxValue)
{
    float32x4_t iValue, qValue, result;
    iValue = vmulq_f32(cmplxValue.val[0], cmplxValue.val[0]); /* I * I */
    qValue = vmulq_f32(cmplxValue.val[1], cmplxValue.val[1]); /* Q * Q */
    result = vaddq_f32(iValue, qValue);                       /* I^2 + Q^2 */
    return result;
}
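
/* Usage sketch (illustrative, not part of the upstream header): squared
 * magnitudes of four complex floats. The helper name and the interleaved
 * (re, im) input layout handled via vld2q_f32 are assumptions of this
 * example. */
static inline void _example_magnitude_squared_f32(const float* complexIn, float* magOut)
{
    const float32x4x2_t z = vld2q_f32(complexIn); /* val[0] = real lanes, val[1] = imag lanes */
    vst1q_f32(magOut, _vmagnitudesquaredq_f32(z)); /* |z|^2 per lane */
}
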
/* Inverse square root */
static inline float32x4_t _vinvsqrtq_f32(float32x4_t x)
{
    /* Estimate refined with two Newton-Raphson steps */
    float32x4_t sqrt_reciprocal = vrsqrteq_f32(x);
    sqrt_reciprocal = vmulq_f32(
        vrsqrtsq_f32(vmulq_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
    sqrt_reciprocal = vmulq_f32(
        vrsqrtsq_f32(vmulq_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
    return sqrt_reciprocal;
}
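
/* Usage sketch (illustrative, hypothetical wrapper name): sqrt(x) formed as
 * x * (1/sqrt(x)), a common way to use a reciprocal-sqrt estimate. Note the
 * estimate yields NaN for x == 0 (0 * inf). */
static inline float32x4_t _example_vsqrtq_f32(float32x4_t x)
{
    return vmulq_f32(x, _vinvsqrtq_f32(x));
}
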
/* Inverse (reciprocal) */
static inline float32x4_t _vinvq_f32(float32x4_t x)
{
    /* Estimate refined with two Newton-Raphson steps */
    float32x4_t recip = vrecpeq_f32(x);
    recip = vmulq_f32(vrecpsq_f32(x, recip), recip);
    recip = vmulq_f32(vrecpsq_f32(x, recip), recip);
    return recip;
}
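
/* Usage sketch (illustrative, hypothetical wrapper name): elementwise
 * division a / b approximated as a * (1/b). */
static inline float32x4_t _example_vdivq_f32(float32x4_t a, float32x4_t b)
{
    return vmulq_f32(a, _vinvq_f32(b));
}
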
/* Complex multiplication of 4 complex values */
static inline float32x4x2_t _vmultiply_complexq_f32(float32x4x2_t a_val, float32x4x2_t b_val)
{
    float32x4x2_t tmp_real;
    float32x4x2_t tmp_imag;
    float32x4x2_t c_val;
    tmp_real.val[0] = vmulq_f32(a_val.val[0], b_val.val[0]);   /* ar * br */
    tmp_real.val[1] = vmulq_f32(a_val.val[1], b_val.val[1]);   /* ai * bi */
    tmp_imag.val[0] = vmulq_f32(a_val.val[0], b_val.val[1]);   /* ar * bi */
    tmp_imag.val[1] = vmulq_f32(a_val.val[1], b_val.val[0]);   /* ai * br */
    c_val.val[0] = vsubq_f32(tmp_real.val[0], tmp_real.val[1]); /* real = ar*br - ai*bi */
    c_val.val[1] = vaddq_f32(tmp_imag.val[0], tmp_imag.val[1]); /* imag = ar*bi + ai*br */
    return c_val;
}
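
/* Usage sketch (illustrative, hypothetical helper name): multiplies four
 * complex floats from two interleaved (re, im) arrays and stores the
 * interleaved products; the deinterleave/reinterleave via vld2q/vst2q is an
 * assumption of this example. */
static inline void _example_multiply_complex_f32(const float* aIn, const float* bIn, float* cOut)
{
    const float32x4x2_t a = vld2q_f32(aIn);
    const float32x4x2_t b = vld2q_f32(bIn);
    vst2q_f32(cOut, _vmultiply_complexq_f32(a, b));
}
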
/* Evaluates a degree-7 polynomial on 4 lanes (Estrin-style split) */
static inline float32x4_t _vtaylor_polyq_f32(float32x4_t x, const float32x4_t coeffs[8])
{
    float32x4_t cA = vmlaq_f32(coeffs[0], coeffs[4], x);
    float32x4_t cB = vmlaq_f32(coeffs[2], coeffs[6], x);
    float32x4_t cC = vmlaq_f32(coeffs[1], coeffs[5], x);
    float32x4_t cD = vmlaq_f32(coeffs[3], coeffs[7], x);
    float32x4_t x2 = vmulq_f32(x, x);
    float32x4_t x4 = vmulq_f32(x2, x2);
    float32x4_t res = vmlaq_f32(vmlaq_f32(cA, cB, x2), vmlaq_f32(cC, cD, x2), x4);
    return res;
}
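
/* Usage sketch (illustrative, hypothetical helper name): the scheme above
 * evaluates p(x) = c[0] + c[4]x + c[2]x^2 + c[6]x^3 + c[1]x^4 + c[5]x^5 +
 * c[3]x^6 + c[7]x^7, so standard-order coefficients a[k] (for x^k) must be
 * packed into that permuted layout before calling _vtaylor_polyq_f32. */
static inline void _example_pack_poly_coeffs(const float a[8], float32x4_t coeffs[8])
{
    static const int order[8] = { 0, 4, 2, 6, 1, 5, 3, 7 }; /* coeffs[order[k]] holds a[k] */
    int k;
    for (k = 0; k < 8; k++) {
        coeffs[order[k]] = vdupq_n_f32(a[k]);
    }
}
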
/* Natural logarithm for 4 lanes */
static inline float32x4_t _vlogq_f32(float32x4_t x)
{
    const float32x4_t log_tab[8] = {
        vdupq_n_f32(-2.29561495781f), vdupq_n_f32(-2.47071170807f),
        vdupq_n_f32(-5.68692588806f), vdupq_n_f32(-0.165253549814f),
        vdupq_n_f32(5.17591238022f),  vdupq_n_f32(0.844007015228f),
        vdupq_n_f32(4.58445882797f),  vdupq_n_f32(0.0141278216615f),
    };
    const int32x4_t CONST_127 = vdupq_n_s32(127);
    const float32x4_t CONST_LN2 = vdupq_n_f32(0.6931471805f); /* ln(2) */
    /* Extract exponent m and mantissa val in [1, 2), so that x = 2^m * val */
    int32x4_t m = vsubq_s32(
        vreinterpretq_s32_u32(vshrq_n_u32(vreinterpretq_u32_f32(x), 23)), CONST_127);
    float32x4_t val =
        vreinterpretq_f32_s32(vsubq_s32(vreinterpretq_s32_f32(x), vshlq_n_s32(m, 23)));
    /* Polynomial approximation of log(val), then add m * ln(2) */
    float32x4_t poly = _vtaylor_polyq_f32(val, log_tab);
    poly = vmlaq_f32(poly, vcvtq_f32_s32(m), CONST_LN2);
    return poly;
}
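
/* Usage sketch (illustrative, hypothetical helper name): natural log of four
 * floats from memory. The exponent/mantissa decomposition above assumes
 * positive, normal inputs. */
static inline void _example_vlogq_f32(const float* in, float* out)
{
    vst1q_f32(out, _vlogq_f32(vld1q_f32(in)));
}
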
/* Evaluates 4 sines and cosines at once (Cephes-style argument reduction) */
static inline float32x4x2_t _vsincosq_f32(float32x4_t x)
{
    const float32x4_t c_minus_cephes_DP1 = vdupq_n_f32(-0.78515625);
    const float32x4_t c_minus_cephes_DP2 = vdupq_n_f32(-2.4187564849853515625e-4);
    const float32x4_t c_minus_cephes_DP3 = vdupq_n_f32(-3.77489497744594108e-8);
    const float32x4_t c_sincof_p0 = vdupq_n_f32(-1.9515295891e-4);
    const float32x4_t c_sincof_p1 = vdupq_n_f32(8.3321608736e-3);
    const float32x4_t c_sincof_p2 = vdupq_n_f32(-1.6666654611e-1);
    const float32x4_t c_coscof_p0 = vdupq_n_f32(2.443315711809948e-005);
    const float32x4_t c_coscof_p1 = vdupq_n_f32(-1.388731625493765e-003);
    const float32x4_t c_coscof_p2 = vdupq_n_f32(4.166664568298827e-002);
    const float32x4_t c_cephes_FOPI = vdupq_n_f32(1.27323954473516); /* 4 / pi */

    const float32x4_t CONST_1 = vdupq_n_f32(1.f);
    const float32x4_t CONST_1_2 = vdupq_n_f32(0.5f);
    const float32x4_t CONST_0 = vdupq_n_f32(0.f);
    const uint32x4_t CONST_2 = vdupq_n_u32(2);
    const uint32x4_t CONST_4 = vdupq_n_u32(4);

    uint32x4_t emm2;
    uint32x4_t sign_mask_sin, sign_mask_cos;
    sign_mask_sin = vcltq_f32(x, CONST_0);
    x = vabsq_f32(x);

    /* Scale by 4/pi */
    float32x4_t y = vmulq_f32(x, c_cephes_FOPI);

    /* Store the integer part of y in emm2 */
    emm2 = vcvtq_u32_f32(y);
    /* j = (j + 1) & (~1) (see the Cephes sources) */
    emm2 = vaddq_u32(emm2, vdupq_n_u32(1));
    emm2 = vandq_u32(emm2, vdupq_n_u32(~1));
    y = vcvtq_f32_u32(emm2);

    /* Polynomial selection mask: one polynomial for 0 <= x <= pi/4,
       another for pi/4 < x <= pi/2; both branches are computed */
    const uint32x4_t poly_mask = vtstq_u32(emm2, CONST_2);

    /* Extended-precision modular arithmetic:
       x = ((x - y * DP1) - y * DP2) - y * DP3 */
    x = vmlaq_f32(x, y, c_minus_cephes_DP1);
    x = vmlaq_f32(x, y, c_minus_cephes_DP2);
    x = vmlaq_f32(x, y, c_minus_cephes_DP3);

    sign_mask_sin = veorq_u32(sign_mask_sin, vtstq_u32(emm2, CONST_4));
    sign_mask_cos = vtstq_u32(vsubq_u32(emm2, CONST_2), CONST_4);

    /* Evaluate the cosine polynomial in y1 and the sine polynomial in y2 */
    float32x4_t y1, y2;
    float32x4_t z = vmulq_f32(x, x);

    y1 = vmlaq_f32(c_coscof_p1, z, c_coscof_p0);
    y1 = vmlaq_f32(c_coscof_p2, z, y1);
    y1 = vmulq_f32(y1, z);
    y1 = vmulq_f32(y1, z);
    y1 = vmlsq_f32(y1, z, CONST_1_2);
    y1 = vaddq_f32(y1, CONST_1);

    y2 = vmlaq_f32(c_sincof_p1, z, c_sincof_p0);
    y2 = vmlaq_f32(c_sincof_p2, z, y2);
    y2 = vmulq_f32(y2, z);
    y2 = vmlaq_f32(x, x, y2);

    /* Select the correct result from the two polynomials, then apply signs */
    const float32x4_t ys = vbslq_f32(poly_mask, y1, y2);
    const float32x4_t yc = vbslq_f32(poly_mask, y2, y1);

    float32x4x2_t sincos;
    sincos.val[0] = vbslq_f32(sign_mask_sin, vnegq_f32(ys), ys);
    sincos.val[1] = vbslq_f32(sign_mask_cos, yc, vnegq_f32(yc));
    return sincos;
}

static inline float32x4_t _vsinq_f32(float32x4_t x)
{
    const float32x4x2_t sincos = _vsincosq_f32(x);
    return sincos.val[0];
}

static inline float32x4_t _vcosq_f32(float32x4_t x)
{
    const float32x4x2_t sincos = _vsincosq_f32(x);
    return sincos.val[1];
}

static inline float32x4_t _vtanq_f32(float32x4_t x)
{
    const float32x4x2_t sincos = _vsincosq_f32(x);
    return vmulq_f32(sincos.val[0], _vinvq_f32(sincos.val[1]));
}
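
/* Usage sketch (illustrative, hypothetical helper name): sine and cosine of
 * four phases at once, written to separate output arrays. */
static inline void _example_sincos_f32(const float* phase, float* sinOut, float* cosOut)
{
    const float32x4x2_t sc = _vsincosq_f32(vld1q_f32(phase));
    vst1q_f32(sinOut, sc.val[0]);
    vst1q_f32(cosOut, sc.val[1]);
}
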
/* Accumulates sq_acc + rec * (aux * val - acc)^2, elementwise */
static inline float32x4_t _neon_accumulate_square_sum_f32(
    float32x4_t sq_acc, float32x4_t acc, float32x4_t val, float32x4_t rec, float32x4_t aux)
{
    aux = vmulq_f32(aux, val);
    aux = vsubq_f32(aux, acc);
    aux = vmulq_f32(aux, aux);
#ifdef __aarch64__
    return vfmaq_f32(sq_acc, aux, rec); /* fused multiply-add on AArch64 */
#else
    aux = vmulq_f32(aux, rec);
    return vaddq_f32(sq_acc, aux);
#endif
}

#endif /* INCLUDE_VOLK_VOLK_NEON_INTRINSICS_H_ */