Переструктурирован проект. Исходники вынесены за пределы этого репозитория; здесь осталась только оболочка.

Также был протестирован компилятор mingw; обнаружены следующие ошибки:
- переопределение __INT64_C, __UINT64_C
- и ещё какие-то проблемы с intmax_t и uintmax_t
This commit is contained in:
2025-03-25 13:26:09 +03:00
parent a043335d9b
commit 4556b453db
1453 changed files with 359122 additions and 280 deletions

View File

@@ -0,0 +1,200 @@
/******************************************************************************
* @file arm_sorting.h
* @brief Private header file for CMSIS DSP Library
* @version V1.7.0
* @date 2019
******************************************************************************/
/*
* Copyright (c) 2010-2019 Arm Limited or its affiliates. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _ARM_SORTING_H_
#define _ARM_SORTING_H_
#include "arm_math.h"
#ifdef __cplusplus
extern "C"
{
#endif
/**
* @param[in] S points to an instance of the sorting structure.
* @param[in] pSrc points to the block of input data.
* @param[out] pDst points to the block of output data.
* @param[in] blockSize number of samples to process.
*/
void arm_bubble_sort_f32(
const arm_sort_instance_f32 * S,
float32_t * pSrc,
float32_t * pDst,
uint32_t blockSize);
/**
* @param[in] S points to an instance of the sorting structure.
* @param[in] pSrc points to the block of input data.
* @param[out] pDst points to the block of output data.
* @param[in] blockSize number of samples to process.
*/
void arm_heap_sort_f32(
const arm_sort_instance_f32 * S,
float32_t * pSrc,
float32_t * pDst,
uint32_t blockSize);
/**
* @param[in] S points to an instance of the sorting structure.
* @param[in] pSrc points to the block of input data.
* @param[out] pDst points to the block of output data.
* @param[in] blockSize number of samples to process.
*/
void arm_insertion_sort_f32(
const arm_sort_instance_f32 * S,
float32_t *pSrc,
float32_t* pDst,
uint32_t blockSize);
/**
* @param[in] S points to an instance of the sorting structure.
* @param[in] pSrc points to the block of input data.
* @param[out] pDst points to the block of output data
* @param[in] blockSize number of samples to process.
*/
void arm_quick_sort_f32(
const arm_sort_instance_f32 * S,
float32_t * pSrc,
float32_t * pDst,
uint32_t blockSize);
/**
* @param[in] S points to an instance of the sorting structure.
* @param[in] pSrc points to the block of input data.
* @param[out] pDst points to the block of output data
* @param[in] blockSize number of samples to process.
*/
void arm_selection_sort_f32(
const arm_sort_instance_f32 * S,
float32_t * pSrc,
float32_t * pDst,
uint32_t blockSize);
/**
* @param[in] S points to an instance of the sorting structure.
* @param[in] pSrc points to the block of input data.
* @param[out] pDst points to the block of output data
* @param[in] blockSize number of samples to process.
*/
void arm_bitonic_sort_f32(
const arm_sort_instance_f32 * S,
float32_t * pSrc,
float32_t * pDst,
uint32_t blockSize);
#if defined(ARM_MATH_NEON)
#define vtrn256_128q(a, b) \
do { \
float32x4_t vtrn128_temp = a.val[1]; \
a.val[1] = b.val[0]; \
b.val[0] = vtrn128_temp ; \
} while (0)
#define vtrn128_64q(a, b) \
do { \
float32x2_t ab, cd, ef, gh; \
ab = vget_low_f32(a); \
ef = vget_low_f32(b); \
cd = vget_high_f32(a); \
gh = vget_high_f32(b); \
a = vcombine_f32(ab, ef); \
b = vcombine_f32(cd, gh); \
} while (0)
#define vtrn256_64q(a, b) \
do { \
float32x2_t a_0, a_1, a_2, a_3; \
float32x2_t b_0, b_1, b_2, b_3; \
a_0 = vget_low_f32(a.val[0]); \
a_1 = vget_high_f32(a.val[0]); \
a_2 = vget_low_f32(a.val[1]); \
a_3 = vget_high_f32(a.val[1]); \
b_0 = vget_low_f32(b.val[0]); \
b_1 = vget_high_f32(b.val[0]); \
b_2 = vget_low_f32(b.val[1]); \
b_3 = vget_high_f32(b.val[1]); \
a.val[0] = vcombine_f32(a_0, b_0); \
a.val[1] = vcombine_f32(a_2, b_2); \
b.val[0] = vcombine_f32(a_1, b_1); \
b.val[1] = vcombine_f32(a_3, b_3); \
} while (0)
#define vtrn128_32q(a, b) \
do { \
float32x4x2_t vtrn32_tmp = vtrnq_f32((a), (b)); \
(a) = vtrn32_tmp.val[0]; \
(b) = vtrn32_tmp.val[1]; \
} while (0)
#define vtrn256_32q(a, b) \
do { \
float32x4x2_t vtrn32_tmp_1 = vtrnq_f32((a.val[0]), (b.val[0])); \
float32x4x2_t vtrn32_tmp_2 = vtrnq_f32((a.val[1]), (b.val[1])); \
a.val[0] = vtrn32_tmp_1.val[0]; \
a.val[1] = vtrn32_tmp_2.val[0]; \
b.val[0] = vtrn32_tmp_1.val[1]; \
b.val[1] = vtrn32_tmp_2.val[1]; \
} while (0)
#define vminmaxq(a, b) \
do { \
float32x4_t minmax_tmp = (a); \
(a) = vminq_f32((a), (b)); \
(b) = vmaxq_f32(minmax_tmp, (b)); \
} while (0)
#define vminmax256q(a, b) \
do { \
float32x4x2_t minmax256_tmp = (a); \
a.val[0] = vminq_f32(a.val[0], b.val[0]); \
a.val[1] = vminq_f32(a.val[1], b.val[1]); \
b.val[0] = vmaxq_f32(minmax256_tmp.val[0], b.val[0]); \
b.val[1] = vmaxq_f32(minmax256_tmp.val[1], b.val[1]); \
} while (0)
#define vrev128q_f32(a) \
vcombine_f32(vrev64_f32(vget_high_f32(a)), vrev64_f32(vget_low_f32(a)))
#define vrev256q_f32(a) \
do { \
float32x4_t rev_tmp = vcombine_f32(vrev64_f32(vget_high_f32(a.val[0])), vrev64_f32(vget_low_f32(a.val[0]))); \
a.val[0] = vcombine_f32(vrev64_f32(vget_high_f32(a.val[1])), vrev64_f32(vget_low_f32(a.val[1]))); \
a.val[1] = rev_tmp; \
} while (0)
#define vldrev128q_f32(a, p) \
do { \
a = vld1q_f32(p); \
a = vrev128q_f32(a); \
} while (0)
#endif /* ARM_MATH_NEON */
#ifdef __cplusplus
}
#endif
#endif /* _ARM_SORTING_H_ */

View File

@@ -0,0 +1,325 @@
/******************************************************************************
* @file arm_vec_fft.h
* @brief Private header file for CMSIS DSP Library
* @version V1.7.0
* @date 07. January 2020
******************************************************************************/
/*
* Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _ARM_VEC_FFT_H_
#define _ARM_VEC_FFT_H_
#include "arm_math.h"
#include "arm_helium_utils.h"
#ifdef __cplusplus
extern "C"
{
#endif
#if (defined(ARM_MATH_MVEF) || defined(ARM_MATH_MVEI) || defined(ARM_MATH_HELIUM)) && !defined(ARM_MATH_AUTOVECTORIZE)
#define MVE_CMPLX_ADD_A_ixB(A, B) vcaddq_rot90(A,B)
#define MVE_CMPLX_SUB_A_ixB(A,B) vcaddq_rot270(A,B)
#define MVE_CMPLX_MULT_FLT_AxB(A,B) vcmlaq_rot90(vcmulq(A, B), A, B)
#define MVE_CMPLX_MULT_FLT_Conj_AxB(A,B) vcmlaq_rot270(vcmulq(A, B), A, B)
#define MVE_CMPLX_MULT_FX_AxB(A,B,TyA) vqdmladhxq(vqdmlsdhq((TyA)vuninitializedq_s32(), A, B), A, B)
#define MVE_CMPLX_MULT_FX_AxConjB(A,B,TyA) vqdmladhq(vqdmlsdhxq((TyA)vuninitializedq_s32(), A, B), A, B)
#define MVE_CMPLX_ADD_FX_A_ixB(A, B) vhcaddq_rot90(A,B)
#define MVE_CMPLX_SUB_FX_A_ixB(A,B) vhcaddq_rot270(A,B)
/**
@brief In-place 32 bit reversal function for helium
@param[in,out] pSrc points to in-place buffer of unknown 32-bit data type
@param[in] bitRevLen bit reversal table length
@param[in] pBitRevTab points to bit reversal table
@return none
*/
__STATIC_INLINE void arm_bitreversal_32_inpl_mve(
uint32_t *pSrc,
const uint16_t bitRevLen,
const uint16_t *pBitRevTab)
{
uint64_t *src = (uint64_t *) pSrc;
int32_t blkCnt; /* loop counters */
uint32x4_t bitRevTabOff;
uint32x4_t one = vdupq_n_u32(1);
uint64x2_t inLow, inHigh;
uint64x2_t bitRevOff1Low, bitRevOff0Low;
uint64x2_t bitRevOff1High, bitRevOff0High;
/* load scheduling to increase gather load idx update / gather load distance */
bitRevTabOff = vldrhq_u32(pBitRevTab);
pBitRevTab += 4;
bitRevOff0Low = vmullbq_int_u32(bitRevTabOff, one);
bitRevOff0High = vmulltq_int_u32(bitRevTabOff, one);
blkCnt = bitRevLen / 8;
while (blkCnt > 0) {
bitRevTabOff = vldrhq_u32(pBitRevTab);
pBitRevTab += 4;
/* 64-bit index expansion */
bitRevOff1Low = vmullbq_int_u32(bitRevTabOff, one);
bitRevOff1High = vmulltq_int_u32(bitRevTabOff, one);
inLow = vldrdq_gather_offset_u64(src, bitRevOff0Low);
inHigh = vldrdq_gather_offset_u64(src, bitRevOff0High);
vstrdq_scatter_offset_u64(src, bitRevOff0Low, inHigh);
vstrdq_scatter_offset_u64(src, bitRevOff0High, inLow);
/* unrolled */
bitRevTabOff = vldrhq_u32(pBitRevTab);
pBitRevTab += 4;
bitRevOff0Low = vmullbq_int_u32(bitRevTabOff, one);
bitRevOff0High = vmulltq_int_u32(bitRevTabOff, one);
inLow = vldrdq_gather_offset_u64(src, bitRevOff1Low);
inHigh = vldrdq_gather_offset_u64(src, bitRevOff1High);
vstrdq_scatter_offset_u64(src, bitRevOff1Low, inHigh);
vstrdq_scatter_offset_u64(src, bitRevOff1High, inLow);
/*
* Decrement the blockSize loop counter
*/
blkCnt--;
}
if (bitRevLen & 7) {
/* FFT size = 16 */
inLow = vldrdq_gather_offset_u64(src, bitRevOff0Low);
inHigh = vldrdq_gather_offset_u64(src, bitRevOff0High);
vstrdq_scatter_offset_u64(src, bitRevOff0Low, inHigh);
vstrdq_scatter_offset_u64(src, bitRevOff0High, inLow);
}
}
/**
@brief In-place 16 bit reversal function for helium
@param[in,out] pSrc points to in-place buffer of unknown 16-bit data type
@param[in] bitRevLen bit reversal table length
@param[in] pBitRevTab points to bit reversal table
@return none
*/
__STATIC_INLINE void arm_bitreversal_16_inpl_mve(
uint16_t *pSrc,
const uint16_t bitRevLen,
const uint16_t *pBitRevTab)
{
uint32_t *src = (uint32_t *) pSrc;
int32_t blkCnt; /* loop counters */
uint32x4_t bitRevTabOff;
uint16x8_t one = vdupq_n_u16(1);
uint32x4_t bitRevOff1Low, bitRevOff0Low;
uint32x4_t bitRevOff1High, bitRevOff0High;
uint32x4_t inLow, inHigh;
/* load scheduling to increase gather load idx update / gather load distance */
bitRevTabOff = vldrhq_u16(pBitRevTab);
pBitRevTab += 8;
bitRevOff0Low = vmullbq_int_u16((uint16x8_t)bitRevTabOff, one);
bitRevOff0High = vmulltq_int_u16((uint16x8_t)bitRevTabOff, one);
bitRevOff0Low = vshrq_n_u16((uint16x8_t)bitRevOff0Low, 3);
bitRevOff0High = vshrq_n_u16((uint16x8_t)bitRevOff0High, 3);
blkCnt = (bitRevLen / 16);
while (blkCnt > 0) {
bitRevTabOff = vldrhq_u16(pBitRevTab);
pBitRevTab += 8;
bitRevOff1Low = vmullbq_int_u16((uint16x8_t)bitRevTabOff, one);
bitRevOff1High = vmulltq_int_u16((uint16x8_t)bitRevTabOff, one);
bitRevOff1Low = vshrq_n_u16((uint16x8_t)bitRevOff1Low, 3);
bitRevOff1High = vshrq_n_u16((uint16x8_t)bitRevOff1High, 3);
inLow = vldrwq_gather_shifted_offset_u32(src, bitRevOff0Low);
inHigh = vldrwq_gather_shifted_offset_u32(src, bitRevOff0High);
vstrwq_scatter_shifted_offset_u32(src, bitRevOff0Low, inHigh);
vstrwq_scatter_shifted_offset_u32(src, bitRevOff0High, inLow);
/* loop unrolling */
bitRevTabOff = vldrhq_u16(pBitRevTab);
pBitRevTab += 8;
bitRevOff0Low = vmullbq_int_u16((uint16x8_t)bitRevTabOff, one);
bitRevOff0High = vmulltq_int_u16((uint16x8_t)bitRevTabOff, one);
bitRevOff0Low = vshrq_n_u16((uint16x8_t)bitRevOff0Low, 3);
bitRevOff0High = vshrq_n_u16((uint16x8_t)bitRevOff0High, 3);
inLow = vldrwq_gather_shifted_offset_u32(src, bitRevOff1Low);
inHigh = vldrwq_gather_shifted_offset_u32(src, bitRevOff1High);
vstrwq_scatter_shifted_offset_u32(src, bitRevOff1Low, inHigh);
vstrwq_scatter_shifted_offset_u32(src, bitRevOff1High, inLow);
blkCnt--;
}
/* tail handling */
blkCnt = bitRevLen & 0xf;
if (blkCnt == 8) {
inLow = vldrwq_gather_shifted_offset_u32(src, bitRevOff0Low);
inHigh = vldrwq_gather_shifted_offset_u32(src, bitRevOff0High);
vstrwq_scatter_shifted_offset_u32(src, bitRevOff0Low, inHigh);
vstrwq_scatter_shifted_offset_u32(src, bitRevOff0High, inLow);
} else if (blkCnt == 12) {
/* FFT 16 special case */
mve_pred16_t p = vctp16q(4);
bitRevTabOff = vldrhq_z_u16(pBitRevTab, p);
inLow = vldrwq_gather_shifted_offset_u32(src, bitRevOff0Low);
inHigh = vldrwq_gather_shifted_offset_u32(src, bitRevOff0High);
vstrwq_scatter_shifted_offset_u32(src, bitRevOff0Low, inHigh);
vstrwq_scatter_shifted_offset_u32(src, bitRevOff0High, inLow);
bitRevOff0Low = vmullbq_int_u16((uint16x8_t)bitRevTabOff, one);
bitRevOff0High = vmulltq_int_u16((uint16x8_t)bitRevTabOff, one);
bitRevOff0Low = vshrq_n_u16((uint16x8_t)bitRevOff0Low, 3);
bitRevOff0High = vshrq_n_u16((uint16x8_t)bitRevOff0High, 3);
inLow = vldrwq_gather_shifted_offset_z_u32(src, bitRevOff0Low, p);
inHigh = vldrwq_gather_shifted_offset_z_u32(src, bitRevOff0High, p);
vstrwq_scatter_shifted_offset_p_u32(src, bitRevOff0Low, inHigh, p);
vstrwq_scatter_shifted_offset_p_u32(src, bitRevOff0High, inLow, p);
}
}
/**
@brief Out-of-place 32 bit reversal function for helium
@param[out] pDst points to destination buffer of unknown 32-bit data type
@param[in] pSrc points to input buffer of unknown 32-bit data type
@param[in] fftLen FFT length
@return none
*/
__STATIC_INLINE void arm_bitreversal_32_outpl_mve(void *pDst, void *pSrc, uint32_t fftLen)
{
uint32x4_t idxOffs0, idxOffs1, bitRevOffs0, bitRevOffs1;
uint32_t bitRevPos, blkCnt;
uint32_t *pDst32 = (uint32_t *) pDst;
/* fwd indexes */
idxOffs0 = vdupq_n_u32(0);
idxOffs1 = vdupq_n_u32(0);
idxOffs0[0] = 0; idxOffs0[2] = 4;
idxOffs1[0] = 8; idxOffs1[2] = 12;
bitRevPos = (31 - __CLZ(fftLen)) + 5;
blkCnt = fftLen >> 2;
/* issued earlier to increase gather load idx update / gather load distance */
/* bit-reverse fwd indexes */
bitRevOffs0 = vbrsrq(idxOffs0, bitRevPos);
bitRevOffs1 = vbrsrq(idxOffs1, bitRevPos);
while (blkCnt > 0) {
uint64x2_t vecIn;
vecIn = vldrdq_gather_offset_u64(pSrc, (uint64x2_t) bitRevOffs0);
idxOffs0 = idxOffs0 + 16;
vst1q(pDst32, (uint32x4_t) vecIn);
pDst32 += 4;
bitRevOffs0 = vbrsrq(idxOffs0, bitRevPos);
vecIn = vldrdq_gather_offset_u64(pSrc, (uint64x2_t) bitRevOffs1);
idxOffs1 = idxOffs1 + 16;
vst1q(pDst32, (uint32x4_t) vecIn);
pDst32 += 4;
bitRevOffs1 = vbrsrq(idxOffs1, bitRevPos);
blkCnt--;
}
}
/**
@brief Out-of-place 16 bit reversal function for helium
@param[out] pDst points to destination buffer of unknown 16-bit data type
@param[in] pSrc points to input buffer of unknown 16-bit data type
@param[in] fftLen FFT length
@return none
*/
__STATIC_INLINE void arm_bitreversal_16_outpl_mve(void *pDst, void *pSrc, uint32_t fftLen)
{
uint32x4_t idxOffs0, idxOffs1, bitRevOffs0, bitRevOffs1;
uint32_t bitRevPos, blkCnt;
uint16_t *pDst16 = (uint16_t *) pDst;
uint32_t incrIdx = 0;
/* fwd indexes */
idxOffs0 = vidupq_wb_u32(&incrIdx, 4); // {0, 4, 8, 12}
idxOffs1 = vidupq_wb_u32(&incrIdx, 4); // {16, 20, 24, 28}
bitRevPos = (31 - __CLZ(fftLen)) + 4;
blkCnt = fftLen >> 3;
/* issued earlier to increase gather load idx update / gather load distance */
/* bit-reverse fwd indexes */
bitRevOffs0 = vbrsrq(idxOffs0, bitRevPos);
bitRevOffs1 = vbrsrq(idxOffs1, bitRevPos);
while (blkCnt > 0) {
uint32x4_t vecIn;
vecIn = vldrwq_gather_offset_s32(pSrc, bitRevOffs0);
idxOffs0 = idxOffs0 + 32;
vst1q(pDst16, (uint16x8_t) vecIn);
pDst16 += 8;
bitRevOffs0 = vbrsrq(idxOffs0, bitRevPos);
vecIn = vldrwq_gather_offset_s32(pSrc, bitRevOffs1);
idxOffs1 = idxOffs1 + 32;
vst1q(pDst16, (uint16x8_t) vecIn);
pDst16 += 8;
bitRevOffs1 = vbrsrq(idxOffs1, bitRevPos);
blkCnt--;
}
}
#endif /* (defined(ARM_MATH_MVEF) || defined(ARM_MATH_MVEI) || defined(ARM_MATH_HELIUM)) && !defined(ARM_MATH_AUTOVECTORIZE) */
#ifdef __cplusplus
}
#endif
#endif /* _ARM_VEC_FFT_H_ */

View File

@@ -0,0 +1,1586 @@
/******************************************************************************
* @file arm_vec_filtering.h
* @brief Private header file for CMSIS DSP Library
* @version V1.7.0
* @date 30. October 2019
******************************************************************************/
/*
* Copyright (c) 2010-2019 Arm Limited or its affiliates. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _ARM_VEC_FILTERING_H_
#define _ARM_VEC_FILTERING_H_
#include "arm_math.h"
#include "arm_helium_utils.h"
#ifdef __cplusplus
extern "C"
{
#endif
#if (defined(ARM_MATH_MVEF) || defined(ARM_MATH_HELIUM)) && !defined(ARM_MATH_AUTOVECTORIZE)
#define MVE_INTR_CORR_QUAD_INC_X_FIXED_SIZE_F32(acc0, acc1, acc2, acc3, pX, pY, count)\
{ \
float32_t const *pSrcX, *pSrcY; \
f32x4_t acc0Vec, acc1Vec, acc2Vec, acc3Vec, xVec, yVec; \
uint32_t k; \
\
acc0Vec = vdupq_n_f32(0.0f); \
acc1Vec = vdupq_n_f32(0.0f); \
acc2Vec = vdupq_n_f32(0.0f); \
acc3Vec = vdupq_n_f32(0.0f); \
pSrcX = (float32_t const *) pX; \
pSrcY = (float32_t const *) pY; \
k = count >> 2; \
\
while (k > 0U) \
{ \
yVec = vld1q(pSrcY); \
pSrcY += 4; \
xVec = vldrwq_f32(&pSrcX[1]); \
acc1Vec = vfmaq_f32(acc1Vec, xVec, yVec); \
xVec = vldrwq_f32(&pSrcX[2]); \
acc2Vec = vfmaq_f32(acc2Vec, xVec, yVec); \
xVec = vldrwq_f32(&pSrcX[3]); \
acc3Vec = vfmaq_f32(acc3Vec, xVec, yVec); \
xVec = vld1q(pSrcX); \
pSrcX += 4; \
acc0Vec = vfmaq_f32(acc0Vec, xVec, yVec); \
/* Decrement the loop counter */ \
k--; \
} \
/* loop + tail predication expected here */ \
k = count % 0x4U; \
if (k > 0U) \
{ \
mve_pred16_t p0 = vctp32q(k); \
yVec = vld1q(pSrcY); \
pSrcY += 4; \
xVec = vldrwq_f32(&pSrcX[1]); \
acc1Vec = vfmaq_m_f32(acc1Vec, xVec, yVec, p0); \
xVec = vldrwq_f32(&pSrcX[2]); \
acc2Vec = vfmaq_m_f32(acc2Vec, xVec, yVec, p0); \
xVec = vldrwq_f32(&pSrcX[3]); \
acc3Vec = vfmaq_m_f32(acc3Vec, xVec, yVec, p0); \
xVec = vld1q(pSrcX); \
pSrcX += 4; \
acc0Vec = vfmaq_m_f32(acc0Vec, xVec, yVec, p0); \
} \
\
acc0 = vecAddAcrossF32Mve(acc0Vec); \
acc1 = vecAddAcrossF32Mve(acc1Vec); \
acc2 = vecAddAcrossF32Mve(acc2Vec); \
acc3 = vecAddAcrossF32Mve(acc3Vec); \
}
#define MVE_INTR_CORR_SINGLE_F32(acc, pX, pY, count) \
{ \
float32_t const *pSrcX, *pSrcY; \
f32x4_t accVec, xVec, yVec; \
uint32_t k; \
\
accVec = vdupq_n_f32(0.0f); \
pSrcX = (float32_t const *) pX; \
pSrcY = (float32_t const *) pY; \
k = count >> 2; \
\
while (k > 0U) \
{ \
yVec = vld1q(pSrcY); \
pSrcY += 4; \
xVec = vld1q(pSrcX); \
pSrcX += 4; \
accVec = vfmaq_f32(accVec, xVec, yVec); \
/* Decrement the loop counter */ \
k--; \
} \
/* Loop with tail predication expected here */ \
k = count % 0x4U; \
if (k > 0U) \
{ \
mve_pred16_t p0 = vctp32q(k); \
yVec = vld1q(pSrcY); \
pSrcY += 4; \
xVec = vld1q(pSrcX); \
pSrcX += 4; \
accVec = vfmaq_m_f32(accVec, xVec, yVec, p0);\
} \
acc = vecAddAcrossF32Mve(accVec); \
}
#define MVE_INTR_CORR_DUAL_INC_X_DEC_SIZE_F32(acc0, acc1, pX, pY, count)\
{ \
float32_t const *pSrcX, *pSrcY; \
f32x4_t acc0Vec, acc1Vec, xVec, yVec; \
uint32_t k; \
\
acc0Vec = vdupq_n_f32(0.0f); \
acc1Vec = vdupq_n_f32(0.0f); \
pSrcX = (float32_t const *) pX; \
pSrcY = (float32_t const *) pY; \
k = (count-1) >> 2; \
\
while (k > 0U) \
{ \
yVec = vld1q(pSrcY); \
pSrcY += 4; \
xVec = vldrwq_f32(&pSrcX[1]); \
acc1Vec = vfmaq_f32(acc1Vec, xVec, yVec); \
xVec = vld1q(pSrcX); \
pSrcX += 4; \
acc0Vec = vfmaq_f32(acc0Vec, xVec, yVec); \
/* Decrement the loop counter */ \
k--; \
} \
/* use predication to finalize MAC sum */ \
/* acc1 requires exact number of sample (count-1) */ \
/* disable extra lanes in final MAC computation */ \
k = (count-1) % 0x4U; \
mve_pred16_t p0 = vctp32q(k); \
yVec = vld1q(pSrcY); \
pSrcY += 4; \
xVec = vldrwq_f32(&pSrcX[1]); \
acc1Vec = vfmaq_m_f32(acc1Vec, xVec, yVec, p0); \
/* acc0 requires 1 additional sample (count) */ \
/* so add 1 to unmask an extra lane in final MAC computation */ \
p0 = vctp32q(k+1); \
xVec = vld1q(pSrcX); \
pSrcX += 4; \
acc0Vec = vfmaq_m_f32(acc0Vec, xVec, yVec, p0); \
\
acc0 = vecAddAcrossF32Mve(acc0Vec); \
acc1 = vecAddAcrossF32Mve(acc1Vec); \
}
#define MVE_INTR_CORR_DUAL_INC_X_FIXED_SIZE_F32(acc0, acc1, pX, pY, count)\
{ \
float32_t const *pSrcX, *pSrcY; \
f32x4_t acc0Vec, acc1Vec, xVec, yVec; \
uint32_t k; \
\
acc0Vec = vdupq_n_f32(0.0f); \
acc1Vec = vdupq_n_f32(0.0f); \
pSrcX = (float32_t const *) pX; \
pSrcY = (float32_t const *) pY; \
k = count >> 2; \
\
while (k > 0U) \
{ \
yVec = vld1q(pSrcY); \
pSrcY += 4; \
xVec = vldrwq_f32(&pSrcX[1]); \
acc1Vec = vfmaq_f32(acc1Vec, xVec, yVec); \
xVec = vld1q(pSrcX); \
pSrcX += 4; \
acc0Vec = vfmaq_f32(acc0Vec, xVec, yVec); \
/* Decrement the loop counter */ \
k--; \
} \
/* loop + tail predication expected here */ \
k = count % 0x4U; \
if (k > 0U) \
{ \
mve_pred16_t p0 = vctp32q(k); \
yVec = vld1q(pSrcY); \
pSrcY += 4; \
xVec = vldrwq_f32(&pSrcX[1]); \
acc1Vec = vfmaq_m_f32(acc1Vec, xVec, yVec, p0); \
xVec = vld1q(pSrcX); \
pSrcX += 4; \
acc0Vec = vfmaq_m_f32(acc0Vec, xVec, yVec, p0); \
} \
\
acc0 = vecAddAcrossF32Mve(acc0Vec); \
acc1 = vecAddAcrossF32Mve(acc1Vec); \
}
#define MVE_INTR_CORR_DUAL_DEC_Y_INC_SIZE_F32(acc0, acc1, pX, pY, count)\
{ \
float32_t const *pSrcX, *pSrcY; \
f32x4_t acc0Vec, acc1Vec, xVec, yVec; \
uint32_t k; \
\
acc0Vec = vdupq_n_f32(0.0f); \
acc1Vec = vdupq_n_f32(0.0f); \
pSrcX = (float32_t const *) pX; \
pSrcY = (float32_t const *) pY; \
k = count >> 2; \
while (k > 0U) \
{ \
xVec = vld1q(pSrcX); \
pSrcX += 4; \
yVec = vldrwq_f32(&pSrcY[-1]); \
acc1Vec = vfmaq_f32(acc1Vec, xVec, yVec); \
yVec = vld1q(pSrcY); \
pSrcY += 4; \
acc0Vec = vfmaq_f32(acc0Vec, xVec, yVec); \
/* Decrement the loop counter */ \
k--; \
} \
k = count % 0x4U; \
/* use predication to finalize MAC sum */ \
/* acc1 requires 1 additional sample */ \
/* so add 1 to unmask an extra lane in final MAC computation */ \
mve_pred16_t p0 = vctp32q(k+1); \
xVec = vld1q(pSrcX); \
pSrcX += 4; \
yVec = vldrwq_f32(&pSrcY[-1]); \
acc1Vec = vfmaq_m_f32(acc1Vec, xVec, yVec,p0); \
/* acc0 requires exact number of sample */ \
/* disable extra lanes in final MAC computation */ \
p0 = vctp32q(k); \
yVec = vld1q(pSrcY); \
pSrcY += 4; \
acc0Vec = vfmaq_m_f32(acc0Vec, xVec, yVec,p0); \
\
acc0 = vecAddAcrossF32Mve(acc0Vec); \
acc1 = vecAddAcrossF32Mve(acc1Vec); \
}
#define MVE_INTR_CONV_DUAL_INC_X_DEC_SIZE_F32(acc0, acc1, pX, pY, count) \
{ \
float32_t const *pSrcX; \
f32x4_t acc0Vec, acc1Vec, xVec, yVec; \
uint32_t k; \
\
acc0Vec = vdupq_n_f32(0.0f); \
acc1Vec = vdupq_n_f32(0.0f); \
pSrcX = (float32_t const *) pX; \
k = (count - 1) >> 2; \
\
while (k > 0U) \
{ \
yVec = vldrwq_gather_shifted_offset_f32(pY, decrIdxVec); \
pY-=4; \
xVec = vldrwq_f32(&pSrcX[1]); \
acc1Vec = vfmaq_f32(acc1Vec, xVec, yVec); \
xVec = vld1q(pSrcX); pSrcX += 4; \
acc0Vec = vfmaq_f32(acc0Vec, xVec, yVec); \
/* Decrement the loop counter */ \
k--; \
} \
/* Loop with tail predication expected here */ \
k = (count - 1) % 0x4U; \
mve_pred16_t p0 = vctp32q(k); \
yVec = vldrwq_gather_shifted_offset_f32(pY, decrIdxVec); \
xVec = vldrwq_f32(&pSrcX[1]); \
acc1Vec = vfmaq_m_f32(acc1Vec, xVec, yVec, p0); \
xVec = vld1q(pSrcX); pSrcX += 4; \
p0 = vctp32q(k+1); \
acc0Vec = vfmaq_m_f32(acc0Vec, xVec, yVec, p0); \
\
acc0 = vecAddAcrossF32Mve(acc0Vec); \
acc1 = vecAddAcrossF32Mve(acc1Vec); \
}
#define MVE_INTR_CONV_DUAL_INC_X_FIXED_SIZE_F32(acc0, acc1, pX, pY, count) \
{ \
float32_t const *pSrcX; \
f32x4_t acc0Vec, acc1Vec, xVec, yVec; \
uint32_t k; \
\
acc0Vec = vdupq_n_f32(0.0f); \
acc1Vec = vdupq_n_f32(0.0f); \
pSrcX = (float32_t const *) pX; \
k = count >> 2; \
\
while (k > 0U) \
{ \
yVec = vldrwq_gather_shifted_offset_f32(pY, decrIdxVec); \
pY-=4; \
xVec = vldrwq_f32(&pSrcX[1]); \
acc1Vec = vfmaq_f32(acc1Vec, xVec, yVec); \
xVec = vld1q(pSrcX); pSrcX += 4; \
acc0Vec = vfmaq_f32(acc0Vec, xVec, yVec); \
/* Decrement the loop counter */ \
k--; \
} \
/* Loop with tail predication expected here */ \
k = count % 0x4U; \
if (k > 0U) \
{ \
mve_pred16_t p0 = vctp32q(k); \
yVec = vldrwq_gather_shifted_offset_f32(pY, decrIdxVec); \
xVec = vldrwq_f32(&pSrcX[1]); \
acc1Vec = vfmaq_m_f32(acc1Vec, xVec, yVec, p0); \
xVec = vld1q(pSrcX); pSrcX += 4; \
acc0Vec = vfmaq_m_f32(acc0Vec, xVec, yVec, p0); \
} \
acc0 = vecAddAcrossF32Mve(acc0Vec); \
acc1 = vecAddAcrossF32Mve(acc1Vec); \
}
#define MVE_INTR_CONV_DUAL_INC_Y_INC_SIZE_F32(acc0, acc1, pX, pY, count)\
{ \
float32_t const *pSrcX; \
const float32_t *pY1 = pY + 1; \
f32x4_t acc0Vec, acc1Vec, xVec, yVec; \
uint32_t k; \
\
acc0Vec = vdupq_n_f32(0.0f); \
acc1Vec = vdupq_n_f32(0.0f); \
pSrcX = (float32_t const *) pX; \
k = count >> 2; \
\
while (k > 0U) \
{ \
xVec = vld1q(pSrcX); pSrcX += 4; \
yVec = vldrwq_gather_shifted_offset_f32(pY, decrIdxVec); \
pY-=4; \
acc0Vec = vfmaq_f32(acc0Vec, xVec, yVec); \
yVec = vldrwq_gather_shifted_offset_f32(pY1, decrIdxVec); \
pY1-=4; \
acc1Vec = vfmaq_f32(acc1Vec, xVec, yVec); \
/* Decrement the loop counter */ \
k--; \
} \
k = count % 0x4U; \
/* use predication to finalize MAC sum */ \
/* acc0 requires exact number of sample */ \
/* disable extra lanes in final MAC computation */ \
mve_pred16_t p0 = vctp32q(k); \
xVec = vld1q(pSrcX); pSrcX += 4; \
yVec = vldrwq_gather_shifted_offset_f32(pY, decrIdxVec); \
acc0Vec = vfmaq_m_f32(acc0Vec, xVec, yVec, p0); \
yVec = vldrwq_gather_shifted_offset_f32(pY1, decrIdxVec); \
/* acc1 requires 1 additional sample */ \
/* so add 1 to unmask an extra lane in final MAC computation */ \
p0 = vctp32q(k+1); \
acc1Vec = vfmaq_m_f32(acc1Vec, xVec, yVec, p0); \
\
acc0 = vecAddAcrossF32Mve(acc0Vec); \
acc1 = vecAddAcrossF32Mve(acc1Vec); \
}
#define MVE_INTR_CONV_SINGLE_F32(acc, pX, pY, count) \
{ \
float32_t const *pSrcX; \
f32x4_t accVec, xVec, yVec; \
uint32_t k; \
\
accVec = vdupq_n_f32(0.0f); \
pSrcX = (float32_t const *) pX; \
k = count >> 2; \
\
while (k > 0U) \
{ \
yVec = vldrwq_gather_shifted_offset_f32(pY, decrIdxVec); \
pY-=4; \
xVec = vld1q(pSrcX); pSrcX += 4; \
accVec = vfmaq_f32(accVec, xVec, yVec); \
/* Decrement the loop counter */ \
k--; \
} \
/* Loop with tail predication expected here */ \
k = count % 0x4U; \
if (k > 0U) \
{ \
mve_pred16_t p0 = vctp32q(k); \
xVec = vld1q(pSrcX); pSrcX += 4; \
yVec = vldrwq_gather_shifted_offset_f32(pY, decrIdxVec); \
accVec = vfmaq_m_f32(accVec, xVec, yVec, p0); \
} \
acc = vecAddAcrossF32Mve(accVec); \
}
#endif /* (defined(ARM_MATH_MVEF) || defined(ARM_MATH_HELIUM)) && !defined(ARM_MATH_AUTOVECTORIZE)*/
#if (defined(ARM_MATH_MVEI) || defined(ARM_MATH_HELIUM))
#define MVE_INTR_CONV_SINGLE_Q31(acc, pX, pY, count) \
{ \
q31_t const *pSrcX; \
q31x4_t xVec, yVec; \
uint32_t k; \
\
pSrcX = (q31_t const *) pX; \
k = count >> 2; \
\
while (k > 0U) \
{ \
yVec = vldrwq_gather_shifted_offset_s32(pY, decrIdxVec); \
pY-=4; \
xVec = vld1q(pSrcX); pSrcX += 4; \
acc = vmlaldavaq(acc, xVec, yVec); \
/* Decrement the loop counter */ \
k--; \
} \
/* Loop with tail predication expected here */ \
k = count % 0x4U; \
if (k > 0U) \
{ \
mve_pred16_t p0 = vctp32q(k); \
xVec = vld1q(pSrcX); pSrcX += 4; \
yVec = vldrwq_gather_shifted_offset_s32(pY, decrIdxVec); \
acc = vmlaldavaq_p(acc, xVec, yVec, p0); \
} \
acc = asrl(acc, 31); \
}
#define MVE_INTR_CONV_DUAL_INC_Y_INC_SIZE_Q31(acc0, acc1, pX, pY, count)\
{ \
q31_t const *pSrcX; \
const q31_t *pY1 = pY + 1; \
q31x4_t xVec, yVec; \
uint32_t k; \
\
pSrcX = (q31_t const *) pX; \
k = count >> 2; \
\
while (k > 0U) \
{ \
xVec = vld1q(pSrcX); pSrcX += 4; \
yVec = vldrwq_gather_shifted_offset_s32(pY, decrIdxVec); \
pY-=4; \
acc0 = vmlaldavaq(acc0, xVec, yVec); \
yVec = vldrwq_gather_shifted_offset_s32(pY1, decrIdxVec); \
pY1-=4; \
acc1 = vmlaldavaq(acc1, xVec, yVec); \
/* Decrement the loop counter */ \
k--; \
} \
k = count % 0x4U; \
/* use predication to finalize MAC sum */ \
/* acc0 requires exact number of sample */ \
/* disable extra lanes in final MAC computation */ \
mve_pred16_t p0 = vctp32q(k); \
xVec = vld1q(pSrcX); pSrcX += 4; \
yVec = vldrwq_gather_shifted_offset_s32(pY, decrIdxVec); \
acc0 = vmlaldavaq_p(acc0, xVec, yVec, p0); \
yVec = vldrwq_gather_shifted_offset_s32(pY1, decrIdxVec); \
/* acc1 requires 1 additional sample */ \
/* so add 1 to unmask an extra lane in final MAC computation */ \
p0 = vctp32q(k+1); \
acc1 = vmlaldavaq_p(acc1, xVec, yVec, p0); \
\
acc0 = asrl(acc0, 31); \
acc1 = asrl(acc1, 31); \
}
#define MVE_INTR_CONV_DUAL_INC_X_DEC_SIZE_Q31(acc0, acc1, pX, pY, count) \
{ \
q31_t const *pSrcX; \
q31x4_t xVec, yVec; \
uint32_t k; \
\
pSrcX = (q31_t const *) pX; \
k = (count-1) >> 2; \
\
while (k > 0U) \
{ \
yVec = vldrwq_gather_shifted_offset_s32(pY, decrIdxVec); \
pY-=4; \
xVec = vldrwq_s32(&pSrcX[1]); \
acc1 = vmlaldavaq(acc1, xVec, yVec); \
xVec = vld1q(pSrcX); \
pSrcX += 4; \
acc0 = vmlaldavaq(acc0, xVec, yVec); \
/* Decrement the loop counter */ \
k--; \
} \
k = (count - 1) % 0x4U; \
/* use predication to finalize MAC sum */ \
/* acc1 requires exact number of sample (count-1) */ \
/* disable extra lanes in final MAC computation */ \
mve_pred16_t p0 = vctp32q(k); \
yVec = vldrwq_gather_shifted_offset_s32(pY, decrIdxVec); \
xVec = vldrwq_s32(&pSrcX[1]); \
acc1 = vmlaldavaq_p(acc1, xVec, yVec, p0); \
/* acc0 requires 1 additional sample (count) */ \
/* so add 1 to unmask an extra lane in final MAC computation */ \
p0 = vctp32q(k+1); \
xVec = vld1q(pSrcX); \
pSrcX += 4; \
acc0 = vmlaldavaq_p(acc0, xVec, yVec, p0); \
\
acc0 = asrl(acc0, 31); \
acc1 = asrl(acc1, 31); \
}
#define MVE_INTR_CONV_DUAL_INC_X_FIXED_SIZE_Q31(acc0, acc1, pX, pY, count) \
{ \
q31_t const *pSrcX; \
q31x4_t xVec, yVec; \
uint32_t k; \
\
pSrcX = (q31_t const *) pX; \
k = count >> 2; \
\
while (k > 0U) \
{ \
yVec = vldrwq_gather_shifted_offset_s32(pY, decrIdxVec); \
pY-=4; \
xVec = vldrwq_s32(&pSrcX[1]); \
acc1 = vmlaldavaq(acc1, xVec, yVec); \
xVec = vld1q(pSrcX); pSrcX += 4; \
acc0 = vmlaldavaq(acc0, xVec, yVec); \
/* Decrement the loop counter */ \
k--; \
} \
/* Loop with tail predication expected here */ \
k = count % 0x4U; \
if (k > 0U) \
{ \
mve_pred16_t p0 = vctp32q(k); \
yVec = vldrwq_gather_shifted_offset_s32(pY, decrIdxVec); \
xVec = vldrwq_s32(&pSrcX[1]); \
acc1 = vmlaldavaq_p(acc1, xVec, yVec, p0); \
xVec = vld1q(pSrcX); pSrcX += 4; \
acc0 = vmlaldavaq_p(acc0, xVec, yVec, p0); \
} \
acc0 = asrl(acc0, 31); \
acc1 = asrl(acc1, 31); \
}
#define MVE_INTR_CONV_QUAD_INC_X_FIXED_SIZE_Q31(acc0, acc1, acc2, acc3, pX, pY, count) \
{ \
q31_t const *pSrcX; \
q31x4_t xVec, yVec; \
uint32_t k; \
\
pSrcX = (q31_t const *) pX; \
k = count >> 2; \
\
while (k > 0U) \
{ \
yVec = vldrwq_gather_shifted_offset_s32(pY, decrIdxVec); \
pY-=4; \
xVec = vldrwq_s32(&pSrcX[1]); \
acc1 = vmlaldavaq(acc1, xVec, yVec); \
xVec = vldrwq_s32(&pSrcX[2]); \
acc2 = vmlaldavaq(acc2, xVec, yVec); \
xVec = vldrwq_s32(&pSrcX[3]); \
acc3 = vmlaldavaq(acc3, xVec, yVec); \
xVec = vld1q(pSrcX); pSrcX += 4; \
acc0 = vmlaldavaq(acc0, xVec, yVec); \
/* Decrement the loop counter */ \
k--; \
} \
/* Loop with tail predication expected here */ \
k = count % 0x4U; \
if (k > 0U) \
{ \
mve_pred16_t p0 = vctp32q(k); \
yVec = vldrwq_gather_shifted_offset_s32(pY, decrIdxVec); \
xVec = vldrwq_s32(&pSrcX[1]); \
acc1 = vmlaldavaq_p(acc1, xVec, yVec, p0); \
xVec = vldrwq_s32(&pSrcX[2]); \
acc2 = vmlaldavaq_p(acc2, xVec, yVec, p0); \
xVec = vldrwq_s32(&pSrcX[3]); \
acc3 = vmlaldavaq_p(acc3, xVec, yVec, p0); \
xVec = vld1q(pSrcX); pSrcX += 4; \
acc0 = vmlaldavaq_p(acc0, xVec, yVec, p0); \
} \
acc0 = asrl(acc0, 31); \
acc1 = asrl(acc1, 31); \
acc2 = asrl(acc2, 31); \
acc3 = asrl(acc3, 31); \
}
/*
 * Q31 correlation core: accumulates 2 MAC sums where acc1 uses Y shifted
 * back by one sample (pSrcY[-1]) relative to acc0. Growing-size variant:
 * acc1 needs one extra sample, so its final predicated MAC unmasks one
 * extra lane (vctp32q(k+1)), while acc0 uses the exact count (vctp32q(k)).
 *
 * acc0, acc1 : 64-bit accumulators, updated in place; shifted right by 31
 *              on exit (Q31 renormalization).
 * pX, pY     : sample pointers, read through local copies (not modified).
 * count      : number of MAC operations for acc0; acc1 performs count+1.
 */
#define MVE_INTR_CORR_DUAL_DEC_Y_INC_SIZE_Q31(acc0, acc1, pX, pY, count)\
{ \
    q31_t const *pSrcX, *pSrcY; \
    q31x4_t xVec, yVec; \
    uint32_t k; \
 \
    pSrcX = (q31_t const *) pX; \
    pSrcY = (q31_t const *) pY; \
    k = count >> 2; \
 \
    while (k > 0U) \
    { \
        xVec = vld1q(pSrcX); pSrcX += 4; \
        yVec = vldrwq_s32(&pSrcY[-1]); \
        acc1 = vmlaldavaq(acc1, xVec, yVec); \
        yVec = vld1q(pSrcY); pSrcY += 4; \
        acc0 = vmlaldavaq(acc0, xVec, yVec); \
        /* Decrement the loop counter */ \
        k--; \
    } \
    k = count % 0x4U; \
    /* use predication to finalize MAC sum */ \
    /* acc1 requires 1 additional sample */ \
    /* so add 1 to unmask an extra lane in final MAC computation */ \
    mve_pred16_t p0 = vctp32q(k+1); \
    xVec = vld1q(pSrcX); pSrcX += 4; \
    yVec = vldrwq_s32(&pSrcY[-1]); \
    acc1 = vmlaldavaq_p(acc1, xVec, yVec,p0); \
    /* acc0 requires exact number of sample */ \
    /* disable extra lanes in final MAC computation */ \
    p0 = vctp32q(k); \
    yVec = vld1q(pSrcY); pSrcY += 4; \
    acc0 = vmlaldavaq_p(acc0, xVec, yVec,p0); \
 \
    acc0 = asrl(acc0, 31); \
    acc1 = asrl(acc1, 31); \
}
/*
 * Q31 correlation core, single accumulator: straightforward dot product of
 * 'count' samples from pX and pY, 4 lanes per iteration, tail handled with
 * vctp32q predication.
 *
 * acc   : 64-bit accumulator, updated in place; shifted right by 31 on exit.
 * pX,pY : sample pointers, read through local copies (not modified).
 * count : number of MAC operations.
 */
#define MVE_INTR_CORR_SINGLE_Q31(acc, pX, pY, count)\
{ \
    q31_t const *pSrcX, *pSrcY; \
    q31x4_t xVec, yVec; \
    uint32_t k; \
 \
    pSrcX = (q31_t const *) pX; \
    pSrcY = (q31_t const *) pY; \
    k = count >> 2; \
 \
    while (k > 0U) \
    { \
        xVec = vld1q(pSrcX); pSrcX += 4; \
        yVec = vld1q(pSrcY); pSrcY += 4; \
        acc = vmlaldavaq(acc, xVec, yVec); \
        /* Decrement the loop counter */ \
        k--; \
    } \
    /* tail predication expected here */ \
    k = count % 0x4U; \
    if (k > 0U) \
    { \
        mve_pred16_t p0 = vctp32q(k); \
        xVec = vld1q(pSrcX); pSrcX += 4; \
        yVec = vld1q(pSrcY); pSrcY += 4; \
        acc = vmlaldavaq_p(acc, xVec, yVec, p0); \
    } \
    acc = asrl(acc, 31); \
}
/*
 * Q31 correlation core: 4 MAC sums (acc0..acc3) over a fixed size 'count',
 * each reusing one forward Y load against X taken at 4 successive start
 * offsets (pSrcX[0..3]). Tail (count % 4) is predicated identically for all
 * four accumulators.
 *
 * acc0..acc3 : 64-bit accumulators, updated in place; shifted right by 31
 *              on exit.
 * pX, pY     : sample pointers, read through local copies (not modified).
 * count      : number of MAC operations per accumulator.
 */
#define MVE_INTR_CORR_QUAD_INC_X_FIXED_SIZE_Q31(acc0, acc1, acc2, acc3, pX, pY, count)\
{ \
    q31_t const *pSrcX, *pSrcY; \
    q31x4_t xVec, yVec; \
    uint32_t k; \
 \
    pSrcX = (q31_t const *) pX; \
    pSrcY = (q31_t const *) pY; \
    k = count >> 2; \
 \
    while (k > 0U) \
    { \
        yVec = vld1q(pSrcY); pSrcY += 4; \
        xVec = vldrwq_s32(&pSrcX[1]); \
        acc1 = vmlaldavaq(acc1, xVec, yVec); \
        xVec = vldrwq_s32(&pSrcX[2]); \
        acc2 = vmlaldavaq(acc2, xVec, yVec); \
        xVec = vldrwq_s32(&pSrcX[3]); \
        acc3 = vmlaldavaq(acc3, xVec, yVec); \
        xVec = vld1q(pSrcX); pSrcX += 4; \
        acc0 = vmlaldavaq(acc0, xVec, yVec); \
        /* Decrement the loop counter */ \
        k--; \
    } \
    /* loop + tail predication expected here */ \
    k = count % 0x4U; \
    if (k > 0U) \
    { \
        mve_pred16_t p0 = vctp32q(k); \
        yVec = vld1q(pSrcY); pSrcY += 4; \
        xVec = vldrwq_s32(&pSrcX[1]); \
        acc1 = vmlaldavaq_p(acc1, xVec, yVec, p0); \
        xVec = vldrwq_s32(&pSrcX[2]); \
        acc2 = vmlaldavaq_p(acc2, xVec, yVec, p0); \
        xVec = vldrwq_s32(&pSrcX[3]); \
        acc3 = vmlaldavaq_p(acc3, xVec, yVec, p0); \
        xVec = vld1q(pSrcX); pSrcX += 4; \
        acc0 = vmlaldavaq_p(acc0, xVec, yVec, p0); \
    } \
 \
    acc0 = asrl(acc0, 31); \
    acc1 = asrl(acc1, 31); \
    acc2 = asrl(acc2, 31); \
    acc3 = asrl(acc3, 31); \
}
/*
 * Q31 correlation core: 2 MAC sums (acc0, acc1) over a fixed size 'count',
 * with acc1 using X advanced by one sample (pSrcX[1]) against the same Y
 * vector. Tail (count % 4) is predicated identically for both accumulators.
 *
 * acc0, acc1 : 64-bit accumulators, updated in place; shifted right by 31
 *              on exit.
 * pX, pY     : sample pointers, read through local copies (not modified).
 * count      : number of MAC operations per accumulator.
 */
#define MVE_INTR_CORR_DUAL_INC_X_FIXED_SIZE_Q31(acc0, acc1, pX, pY, count)\
{ \
    q31_t const *pSrcX, *pSrcY; \
    q31x4_t xVec, yVec; \
    uint32_t k; \
 \
    pSrcX = (q31_t const *) pX; \
    pSrcY = (q31_t const *) pY; \
    k = count >> 2; \
 \
    while (k > 0U) \
    { \
        yVec = vld1q(pSrcY); pSrcY += 4; \
        xVec = vldrwq_s32(&pSrcX[1]); \
        acc1 = vmlaldavaq(acc1, xVec, yVec); \
        xVec = vld1q(pSrcX); pSrcX += 4; \
        acc0 = vmlaldavaq(acc0, xVec, yVec); \
        /* Decrement the loop counter */ \
        k--; \
    } \
    /* loop + tail predication expected here */ \
    k = count % 0x4U; \
    if (k > 0U) \
    { \
        mve_pred16_t p0 = vctp32q(k); \
        yVec = vld1q(pSrcY); pSrcY += 4; \
        xVec = vldrwq_s32(&pSrcX[1]); \
        acc1 = vmlaldavaq_p(acc1, xVec, yVec, p0); \
        xVec = vld1q(pSrcX); pSrcX += 4; \
        acc0 = vmlaldavaq_p(acc0, xVec, yVec, p0); \
    } \
 \
    acc0 = asrl(acc0, 31); \
    acc1 = asrl(acc1, 31); \
}
/*
 * Q31 correlation core, shrinking-size variant: acc1 accumulates exactly
 * count-1 MACs over X advanced by one sample, acc0 accumulates count MACs.
 * The main loop runs (count-1)/4 times; the final predicated step uses
 * vctp32q(k) for acc1 and vctp32q(k+1) for acc0 (one extra unmasked lane).
 *
 * acc0, acc1 : 64-bit accumulators, updated in place; shifted right by 31
 *              on exit.
 * pX, pY     : sample pointers, read through local copies (not modified).
 * count      : MAC count for acc0; acc1 performs count-1.
 */
#define MVE_INTR_CORR_DUAL_INC_X_DEC_SIZE_Q31(acc0, acc1, pX, pY, count)\
{ \
    q31_t const *pSrcX, *pSrcY; \
    q31x4_t xVec, yVec; \
    uint32_t k; \
 \
    pSrcX = (q31_t const *) pX; \
    pSrcY = (q31_t const *) pY; \
    k = (count-1) >> 2; \
 \
    while (k > 0U) \
    { \
        yVec = vld1q(pSrcY); pSrcY += 4; \
        xVec = vldrwq_s32(&pSrcX[1]); \
        acc1 = vmlaldavaq(acc1, xVec, yVec); \
        xVec = vld1q(pSrcX); pSrcX += 4; \
        acc0 = vmlaldavaq(acc0, xVec, yVec); \
        /* Decrement the loop counter */ \
        k--; \
    } \
    /* use predication to finalize MAC sum */ \
    /* acc1 requires exact number of sample (count-1) */ \
    /* disable extra lanes in final MAC computation */ \
    k = (count-1) % 0x4U; \
    mve_pred16_t p0 = vctp32q(k); \
    yVec = vld1q(pSrcY); pSrcY += 4; \
    xVec = vldrwq_s32(&pSrcX[1]); \
    acc1 = vmlaldavaq_p(acc1, xVec, yVec, p0); \
    /* acc0 requires 1 additional sample (count) */ \
    /* so add 1 to unmask an extra lane in final MAC computation */ \
    p0 = vctp32q(k+1); \
    xVec = vld1q(pSrcX); pSrcX += 4; \
    acc0 = vmlaldavaq_p(acc0, xVec, yVec, p0); \
 \
    acc0 = asrl(acc0, 31); \
    acc1 = asrl(acc1, 31); \
}
/*
 * Q15 correlation core: 2 MAC sums with acc1 reading Y one sample behind
 * (pSrcY[-1]); growing-size variant, so acc1's final predicated MAC unmasks
 * one extra lane (vctp16q(k+1)) while acc0 uses the exact count.
 * 8 lanes per iteration.
 *
 * acc0, acc1 : 64-bit accumulators, updated in place; shifted right by 15
 *              then saturated to 16 bits on exit.
 * pX, pY     : sample pointers, read through local copies (not modified).
 * count      : MAC count for acc0; acc1 performs count+1.
 */
#define MVE_INTR_CORR_DUAL_DEC_Y_INC_SIZE_Q15(acc0, acc1, pX, pY, count)\
{ \
    q15_t const *pSrcX, *pSrcY; \
    q15x8_t xVec, yVec; \
    uint32_t k; \
 \
    pSrcX = (q15_t const *) pX; \
    pSrcY = (q15_t const *) pY; \
    k = count >> 3; \
    while (k > 0U) \
    { \
        xVec = vld1q(pSrcX); pSrcX += 8; \
        yVec = vldrhq_s16(&pSrcY[-1]); \
        acc1 = vmlaldavaq(acc1, xVec, yVec); \
        yVec = vld1q(pSrcY); pSrcY += 8; \
        acc0 = vmlaldavaq(acc0, xVec, yVec); \
        /* Decrement the loop counter */ \
        k--; \
    } \
    k = count % 0x8U; \
    /* use predication to finalize MAC sum */ \
    /* acc1 requires 1 additional sample */ \
    /* so add 1 to unmask an extra lane in final MAC computation */ \
    mve_pred16_t p0 = vctp16q(k+1); \
    xVec = vld1q(pSrcX); pSrcX += 8; \
    yVec = vldrhq_s16(&pSrcY[-1]); \
    acc1 = vmlaldavaq_p(acc1, xVec, yVec,p0); \
    /* acc0 requires exact number of sample */ \
    /* disable extra lanes in final MAC computation */ \
    p0 = vctp16q(k); \
    yVec = vld1q(pSrcY); pSrcY += 8; \
    acc0 = vmlaldavaq_p(acc0, xVec, yVec,p0); \
 \
    acc0 = asrl(acc0, 15); \
    acc1 = asrl(acc1, 15); \
    acc0 = __SSAT(acc0, 16); \
    acc1 = __SSAT(acc1, 16); \
}
/*
 * Q15 correlation core, single accumulator: dot product of 'count' samples,
 * 8 lanes per iteration, tail handled with vctp16q predication.
 *
 * acc   : 64-bit accumulator, updated in place; shifted right by 15 and
 *         saturated to 16 bits on exit.
 * pX,pY : sample pointers, read through local copies (not modified).
 * count : number of MAC operations.
 */
#define MVE_INTR_CORR_SINGLE_Q15(acc, pX, pY, count)\
{ \
    q15_t const *pSrcX, *pSrcY; \
    q15x8_t xVec, yVec; \
    uint32_t k; \
 \
    pSrcX = (q15_t const *) pX; \
    pSrcY = (q15_t const *) pY; \
    k = count >> 3; \
    while (k > 0U) \
    { \
        xVec = vld1q(pSrcX); pSrcX += 8; \
        yVec = vld1q(pSrcY); pSrcY += 8; \
        acc = vmlaldavaq(acc, xVec, yVec); \
        /* Decrement the loop counter */ \
        k--; \
    } \
    /* tail predication expected here */ \
    k = count % 0x8U; \
    if (k > 0U) \
    { \
        mve_pred16_t p0 = vctp16q(k); \
        xVec = vld1q(pSrcX); pSrcX += 8; \
        yVec = vld1q(pSrcY); pSrcY += 8; \
        acc = vmlaldavaq_p(acc, xVec, yVec, p0); \
    } \
    acc = asrl(acc, 15); \
    acc = __SSAT(acc, 16); \
}
/*
 * Q15 correlation core: 4 MAC sums (acc0..acc3) over a fixed size 'count',
 * each reusing one forward Y load against X taken at 4 successive start
 * offsets (pSrcX[0..3]). 8 lanes per iteration; tail (count % 8) predicated
 * identically for all four accumulators.
 *
 * acc0..acc3 : 64-bit accumulators, updated in place; shifted right by 15
 *              then saturated to 16 bits on exit.
 * pX, pY     : sample pointers, read through local copies (not modified).
 * count      : number of MAC operations per accumulator.
 */
#define MVE_INTR_CORR_QUAD_INC_X_FIXED_SIZE_Q15(acc0, acc1, acc2, acc3, pX, pY, count)\
{ \
    q15_t const *pSrcX, *pSrcY; \
    q15x8_t xVec, yVec; \
    uint32_t k; \
 \
    pSrcX = (q15_t const *) pX; \
    pSrcY = (q15_t const *) pY; \
    k = count >> 3; \
 \
    while (k > 0U) \
    { \
        yVec = vld1q(pSrcY); pSrcY += 8; \
        xVec = vldrhq_s16(&pSrcX[1]); \
        acc1 = vmlaldavaq(acc1, xVec, yVec); \
        xVec = vldrhq_s16(&pSrcX[2]); \
        acc2 = vmlaldavaq(acc2, xVec, yVec); \
        xVec = vldrhq_s16(&pSrcX[3]); \
        acc3 = vmlaldavaq(acc3, xVec, yVec); \
        xVec = vld1q(pSrcX); pSrcX += 8; \
        acc0 = vmlaldavaq(acc0, xVec, yVec); \
        /* Decrement the loop counter */ \
        k--; \
    } \
    /* loop + tail predication expected here */ \
    k = count % 0x8U; \
    if (k > 0U) \
    { \
        mve_pred16_t p0 = vctp16q(k); \
        yVec = vld1q(pSrcY); pSrcY += 8; \
        xVec = vldrhq_s16(&pSrcX[1]); \
        acc1 = vmlaldavaq_p(acc1, xVec, yVec, p0); \
        xVec = vldrhq_s16(&pSrcX[2]); \
        acc2 = vmlaldavaq_p(acc2, xVec, yVec, p0); \
        xVec = vldrhq_s16(&pSrcX[3]); \
        acc3 = vmlaldavaq_p(acc3, xVec, yVec, p0); \
        xVec = vld1q(pSrcX); pSrcX += 8; \
        acc0 = vmlaldavaq_p(acc0, xVec, yVec, p0); \
    } \
 \
    acc0 = asrl(acc0, 15); \
    acc1 = asrl(acc1, 15); \
    acc2 = asrl(acc2, 15); \
    acc3 = asrl(acc3, 15); \
    acc0 = __SSAT(acc0, 16); \
    acc1 = __SSAT(acc1, 16); \
    acc2 = __SSAT(acc2, 16); \
    acc3 = __SSAT(acc3, 16); \
}
/*
 * Q15 correlation core: 2 MAC sums (acc0, acc1) over a fixed size 'count',
 * with acc1 using X advanced by one sample (pSrcX[1]) against the same Y
 * vector. 8 lanes per iteration; tail (count % 8) predicated identically
 * for both accumulators.
 *
 * acc0, acc1 : 64-bit accumulators, updated in place; shifted right by 15
 *              then saturated to 16 bits on exit.
 * pX, pY     : sample pointers, read through local copies (not modified).
 * count      : number of MAC operations per accumulator.
 */
#define MVE_INTR_CORR_DUAL_INC_X_FIXED_SIZE_Q15(acc0, acc1, pX, pY, count)\
{ \
    q15_t const *pSrcX, *pSrcY; \
    q15x8_t xVec, yVec; \
    uint32_t k; \
 \
    pSrcX = (q15_t const *) pX; \
    pSrcY = (q15_t const *) pY; \
    k = count >> 3; \
 \
    while (k > 0U) \
    { \
        yVec = vld1q(pSrcY); pSrcY += 8; \
        xVec = vldrhq_s16(&pSrcX[1]); \
        acc1 = vmlaldavaq(acc1, xVec, yVec); \
        xVec = vld1q(pSrcX); pSrcX += 8; \
        acc0 = vmlaldavaq(acc0, xVec, yVec); \
        /* Decrement the loop counter */ \
        k--; \
    } \
    /* loop + tail predication expected here */ \
    k = count % 0x8U; \
    if (k > 0U) \
    { \
        mve_pred16_t p0 = vctp16q(k); \
        yVec = vld1q(pSrcY); pSrcY += 8; \
        xVec = vldrhq_s16(&pSrcX[1]); \
        acc1 = vmlaldavaq_p(acc1, xVec, yVec, p0); \
        xVec = vld1q(pSrcX); pSrcX += 8; \
        acc0 = vmlaldavaq_p(acc0, xVec, yVec, p0); \
    } \
 \
    acc0 = asrl(acc0, 15); \
    acc1 = asrl(acc1, 15); \
    acc0 = __SSAT(acc0, 16); \
    acc1 = __SSAT(acc1, 16); \
}
/*
 * Q15 correlation core, shrinking-size variant: acc1 accumulates exactly
 * count-1 MACs over X advanced by one sample, acc0 accumulates count MACs.
 * Main loop runs (count-1)/8 times; the final predicated step uses
 * vctp16q(k) for acc1 and vctp16q(k+1) for acc0 (one extra unmasked lane).
 *
 * acc0, acc1 : 64-bit accumulators, updated in place; shifted right by 15
 *              then saturated to 16 bits on exit.
 * pX, pY     : sample pointers, read through local copies (not modified).
 * count      : MAC count for acc0; acc1 performs count-1.
 */
#define MVE_INTR_CORR_DUAL_INC_X_DEC_SIZE_Q15(acc0, acc1, pX, pY, count)\
{ \
    q15_t const *pSrcX, *pSrcY; \
    q15x8_t xVec, yVec; \
    uint32_t k; \
 \
    pSrcX = (q15_t const *) pX; \
    pSrcY = (q15_t const *) pY; \
    k = (count-1) >> 3; \
 \
    while (k > 0U) \
    { \
        yVec = vld1q(pSrcY); pSrcY += 8; \
        xVec = vldrhq_s16(&pSrcX[1]); \
        acc1 = vmlaldavaq(acc1, xVec, yVec); \
        xVec = vld1q(pSrcX); pSrcX += 8; \
        acc0 = vmlaldavaq(acc0, xVec, yVec); \
        /* Decrement the loop counter */ \
        k--; \
    } \
    /* use predication to finalize MAC sum */ \
    /* acc1 requires exact number of sample (count-1) */ \
    /* disable extra lanes in final MAC computation */ \
    k = (count-1) % 0x8U; \
    mve_pred16_t p0 = vctp16q(k); \
    yVec = vld1q(pSrcY); pSrcY += 8; \
    xVec = vldrhq_s16(&pSrcX[1]); \
    acc1 = vmlaldavaq_p(acc1, xVec, yVec, p0); \
    /* acc0 requires 1 additional sample (count) */ \
    /* so add 1 to unmask an extra lane in final MAC computation */ \
    p0 = vctp16q(k+1); \
    xVec = vld1q(pSrcX); pSrcX += 8; \
    acc0 = vmlaldavaq_p(acc0, xVec, yVec, p0); \
 \
    acc0 = asrl(acc0, 15); \
    acc1 = asrl(acc1, 15); \
    acc0 = __SSAT(acc0, 16); \
    acc1 = __SSAT(acc1, 16); \
}
/*
 * Q15 convolution core: 2 MAC sums with Y read backwards via gathers, acc1
 * using a Y start point one sample ahead (pY1 = pY + 1). Growing-size
 * variant: acc0's final predicated MAC uses the exact count (vctp16q(k)),
 * acc1 unmasks one extra lane (vctp16q(k+1)).
 *
 * acc0, acc1 : 64-bit accumulators, updated in place; shifted right by 15
 *              then saturated to 16 bits on exit.
 * pX         : input sample pointer (read through a local copy).
 * pY         : coefficient pointer; the MACRO DECREMENTS the caller's pY by
 *              8 per main-loop iteration (caller-visible).
 * count      : MAC count for acc0; acc1 performs count+1.
 *
 * NOTE(review): expands references to 'decrIdxVec' which must be in scope
 * at the expansion site.
 */
#define MVE_INTR_CONV_DUAL_INC_Y_INC_SIZE_Q15(acc0, acc1, pX, pY, count)\
{ \
    q15_t const *pSrcX; \
    const q15_t *pY1 = pY + 1; \
    q15x8_t xVec, yVec; \
    uint32_t k; \
 \
    pSrcX = (q15_t const *) pX; \
    k = count >> 3; \
 \
    while (k > 0U) \
    { \
        xVec = vld1q(pSrcX); pSrcX += 8; \
        yVec = vldrhq_gather_shifted_offset_s16(pY, decrIdxVec); \
        pY-=8; \
        acc0 = vmlaldavaq(acc0, xVec, yVec); \
        yVec = vldrhq_gather_shifted_offset_s16(pY1, decrIdxVec); \
        pY1-=8; \
        acc1 = vmlaldavaq(acc1, xVec, yVec); \
        /* Decrement the loop counter */ \
        k--; \
    } \
    k = count % 0x8U; \
    /* use predication to finalize MAC sum */ \
    /* acc0 requires exact number of sample */ \
    /* disable extra lanes in final MAC computation */ \
    mve_pred16_t p0 = vctp16q(k); \
    xVec = vld1q(pSrcX); pSrcX += 8; \
    yVec = vldrhq_gather_shifted_offset_s16(pY, decrIdxVec); \
    acc0 = vmlaldavaq_p(acc0, xVec, yVec, p0); \
    yVec = vldrhq_gather_shifted_offset_s16(pY1, decrIdxVec); \
    /* acc1 requires 1 additional sample */ \
    /* so add 1 to unmask an extra lane in final MAC computation */ \
    p0 = vctp16q(k+1); \
    acc1 = vmlaldavaq_p(acc1, xVec, yVec, p0); \
 \
    acc0 = asrl(acc0, 15); \
    acc1 = asrl(acc1, 15); \
    acc0 = __SSAT(acc0, 16); \
    acc1 = __SSAT(acc1, 16); \
}
/*
 * Q15 convolution core, single accumulator: X read forwards, Y read
 * backwards via a gather with the descending index vector. Tail handled
 * with vctp16q predication.
 *
 * acc   : 64-bit accumulator, updated in place; shifted right by 15 and
 *         saturated to 16 bits on exit.
 * pX    : input sample pointer (read through a local copy).
 * pY    : coefficient pointer; the MACRO DECREMENTS the caller's pY by 8
 *         per main-loop iteration (caller-visible).
 * count : number of MAC operations.
 *
 * NOTE(review): expands references to 'decrIdxVec' which must be in scope
 * at the expansion site.
 */
#define MVE_INTR_CONV_SINGLE_Q15(acc, pX, pY, count) \
{ \
    q15_t const *pSrcX; \
    q15x8_t xVec, yVec; \
    uint32_t k; \
 \
    pSrcX = (q15_t const *) pX; \
    k = count >> 3; \
 \
    while (k > 0U) \
    { \
        yVec = vldrhq_gather_shifted_offset_s16(pY, decrIdxVec); \
        pY-=8; \
        xVec = vld1q(pSrcX); pSrcX += 8; \
        acc = vmlaldavaq(acc, xVec, yVec); \
        /* Decrement the loop counter */ \
        k--; \
    } \
    /* Loop with tail predication expected here */ \
    k = count % 0x8U; \
    if (k > 0U) \
    { \
        mve_pred16_t p0 = vctp16q(k); \
        xVec = vld1q(pSrcX); pSrcX += 8; \
        yVec = vldrhq_gather_shifted_offset_s16(pY, decrIdxVec); \
        acc = vmlaldavaq_p(acc, xVec, yVec, p0); \
    } \
    acc = asrl(acc, 15); \
    acc = __SSAT(acc, 16); \
}
/*
 * Q15 convolution core: 4 MAC sums (acc0..acc3) over a fixed size 'count',
 * X read at 4 successive start offsets (pSrcX[0..3]) against one reversed
 * Y gather per iteration. Tail (count % 8) predicated identically for all
 * four accumulators.
 *
 * acc0..acc3 : 64-bit accumulators, updated in place; shifted right by 15
 *              then saturated to 16 bits on exit.
 * pX         : input sample pointer (read through a local copy).
 * pY         : coefficient pointer; the MACRO DECREMENTS the caller's pY by
 *              8 per main-loop iteration (caller-visible).
 * count      : number of MAC operations per accumulator.
 *
 * NOTE(review): expands references to 'decrIdxVec' which must be in scope
 * at the expansion site.
 */
#define MVE_INTR_CONV_QUAD_INC_X_FIXED_SIZE_Q15(acc0, acc1, acc2, acc3, pX, pY, count) \
{ \
    q15_t const *pSrcX; \
    q15x8_t xVec, yVec; \
    uint32_t k; \
 \
    pSrcX = (q15_t const *) pX; \
    k = count >> 3; \
 \
    while (k > 0U) \
    { \
        yVec = vldrhq_gather_shifted_offset_s16(pY, decrIdxVec); \
        pY-=8; \
        xVec = vldrhq_s16(&pSrcX[1]); \
        acc1 = vmlaldavaq(acc1, xVec, yVec); \
        xVec = vldrhq_s16(&pSrcX[2]); \
        acc2 = vmlaldavaq(acc2, xVec, yVec); \
        xVec = vldrhq_s16(&pSrcX[3]); \
        acc3 = vmlaldavaq(acc3, xVec, yVec); \
        xVec = vld1q(pSrcX); pSrcX += 8; \
        acc0 = vmlaldavaq(acc0, xVec, yVec); \
        /* Decrement the loop counter */ \
        k--; \
    } \
    /* Loop with tail predication expected here */ \
    k = count % 0x8U; \
    if (k > 0U) \
    { \
        mve_pred16_t p0 = vctp16q(k); \
        yVec = vldrhq_gather_shifted_offset_s16(pY, decrIdxVec); \
        xVec = vldrhq_s16(&pSrcX[1]); \
        acc1 = vmlaldavaq_p(acc1, xVec, yVec, p0); \
        xVec = vldrhq_s16(&pSrcX[2]); \
        acc2 = vmlaldavaq_p(acc2, xVec, yVec, p0); \
        xVec = vldrhq_s16(&pSrcX[3]); \
        acc3 = vmlaldavaq_p(acc3, xVec, yVec, p0); \
        xVec = vld1q(pSrcX); pSrcX += 8; \
        acc0 = vmlaldavaq_p(acc0, xVec, yVec, p0); \
    } \
    acc0 = asrl(acc0, 15); \
    acc1 = asrl(acc1, 15); \
    acc2 = asrl(acc2, 15); \
    acc3 = asrl(acc3, 15); \
    acc0 = __SSAT(acc0, 16); \
    acc1 = __SSAT(acc1, 16); \
    acc2 = __SSAT(acc2, 16); \
    acc3 = __SSAT(acc3, 16); \
}
/*
 * Q15 convolution core: 2 MAC sums over a fixed size 'count', acc1 using
 * X advanced by one sample (pSrcX[1]) against the same reversed Y gather.
 * Tail (count % 8) predicated identically for both accumulators.
 *
 * acc0, acc1 : 64-bit accumulators, updated in place; shifted right by 15
 *              then saturated to 16 bits on exit.
 * pX         : input sample pointer (read through a local copy).
 * pY         : coefficient pointer; the MACRO DECREMENTS the caller's pY by
 *              8 per main-loop iteration (caller-visible).
 * count      : number of MAC operations per accumulator.
 *
 * NOTE(review): expands references to 'decrIdxVec' which must be in scope
 * at the expansion site.
 */
#define MVE_INTR_CONV_DUAL_INC_X_FIXED_SIZE_Q15(acc0, acc1, pX, pY, count) \
{ \
    q15_t const *pSrcX; \
    q15x8_t xVec, yVec; \
    uint32_t k; \
 \
    pSrcX = (q15_t const *) pX; \
    k = count >> 3; \
 \
    while (k > 0U) \
    { \
        yVec = vldrhq_gather_shifted_offset_s16(pY, decrIdxVec); \
        pY-=8; \
        xVec = vldrhq_s16(&pSrcX[1]); \
        acc1 = vmlaldavaq(acc1, xVec, yVec); \
        xVec = vld1q(pSrcX); pSrcX += 8; \
        acc0 = vmlaldavaq(acc0, xVec, yVec); \
        /* Decrement the loop counter */ \
        k--; \
    } \
    /* Loop with tail predication expected here */ \
    k = count % 0x8U; \
    if (k > 0U) \
    { \
        mve_pred16_t p0 = vctp16q(k); \
        yVec = vldrhq_gather_shifted_offset_s16(pY, decrIdxVec); \
        xVec = vldrhq_s16(&pSrcX[1]); \
        acc1 = vmlaldavaq_p(acc1, xVec, yVec, p0); \
        xVec = vld1q(pSrcX); pSrcX += 8; \
        acc0 = vmlaldavaq_p(acc0, xVec, yVec, p0); \
    } \
    acc0 = asrl(acc0, 15); \
    acc1 = asrl(acc1, 15); \
    acc0 = __SSAT(acc0, 16); \
    acc1 = __SSAT(acc1, 16); \
}
/*
 * Q15 convolution core, shrinking-size variant: acc1 accumulates exactly
 * count-1 MACs (X advanced by one sample), acc0 accumulates count MACs.
 * Main loop runs (count-1)/8 times; the final step uses vctp16q(k) for
 * acc1 and vctp16q(k+1) for acc0 (one extra unmasked lane).
 *
 * acc0, acc1 : 64-bit accumulators, updated in place; shifted right by 15
 *              then saturated to 16 bits on exit.
 * pX         : input sample pointer (read through a local copy).
 * pY         : coefficient pointer; the MACRO DECREMENTS the caller's pY by
 *              8 per main-loop iteration (caller-visible).
 * count      : MAC count for acc0; acc1 performs count-1.
 *
 * NOTE(review): expands references to 'decrIdxVec' which must be in scope
 * at the expansion site.
 */
#define MVE_INTR_CONV_DUAL_INC_X_DEC_SIZE_Q15(acc0, acc1, pX, pY, count) \
{ \
    q15_t const *pSrcX; \
    q15x8_t xVec, yVec; \
    uint32_t k; \
 \
    pSrcX = (q15_t const *) pX; \
    k = (count-1) >> 3; \
 \
    while (k > 0U) \
    { \
        yVec = vldrhq_gather_shifted_offset_s16(pY, decrIdxVec); \
        pY-=8; \
        xVec = vldrhq_s16(&pSrcX[1]); \
        acc1 = vmlaldavaq(acc1, xVec, yVec); \
        xVec = vld1q(pSrcX); pSrcX += 8; \
        acc0 = vmlaldavaq(acc0, xVec, yVec); \
        /* Decrement the loop counter */ \
        k--; \
    } \
    k = (count - 1) % 0x8U; \
    /* use predication to finalize MAC sum */ \
    /* acc1 requires exact number of sample (count-1) */ \
    /* disable extra lanes in final MAC computation */ \
    mve_pred16_t p0 = vctp16q(k); \
    yVec = vldrhq_gather_shifted_offset_s16(pY, decrIdxVec); \
    xVec = vldrhq_s16(&pSrcX[1]); \
    acc1 = vmlaldavaq_p(acc1, xVec, yVec, p0); \
    /* acc0 requires 1 additional sample (count) */ \
    /* so add 1 to unmask an extra lane in final MAC computation */ \
    p0 = vctp16q(k+1); \
    xVec = vld1q(pSrcX); pSrcX += 8; \
    acc0 = vmlaldavaq_p(acc0, xVec, yVec, p0); \
 \
    acc0 = asrl(acc0, 15); \
    acc1 = asrl(acc1, 15); \
    acc0 = __SSAT(acc0, 16); \
    acc1 = __SSAT(acc1, 16); \
}
/*
 * Q7 correlation core: 2 MAC sums with acc1 reading Y one sample behind
 * (pSrcY[-1]); growing-size variant, so acc1's final predicated MAC unmasks
 * one extra lane (vctp8q(k+1)) while acc0 uses the exact count.
 * 16 lanes per iteration; uses the 32-bit vmladavaq accumulate.
 *
 * acc0, acc1 : accumulators, updated in place; shifted right by 7 then
 *              saturated to 8 bits on exit.
 * pX, pY     : sample pointers, read through local copies (not modified).
 * count      : MAC count for acc0; acc1 performs count+1.
 */
#define MVE_INTR_CORR_DUAL_DEC_Y_INC_SIZE_Q7(acc0, acc1, pX, pY, count)\
{ \
    q7_t const *pSrcX, *pSrcY; \
    q7x16_t xVec, yVec; \
    uint32_t k; \
 \
    pSrcX = (q7_t const *) pX; \
    pSrcY = (q7_t const *) pY; \
    k = count >> 4; \
    while (k > 0U) \
    { \
        xVec = vld1q(pSrcX); pSrcX += 16; \
        yVec = vldrbq_s8(&pSrcY[-1]); \
        acc1 = vmladavaq(acc1, xVec, yVec); \
        yVec = vld1q(pSrcY); pSrcY += 16; \
        acc0 = vmladavaq(acc0, xVec, yVec); \
        /* Decrement the loop counter */ \
        k--; \
    } \
    k = count % 0x10U; \
    /* use predication to finalize MAC sum */ \
    /* acc1 requires 1 additional sample */ \
    /* so add 1 to unmask an extra lane in final MAC computation */ \
    mve_pred16_t p0 = vctp8q(k+1); \
    xVec = vld1q(pSrcX); pSrcX += 16; \
    yVec = vldrbq_s8(&pSrcY[-1]); \
    acc1 = vmladavaq_p(acc1, xVec, yVec,p0); \
    /* acc0 requires exact number of sample */ \
    /* disable extra lanes in final MAC computation */ \
    p0 = vctp8q(k); \
    yVec = vld1q(pSrcY); pSrcY += 16; \
    acc0 = vmladavaq_p(acc0, xVec, yVec,p0); \
 \
    acc0 = (acc0 >> 7); \
    acc1 = (acc1 >> 7); \
    acc0 = __SSAT(acc0, 8); \
    acc1 = __SSAT(acc1, 8); \
}
/*
 * Q7 correlation core, single accumulator: dot product of 'count' samples,
 * 16 lanes per iteration, tail handled with vctp8q predication.
 *
 * acc   : accumulator, updated in place; shifted right by 7 and saturated
 *         to 8 bits on exit.
 * pX,pY : sample pointers, read through local copies (not modified).
 * count : number of MAC operations.
 */
#define MVE_INTR_CORR_SINGLE_Q7(acc, pX, pY, count)\
{ \
    q7_t const *pSrcX, *pSrcY; \
    q7x16_t xVec, yVec; \
    uint32_t k; \
 \
    pSrcX = (q7_t const *) pX; \
    pSrcY = (q7_t const *) pY; \
    k = count >> 4; \
    while (k > 0U) \
    { \
        xVec = vld1q(pSrcX); pSrcX += 16; \
        yVec = vld1q(pSrcY); pSrcY += 16; \
        acc = vmladavaq(acc, xVec, yVec); \
        /* Decrement the loop counter */ \
        k--; \
    } \
    /* tail predication expected here */ \
    k = count % 0x10U; \
    if (k > 0U) \
    { \
        mve_pred16_t p0 = vctp8q(k); \
        xVec = vld1q(pSrcX); pSrcX += 16; \
        yVec = vld1q(pSrcY); pSrcY += 16; \
        acc = vmladavaq_p(acc, xVec, yVec, p0); \
    } \
    acc =(acc >> 7); \
    acc = __SSAT(acc, 8); \
}
/*
 * Q7 correlation core: 4 MAC sums (acc0..acc3) over a fixed size 'count',
 * each reusing one forward Y load against X taken at 4 successive start
 * offsets (pSrcX[0..3]). 16 lanes per iteration; tail (count % 16)
 * predicated identically for all four accumulators.
 *
 * acc0..acc3 : accumulators, updated in place; shifted right by 7 then
 *              saturated to 8 bits on exit.
 * pX, pY     : sample pointers, read through local copies (not modified).
 * count      : number of MAC operations per accumulator.
 */
#define MVE_INTR_CORR_QUAD_INC_X_FIXED_SIZE_Q7(acc0, acc1, acc2, acc3, pX, pY, count)\
{ \
    q7_t const *pSrcX, *pSrcY; \
    q7x16_t xVec, yVec; \
    uint32_t k; \
 \
    pSrcX = (q7_t const *) pX; \
    pSrcY = (q7_t const *) pY; \
    k = count >> 4; \
 \
    while (k > 0U) \
    { \
        yVec = vld1q(pSrcY); pSrcY += 16; \
        xVec = vldrbq_s8(&pSrcX[1]); \
        acc1 = vmladavaq(acc1, xVec, yVec); \
        xVec = vldrbq_s8(&pSrcX[2]); \
        acc2 = vmladavaq(acc2, xVec, yVec); \
        xVec = vldrbq_s8(&pSrcX[3]); \
        acc3 = vmladavaq(acc3, xVec, yVec); \
        xVec = vld1q(pSrcX); pSrcX += 16; \
        acc0 = vmladavaq(acc0, xVec, yVec); \
        /* Decrement the loop counter */ \
        k--; \
    } \
    /* loop + tail predication expected here */ \
    k = count % 0x10U; \
    if (k > 0U) \
    { \
        mve_pred16_t p0 = vctp8q(k); \
        yVec = vld1q(pSrcY); pSrcY += 16; \
        xVec = vldrbq_s8(&pSrcX[1]); \
        acc1 = vmladavaq_p(acc1, xVec, yVec, p0); \
        xVec = vldrbq_s8(&pSrcX[2]); \
        acc2 = vmladavaq_p(acc2, xVec, yVec, p0); \
        xVec = vldrbq_s8(&pSrcX[3]); \
        acc3 = vmladavaq_p(acc3, xVec, yVec, p0); \
        xVec = vld1q(pSrcX); pSrcX += 16; \
        acc0 = vmladavaq_p(acc0, xVec, yVec, p0); \
    } \
 \
    acc0 = (acc0 >> 7); \
    acc1 = (acc1 >> 7); \
    acc2 = (acc2 >> 7); \
    acc3 = (acc3 >> 7); \
    acc0 = __SSAT(acc0, 8); \
    acc1 = __SSAT(acc1, 8); \
    acc2 = __SSAT(acc2, 8); \
    acc3 = __SSAT(acc3, 8); \
}
/*
 * Q7 correlation core: 2 MAC sums (acc0, acc1) over a fixed size 'count',
 * with acc1 using X advanced by one sample (pSrcX[1]) against the same Y
 * vector. 16 lanes per iteration; tail (count % 16) predicated identically
 * for both accumulators.
 *
 * acc0, acc1 : accumulators, updated in place; shifted right by 7 then
 *              saturated to 8 bits on exit.
 * pX, pY     : sample pointers, read through local copies (not modified).
 * count      : number of MAC operations per accumulator.
 */
#define MVE_INTR_CORR_DUAL_INC_X_FIXED_SIZE_Q7(acc0, acc1, pX, pY, count)\
{ \
    q7_t const *pSrcX, *pSrcY; \
    q7x16_t xVec, yVec; \
    uint32_t k; \
 \
    pSrcX = (q7_t const *) pX; \
    pSrcY = (q7_t const *) pY; \
    k = count >> 4; \
 \
    while (k > 0U) \
    { \
        yVec = vld1q(pSrcY); pSrcY += 16; \
        xVec = vldrbq_s8(&pSrcX[1]); \
        acc1 = vmladavaq(acc1, xVec, yVec); \
        xVec = vld1q(pSrcX); pSrcX += 16; \
        acc0 = vmladavaq(acc0, xVec, yVec); \
        /* Decrement the loop counter */ \
        k--; \
    } \
    /* loop + tail predication expected here */ \
    k = count % 0x10U; \
    if (k > 0U) \
    { \
        mve_pred16_t p0 = vctp8q(k); \
        yVec = vld1q(pSrcY); pSrcY += 16; \
        xVec = vldrbq_s8(&pSrcX[1]); \
        acc1 = vmladavaq_p(acc1, xVec, yVec, p0); \
        xVec = vld1q(pSrcX); pSrcX += 16; \
        acc0 = vmladavaq_p(acc0, xVec, yVec, p0); \
    } \
 \
    acc0 = (acc0 >> 7); \
    acc1 = (acc1 >> 7); \
    acc0 = __SSAT(acc0, 8); \
    acc1 = __SSAT(acc1, 8); \
}
/*
 * Q7 correlation core, shrinking-size variant: acc1 accumulates exactly
 * count-1 MACs over X advanced by one sample, acc0 accumulates count MACs.
 * Main loop runs (count-1)/16 times; the final step uses vctp8q(k) for
 * acc1 and vctp8q(k+1) for acc0 (one extra unmasked lane).
 *
 * acc0, acc1 : accumulators, updated in place; shifted right by 7 then
 *              saturated to 8 bits on exit.
 * pX, pY     : sample pointers, read through local copies (not modified).
 * count      : MAC count for acc0; acc1 performs count-1.
 */
#define MVE_INTR_CORR_DUAL_INC_X_DEC_SIZE_Q7(acc0, acc1, pX, pY, count)\
{ \
    q7_t const *pSrcX, *pSrcY; \
    q7x16_t xVec, yVec; \
    uint32_t k; \
 \
    pSrcX = (q7_t const *) pX; \
    pSrcY = (q7_t const *) pY; \
    k = (count-1) >> 4; \
 \
    while (k > 0U) \
    { \
        yVec = vld1q(pSrcY); pSrcY += 16; \
        xVec = vldrbq_s8(&pSrcX[1]); \
        acc1 = vmladavaq(acc1, xVec, yVec); \
        xVec = vld1q(pSrcX); pSrcX += 16; \
        acc0 = vmladavaq(acc0, xVec, yVec); \
        /* Decrement the loop counter */ \
        k--; \
    } \
    /* use predication to finalize MAC sum */ \
    /* acc1 requires exact number of sample (count-1) */ \
    /* disable extra lanes in final MAC computation */ \
    k = (count-1) % 0x10U; \
    mve_pred16_t p0 = vctp8q(k); \
    yVec = vld1q(pSrcY); pSrcY += 16; \
    xVec = vldrbq_s8(&pSrcX[1]); \
    acc1 = vmladavaq_p(acc1, xVec, yVec, p0); \
    /* acc0 requires 1 additional sample (count) */ \
    /* so add 1 to unmask an extra lane in final MAC computation */ \
    p0 = vctp8q(k+1); \
    xVec = vld1q(pSrcX); pSrcX += 16; \
    acc0 = vmladavaq_p(acc0, xVec, yVec, p0); \
 \
    acc0 = (acc0 >> 7); \
    acc1 = (acc1 >> 7); \
    acc0 = __SSAT(acc0, 8); \
    acc1 = __SSAT(acc1, 8); \
}
/*
 * Q7 convolution core: 2 MAC sums with Y read backwards via byte gathers,
 * acc1 using a Y start point one sample ahead (pY1 = pY + 1). Growing-size
 * variant: acc0's final predicated MAC uses the exact count (vctp8q(k)),
 * acc1 unmasks one extra lane (vctp8q(k+1)).
 *
 * acc0, acc1 : accumulators, updated in place; shifted right by 7 then
 *              saturated to 8 bits on exit.
 * pX         : input sample pointer (read through a local copy).
 * pY         : coefficient pointer; the MACRO DECREMENTS the caller's pY by
 *              16 per main-loop iteration (caller-visible).
 * count      : MAC count for acc0; acc1 performs count+1.
 *
 * NOTE(review): expands references to 'decrIdxVec' which must be in scope
 * at the expansion site.
 */
#define MVE_INTR_CONV_DUAL_INC_Y_INC_SIZE_Q7(acc0, acc1, pX, pY, count)\
{ \
    q7_t const *pSrcX; \
    const q7_t *pY1 = pY + 1; \
    q7x16_t xVec, yVec; \
    uint32_t k; \
 \
    pSrcX = (q7_t const *) pX; \
    k = count >> 4; \
 \
    while (k > 0U) \
    { \
        xVec = vld1q(pSrcX); pSrcX += 16; \
        yVec = vldrbq_gather_offset_s8(pY, decrIdxVec); \
        pY-=16; \
        acc0 = vmladavaq(acc0, xVec, yVec); \
        yVec = vldrbq_gather_offset_s8(pY1, decrIdxVec); \
        pY1-=16; \
        acc1 = vmladavaq(acc1, xVec, yVec); \
        /* Decrement the loop counter */ \
        k--; \
    } \
    k = count % 0x10U; \
    /* use predication to finalize MAC sum */ \
    /* acc0 requires exact number of sample */ \
    /* disable extra lanes in final MAC computation */ \
    mve_pred16_t p0 = vctp8q(k); \
    xVec = vld1q(pSrcX); pSrcX += 16; \
    yVec = vldrbq_gather_offset_s8(pY, decrIdxVec); \
    acc0 = vmladavaq_p(acc0, xVec, yVec, p0); \
    yVec = vldrbq_gather_offset_s8(pY1, decrIdxVec); \
    /* acc1 requires 1 additional sample */ \
    /* so add 1 to unmask an extra lane in final MAC computation */ \
    p0 = vctp8q(k+1); \
    acc1 = vmladavaq_p(acc1, xVec, yVec, p0); \
 \
    acc0 = (acc0 >> 7); \
    acc1 = (acc1 >> 7); \
    acc0 = __SSAT(acc0, 8); \
    acc1 = __SSAT(acc1, 8); \
}
/*
 * Q7 convolution core, single accumulator: X read forwards, Y read
 * backwards via a byte gather with the descending index vector. Tail
 * handled with vctp8q predication.
 *
 * acc   : accumulator, updated in place; shifted right by 7 and saturated
 *         to 8 bits on exit.
 * pX    : input sample pointer (read through a local copy).
 * pY    : coefficient pointer; the MACRO DECREMENTS the caller's pY by 16
 *         per main-loop iteration (caller-visible).
 * count : number of MAC operations.
 *
 * NOTE(review): expands references to 'decrIdxVec' which must be in scope
 * at the expansion site.
 */
#define MVE_INTR_CONV_SINGLE_Q7(acc, pX, pY, count) \
{ \
    q7_t const *pSrcX; \
    q7x16_t xVec, yVec; \
    uint32_t k; \
 \
    pSrcX = (q7_t const *) pX; \
    k = count >> 4; \
 \
    while (k > 0U) \
    { \
        yVec = vldrbq_gather_offset_s8(pY, decrIdxVec); \
        pY-=16; \
        xVec = vld1q(pSrcX); pSrcX += 16; \
        acc = vmladavaq(acc, xVec, yVec); \
        /* Decrement the loop counter */ \
        k--; \
    } \
    /* Loop with tail predication expected here */ \
    k = count % 0x10U; \
    if (k > 0U) \
    { \
        mve_pred16_t p0 = vctp8q(k); \
        xVec = vld1q(pSrcX); pSrcX += 16; \
        yVec = vldrbq_gather_offset_s8(pY, decrIdxVec); \
        acc = vmladavaq_p(acc, xVec, yVec, p0); \
    } \
    acc = __SSAT(acc >> 7, 8); \
}
/*
 * Q7 convolution core: 4 MAC sums (acc0..acc3) over a fixed size 'count',
 * X read at 4 successive start offsets (pSrcX[0..3]) against one reversed
 * Y byte gather per iteration. Tail (count % 16) predicated identically
 * for all four accumulators.
 *
 * acc0..acc3 : accumulators, updated in place; shifted right by 7 then
 *              saturated to 8 bits on exit.
 * pX         : input sample pointer (read through a local copy).
 * pY         : coefficient pointer; the MACRO DECREMENTS the caller's pY by
 *              16 per main-loop iteration (caller-visible).
 * count      : number of MAC operations per accumulator.
 *
 * NOTE(review): expands references to 'decrIdxVec' which must be in scope
 * at the expansion site.
 */
#define MVE_INTR_CONV_QUAD_INC_X_FIXED_SIZE_Q7(acc0, acc1, acc2, acc3, pX, pY, count) \
{ \
    q7_t const *pSrcX; \
    q7x16_t xVec, yVec; \
    uint32_t k; \
 \
    pSrcX = (q7_t const *) pX; \
    k = count >> 4; \
 \
    while (k > 0U) \
    { \
        yVec = vldrbq_gather_offset_s8(pY, decrIdxVec); \
        pY-=16; \
        xVec = vldrbq_s8(&pSrcX[1]); \
        acc1 = vmladavaq(acc1, xVec, yVec); \
        xVec = vldrbq_s8(&pSrcX[2]); \
        acc2 = vmladavaq(acc2, xVec, yVec); \
        xVec = vldrbq_s8(&pSrcX[3]); \
        acc3 = vmladavaq(acc3, xVec, yVec); \
        xVec = vld1q(pSrcX); pSrcX += 16; \
        acc0 = vmladavaq(acc0, xVec, yVec); \
        /* Decrement the loop counter */ \
        k--; \
    } \
    /* Loop with tail predication expected here */ \
    k = count % 0x10U; \
    if (k > 0U) \
    { \
        mve_pred16_t p0 = vctp8q(k); \
        yVec = vldrbq_gather_offset_s8(pY, decrIdxVec); \
        xVec = vldrbq_s8(&pSrcX[1]); \
        acc1 = vmladavaq_p(acc1, xVec, yVec, p0); \
        xVec = vldrbq_s8(&pSrcX[2]); \
        acc2 = vmladavaq_p(acc2, xVec, yVec, p0); \
        xVec = vldrbq_s8(&pSrcX[3]); \
        acc3 = vmladavaq_p(acc3, xVec, yVec, p0); \
        xVec = vld1q(pSrcX); pSrcX += 16; \
        acc0 = vmladavaq_p(acc0, xVec, yVec, p0); \
    } \
    acc0 = __SSAT(acc0 >> 7, 8); \
    acc1 = __SSAT(acc1 >> 7, 8); \
    acc2 = __SSAT(acc2 >> 7, 8); \
    acc3 = __SSAT(acc3 >> 7, 8); \
}
/*
 * Q7 convolution core: 2 MAC sums over a fixed size 'count', acc1 using X
 * advanced by one sample (pSrcX[1]) against the same reversed Y byte
 * gather. Tail (count % 16) predicated identically for both accumulators.
 *
 * acc0, acc1 : accumulators, updated in place; shifted right by 7 then
 *              saturated to 8 bits on exit.
 * pX         : input sample pointer (read through a local copy).
 * pY         : coefficient pointer; the MACRO DECREMENTS the caller's pY by
 *              16 per main-loop iteration (caller-visible).
 * count      : number of MAC operations per accumulator.
 *
 * NOTE(review): expands references to 'decrIdxVec' which must be in scope
 * at the expansion site.
 */
#define MVE_INTR_CONV_DUAL_INC_X_FIXED_SIZE_Q7(acc0, acc1, pX, pY, count) \
{ \
    q7_t const *pSrcX; \
    q7x16_t xVec, yVec; \
    uint32_t k; \
 \
    pSrcX = (q7_t const *) pX; \
    k = count >> 4; \
 \
    while (k > 0U) \
    { \
        yVec = vldrbq_gather_offset_s8(pY, decrIdxVec); \
        pY-=16; \
        xVec = vldrbq_s8(&pSrcX[1]); \
        acc1 = vmladavaq(acc1, xVec, yVec); \
        xVec = vld1q(pSrcX); pSrcX += 16; \
        acc0 = vmladavaq(acc0, xVec, yVec); \
        /* Decrement the loop counter */ \
        k--; \
    } \
    /* Loop with tail predication expected here */ \
    k = count % 0x10U; \
    if (k > 0U) \
    { \
        mve_pred16_t p0 = vctp8q(k); \
        yVec = vldrbq_gather_offset_s8(pY, decrIdxVec); \
        xVec = vldrbq_s8(&pSrcX[1]); \
        acc1 = vmladavaq_p(acc1, xVec, yVec, p0); \
        xVec = vld1q(pSrcX); pSrcX += 16; \
        acc0 = vmladavaq_p(acc0, xVec, yVec, p0); \
    } \
    acc0 = __SSAT(acc0 >> 7, 8); \
    acc1 = __SSAT(acc1 >> 7, 8); \
}
/*
 * Q7 convolution core, shrinking-size variant: acc1 accumulates exactly
 * count-1 MACs (X advanced by one sample), acc0 accumulates count MACs.
 * Main loop runs (count-1)/16 times; the final step uses vctp8q(k) for
 * acc1 and vctp8q(k+1) for acc0 (one extra unmasked lane).
 *
 * acc0, acc1 : accumulators, updated in place; shifted right by 7 then
 *              saturated to 8 bits on exit.
 * pX         : input sample pointer (read through a local copy).
 * pY         : coefficient pointer; the MACRO DECREMENTS the caller's pY by
 *              16 per main-loop iteration (caller-visible).
 * count      : MAC count for acc0; acc1 performs count-1.
 *
 * NOTE(review): expands references to 'decrIdxVec' which must be in scope
 * at the expansion site.
 */
#define MVE_INTR_CONV_DUAL_INC_X_DEC_SIZE_Q7(acc0, acc1, pX, pY, count) \
{ \
    q7_t const *pSrcX; \
    q7x16_t xVec, yVec; \
    uint32_t k; \
 \
    pSrcX = (q7_t const *) pX; \
    k = (count-1) >> 4; \
 \
    while (k > 0U) \
    { \
        yVec = vldrbq_gather_offset_s8(pY, decrIdxVec); \
        pY-=16; \
        xVec = vldrbq_s8(&pSrcX[1]); \
        acc1 = vmladavaq(acc1, xVec, yVec); \
        xVec = vld1q(pSrcX); pSrcX += 16; \
        acc0 = vmladavaq(acc0, xVec, yVec); \
        /* Decrement the loop counter */ \
        k--; \
    } \
    k = (count - 1) % 0x10U; \
    /* use predication to finalize MAC sum */ \
    /* acc1 requires exact number of sample (count-1) */ \
    /* disable extra lanes in final MAC computation */ \
    mve_pred16_t p0 = vctp8q(k); \
    yVec = vldrbq_gather_offset_s8(pY, decrIdxVec); \
    xVec = vldrbq_s8(&pSrcX[1]); \
    acc1 = vmladavaq_p(acc1, xVec, yVec, p0); \
    /* acc0 requires 1 additional sample (count) */ \
    /* so add 1 to unmask an extra lane in final MAC computation */ \
    p0 = vctp8q(k+1); \
    xVec = vld1q(pSrcX); pSrcX += 16; \
    acc0 = vmladavaq_p(acc0, xVec, yVec, p0); \
 \
    acc0 = (acc0 >> 7); \
    acc1 = (acc1 >> 7); \
    acc0 = __SSAT(acc0, 8); \
    acc1 = __SSAT(acc1, 8); \
}
#endif /* (defined(ARM_MATH_MVEI) || defined(ARM_MATH_HELIUM)) */
#ifdef __cplusplus
}
#endif
#endif /* _ARM_VEC_FILTERING_H_ */