| author | rihab kouki <rihab.kouki@st.com> | 2020-07-28 11:24:49 +0100 |
|---|---|---|
| committer | rihab kouki <rihab.kouki@st.com> | 2020-07-28 11:24:49 +0100 |
| commit | 96d6da4e252b06dcfdc041e7df23e86161c33007 (patch) | |
| tree | a262f59bb1db7ec7819acae435f5049cbe5e2354 /Core_A/Include/cmsis_armclang.h | |
| parent | 9f95ff5b6ba01db09552b84a0ab79607060a2666 (diff) | |
| download | st-cmsis-core-lowfat-master.tar.gz st-cmsis-core-lowfat-master.tar.bz2 st-cmsis-core-lowfat-master.zip | |
Diffstat (limited to 'Core_A/Include/cmsis_armclang.h')
-rw-r--r-- | Core_A/Include/cmsis_armclang.h | 106 |
1 file changed, 94 insertions, 12 deletions
```diff
diff --git a/Core_A/Include/cmsis_armclang.h b/Core_A/Include/cmsis_armclang.h
index 5883364..0a53c4e 100644
--- a/Core_A/Include/cmsis_armclang.h
+++ b/Core_A/Include/cmsis_armclang.h
@@ -1,11 +1,11 @@
 /**************************************************************************//**
  * @file     cmsis_armclang.h
  * @brief    CMSIS compiler specific macros, functions, instructions
- * @version  V1.0.2
- * @date     10. January 2018
+ * @version  V1.1.1
+ * @date     15. May 2019
  ******************************************************************************/
 /*
- * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
+ * Copyright (c) 2009-2019 Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -102,6 +102,9 @@
 #ifndef   __PACKED
   #define __PACKED                               __attribute__((packed))
 #endif
+#ifndef   __COMPILER_BARRIER
+  #define __COMPILER_BARRIER()                   __ASM volatile("":::"memory")
+#endif
 
 /* ##########################  Core Instruction Access  ######################### */
 /**
@@ -214,7 +217,23 @@
   \param [in]  value  Value to count the leading zeros
   \return             number of leading zeros in value
 */
-#define __CLZ                     (uint8_t)__builtin_clz
+__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value)
+{
+  /* Even though __builtin_clz produces a CLZ instruction on ARM, formally
+     __builtin_clz(0) is undefined behaviour, so handle this case specially.
+     This guarantees ARM-compatible results if happening to compile on a non-ARM
+     target, and ensures the compiler doesn't decide to activate any
+     optimisations using the logic "value was passed to __builtin_clz, so it
+     is non-zero".
+     ARM Compiler 6.10 and possibly earlier will optimise this test away, leaving a
+     single CLZ instruction.
+   */
+  if (value == 0U)
+  {
+    return 32U;
+  }
+  return __builtin_clz(value);
+}
 
 /**
   \brief   LDR Exclusive (8 bit)
@@ -295,6 +314,68 @@
 */
 #define __USAT             __builtin_arm_usat
 
+/* ###################  Compiler specific Intrinsics  ########################### */
+/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
+  Access to dedicated SIMD instructions
+  @{
+*/
+
+#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1))
+
+#define     __QADD8                 __builtin_arm_qadd8
+#define     __QSUB8                 __builtin_arm_qsub8
+#define     __QADD16                __builtin_arm_qadd16
+#define     __SHADD16               __builtin_arm_shadd16
+#define     __QSUB16                __builtin_arm_qsub16
+#define     __SHSUB16               __builtin_arm_shsub16
+#define     __QASX                  __builtin_arm_qasx
+#define     __SHASX                 __builtin_arm_shasx
+#define     __QSAX                  __builtin_arm_qsax
+#define     __SHSAX                 __builtin_arm_shsax
+#define     __SXTB16                __builtin_arm_sxtb16
+#define     __SMUAD                 __builtin_arm_smuad
+#define     __SMUADX                __builtin_arm_smuadx
+#define     __SMLAD                 __builtin_arm_smlad
+#define     __SMLADX                __builtin_arm_smladx
+#define     __SMLALD                __builtin_arm_smlald
+#define     __SMLALDX               __builtin_arm_smlaldx
+#define     __SMUSD                 __builtin_arm_smusd
+#define     __SMUSDX                __builtin_arm_smusdx
+#define     __SMLSDX                __builtin_arm_smlsdx
+
+
+
+__STATIC_FORCEINLINE int32_t __QADD( int32_t op1,  int32_t op2)
+{
+  int32_t result;
+
+  __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE int32_t __QSUB( int32_t op1,  int32_t op2)
+{
+  int32_t result;
+
+  __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+#define __PKHBT(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0x0000FFFFUL) |  \
+                                           ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL)  )
+
+#define __PKHTB(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0xFFFF0000UL) |  \
+                                           ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL)  )
+
+__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
+{
+  int32_t result;
+
+  __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
+  return(result);
+}
+
+#endif /* (__ARM_FEATURE_DSP == 1) */
 
 
 /* ###########################  Core Function Access  ########################### */
@@ -375,8 +456,8 @@ __STATIC_FORCEINLINE uint32_t __get_SP_usr()
     "MRS     %0, cpsr   \n"
     "CPS     #0x1F      \n" // no effect in USR mode
     "MOV     %1, sp     \n"
-    "MSR     cpsr_c, %2 \n" // no effect in USR mode
-    "ISB" :  "=r"(cpsr), "=r"(result) : "r"(cpsr) : "memory"
+    "MSR     cpsr_c, %0 \n" // no effect in USR mode
+    "ISB" :  "=r"(cpsr), "=r"(result) : : "memory"
   );
   return result;
 }
@@ -391,8 +472,8 @@ __STATIC_FORCEINLINE void __set_SP_usr(uint32_t topOfProcStack)
     "MRS     %0, cpsr   \n"
     "CPS     #0x1F      \n" // no effect in USR mode
     "MOV     sp, %1     \n"
-    "MSR     cpsr_c, %2 \n" // no effect in USR mode
-    "ISB" : "=r"(cpsr) : "r" (topOfProcStack) : "memory"
+    "MSR     cpsr_c, %0 \n" // no effect in USR mode
+    "ISB" : "=r"(cpsr) : "r" (topOfProcStack) : "memory"
   );
 }
@@ -493,10 +574,11 @@ __STATIC_INLINE void __FPU_Enable(void)
 #endif
 
     //Initialise FPSCR to a known state
-    "        VMRS     R2,FPSCR         \n"
-    "        LDR      R3,=0x00086060   \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.
-    "        AND      R2,R2,R3         \n"
-    "        VMSR     FPSCR,R2         "
+    "        VMRS     R1,FPSCR         \n"
+    "        LDR      R2,=0x00086060   \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.
+    "        AND      R1,R1,R2         \n"
+    "        VMSR     FPSCR,R1         "
+    : : : "cc", "r1", "r2"
   );
 }
```
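The new `__COMPILER_BARRIER()` macro expands to an empty asm statement with a `"memory"` clobber: it emits no instruction, but it stops the compiler from caching values in registers or reordering loads and stores across it. A minimal host-side sketch of the idea follows; the `publish`, `shared_value`, and `ready_flag` names are illustrative, not CMSIS symbols:

```c
#include <stdint.h>

/* Same expansion as the new CMSIS macro: an empty asm whose "memory" clobber
   forbids the compiler from moving memory accesses across it. It orders only
   the compiler, not the CPU; hardware ordering still needs __DMB/__DSB. */
#define COMPILER_BARRIER() __asm volatile("" ::: "memory")

static uint32_t shared_value;
static uint32_t ready_flag;

void publish(uint32_t v)
{
    shared_value = v;
    COMPILER_BARRIER();   /* keep the data store before the flag store */
    ready_flag = 1u;
}
```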
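The `__CLZ` rewrite exists because `__builtin_clz(0)` is formally undefined behaviour, even though the Arm CLZ instruction itself returns 32 for a zero input. A host-compilable sketch of the guarded semantics, using the illustrative name `clz32`:

```c
#include <stdint.h>
#include <stdio.h>

/* Guarded count-leading-zeros, mirroring the patched __CLZ: the zero input is
   special-cased before calling __builtin_clz, whose result is undefined for 0. */
static inline uint8_t clz32(uint32_t value)
{
    if (value == 0u) {
        return 32u;                      /* what the CLZ instruction returns */
    }
    return (uint8_t)__builtin_clz(value);
}

int main(void)
{
    /* Expected output: 32 31 0 */
    printf("%u %u %u\n", (unsigned)clz32(0u), (unsigned)clz32(1u),
                         (unsigned)clz32(0x80000000u));
    return 0;
}
```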
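The `__QADD8`-family macros map directly onto Clang's `__builtin_arm_*` intrinsics and require a DSP-capable target (the block is guarded by `__ARM_FEATURE_DSP`). As a reference for what one of them computes, here is a portable behavioural model of QADD8, four lane-wise saturating signed 8-bit additions packed in one word; `qadd8_model` and `sat_s8` are illustrative names, not CMSIS symbols:

```c
#include <stdint.h>

/* Saturate a widened sum back into the signed 8-bit range. */
static int8_t sat_s8(int32_t x)
{
    if (x > INT8_MAX) { return INT8_MAX; }
    if (x < INT8_MIN) { return INT8_MIN; }
    return (int8_t)x;
}

/* Behavioural model of QADD8: four independent saturating int8 additions.
   On target, __QADD8 compiles to a single instruction. */
static uint32_t qadd8_model(uint32_t a, uint32_t b)
{
    uint32_t r = 0u;
    for (int lane = 0; lane < 4; lane++) {
        int8_t la = (int8_t)(a >> (8 * lane));
        int8_t lb = (int8_t)(b >> (8 * lane));
        r |= (uint32_t)(uint8_t)sat_s8((int32_t)la + (int32_t)lb) << (8 * lane);
    }
    return r;
}
```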
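Unlike the intrinsics above, `__PKHBT` and `__PKHTB` are plain macros: they pack two halfwords into one word with masks and a shift. A small host-side usage sketch of the `__PKHBT` body (variable names and values are arbitrary):

```c
#include <stdint.h>
#include <stdio.h>

/* Same body as the __PKHBT macro in the patch: bottom halfword of ARG1,
   combined with ARG2 shifted left by ARG3 into the top halfword. */
#define PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1))          ) & 0x0000FFFFUL) | \
                                ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) )

int main(void)
{
    uint32_t bottom = 0x11112222u;
    uint32_t top    = 0x33334444u;
    /* 0x2222 kept from `bottom`, 0x4444 shifted into the upper halfword. */
    printf("0x%08lX\n", (unsigned long)PKHBT(bottom, top, 16)); /* 0x44442222 */
    return 0;
}
```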
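The `__get_SP_usr`/`__set_SP_usr` hunks fix a real inline-asm bug: the old constraint lists passed `cpsr` as an input (`"r"(cpsr)`) even though `cpsr` only receives its value from the MRS inside the same asm statement, so the compiler was asked to materialise an uninitialised variable. The fix has the MSR read operand `%0` back, the register the MRS already filled. Below is the patched `__get_SP_usr` with explanatory comments added here (the comments are mine, not part of the patch; compile for Armv7-A):

```c
#include <stdint.h>

/* Annotated copy of the fixed routine: %0 is written by MRS at the top and
   read back by MSR at the bottom, so no separate cpsr input is needed. */
static inline uint32_t get_SP_usr(void)
{
    uint32_t cpsr;
    uint32_t result;
    __asm volatile(
        "MRS     %0, cpsr   \n"   /* %0 <- current mode bits                 */
        "CPS     #0x1F      \n"   /* enter SYS mode to reach the USR-mode SP */
        "MOV     %1, sp     \n"   /* %1 <- banked USR/SYS stack pointer      */
        "MSR     cpsr_c, %0 \n"   /* restore the mode bits saved in %0       */
        "ISB"
        : "=r"(cpsr), "=r"(result) : : "memory");
    return result;
}
```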
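The final hunk moves the FPSCR-initialisation scratch registers from R2/R3 to R1/R2 and, more importantly, adds an explicit clobber list (`"cc", "r1", "r2"`). Without it, the compiler is free to keep a live value in a register the asm body silently overwrites. A contrived Armv7 sketch of the rule, under the assumption of an armclang or GNU-syntax toolchain (`read_cpsr_via_scratch` is an illustrative name):

```c
#include <stdint.h>

/* Any register an asm body uses as scratch must be declared as a clobber,
   or the compiler may assume it still holds a live value afterwards. */
static inline uint32_t read_cpsr_via_scratch(void)
{
    uint32_t out;
    __asm volatile(
        "MRS  r2, cpsr \n"        /* r2 used as scratch behind the compiler */
        "MOV  %0, r2   \n"
        : "=r"(out)
        :
        : "r2");                  /* ...so it must appear in the clobbers   */
    return out;
}
```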