From 96d6da4e252b06dcfdc041e7df23e86161c33007 Mon Sep 17 00:00:00 2001
From: rihab kouki
Date: Tue, 28 Jul 2020 11:24:49 +0100
Subject: Official ARM version: v5.6.0

---
 Core_A/Include/cmsis_gcc.h | 157 +++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 146 insertions(+), 11 deletions(-)

(limited to 'Core_A/Include/cmsis_gcc.h')

diff --git a/Core_A/Include/cmsis_gcc.h b/Core_A/Include/cmsis_gcc.h
index 4f46462..0c407c2 100644
--- a/Core_A/Include/cmsis_gcc.h
+++ b/Core_A/Include/cmsis_gcc.h
@@ -1,11 +1,11 @@
 /**************************************************************************//**
  * @file     cmsis_gcc.h
  * @brief    CMSIS compiler specific macros, functions, instructions
- * @version  V1.0.2
- * @date     09. April 2018
+ * @version  V1.2.0
+ * @date     17. May 2019
  ******************************************************************************/
 /*
- * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
+ * Copyright (c) 2009-2019 Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -37,8 +37,9 @@
 #endif
 /* CMSIS compiler specific defines */
+
 #ifndef   __ASM
-  #define __ASM                                  asm
+  #define __ASM                                  __asm
 #endif
 #ifndef   __INLINE
   #define __INLINE                               inline
 #endif
@@ -104,6 +105,123 @@
 #ifndef   __ALIGNED
   #define __ALIGNED(x)                           __attribute__((aligned(x)))
 #endif
+#ifndef   __COMPILER_BARRIER
+  #define __COMPILER_BARRIER()                   __ASM volatile("":::"memory")
+#endif
+
+
+__STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+
+__STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE int32_t __QADD( int32_t op1,  int32_t op2)
+{
+  int32_t result;
+
+  __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)
+{
+  union llreg_u{
+    uint32_t w32[2];
+    uint64_t w64;
+  } llr;
+  llr.w64 = acc;
+
+#ifndef __ARMEB__   /* Little endian */
+  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
+#else               /* Big endian */
+  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
+#endif
+
+  return(llr.w64);
+}
+
+__STATIC_FORCEINLINE int32_t __QSUB( int32_t op1,  int32_t op2)
+{
+  int32_t result;
+
+  __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+#define __PKHBT(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0x0000FFFFUL) |  \
+                                           ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL)  )
+
+__STATIC_FORCEINLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
+{
+  uint32_t result;
+
+  __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
+{
+  uint32_t result;
+
+  __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)
+{
+  union llreg_u{
+    uint32_t w32[2];
+    uint64_t w64;
+  } llr;
+  llr.w64 = acc;
+
+#ifndef __ARMEB__   /* Little endian */
+  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
+#else               /* Big endian */
+  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
+#endif
+
+  return(llr.w64);
+}
+
+__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
+{
+  int32_t result;
+
+  __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
+  return(result);
+}
+
+
+
 /* ##########################  Core Instruction Access  ######################### */
 /**
@@ -171,7 +289,7 @@ __STATIC_FORCEINLINE uint32_t __REV(uint32_t value)
 #else
   uint32_t result;
 
-  __ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
+  __ASM volatile ("rev %0, %1" : "=r" (result) : "r" (value) );
   return result;
 #endif
 }
@@ -204,7 +322,7 @@ __STATIC_FORCEINLINE int16_t __REVSH(int16_t value)
 #else
   int16_t result;
 
-  __ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
+  __ASM volatile ("revsh %0, %1" : "=r" (result) : "r" (value) );
   return result;
 #endif
 }
@@ -267,7 +385,23 @@ __STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value)
   \param [in]    value  Value to count the leading zeros
   \return             number of leading zeros in value
  */
-#define __CLZ             (uint8_t)__builtin_clz
+__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value)
+{
+  /* Even though __builtin_clz produces a CLZ instruction on ARM, formally
+     __builtin_clz(0) is undefined behaviour, so handle this case specially.
+     This guarantees ARM-compatible results if happening to compile on a non-ARM
+     target, and ensures the compiler doesn't decide to activate any
+     optimisations using the logic "value was passed to __builtin_clz, so it
+     is non-zero".
+     ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a
+     single CLZ instruction.
+   */
+  if (value == 0U)
+  {
+    return 32U;
+  }
+  return __builtin_clz(value);
+}
 
 /**
   \brief   LDR Exclusive (8 bit)
@@ -667,10 +801,11 @@ __STATIC_INLINE void __FPU_Enable(void)
 #endif
 
     //Initialise FPSCR to a known state
-    "        VMRS R2,FPSCR            \n"
-    "        LDR R3,=0x00086060       \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.
-    "        AND R2,R2,R3             \n"
-    "        VMSR FPSCR,R2            "
+    "        VMRS R1,FPSCR            \n"
+    "        LDR R2,=0x00086060       \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.
+    "        AND R1,R1,R2             \n"
+    "        VMSR FPSCR,R1            "
+    : : : "cc", "r1", "r2"
   );
 }
 
-- 
cgit
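For context on the new __CLZ definition above: __builtin_clz(0) is undefined behaviour in GCC, so the wrapper returns 32U explicitly before deferring to the builtin, and (per the comment in the patch) Arm GCC still folds the test into a single CLZ instruction. A minimal standalone sketch of the same guard, assuming only a GCC-compatible host compiler; the clz32 helper below is illustrative and not part of the patch or of CMSIS:

#include <stdint.h>
#include <stdio.h>

/* Same pattern as the __CLZ added by the patch: handle 0 explicitly because
   __builtin_clz(0) is undefined behaviour, then fall back to the builtin. */
static inline uint8_t clz32(uint32_t value)
{
  if (value == 0U)
  {
    return 32U;
  }
  return (uint8_t)__builtin_clz(value);
}

int main(void)
{
  /* Expected output: 32 31 0 */
  printf("%u %u %u\n",
         (unsigned)clz32(0U), (unsigned)clz32(1U), (unsigned)clz32(0x80000000U));
  return 0;
}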