Diffstat (limited to 'Core_A/Include')
-rw-r--r--  Core_A/Include/cmsis_armcc.h    |  17
-rw-r--r--  Core_A/Include/cmsis_armclang.h | 106
-rw-r--r--  Core_A/Include/cmsis_compiler.h |  12
-rw-r--r--  Core_A/Include/cmsis_gcc.h      | 157
-rw-r--r--  Core_A/Include/cmsis_iccarm.h   |  26
-rw-r--r--  Core_A/Include/core_ca.h        |  15
6 files changed, 288 insertions, 45 deletions
diff --git a/Core_A/Include/cmsis_armcc.h b/Core_A/Include/cmsis_armcc.h
index 313d743..ec17393 100644
--- a/Core_A/Include/cmsis_armcc.h
+++ b/Core_A/Include/cmsis_armcc.h
@@ -1,11 +1,11 @@
 /**************************************************************************//**
  * @file     cmsis_armcc.h
  * @brief    CMSIS compiler specific macros, functions, instructions
- * @version  V1.0.2
- * @date     10. January 2018
+ * @version  V1.0.3
+ * @date     15. May 2019
  ******************************************************************************/
 /*
- * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
+ * Copyright (c) 2009-2019 Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -86,6 +86,9 @@
 #ifndef   __PACKED
   #define __PACKED                               __attribute__((packed))
 #endif
+#ifndef   __COMPILER_BARRIER
+  #define __COMPILER_BARRIER()                   __memory_changed()
+#endif
 
 /* ##########################  Core Instruction Access  ######################### */
 /**
@@ -533,10 +536,10 @@ __STATIC_INLINE __ASM void __FPU_Enable(void)
         ENDIF
 
         //Initialise FPSCR to a known state
-        VMRS    R2,FPSCR
-        LDR     R3,=0x00086060 //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.
-        AND     R2,R2,R3
-        VMSR    FPSCR,R2
+        VMRS    R1,FPSCR
+        LDR     R2,=0x00086060 //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.
+        AND     R1,R1,R2
+        VMSR    FPSCR,R1
 
         BX      LR
 }
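Note: __COMPILER_BARRIER() is introduced across all of the compiler headers in this change. It is a compiler-only fence: it stops the compiler from caching values in registers or reordering memory accesses across the marked point, but it emits no DMB/DSB, so it does not order accesses as observed by other bus masters. A minimal usage sketch in plain C (the publish/flag names are illustrative, not part of CMSIS):

    #include "cmsis_compiler.h"

    static uint32_t shared_value;
    static volatile uint32_t data_ready = 0U;

    void publish(uint32_t v)
    {
      shared_value = v;      /* plain store the compiler could otherwise sink */
      __COMPILER_BARRIER();  /* no compiler reordering across this point      */
      data_ready = 1U;       /* flag write is now kept after the data write   */
    }

Under Arm Compiler 5 the barrier maps to the __memory_changed() intrinsic; the Clang-based, GCC, and IAR headers use the classic empty asm statement with a "memory" clobber.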
diff --git a/Core_A/Include/cmsis_armclang.h b/Core_A/Include/cmsis_armclang.h
index 5883364..0a53c4e 100644
--- a/Core_A/Include/cmsis_armclang.h
+++ b/Core_A/Include/cmsis_armclang.h
@@ -1,11 +1,11 @@
 /**************************************************************************//**
  * @file     cmsis_armclang.h
  * @brief    CMSIS compiler specific macros, functions, instructions
- * @version  V1.0.2
- * @date     10. January 2018
+ * @version  V1.1.1
+ * @date     15. May 2019
  ******************************************************************************/
 /*
- * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
+ * Copyright (c) 2009-2019 Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -102,6 +102,9 @@
 #ifndef   __PACKED
   #define __PACKED                               __attribute__((packed))
 #endif
+#ifndef   __COMPILER_BARRIER
+  #define __COMPILER_BARRIER()                   __ASM volatile("":::"memory")
+#endif
 
 /* ##########################  Core Instruction Access  ######################### */
 /**
@@ -214,7 +217,23 @@ __STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
   \param [in]  value  Value to count the leading zeros
   \return             number of leading zeros in value
  */
-#define __CLZ             (uint8_t)__builtin_clz
+__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value)
+{
+  /* Even though __builtin_clz produces a CLZ instruction on ARM, formally
+     __builtin_clz(0) is undefined behaviour, so handle this case specially.
+     This guarantees ARM-compatible results if happening to compile on a non-ARM
+     target, and ensures the compiler doesn't decide to activate any
+     optimisations using the logic "value was passed to __builtin_clz, so it
+     is non-zero".
+     ARM Compiler 6.10 and possibly earlier will optimise this test away, leaving a
+     single CLZ instruction.
+   */
+  if (value == 0U)
+  {
+    return 32U;
+  }
+  return __builtin_clz(value);
+}
 
 /**
   \brief   LDR Exclusive (8 bit)
@@ -295,6 +314,68 @@ __STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
  */
 #define __USAT            __builtin_arm_usat
 
+/* ###################  Compiler specific Intrinsics  ########################### */
+/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
+  Access to dedicated SIMD instructions
+  @{
+*/
+
+#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1))
+
+#define     __QADD8                 __builtin_arm_qadd8
+#define     __QSUB8                 __builtin_arm_qsub8
+#define     __QADD16                __builtin_arm_qadd16
+#define     __SHADD16               __builtin_arm_shadd16
+#define     __QSUB16                __builtin_arm_qsub16
+#define     __SHSUB16               __builtin_arm_shsub16
+#define     __QASX                  __builtin_arm_qasx
+#define     __SHASX                 __builtin_arm_shasx
+#define     __QSAX                  __builtin_arm_qsax
+#define     __SHSAX                 __builtin_arm_shsax
+#define     __SXTB16                __builtin_arm_sxtb16
+#define     __SMUAD                 __builtin_arm_smuad
+#define     __SMUADX                __builtin_arm_smuadx
+#define     __SMLAD                 __builtin_arm_smlad
+#define     __SMLADX                __builtin_arm_smladx
+#define     __SMLALD                __builtin_arm_smlald
+#define     __SMLALDX               __builtin_arm_smlaldx
+#define     __SMUSD                 __builtin_arm_smusd
+#define     __SMUSDX                __builtin_arm_smusdx
+#define     __SMLSDX                __builtin_arm_smlsdx
+
+
+__STATIC_FORCEINLINE int32_t __QADD( int32_t op1,  int32_t op2)
+{
+  int32_t result;
+
+  __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE int32_t __QSUB( int32_t op1,  int32_t op2)
+{
+  int32_t result;
+
+  __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+#define __PKHBT(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0x0000FFFFUL) |  \
+                                           ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL)  )
+
+#define __PKHTB(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0xFFFF0000UL) |  \
+                                           ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL)  )
+
+__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
+{
+  int32_t result;
+
+  __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
+  return(result);
+}
+
+#endif /* (__ARM_FEATURE_DSP == 1) */
 
 /* ###########################  Core Function Access  ########################### */
 
@@ -375,8 +456,8 @@ __STATIC_FORCEINLINE uint32_t __get_SP_usr()
     "MRS     %0, cpsr   \n"
     "CPS     #0x1F      \n" // no effect in USR mode
     "MOV     %1, sp     \n"
-    "MSR     cpsr_c, %2 \n" // no effect in USR mode
-    "ISB" :  "=r"(cpsr), "=r"(result) : "r"(cpsr) : "memory"
+    "MSR     cpsr_c, %0 \n" // no effect in USR mode
+    "ISB" :  "=r"(cpsr), "=r"(result) : : "memory"
    );
   return result;
 }
@@ -391,8 +472,8 @@ __STATIC_FORCEINLINE void __set_SP_usr(uint32_t topOfProcStack)
     "MRS     %0, cpsr   \n"
     "CPS     #0x1F      \n" // no effect in USR mode
     "MOV     sp, %1     \n"
-    "MSR     cpsr_c, %2 \n" // no effect in USR mode
-    "ISB" : "=r"(cpsr) : "r" (topOfProcStack), "r"(cpsr) : "memory"
+    "MSR     cpsr_c, %0 \n" // no effect in USR mode
+    "ISB" : "=r"(cpsr) : "r" (topOfProcStack) : "memory"
    );
 }
@@ -493,10 +574,11 @@ __STATIC_INLINE void __FPU_Enable(void)
 #endif
 
     //Initialise FPSCR to a known state
-    "        VMRS R2,FPSCR            \n"
-    "        LDR R3,=0x00086060       \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.
-    "        AND R2,R2,R3             \n"
-    "        VMSR FPSCR,R2              "
+    "        VMRS R1,FPSCR            \n"
+    "        LDR R2,=0x00086060       \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.
+    "        AND R1,R1,R2             \n"
+    "        VMSR FPSCR,R1              "
+    : : : "cc", "r1", "r2"
   );
 }
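The __CLZ rework above replaces an object-like macro with a real inline function. With the old form, __CLZ(0) expanded to (uint8_t)__builtin_clz(0), which is formally undefined behaviour; the new function pins the zero case to 32 and, as the comment notes, the guard compiles away to a single CLZ. A small sketch of why the guarantee is useful, assuming the CMSIS header is included (bit_width is a hypothetical helper):

    #include "cmsis_compiler.h"

    /* Number of bits needed to represent v.
       Relies on the new guarantee that __CLZ(0) == 32. */
    static uint32_t bit_width(uint32_t v)
    {
      return 32U - __CLZ(v);   /* bit_width(0) == 0, bit_width(255) == 8 */
    }

With the old macro, bit_width(0) would have invoked undefined behaviour, and the compiler could have "optimised" on the assumption that v was never zero.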
+ " AND R1,R1,R2 \n" + " VMSR FPSCR,R1 " + : : : "cc", "r1", "r2" ); } diff --git a/Core_A/Include/cmsis_compiler.h b/Core_A/Include/cmsis_compiler.h index b00c6ba..dfd07a2 100644 --- a/Core_A/Include/cmsis_compiler.h +++ b/Core_A/Include/cmsis_compiler.h @@ -98,6 +98,10 @@ #ifndef __PACKED #define __PACKED __attribute__((packed)) #endif + #ifndef __COMPILER_BARRIER + #warning No compiler specific solution for __COMPILER_BARRIER. __COMPILER_BARRIER is ignored. + #define __COMPILER_BARRIER() (void)0 + #endif /* @@ -144,6 +148,10 @@ #ifndef __PACKED #define __PACKED __packed__ #endif + #ifndef __COMPILER_BARRIER + #warning No compiler specific solution for __COMPILER_BARRIER. __COMPILER_BARRIER is ignored. + #define __COMPILER_BARRIER() (void)0 + #endif /* @@ -190,6 +198,10 @@ #ifndef __PACKED #define __PACKED @packed #endif + #ifndef __COMPILER_BARRIER + #warning No compiler specific solution for __COMPILER_BARRIER. __COMPILER_BARRIER is ignored. + #define __COMPILER_BARRIER() (void)0 + #endif #else diff --git a/Core_A/Include/cmsis_gcc.h b/Core_A/Include/cmsis_gcc.h index 4f46462..0c407c2 100644 --- a/Core_A/Include/cmsis_gcc.h +++ b/Core_A/Include/cmsis_gcc.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file cmsis_gcc.h * @brief CMSIS compiler specific macros, functions, instructions - * @version V1.0.2 - * @date 09. April 2018 + * @version V1.2.0 + * @date 17. May 2019 ******************************************************************************/ /* - * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * Copyright (c) 2009-2019 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -37,8 +37,9 @@ #endif /* CMSIS compiler specific defines */ + #ifndef __ASM - #define __ASM asm + #define __ASM __asm #endif #ifndef __INLINE #define __INLINE inline @@ -104,6 +105,123 @@ #ifndef __ALIGNED #define __ALIGNED(x) __attribute__((aligned(x))) #endif +#ifndef __COMPILER_BARRIER + #define __COMPILER_BARRIER() __ASM volatile("":::"memory") +#endif + + +__STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + + +__STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE int32_t __QADD( int32_t op1, int32_t op2) +{ + int32_t result; + + __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2) +{ + int32_t result; + + __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + 
diff --git a/Core_A/Include/cmsis_gcc.h b/Core_A/Include/cmsis_gcc.h
index 4f46462..0c407c2 100644
--- a/Core_A/Include/cmsis_gcc.h
+++ b/Core_A/Include/cmsis_gcc.h
@@ -1,11 +1,11 @@
 /**************************************************************************//**
  * @file     cmsis_gcc.h
  * @brief    CMSIS compiler specific macros, functions, instructions
- * @version  V1.0.2
- * @date     09. April 2018
+ * @version  V1.2.0
+ * @date     17. May 2019
  ******************************************************************************/
 /*
- * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
+ * Copyright (c) 2009-2019 Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -37,8 +37,9 @@
 #endif
 /* CMSIS compiler specific defines */
+
 #ifndef   __ASM
-  #define __ASM                                  asm
+  #define __ASM                                  __asm
 #endif
 #ifndef   __INLINE
   #define __INLINE                               inline
 #endif
@@ -104,6 +105,123 @@
 #ifndef   __ALIGNED
   #define __ALIGNED(x)                           __attribute__((aligned(x)))
 #endif
+#ifndef   __COMPILER_BARRIER
+  #define __COMPILER_BARRIER()                   __ASM volatile("":::"memory")
+#endif
+
+
+__STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE int32_t __QADD( int32_t op1,  int32_t op2)
+{
+  int32_t result;
+
+  __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)
+{
+  union llreg_u{
+    uint32_t w32[2];
+    uint64_t w64;
+  } llr;
+  llr.w64 = acc;
+
+#ifndef __ARMEB__   /* Little endian */
+  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
+#else               /* Big endian */
+  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
+#endif
+
+  return(llr.w64);
+}
+
+__STATIC_FORCEINLINE int32_t __QSUB( int32_t op1,  int32_t op2)
+{
+  int32_t result;
+
+  __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+#define __PKHBT(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0x0000FFFFUL) |  \
+                                           ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL)  )
+
+__STATIC_FORCEINLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
+{
+  uint32_t result;
+
+  __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
+{
+  uint32_t result;
+
+  __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)
+{
+  union llreg_u{
+    uint32_t w32[2];
+    uint64_t w64;
+  } llr;
+  llr.w64 = acc;
+
+#ifndef __ARMEB__   /* Little endian */
+  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
+#else               /* Big endian */
+  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
+#endif
+
+  return(llr.w64);
+}
+
+__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
+{
+  int32_t result;
+
+  __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
+  return(result);
+}
+
+
 
 /* ##########################  Core Instruction Access  ######################### */
 /**
@@ -171,7 +289,7 @@ __STATIC_FORCEINLINE uint32_t __REV(uint32_t value)
 #else
   uint32_t result;
 
-  __ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
+  __ASM volatile ("rev %0, %1" : "=r" (result) : "r" (value) );
   return result;
 #endif
 }
@@ -204,7 +322,7 @@ __STATIC_FORCEINLINE int16_t __REVSH(int16_t value)
 #else
   int16_t result;
 
-  __ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
+  __ASM volatile ("revsh %0, %1" : "=r" (result) : "r" (value) );
   return result;
 #endif
 }
@@ -267,7 +385,23 @@ __STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value)
   \param [in]  value  Value to count the leading zeros
   \return             number of leading zeros in value
  */
-#define __CLZ             (uint8_t)__builtin_clz
+__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value)
+{
+  /* Even though __builtin_clz produces a CLZ instruction on ARM, formally
+     __builtin_clz(0) is undefined behaviour, so handle this case specially.
+     This guarantees ARM-compatible results if happening to compile on a non-ARM
+     target, and ensures the compiler doesn't decide to activate any
+     optimisations using the logic "value was passed to __builtin_clz, so it
+     is non-zero".
+     ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a
+     single CLZ instruction.
+   */
+  if (value == 0U)
+  {
+    return 32U;
+  }
+  return __builtin_clz(value);
+}
 
 /**
   \brief   LDR Exclusive (8 bit)
@@ -667,10 +801,11 @@ __STATIC_INLINE void __FPU_Enable(void)
 #endif
 
     //Initialise FPSCR to a known state
-    "        VMRS R2,FPSCR            \n"
-    "        LDR R3,=0x00086060       \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.
-    "        AND R2,R2,R3             \n"
-    "        VMSR FPSCR,R2              "
+    "        VMRS R1,FPSCR            \n"
+    "        LDR R2,=0x00086060       \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.
+    "        AND R1,R1,R2             \n"
+    "        VMSR FPSCR,R1              "
+    : : : "cc", "r1", "r2"
   );
 }
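The GCC header gains the same DSP intrinsics as hand-written inline assembly, including the union trick in __SMLALD/__SMLALDX that splits the 64-bit accumulator into the two 32-bit registers the instruction expects, with the halves swapped for big-endian builds. A usage sketch for the dual 16x16 multiply-accumulate, assuming an even length and Q15 input data (dot_q15 is an illustrative helper, not a CMSIS function):

    #include <stdint.h>
    #include <string.h>
    #include "cmsis_compiler.h"

    /* acc += a[i]*b[i] + a[i+1]*b[i+1], two lanes per __SMLALD call. */
    static int64_t dot_q15(const int16_t *a, const int16_t *b, uint32_t len)
    {
      uint64_t acc = 0U;
      for (uint32_t i = 0U; i < len; i += 2U)
      {
        uint32_t va, vb;
        memcpy(&va, &a[i], sizeof va);   /* pack two halfwords per operand */
        memcpy(&vb, &b[i], sizeof vb);
        acc = __SMLALD(va, vb, acc);
      }
      return (int64_t)acc;
    }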
+ " AND R1,R1,R2 \n" + " VMSR FPSCR,R1 " + : : : "cc", "r1", "r2" ); } diff --git a/Core_A/Include/cmsis_iccarm.h b/Core_A/Include/cmsis_iccarm.h index bb0248d..7d44107 100644 --- a/Core_A/Include/cmsis_iccarm.h +++ b/Core_A/Include/cmsis_iccarm.h @@ -1,13 +1,14 @@ /**************************************************************************//** * @file cmsis_iccarm.h * @brief CMSIS compiler ICCARM (IAR Compiler for Arm) header file - * @version V5.0.6 - * @date 02. March 2018 + * @version V5.0.7 + * @date 15. May 2019 ******************************************************************************/ //------------------------------------------------------------------------------ // // Copyright (c) 2017-2018 IAR Systems +// Copyright (c) 2018-2019 Arm Limited // // Licensed under the Apache License, Version 2.0 (the "License") // you may not use this file except in compliance with the License. @@ -69,6 +70,10 @@ #define __ASM __asm #endif +#ifndef __COMPILER_BARRIER + #define __COMPILER_BARRIER() __ASM volatile("":::"memory") +#endif + #ifndef __INLINE #define __INLINE inline #endif @@ -109,7 +114,12 @@ #endif #ifndef __RESTRICT - #define __RESTRICT __restrict + #if __ICCARM_V8 + #define __RESTRICT __restrict + #else + /* Needs IAR language extensions */ + #define __RESTRICT restrict + #endif #endif #ifndef __STATIC_INLINE @@ -542,10 +552,12 @@ void __FPU_Enable(void) #endif //Initialise FPSCR to a known state - " VMRS R2,FPSCR \n" - " MOV32 R3,#0x00086060 \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero. - " AND R2,R2,R3 \n" - " VMSR FPSCR,R2 \n"); + " VMRS R1,FPSCR \n" + " MOV32 R2,#0x00086060 \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero. + " AND R1,R1,R2 \n" + " VMSR FPSCR,R1 \n" + : : : "cc", "r1", "r2" + ); } diff --git a/Core_A/Include/core_ca.h b/Core_A/Include/core_ca.h index dbe9794..0bef549 100644 --- a/Core_A/Include/core_ca.h +++ b/Core_A/Include/core_ca.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file core_ca.h * @brief CMSIS Cortex-A Core Peripheral Access Layer Header File - * @version V1.0.1 - * @date 07. May 2018 + * @version V1.0.2 + * @date 12. November 2018 ******************************************************************************/ /* - * Copyright (c) 2009-2017 ARM Limited. All rights reserved. + * Copyright (c) 2009-2018 ARM Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -28,13 +28,12 @@ #pragma clang system_header /* treat file as system include file */ #endif -#ifdef __cplusplus - extern "C" { -#endif - #ifndef __CORE_CA_H_GENERIC #define __CORE_CA_H_GENERIC +#ifdef __cplusplus + extern "C" { +#endif /******************************************************************************* * CMSIS definitions @@ -59,7 +58,7 @@ #endif #elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) - #if defined __ARM_PCS_VFP + #if defined __ARM_FP #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) #define __FPU_USED 1U #else |