From f7de54fc6fa6b40dfa2dfbe4c2a8ee933affa126 Mon Sep 17 00:00:00 2001
From: JanHenrik
Date: Wed, 1 Apr 2020 00:40:03 +0200
Subject: added files

---
 .../Drivers/CMSIS/Core_A/Include/cmsis_armcc.h | 544 ++++
 .../Drivers/CMSIS/Core_A/Include/cmsis_armclang.h | 503 ++++
 .../Drivers/CMSIS/Core_A/Include/cmsis_compiler.h | 201 ++
 .../Drivers/CMSIS/Core_A/Include/cmsis_cp15.h | 514 ++++
 .../Drivers/CMSIS/Core_A/Include/cmsis_gcc.h | 679 +++++
 .../Drivers/CMSIS/Core_A/Include/cmsis_iccarm.h | 559 +++++
 midi-dials/Drivers/CMSIS/Core_A/Include/core_ca.h | 2614 ++++++++++++++++++++
 midi-dials/Drivers/CMSIS/Core_A/Include/irq_ctrl.h | 186 ++
 .../Drivers/CMSIS/Core_A/Source/irq_ctrl_gic.c | 410 +++
 9 files changed, 6210 insertions(+)
 create mode 100644 midi-dials/Drivers/CMSIS/Core_A/Include/cmsis_armcc.h
 create mode 100644 midi-dials/Drivers/CMSIS/Core_A/Include/cmsis_armclang.h
 create mode 100644 midi-dials/Drivers/CMSIS/Core_A/Include/cmsis_compiler.h
 create mode 100644 midi-dials/Drivers/CMSIS/Core_A/Include/cmsis_cp15.h
 create mode 100644 midi-dials/Drivers/CMSIS/Core_A/Include/cmsis_gcc.h
 create mode 100644 midi-dials/Drivers/CMSIS/Core_A/Include/cmsis_iccarm.h
 create mode 100644 midi-dials/Drivers/CMSIS/Core_A/Include/core_ca.h
 create mode 100644 midi-dials/Drivers/CMSIS/Core_A/Include/irq_ctrl.h
 create mode 100644 midi-dials/Drivers/CMSIS/Core_A/Source/irq_ctrl_gic.c

diff --git a/midi-dials/Drivers/CMSIS/Core_A/Include/cmsis_armcc.h b/midi-dials/Drivers/CMSIS/Core_A/Include/cmsis_armcc.h
new file mode 100644
index 0000000..b2ccb1f
--- /dev/null
+++ b/midi-dials/Drivers/CMSIS/Core_A/Include/cmsis_armcc.h
@@ -0,0 +1,544 @@
+/**************************************************************************//**
+ * @file cmsis_armcc.h
+ * @brief CMSIS compiler specific macros, functions, instructions
+ * @version V1.0.2
+ * @date 10. January 2018
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CMSIS_ARMCC_H
+#define __CMSIS_ARMCC_H
+
+#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 400677)
+ #error "Please use Arm Compiler Toolchain V4.0.677 or later!"
+#endif + +/* CMSIS compiler control architecture macros */ +#if (defined (__TARGET_ARCH_7_A ) && (__TARGET_ARCH_7_A == 1)) + #define __ARM_ARCH_7A__ 1 +#endif + +/* CMSIS compiler specific defines */ +#ifndef __ASM + #define __ASM __asm +#endif +#ifndef __INLINE + #define __INLINE __inline +#endif +#ifndef __FORCEINLINE + #define __FORCEINLINE __forceinline +#endif +#ifndef __STATIC_INLINE + #define __STATIC_INLINE static __inline +#endif +#ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE static __forceinline +#endif +#ifndef __NO_RETURN + #define __NO_RETURN __declspec(noreturn) +#endif +#ifndef CMSIS_DEPRECATED + #define CMSIS_DEPRECATED __attribute__((deprecated)) +#endif +#ifndef __USED + #define __USED __attribute__((used)) +#endif +#ifndef __WEAK + #define __WEAK __attribute__((weak)) +#endif +#ifndef __PACKED + #define __PACKED __attribute__((packed)) +#endif +#ifndef __PACKED_STRUCT + #define __PACKED_STRUCT __packed struct +#endif +#ifndef __UNALIGNED_UINT16_WRITE + #define __UNALIGNED_UINT16_WRITE(addr, val) ((*((__packed uint16_t *)(addr))) = (val)) +#endif +#ifndef __UNALIGNED_UINT16_READ + #define __UNALIGNED_UINT16_READ(addr) (*((const __packed uint16_t *)(addr))) +#endif +#ifndef __UNALIGNED_UINT32_WRITE + #define __UNALIGNED_UINT32_WRITE(addr, val) ((*((__packed uint32_t *)(addr))) = (val)) +#endif +#ifndef __UNALIGNED_UINT32_READ + #define __UNALIGNED_UINT32_READ(addr) (*((const __packed uint32_t *)(addr))) +#endif +#ifndef __ALIGNED + #define __ALIGNED(x) __attribute__((aligned(x))) +#endif +#ifndef __PACKED + #define __PACKED __attribute__((packed)) +#endif + +/* ########################## Core Instruction Access ######################### */ +/** + \brief No Operation + */ +#define __NOP __nop + +/** + \brief Wait For Interrupt + */ +#define __WFI __wfi + +/** + \brief Wait For Event + */ +#define __WFE __wfe + +/** + \brief Send Event + */ +#define __SEV __sev + +/** + \brief Instruction Synchronization Barrier + */ +#define __ISB() do {\ + __schedule_barrier();\ + __isb(0xF);\ + __schedule_barrier();\ + } while (0U) + +/** + \brief Data Synchronization Barrier + */ +#define __DSB() do {\ + __schedule_barrier();\ + __dsb(0xF);\ + __schedule_barrier();\ + } while (0U) + +/** + \brief Data Memory Barrier + */ +#define __DMB() do {\ + __schedule_barrier();\ + __dmb(0xF);\ + __schedule_barrier();\ + } while (0U) + +/** + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REV __rev + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value + */ +#ifndef __NO_EMBEDDED_ASM +__attribute__((section(".rev16_text"))) __STATIC_INLINE __ASM uint32_t __REV16(uint32_t value) +{ + rev16 r0, r0 + bx lr +} +#endif + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. 
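+ A minimal usage sketch (editorial example; the input value is assumed):
+ \code
+ int16_t r = __REVSH((int16_t)0x0080); // r == (int16_t)0x8000, i.e. -32768
+ \endcode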
+ \param [in] value Value to reverse + \return Reversed value + */ +#ifndef __NO_EMBEDDED_ASM +__attribute__((section(".revsh_text"))) __STATIC_INLINE __ASM int16_t __REVSH(int16_t value) +{ + revsh r0, r0 + bx lr +} +#endif + +/** + \brief Rotate Right in unsigned value (32 bit) + \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value + */ +#define __ROR __ror + +/** + \brief Breakpoint + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. + */ +#define __BKPT(value) __breakpoint(value) + +/** + \brief Reverse bit order of value + \param [in] value Value to reverse + \return Reversed value + */ +#define __RBIT __rbit + +/** + \brief Count leading zeros + \param [in] value Value to count the leading zeros + \return number of leading zeros in value + */ +#define __CLZ __clz + +/** + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __LDREXB(ptr) ((uint8_t ) __ldrex(ptr)) +#else + #define __LDREXB(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint8_t ) __ldrex(ptr)) _Pragma("pop") +#endif + +/** + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __LDREXH(ptr) ((uint16_t) __ldrex(ptr)) +#else + #define __LDREXH(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint16_t) __ldrex(ptr)) _Pragma("pop") +#endif + +/** + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __LDREXW(ptr) ((uint32_t ) __ldrex(ptr)) +#else + #define __LDREXW(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint32_t ) __ldrex(ptr)) _Pragma("pop") +#endif + +/** + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __STREXB(value, ptr) __strex(value, ptr) +#else + #define __STREXB(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop") +#endif + +/** + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __STREXH(value, ptr) __strex(value, ptr) +#else + #define __STREXH(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop") +#endif + +/** + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. 
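+ Typically paired with __LDREXW in a retry loop; a minimal sketch
+ (editorial example; the shared variable 'counter' is assumed):
+ \code
+ volatile uint32_t counter;
+ uint32_t v;
+ do {
+   v = __LDREXW(&counter);                   // exclusive read
+ } while (__STREXW(v + 1U, &counter) != 0U); // 0 means the store succeeded
+ \endcode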
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __STREXW(value, ptr) __strex(value, ptr) +#else + #define __STREXW(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop") +#endif + +/** + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. + */ +#define __CLREX __clrex + + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +#define __SSAT __ssat + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +#define __USAT __usat + +/* ########################### Core Function Access ########################### */ + +/** + \brief Get FPSCR (Floating Point Status/Control) + \return Floating Point Status/Control register value + */ +__STATIC_INLINE uint32_t __get_FPSCR(void) +{ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) + register uint32_t __regfpscr __ASM("fpscr"); + return(__regfpscr); +#else + return(0U); +#endif +} + +/** + \brief Set FPSCR (Floating Point Status/Control) + \param [in] fpscr Floating Point Status/Control value to set + */ +__STATIC_INLINE void __set_FPSCR(uint32_t fpscr) +{ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) + register uint32_t __regfpscr __ASM("fpscr"); + __regfpscr = (fpscr); +#else + (void)fpscr; +#endif +} + +/** \brief Get CPSR (Current Program Status Register) + \return CPSR Register value + */ +__STATIC_INLINE uint32_t __get_CPSR(void) +{ + register uint32_t __regCPSR __ASM("cpsr"); + return(__regCPSR); +} + + +/** \brief Set CPSR (Current Program Status Register) + \param [in] cpsr CPSR value to set + */ +__STATIC_INLINE void __set_CPSR(uint32_t cpsr) +{ + register uint32_t __regCPSR __ASM("cpsr"); + __regCPSR = cpsr; +} + +/** \brief Get Mode + \return Processor Mode + */ +__STATIC_INLINE uint32_t __get_mode(void) +{ + return (__get_CPSR() & 0x1FU); +} + +/** \brief Set Mode + \param [in] mode Mode value to set + */ +__STATIC_INLINE __ASM void __set_mode(uint32_t mode) +{ + MOV r1, lr + MSR CPSR_C, r0 + BX r1 +} + +/** \brief Get Stack Pointer + \return Stack Pointer + */ +__STATIC_INLINE __ASM uint32_t __get_SP(void) +{ + MOV r0, sp + BX lr +} + +/** \brief Set Stack Pointer + \param [in] stack Stack Pointer value to set + */ +__STATIC_INLINE __ASM void __set_SP(uint32_t stack) +{ + MOV sp, r0 + BX lr +} + + +/** \brief Get USR/SYS Stack Pointer + \return USR/SYSStack Pointer + */ +__STATIC_INLINE __ASM uint32_t __get_SP_usr(void) +{ + ARM + PRESERVE8 + + MRS R1, CPSR + CPS #0x1F ;no effect in USR mode + MOV R0, SP + MSR CPSR_c, R1 ;no effect in USR mode + ISB + BX LR +} + +/** \brief Set USR/SYS Stack Pointer + \param [in] topOfProcStack USR/SYS Stack Pointer value to set + */ +__STATIC_INLINE __ASM void __set_SP_usr(uint32_t topOfProcStack) +{ + ARM + PRESERVE8 + + MRS R1, CPSR + CPS #0x1F ;no effect in USR mode + MOV SP, R0 + MSR CPSR_c, R1 ;no effect in USR mode + ISB + BX LR +} + +/** \brief Get FPEXC (Floating Point Exception Control Register) + \return Floating Point Exception Control Register 
value + */ +__STATIC_INLINE uint32_t __get_FPEXC(void) +{ +#if (__FPU_PRESENT == 1) + register uint32_t __regfpexc __ASM("fpexc"); + return(__regfpexc); +#else + return(0); +#endif +} + +/** \brief Set FPEXC (Floating Point Exception Control Register) + \param [in] fpexc Floating Point Exception Control value to set + */ +__STATIC_INLINE void __set_FPEXC(uint32_t fpexc) +{ +#if (__FPU_PRESENT == 1) + register uint32_t __regfpexc __ASM("fpexc"); + __regfpexc = (fpexc); +#endif +} + +/* + * Include common core functions to access Coprocessor 15 registers + */ + +#define __get_CP(cp, op1, Rt, CRn, CRm, op2) do { register volatile uint32_t tmp __ASM("cp" # cp ":" # op1 ":c" # CRn ":c" # CRm ":" # op2); (Rt) = tmp; } while(0) +#define __set_CP(cp, op1, Rt, CRn, CRm, op2) do { register volatile uint32_t tmp __ASM("cp" # cp ":" # op1 ":c" # CRn ":c" # CRm ":" # op2); tmp = (Rt); } while(0) +#define __get_CP64(cp, op1, Rt, CRm) \ + do { \ + uint32_t ltmp, htmp; \ + __ASM volatile("MRRC p" # cp ", " # op1 ", ltmp, htmp, c" # CRm); \ + (Rt) = ((((uint64_t)htmp) << 32U) | ((uint64_t)ltmp)); \ + } while(0) + +#define __set_CP64(cp, op1, Rt, CRm) \ + do { \ + const uint64_t tmp = (Rt); \ + const uint32_t ltmp = (uint32_t)(tmp); \ + const uint32_t htmp = (uint32_t)(tmp >> 32U); \ + __ASM volatile("MCRR p" # cp ", " # op1 ", ltmp, htmp, c" # CRm); \ + } while(0) + +#include "cmsis_cp15.h" + +/** \brief Enable Floating Point Unit + + Critical section, called from undef handler, so systick is disabled + */ +__STATIC_INLINE __ASM void __FPU_Enable(void) +{ + ARM + + //Permit access to VFP/NEON, registers by modifying CPACR + MRC p15,0,R1,c1,c0,2 + ORR R1,R1,#0x00F00000 + MCR p15,0,R1,c1,c0,2 + + //Ensure that subsequent instructions occur in the context of VFP/NEON access permitted + ISB + + //Enable VFP/NEON + VMRS R1,FPEXC + ORR R1,R1,#0x40000000 + VMSR FPEXC,R1 + + //Initialise VFP/NEON registers to 0 + MOV R2,#0 + + //Initialise D16 registers to 0 + VMOV D0, R2,R2 + VMOV D1, R2,R2 + VMOV D2, R2,R2 + VMOV D3, R2,R2 + VMOV D4, R2,R2 + VMOV D5, R2,R2 + VMOV D6, R2,R2 + VMOV D7, R2,R2 + VMOV D8, R2,R2 + VMOV D9, R2,R2 + VMOV D10,R2,R2 + VMOV D11,R2,R2 + VMOV D12,R2,R2 + VMOV D13,R2,R2 + VMOV D14,R2,R2 + VMOV D15,R2,R2 + + IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} == 32 + //Initialise D32 registers to 0 + VMOV D16,R2,R2 + VMOV D17,R2,R2 + VMOV D18,R2,R2 + VMOV D19,R2,R2 + VMOV D20,R2,R2 + VMOV D21,R2,R2 + VMOV D22,R2,R2 + VMOV D23,R2,R2 + VMOV D24,R2,R2 + VMOV D25,R2,R2 + VMOV D26,R2,R2 + VMOV D27,R2,R2 + VMOV D28,R2,R2 + VMOV D29,R2,R2 + VMOV D30,R2,R2 + VMOV D31,R2,R2 + ENDIF + + //Initialise FPSCR to a known state + VMRS R2,FPSCR + LDR R3,=0x00086060 //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero. + AND R2,R2,R3 + VMSR FPSCR,R2 + + BX LR +} + +#endif /* __CMSIS_ARMCC_H */ diff --git a/midi-dials/Drivers/CMSIS/Core_A/Include/cmsis_armclang.h b/midi-dials/Drivers/CMSIS/Core_A/Include/cmsis_armclang.h new file mode 100644 index 0000000..e0de5a4 --- /dev/null +++ b/midi-dials/Drivers/CMSIS/Core_A/Include/cmsis_armclang.h @@ -0,0 +1,503 @@ +/**************************************************************************//** + * @file cmsis_armclang.h + * @brief CMSIS compiler specific macros, functions, instructions + * @version V1.0.2 + * @date 10. January 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. 
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CMSIS_ARMCLANG_H
+#define __CMSIS_ARMCLANG_H
+
+#pragma clang system_header /* treat file as system include file */
+
+#ifndef __ARM_COMPAT_H
+#include <arm_compat.h> /* Compatibility header for Arm Compiler 5 intrinsics */
+#endif
+
+/* CMSIS compiler specific defines */
+#ifndef __ASM
+ #define __ASM __asm
+#endif
+#ifndef __INLINE
+ #define __INLINE __inline
+#endif
+#ifndef __FORCEINLINE
+ #define __FORCEINLINE __attribute__((always_inline))
+#endif
+#ifndef __STATIC_INLINE
+ #define __STATIC_INLINE static __inline
+#endif
+#ifndef __STATIC_FORCEINLINE
+ #define __STATIC_FORCEINLINE __attribute__((always_inline)) static __inline
+#endif
+#ifndef __NO_RETURN
+ #define __NO_RETURN __attribute__((__noreturn__))
+#endif
+#ifndef CMSIS_DEPRECATED
+ #define CMSIS_DEPRECATED __attribute__((deprecated))
+#endif
+#ifndef __USED
+ #define __USED __attribute__((used))
+#endif
+#ifndef __WEAK
+ #define __WEAK __attribute__((weak))
+#endif
+#ifndef __PACKED
+ #define __PACKED __attribute__((packed, aligned(1)))
+#endif
+#ifndef __PACKED_STRUCT
+ #define __PACKED_STRUCT struct __attribute__((packed, aligned(1)))
+#endif
+#ifndef __UNALIGNED_UINT16_WRITE
+ #pragma clang diagnostic push
+ #pragma clang diagnostic ignored "-Wpacked"
+/*lint -esym(9058, T_UINT16_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_WRITE */
+ __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; };
+ #pragma clang diagnostic pop
+ #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val))
+#endif
+#ifndef __UNALIGNED_UINT16_READ
+ #pragma clang diagnostic push
+ #pragma clang diagnostic ignored "-Wpacked"
+/*lint -esym(9058, T_UINT16_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_READ */
+ __PACKED_STRUCT T_UINT16_READ { uint16_t v; };
+ #pragma clang diagnostic pop
+ #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v)
+#endif
+#ifndef __UNALIGNED_UINT32_WRITE
+ #pragma clang diagnostic push
+ #pragma clang diagnostic ignored "-Wpacked"
+/*lint -esym(9058, T_UINT32_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_WRITE */
+ __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; };
+ #pragma clang diagnostic pop
+ #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val))
+#endif
+#ifndef __UNALIGNED_UINT32_READ
+ #pragma clang diagnostic push
+ #pragma clang diagnostic ignored "-Wpacked"
+ __PACKED_STRUCT T_UINT32_READ { uint32_t v; };
+ #pragma clang diagnostic pop
+ #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v)
+#endif
+#ifndef __ALIGNED
+ #define __ALIGNED(x) __attribute__((aligned(x)))
+#endif
+#ifndef __PACKED
+ #define __PACKED __attribute__((packed))
+#endif
+
+/* ########################## Core Instruction Access ######################### */
+/**
+ \brief No Operation
+ */
+#define __NOP __builtin_arm_nop
+
+/**
+ \brief Wait For Interrupt
+ */
+#define __WFI __builtin_arm_wfi + +/** + \brief Wait For Event + */ +#define __WFE __builtin_arm_wfe + +/** + \brief Send Event + */ +#define __SEV __builtin_arm_sev + +/** + \brief Instruction Synchronization Barrier + */ +#define __ISB() do {\ + __schedule_barrier();\ + __builtin_arm_isb(0xF);\ + __schedule_barrier();\ + } while (0U) + +/** + \brief Data Synchronization Barrier + */ +#define __DSB() do {\ + __schedule_barrier();\ + __builtin_arm_dsb(0xF);\ + __schedule_barrier();\ + } while (0U) + +/** + \brief Data Memory Barrier + */ +#define __DMB() do {\ + __schedule_barrier();\ + __builtin_arm_dmb(0xF);\ + __schedule_barrier();\ + } while (0U) + +/** + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REV(value) __builtin_bswap32(value) + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REV16(value) __ROR(__REV(value), 16) + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REVSH(value) (int16_t)__builtin_bswap16(value) + + +/** + \brief Rotate Right in unsigned value (32 bit) + \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. + \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value + */ +__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) +{ + op2 %= 32U; + if (op2 == 0U) + { + return op1; + } + return (op1 >> op2) | (op1 << (32U - op2)); +} + + +/** + \brief Breakpoint + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. + */ +#define __BKPT(value) __ASM volatile ("bkpt "#value) + +/** + \brief Reverse bit order of value + \param [in] value Value to reverse + \return Reversed value + */ +#define __RBIT __builtin_arm_rbit + +/** + \brief Count leading zeros + \param [in] value Value to count the leading zeros + \return number of leading zeros in value + */ +#define __CLZ (uint8_t)__builtin_clz + +/** + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDREXB (uint8_t)__builtin_arm_ldrex + + +/** + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDREXH (uint16_t)__builtin_arm_ldrex + +/** + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDREXW (uint32_t)__builtin_arm_ldrex + +/** + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. 
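+ A minimal test-and-set sketch using the exclusive byte pair
+ (editorial example; the flag variable 'lock_flag' is assumed):
+ \code
+ volatile uint8_t lock_flag;
+ do {
+   while (__LDREXB(&lock_flag) != 0U) {}     // spin while the flag is taken
+ } while (__STREXB(1U, &lock_flag) != 0U);   // 0 means the store succeeded
+ \endcode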
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXB (uint32_t)__builtin_arm_strex + +/** + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXH (uint32_t)__builtin_arm_strex + +/** + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXW (uint32_t)__builtin_arm_strex + +/** + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. + */ +#define __CLREX __builtin_arm_clrex + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +#define __SSAT __builtin_arm_ssat + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +#define __USAT __builtin_arm_usat + + +/* ########################### Core Function Access ########################### */ + +/** + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register. + \return Floating Point Status/Control register value + */ +#define __get_FPSCR __builtin_arm_get_fpscr + +/** + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. 
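+ For example, a read-modify-write that clears the cumulative exception flags
+ (editorial sketch; the mask covers FPSCR bits 7 and 4:0):
+ \code
+ __set_FPSCR(__get_FPSCR() & ~0x9FU); // clear IDC, IXC, UFC, OFC, DZC, IOC
+ \endcode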
+ \param [in] fpscr Floating Point Status/Control value to set + */ +#define __set_FPSCR __builtin_arm_set_fpscr + +/** \brief Get CPSR Register + \return CPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_CPSR(void) +{ + uint32_t result; + __ASM volatile("MRS %0, cpsr" : "=r" (result) ); + return(result); +} + +/** \brief Set CPSR Register + \param [in] cpsr CPSR value to set + */ +__STATIC_FORCEINLINE void __set_CPSR(uint32_t cpsr) +{ +__ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "memory"); +} + +/** \brief Get Mode + \return Processor Mode + */ +__STATIC_FORCEINLINE uint32_t __get_mode(void) +{ + return (__get_CPSR() & 0x1FU); +} + +/** \brief Set Mode + \param [in] mode Mode value to set + */ +__STATIC_FORCEINLINE void __set_mode(uint32_t mode) +{ + __ASM volatile("MSR cpsr_c, %0" : : "r" (mode) : "memory"); +} + +/** \brief Get Stack Pointer + \return Stack Pointer value + */ +__STATIC_FORCEINLINE uint32_t __get_SP() +{ + uint32_t result; + __ASM volatile("MOV %0, sp" : "=r" (result) : : "memory"); + return result; +} + +/** \brief Set Stack Pointer + \param [in] stack Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_SP(uint32_t stack) +{ + __ASM volatile("MOV sp, %0" : : "r" (stack) : "memory"); +} + +/** \brief Get USR/SYS Stack Pointer + \return USR/SYS Stack Pointer value + */ +__STATIC_FORCEINLINE uint32_t __get_SP_usr() +{ + uint32_t cpsr; + uint32_t result; + __ASM volatile( + "MRS %0, cpsr \n" + "CPS #0x1F \n" // no effect in USR mode + "MOV %1, sp \n" + "MSR cpsr_c, %2 \n" // no effect in USR mode + "ISB" : "=r"(cpsr), "=r"(result) : "r"(cpsr) : "memory" + ); + return result; +} + +/** \brief Set USR/SYS Stack Pointer + \param [in] topOfProcStack USR/SYS Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_SP_usr(uint32_t topOfProcStack) +{ + uint32_t cpsr; + __ASM volatile( + "MRS %0, cpsr \n" + "CPS #0x1F \n" // no effect in USR mode + "MOV sp, %1 \n" + "MSR cpsr_c, %2 \n" // no effect in USR mode + "ISB" : "=r"(cpsr) : "r" (topOfProcStack), "r"(cpsr) : "memory" + ); +} + +/** \brief Get FPEXC + \return Floating Point Exception Control register value + */ +__STATIC_FORCEINLINE uint32_t __get_FPEXC(void) +{ +#if (__FPU_PRESENT == 1) + uint32_t result; + __ASM volatile("VMRS %0, fpexc" : "=r" (result) : : "memory"); + return(result); +#else + return(0); +#endif +} + +/** \brief Set FPEXC + \param [in] fpexc Floating Point Exception Control value to set + */ +__STATIC_FORCEINLINE void __set_FPEXC(uint32_t fpexc) +{ +#if (__FPU_PRESENT == 1) + __ASM volatile ("VMSR fpexc, %0" : : "r" (fpexc) : "memory"); +#endif +} + +/* + * Include common core functions to access Coprocessor 15 registers + */ + +#define __get_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MRC p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : "=r" (Rt) : : "memory" ) +#define __set_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MCR p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : : "r" (Rt) : "memory" ) +#define __get_CP64(cp, op1, Rt, CRm) __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" ) +#define __set_CP64(cp, op1, Rt, CRm) __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" ) + +#include "cmsis_cp15.h" + +/** \brief Enable Floating Point Unit + + Critical section, called from undef handler, so systick is disabled + */ +__STATIC_INLINE void __FPU_Enable(void) +{ + __ASM volatile( + //Permit access to VFP/NEON, registers by modifying CPACR + " MRC p15,0,R1,c1,c0,2 \n" + " ORR 
R1,R1,#0x00F00000 \n" + " MCR p15,0,R1,c1,c0,2 \n" + + //Ensure that subsequent instructions occur in the context of VFP/NEON access permitted + " ISB \n" + + //Enable VFP/NEON + " VMRS R1,FPEXC \n" + " ORR R1,R1,#0x40000000 \n" + " VMSR FPEXC,R1 \n" + + //Initialise VFP/NEON registers to 0 + " MOV R2,#0 \n" + + //Initialise D16 registers to 0 + " VMOV D0, R2,R2 \n" + " VMOV D1, R2,R2 \n" + " VMOV D2, R2,R2 \n" + " VMOV D3, R2,R2 \n" + " VMOV D4, R2,R2 \n" + " VMOV D5, R2,R2 \n" + " VMOV D6, R2,R2 \n" + " VMOV D7, R2,R2 \n" + " VMOV D8, R2,R2 \n" + " VMOV D9, R2,R2 \n" + " VMOV D10,R2,R2 \n" + " VMOV D11,R2,R2 \n" + " VMOV D12,R2,R2 \n" + " VMOV D13,R2,R2 \n" + " VMOV D14,R2,R2 \n" + " VMOV D15,R2,R2 \n" + +#if __ARM_NEON == 1 + //Initialise D32 registers to 0 + " VMOV D16,R2,R2 \n" + " VMOV D17,R2,R2 \n" + " VMOV D18,R2,R2 \n" + " VMOV D19,R2,R2 \n" + " VMOV D20,R2,R2 \n" + " VMOV D21,R2,R2 \n" + " VMOV D22,R2,R2 \n" + " VMOV D23,R2,R2 \n" + " VMOV D24,R2,R2 \n" + " VMOV D25,R2,R2 \n" + " VMOV D26,R2,R2 \n" + " VMOV D27,R2,R2 \n" + " VMOV D28,R2,R2 \n" + " VMOV D29,R2,R2 \n" + " VMOV D30,R2,R2 \n" + " VMOV D31,R2,R2 \n" +#endif + + //Initialise FPSCR to a known state + " VMRS R2,FPSCR \n" + " LDR R3,=0x00086060 \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero. + " AND R2,R2,R3 \n" + " VMSR FPSCR,R2 " + ); +} + +#endif /* __CMSIS_ARMCLANG_H */ diff --git a/midi-dials/Drivers/CMSIS/Core_A/Include/cmsis_compiler.h b/midi-dials/Drivers/CMSIS/Core_A/Include/cmsis_compiler.h new file mode 100644 index 0000000..cd14cbc --- /dev/null +++ b/midi-dials/Drivers/CMSIS/Core_A/Include/cmsis_compiler.h @@ -0,0 +1,201 @@ +/**************************************************************************//** + * @file cmsis_compiler.h + * @brief CMSIS compiler specific macros, functions, instructions + * @version V1.0.2 + * @date 10. January 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef __CMSIS_COMPILER_H
+#define __CMSIS_COMPILER_H
+
+#include <stdint.h>
+
+/*
+ * Arm Compiler 4/5
+ */
+#if defined ( __CC_ARM )
+ #include "cmsis_armcc.h"
+
+
+/*
+ * Arm Compiler 6 (armclang)
+ */
+#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
+ #include "cmsis_armclang.h"
+
+
+/*
+ * GNU Compiler
+ */
+#elif defined ( __GNUC__ )
+ #include "cmsis_gcc.h"
+
+
+/*
+ * IAR Compiler
+ */
+#elif defined ( __ICCARM__ )
+ #include "cmsis_iccarm.h"
+
+
+/*
+ * TI Arm Compiler
+ */
+#elif defined ( __TI_ARM__ )
+ #include <cmsis_ccs.h>
+
+ #ifndef __ASM
+ #define __ASM __asm
+ #endif
+ #ifndef __INLINE
+ #define __INLINE inline
+ #endif
+ #ifndef __STATIC_INLINE
+ #define __STATIC_INLINE static inline
+ #endif
+ #ifndef __STATIC_FORCEINLINE
+ #define __STATIC_FORCEINLINE __STATIC_INLINE
+ #endif
+ #ifndef __NO_RETURN
+ #define __NO_RETURN __attribute__((noreturn))
+ #endif
+ #ifndef CMSIS_DEPRECATED
+ #define CMSIS_DEPRECATED __attribute__((deprecated))
+ #endif
+ #ifndef __USED
+ #define __USED __attribute__((used))
+ #endif
+ #ifndef __WEAK
+ #define __WEAK __attribute__((weak))
+ #endif
+ #ifndef __UNALIGNED_UINT32
+ struct __attribute__((packed)) T_UINT32 { uint32_t v; };
+ #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v)
+ #endif
+ #ifndef __ALIGNED
+ #define __ALIGNED(x) __attribute__((aligned(x)))
+ #endif
+ #ifndef __PACKED
+ #define __PACKED __attribute__((packed))
+ #endif
+
+
+/*
+ * TASKING Compiler
+ */
+#elif defined ( __TASKING__ )
+ /*
+ * The CMSIS functions have been implemented as intrinsics in the compiler.
+ * Please use "carm -?i" to get an up to date list of all intrinsics,
+ * including the CMSIS ones.
+ */
+
+ #ifndef __ASM
+ #define __ASM __asm
+ #endif
+ #ifndef __INLINE
+ #define __INLINE inline
+ #endif
+ #ifndef __STATIC_INLINE
+ #define __STATIC_INLINE static inline
+ #endif
+ #ifndef __STATIC_FORCEINLINE
+ #define __STATIC_FORCEINLINE __STATIC_INLINE
+ #endif
+ #ifndef __NO_RETURN
+ #define __NO_RETURN __attribute__((noreturn))
+ #endif
+ #ifndef CMSIS_DEPRECATED
+ #define CMSIS_DEPRECATED __attribute__((deprecated))
+ #endif
+ #ifndef __USED
+ #define __USED __attribute__((used))
+ #endif
+ #ifndef __WEAK
+ #define __WEAK __attribute__((weak))
+ #endif
+ #ifndef __UNALIGNED_UINT32
+ struct __packed__ T_UINT32 { uint32_t v; };
+ #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v)
+ #endif
+ #ifndef __ALIGNED
+ #define __ALIGNED(x) __align(x)
+ #endif
+ #ifndef __PACKED
+ #define __PACKED __packed__
+ #endif
+
+
+/*
+ * COSMIC Compiler
+ */
+#elif defined ( __CSMC__ )
+ #include <cmsis_csm.h>
+
+ #ifndef __ASM
+ #define __ASM _asm
+ #endif
+ #ifndef __INLINE
+ #define __INLINE inline
+ #endif
+ #ifndef __STATIC_INLINE
+ #define __STATIC_INLINE static inline
+ #endif
+ #ifndef __STATIC_FORCEINLINE
+ #define __STATIC_FORCEINLINE __STATIC_INLINE
+ #endif
+ #ifndef __NO_RETURN
+ // NO RETURN is automatically detected hence no warning here
+ #define __NO_RETURN
+ #endif
+ #ifndef __USED
+ #warning No compiler specific solution for __USED. __USED is ignored.
+ #define __USED
+ #endif
+ #ifndef CMSIS_DEPRECATED
+ #warning No compiler specific solution for CMSIS_DEPRECATED. CMSIS_DEPRECATED is ignored.
+ #define CMSIS_DEPRECATED + #endif + #ifndef __WEAK + #define __WEAK __weak + #endif + #ifndef __UNALIGNED_UINT32 + @packed struct T_UINT32 { uint32_t v; }; + #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) + #endif + #ifndef __ALIGNED + #warning No compiler specific solution for __ALIGNED. __ALIGNED is ignored. + #define __ALIGNED(x) + #endif + #ifndef __PACKED + #define __PACKED @packed + #endif + + +#else + #error Unknown compiler. +#endif + + +#endif /* __CMSIS_COMPILER_H */ + diff --git a/midi-dials/Drivers/CMSIS/Core_A/Include/cmsis_cp15.h b/midi-dials/Drivers/CMSIS/Core_A/Include/cmsis_cp15.h new file mode 100644 index 0000000..75e57b8 --- /dev/null +++ b/midi-dials/Drivers/CMSIS/Core_A/Include/cmsis_cp15.h @@ -0,0 +1,514 @@ +/**************************************************************************//** + * @file cmsis_cp15.h + * @brief CMSIS compiler specific macros, functions, instructions + * @version V1.0.1 + * @date 07. Sep 2017 + ******************************************************************************/ +/* + * Copyright (c) 2009-2017 ARM Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CMSIS_CP15_H +#define __CMSIS_CP15_H + +/** \brief Get ACTLR + \return Auxiliary Control register value + */ +__STATIC_FORCEINLINE uint32_t __get_ACTLR(void) +{ + uint32_t result; + __get_CP(15, 0, result, 1, 0, 1); + return(result); +} + +/** \brief Set ACTLR + \param [in] actlr Auxiliary Control value to set + */ +__STATIC_FORCEINLINE void __set_ACTLR(uint32_t actlr) +{ + __set_CP(15, 0, actlr, 1, 0, 1); +} + +/** \brief Get CPACR + \return Coprocessor Access Control register value + */ +__STATIC_FORCEINLINE uint32_t __get_CPACR(void) +{ + uint32_t result; + __get_CP(15, 0, result, 1, 0, 2); + return result; +} + +/** \brief Set CPACR + \param [in] cpacr Coprocessor Access Control value to set + */ +__STATIC_FORCEINLINE void __set_CPACR(uint32_t cpacr) +{ + __set_CP(15, 0, cpacr, 1, 0, 2); +} + +/** \brief Get DFSR + \return Data Fault Status Register value + */ +__STATIC_FORCEINLINE uint32_t __get_DFSR(void) +{ + uint32_t result; + __get_CP(15, 0, result, 5, 0, 0); + return result; +} + +/** \brief Set DFSR + \param [in] dfsr Data Fault Status value to set + */ +__STATIC_FORCEINLINE void __set_DFSR(uint32_t dfsr) +{ + __set_CP(15, 0, dfsr, 5, 0, 0); +} + +/** \brief Get IFSR + \return Instruction Fault Status Register value + */ +__STATIC_FORCEINLINE uint32_t __get_IFSR(void) +{ + uint32_t result; + __get_CP(15, 0, result, 5, 0, 1); + return result; +} + +/** \brief Set IFSR + \param [in] ifsr Instruction Fault Status value to set + */ +__STATIC_FORCEINLINE void __set_IFSR(uint32_t ifsr) +{ + __set_CP(15, 0, ifsr, 5, 0, 1); +} + +/** \brief Get ISR + \return Interrupt Status 
Register value + */ +__STATIC_FORCEINLINE uint32_t __get_ISR(void) +{ + uint32_t result; + __get_CP(15, 0, result, 12, 1, 0); + return result; +} + +/** \brief Get CBAR + \return Configuration Base Address register value + */ +__STATIC_FORCEINLINE uint32_t __get_CBAR(void) +{ + uint32_t result; + __get_CP(15, 4, result, 15, 0, 0); + return result; +} + +/** \brief Get TTBR0 + + This function returns the value of the Translation Table Base Register 0. + + \return Translation Table Base Register 0 value + */ +__STATIC_FORCEINLINE uint32_t __get_TTBR0(void) +{ + uint32_t result; + __get_CP(15, 0, result, 2, 0, 0); + return result; +} + +/** \brief Set TTBR0 + + This function assigns the given value to the Translation Table Base Register 0. + + \param [in] ttbr0 Translation Table Base Register 0 value to set + */ +__STATIC_FORCEINLINE void __set_TTBR0(uint32_t ttbr0) +{ + __set_CP(15, 0, ttbr0, 2, 0, 0); +} + +/** \brief Get DACR + + This function returns the value of the Domain Access Control Register. + + \return Domain Access Control Register value + */ +__STATIC_FORCEINLINE uint32_t __get_DACR(void) +{ + uint32_t result; + __get_CP(15, 0, result, 3, 0, 0); + return result; +} + +/** \brief Set DACR + + This function assigns the given value to the Domain Access Control Register. + + \param [in] dacr Domain Access Control Register value to set + */ +__STATIC_FORCEINLINE void __set_DACR(uint32_t dacr) +{ + __set_CP(15, 0, dacr, 3, 0, 0); +} + +/** \brief Set SCTLR + + This function assigns the given value to the System Control Register. + + \param [in] sctlr System Control Register value to set + */ +__STATIC_FORCEINLINE void __set_SCTLR(uint32_t sctlr) +{ + __set_CP(15, 0, sctlr, 1, 0, 0); +} + +/** \brief Get SCTLR + \return System Control Register value + */ +__STATIC_FORCEINLINE uint32_t __get_SCTLR(void) +{ + uint32_t result; + __get_CP(15, 0, result, 1, 0, 0); + return result; +} + +/** \brief Set ACTRL + \param [in] actrl Auxiliary Control Register value to set + */ +__STATIC_FORCEINLINE void __set_ACTRL(uint32_t actrl) +{ + __set_CP(15, 0, actrl, 1, 0, 1); +} + +/** \brief Get ACTRL + \return Auxiliary Control Register value + */ +__STATIC_FORCEINLINE uint32_t __get_ACTRL(void) +{ + uint32_t result; + __get_CP(15, 0, result, 1, 0, 1); + return result; +} + +/** \brief Get MPIDR + + This function returns the value of the Multiprocessor Affinity Register. + + \return Multiprocessor Affinity Register value + */ +__STATIC_FORCEINLINE uint32_t __get_MPIDR(void) +{ + uint32_t result; + __get_CP(15, 0, result, 0, 0, 5); + return result; +} + +/** \brief Get VBAR + + This function returns the value of the Vector Base Address Register. + + \return Vector Base Address Register + */ +__STATIC_FORCEINLINE uint32_t __get_VBAR(void) +{ + uint32_t result; + __get_CP(15, 0, result, 12, 0, 0); + return result; +} + +/** \brief Set VBAR + + This function assigns the given value to the Vector Base Address Register. + + \param [in] vbar Vector Base Address Register value to set + */ +__STATIC_FORCEINLINE void __set_VBAR(uint32_t vbar) +{ + __set_CP(15, 0, vbar, 12, 0, 0); +} + +/** \brief Get MVBAR + + This function returns the value of the Monitor Vector Base Address Register. + + \return Monitor Vector Base Address Register + */ +__STATIC_FORCEINLINE uint32_t __get_MVBAR(void) +{ + uint32_t result; + __get_CP(15, 0, result, 12, 0, 1); + return result; +} + +/** \brief Set MVBAR + + This function assigns the given value to the Monitor Vector Base Address Register. 
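+ A hedged usage sketch (editorial example; 'monitor_vectors' is an assumed,
+ suitably aligned Secure-state vector table; writable from Secure PL1 only):
+ \code
+ extern uint32_t monitor_vectors[];
+ __set_MVBAR((uint32_t)monitor_vectors);
+ \endcode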
+ + \param [in] mvbar Monitor Vector Base Address Register value to set + */ +__STATIC_FORCEINLINE void __set_MVBAR(uint32_t mvbar) +{ + __set_CP(15, 0, mvbar, 12, 0, 1); +} + +#if (defined(__CORTEX_A) && (__CORTEX_A == 7U) && \ + defined(__TIM_PRESENT) && (__TIM_PRESENT == 1U)) || \ + defined(DOXYGEN) + +/** \brief Set CNTFRQ + + This function assigns the given value to PL1 Physical Timer Counter Frequency Register (CNTFRQ). + + \param [in] value CNTFRQ Register value to set +*/ +__STATIC_FORCEINLINE void __set_CNTFRQ(uint32_t value) +{ + __set_CP(15, 0, value, 14, 0, 0); +} + +/** \brief Get CNTFRQ + + This function returns the value of the PL1 Physical Timer Counter Frequency Register (CNTFRQ). + + \return CNTFRQ Register value + */ +__STATIC_FORCEINLINE uint32_t __get_CNTFRQ(void) +{ + uint32_t result; + __get_CP(15, 0, result, 14, 0 , 0); + return result; +} + +/** \brief Set CNTP_TVAL + + This function assigns the given value to PL1 Physical Timer Value Register (CNTP_TVAL). + + \param [in] value CNTP_TVAL Register value to set +*/ +__STATIC_FORCEINLINE void __set_CNTP_TVAL(uint32_t value) +{ + __set_CP(15, 0, value, 14, 2, 0); +} + +/** \brief Get CNTP_TVAL + + This function returns the value of the PL1 Physical Timer Value Register (CNTP_TVAL). + + \return CNTP_TVAL Register value + */ +__STATIC_FORCEINLINE uint32_t __get_CNTP_TVAL(void) +{ + uint32_t result; + __get_CP(15, 0, result, 14, 2, 0); + return result; +} + +/** \brief Get CNTPCT + + This function returns the value of the 64 bits PL1 Physical Count Register (CNTPCT). + + \return CNTPCT Register value + */ +__STATIC_FORCEINLINE uint64_t __get_CNTPCT(void) +{ + uint64_t result; + __get_CP64(15, 0, result, 14); + return result; +} + +/** \brief Set CNTP_CVAL + + This function assigns the given value to 64bits PL1 Physical Timer CompareValue Register (CNTP_CVAL). + + \param [in] value CNTP_CVAL Register value to set +*/ +__STATIC_FORCEINLINE void __set_CNTP_CVAL(uint64_t value) +{ + __set_CP64(15, 2, value, 14); +} + +/** \brief Get CNTP_CVAL + + This function returns the value of the 64 bits PL1 Physical Timer CompareValue Register (CNTP_CVAL). + + \return CNTP_CVAL Register value + */ +__STATIC_FORCEINLINE uint64_t __get_CNTP_CVAL(void) +{ + uint64_t result; + __get_CP64(15, 2, result, 14); + return result; +} + +/** \brief Set CNTP_CTL + + This function assigns the given value to PL1 Physical Timer Control Register (CNTP_CTL). + + \param [in] value CNTP_CTL Register value to set +*/ +__STATIC_FORCEINLINE void __set_CNTP_CTL(uint32_t value) +{ + __set_CP(15, 0, value, 14, 2, 1); +} + +/** \brief Get CNTP_CTL register + \return CNTP_CTL Register value + */ +__STATIC_FORCEINLINE uint32_t __get_CNTP_CTL(void) +{ + uint32_t result; + __get_CP(15, 0, result, 14, 2, 1); + return result; +} + +#endif + +/** \brief Set TLBIALL + + TLB Invalidate All + */ +__STATIC_FORCEINLINE void __set_TLBIALL(uint32_t value) +{ + __set_CP(15, 0, value, 8, 7, 0); +} + +/** \brief Set BPIALL. 
+ + Branch Predictor Invalidate All + */ +__STATIC_FORCEINLINE void __set_BPIALL(uint32_t value) +{ + __set_CP(15, 0, value, 7, 5, 6); +} + +/** \brief Set ICIALLU + + Instruction Cache Invalidate All + */ +__STATIC_FORCEINLINE void __set_ICIALLU(uint32_t value) +{ + __set_CP(15, 0, value, 7, 5, 0); +} + +/** \brief Set DCCMVAC + + Data cache clean + */ +__STATIC_FORCEINLINE void __set_DCCMVAC(uint32_t value) +{ + __set_CP(15, 0, value, 7, 10, 1); +} + +/** \brief Set DCIMVAC + + Data cache invalidate + */ +__STATIC_FORCEINLINE void __set_DCIMVAC(uint32_t value) +{ + __set_CP(15, 0, value, 7, 6, 1); +} + +/** \brief Set DCCIMVAC + + Data cache clean and invalidate + */ +__STATIC_FORCEINLINE void __set_DCCIMVAC(uint32_t value) +{ + __set_CP(15, 0, value, 7, 14, 1); +} + +/** \brief Set CSSELR + */ +__STATIC_FORCEINLINE void __set_CSSELR(uint32_t value) +{ +// __ASM volatile("MCR p15, 2, %0, c0, c0, 0" : : "r"(value) : "memory"); + __set_CP(15, 2, value, 0, 0, 0); +} + +/** \brief Get CSSELR + \return CSSELR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_CSSELR(void) +{ + uint32_t result; +// __ASM volatile("MRC p15, 2, %0, c0, c0, 0" : "=r"(result) : : "memory"); + __get_CP(15, 2, result, 0, 0, 0); + return result; +} + +/** \brief Set CCSIDR + \deprecated CCSIDR itself is read-only. Use __set_CSSELR to select cache level instead. + */ +CMSIS_DEPRECATED +__STATIC_FORCEINLINE void __set_CCSIDR(uint32_t value) +{ + __set_CSSELR(value); +} + +/** \brief Get CCSIDR + \return CCSIDR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_CCSIDR(void) +{ + uint32_t result; +// __ASM volatile("MRC p15, 1, %0, c0, c0, 0" : "=r"(result) : : "memory"); + __get_CP(15, 1, result, 0, 0, 0); + return result; +} + +/** \brief Get CLIDR + \return CLIDR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_CLIDR(void) +{ + uint32_t result; +// __ASM volatile("MRC p15, 1, %0, c0, c0, 1" : "=r"(result) : : "memory"); + __get_CP(15, 1, result, 0, 0, 1); + return result; +} + +/** \brief Set DCISW + */ +__STATIC_FORCEINLINE void __set_DCISW(uint32_t value) +{ +// __ASM volatile("MCR p15, 0, %0, c7, c6, 2" : : "r"(value) : "memory") + __set_CP(15, 0, value, 7, 6, 2); +} + +/** \brief Set DCCSW + */ +__STATIC_FORCEINLINE void __set_DCCSW(uint32_t value) +{ +// __ASM volatile("MCR p15, 0, %0, c7, c10, 2" : : "r"(value) : "memory") + __set_CP(15, 0, value, 7, 10, 2); +} + +/** \brief Set DCCISW + */ +__STATIC_FORCEINLINE void __set_DCCISW(uint32_t value) +{ +// __ASM volatile("MCR p15, 0, %0, c7, c14, 2" : : "r"(value) : "memory") + __set_CP(15, 0, value, 7, 14, 2); +} + +#endif diff --git a/midi-dials/Drivers/CMSIS/Core_A/Include/cmsis_gcc.h b/midi-dials/Drivers/CMSIS/Core_A/Include/cmsis_gcc.h new file mode 100644 index 0000000..fb1442c --- /dev/null +++ b/midi-dials/Drivers/CMSIS/Core_A/Include/cmsis_gcc.h @@ -0,0 +1,679 @@ +/**************************************************************************//** + * @file cmsis_gcc.h + * @brief CMSIS compiler specific macros, functions, instructions + * @version V1.0.2 + * @date 09. April 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __CMSIS_GCC_H +#define __CMSIS_GCC_H + +/* ignore some GCC warnings */ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wsign-conversion" +#pragma GCC diagnostic ignored "-Wconversion" +#pragma GCC diagnostic ignored "-Wunused-parameter" + +/* Fallback for __has_builtin */ +#ifndef __has_builtin + #define __has_builtin(x) (0) +#endif + +/* CMSIS compiler specific defines */ +#ifndef __ASM + #define __ASM asm +#endif +#ifndef __INLINE + #define __INLINE inline +#endif +#ifndef __FORCEINLINE + #define __FORCEINLINE __attribute__((always_inline)) +#endif +#ifndef __STATIC_INLINE + #define __STATIC_INLINE static inline +#endif +#ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline +#endif +#ifndef __NO_RETURN + #define __NO_RETURN __attribute__((__noreturn__)) +#endif +#ifndef CMSIS_DEPRECATED + #define CMSIS_DEPRECATED __attribute__((deprecated)) +#endif +#ifndef __USED + #define __USED __attribute__((used)) +#endif +#ifndef __WEAK + #define __WEAK __attribute__((weak)) +#endif +#ifndef __PACKED + #define __PACKED __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_STRUCT + #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) +#endif +#ifndef __UNALIGNED_UINT16_WRITE + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT16_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_WRITE */ + __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; + #pragma GCC diagnostic pop + #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT16_READ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT16_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_READ */ + __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; + #pragma GCC diagnostic pop + #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) +#endif +#ifndef __UNALIGNED_UINT32_WRITE + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT32_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_WRITE */ + __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; + #pragma GCC diagnostic pop + #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT32_READ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpacked" + __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; + #pragma GCC diagnostic pop + #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) +#endif +#ifndef __ALIGNED + #define __ALIGNED(x) __attribute__((aligned(x))) +#endif + +/* ########################## Core Instruction Access ######################### */ +/** + \brief No Operation + */ +#define __NOP() __ASM volatile ("nop") + +/** + \brief Wait For Interrupt + */ +#define __WFI() __ASM volatile ("wfi") + +/** + \brief Wait For Event + */ +#define __WFE() __ASM volatile ("wfe") + +/** + \brief Send Event + */ 
+#define __SEV() __ASM volatile ("sev") + +/** + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. + */ +__STATIC_FORCEINLINE void __ISB(void) +{ + __ASM volatile ("isb 0xF":::"memory"); +} + + +/** + \brief Data Synchronization Barrier + \details Acts as a special kind of Data Memory Barrier. + It completes when all explicit memory accesses before this instruction complete. + */ +__STATIC_FORCEINLINE void __DSB(void) +{ + __ASM volatile ("dsb 0xF":::"memory"); +} + +/** + \brief Data Memory Barrier + \details Ensures the apparent order of the explicit memory operations before + and after the instruction, without ensuring their completion. + */ +__STATIC_FORCEINLINE void __DMB(void) +{ + __ASM volatile ("dmb 0xF":::"memory"); +} + +/** + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. + \param [in] value Value to reverse + \return Reversed value + */ +__STATIC_FORCEINLINE uint32_t __REV(uint32_t value) +{ +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5) + return __builtin_bswap32(value); +#else + uint32_t result; + + __ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return result; +#endif +} + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value + */ +#ifndef __NO_EMBEDDED_ASM +__attribute__((section(".rev16_text"))) __STATIC_INLINE uint32_t __REV16(uint32_t value) +{ + uint32_t result; + __ASM volatile("rev16 %0, %1" : "=r" (result) : "r" (value)); + return result; +} +#endif + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. + \param [in] value Value to reverse + \return Reversed value + */ +__STATIC_FORCEINLINE int16_t __REVSH(int16_t value) +{ +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + return (int16_t)__builtin_bswap16(value); +#else + int16_t result; + + __ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return result; +#endif +} + +/** + \brief Rotate Right in unsigned value (32 bit) + \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. + \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value + */ +__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) +{ + op2 %= 32U; + if (op2 == 0U) { + return op1; + } + return (op1 >> op2) | (op1 << (32U - op2)); +} + + +/** + \brief Breakpoint + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. + */ +#define __BKPT(value) __ASM volatile ("bkpt "#value) + +/** + \brief Reverse bit order of value + \details Reverses the bit order of the given value. 
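+ For example (editorial sketch):
+ \code
+ uint32_t r = __RBIT(0x00000001U); // r == 0x80000000U
+ \endcode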
+ \param [in] value Value to reverse + \return Reversed value + */ +__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value) +{ + uint32_t result; + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) + __ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) ); +#else + int32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */ + + result = value; /* r will be reversed bits of v; first get LSB of v */ + for (value >>= 1U; value; value >>= 1U) + { + result <<= 1U; + result |= value & 1U; + s--; + } + result <<= s; /* shift when v's highest bits are zero */ +#endif + return result; +} + +/** + \brief Count leading zeros + \param [in] value Value to count the leading zeros + \return number of leading zeros in value + */ +#define __CLZ (uint8_t)__builtin_clz + +/** + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr) +{ + uint32_t result; + +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So has to use following less efficient pattern. + */ + __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); +#endif + return ((uint8_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr) +{ + uint32_t result; + +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So has to use following less efficient pattern. + */ + __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); +#endif + return ((uint16_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr) +{ + uint32_t result; + + __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) ); + return(result); +} + + +/** + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr) +{ + uint32_t result; + + __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); + return(result); +} + + +/** + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. 
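+ A bounded-retry sketch pairing __LDREXH/__STREXH (editorial example;
+ the shared halfword and the retry budget are assumed):
+ \code
+ volatile uint16_t val16;
+ uint32_t tries = 8U;
+ while (tries-- != 0U) {
+   uint16_t v = __LDREXH(&val16);
+   if (__STREXH((uint16_t)(v + 1U), &val16) == 0U) { break; } // stored
+ }
+ __CLREX(); // defensive: drop any still-open reservation
+ \endcode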
+  \param [in]  value  Value to store
+  \param [in]    ptr  Pointer to location
+  \return          0  Function succeeded
+  \return          1  Function failed
+ */
+__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr)
+{
+   uint32_t result;
+
+   __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
+   return(result);
+}
+
+
+/**
+  \brief   STR Exclusive (32 bit)
+  \details Executes an exclusive STR instruction for 32 bit values.
+  \param [in]  value  Value to store
+  \param [in]    ptr  Pointer to location
+  \return          0  Function succeeded
+  \return          1  Function failed
+ */
+__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr)
+{
+   uint32_t result;
+
+   __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );
+   return(result);
+}
+
+
+/**
+  \brief   Remove the exclusive lock
+  \details Removes the exclusive lock which is created by LDREX.
+ */
+__STATIC_FORCEINLINE void __CLREX(void)
+{
+  __ASM volatile ("clrex" ::: "memory");
+}
+
+/**
+  \brief   Signed Saturate
+  \details Saturates a signed value.
+  \param [in]  value  Value to be saturated
+  \param [in]    sat  Bit position to saturate to (1..32)
+  \return             Saturated value
+ */
+#define __SSAT(ARG1,ARG2) \
+__extension__ \
+({                          \
+  int32_t __RES, __ARG1 = (ARG1); \
+  __ASM ("ssat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
+  __RES; \
+ })
+
+
+/**
+  \brief   Unsigned Saturate
+  \details Saturates an unsigned value.
+  \param [in]  value  Value to be saturated
+  \param [in]    sat  Bit position to saturate to (0..31)
+  \return             Saturated value
+ */
+#define __USAT(ARG1,ARG2) \
+__extension__ \
+({                          \
+  uint32_t __RES, __ARG1 = (ARG1); \
+  __ASM ("usat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
+  __RES; \
+ })
+
+/* ###########################  Core Function Access  ########################### */
+
+/**
+  \brief   Enable IRQ Interrupts
+  \details Enables IRQ interrupts by clearing the I-bit in the CPSR.
+           Can only be executed in Privileged modes.
+ */
+__STATIC_FORCEINLINE void __enable_irq(void)
+{
+  __ASM volatile ("cpsie i" : : : "memory");
+}
+
+/**
+  \brief   Disable IRQ Interrupts
+  \details Disables IRQ interrupts by setting the I-bit in the CPSR.
+           Can only be executed in Privileged modes.
+ */
+__STATIC_FORCEINLINE void __disable_irq(void)
+{
+  __ASM volatile ("cpsid i" : : : "memory");
+}
+
+/**
+  \brief   Get FPSCR
+  \details Returns the current value of the Floating Point Status/Control register.
+  \return               Floating Point Status/Control register value
+*/
+__STATIC_FORCEINLINE uint32_t __get_FPSCR(void)
+{
+  #if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
+       (defined (__FPU_USED   ) && (__FPU_USED    == 1U))     )
+  #if __has_builtin(__builtin_arm_get_fpscr)
+  // Re-enable using built-in when GCC has been fixed
+  // || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2)
+    /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */
+    return __builtin_arm_get_fpscr();
+  #else
+    uint32_t result;
+
+    __ASM volatile ("VMRS %0, fpscr" : "=r" (result) );
+    return(result);
+  #endif
+  #else
+    return(0U);
+  #endif
+}
+
+/**
+  \brief   Set FPSCR
+  \details Assigns the given value to the Floating Point Status/Control register.
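+           A typical use is a read-modify-write of selected fields, e.g. a
+           sketch that selects the round-towards-minus-infinity mode
+           (FPSCR.RMode, bits [23:22], encoding 0b10):
+           __set_FPSCR((__get_FPSCR() & ~(3UL << 22U)) | (2UL << 22U));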
+ \param [in] fpscr Floating Point Status/Control value to set +*/ +__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) +{ + #if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) + #if __has_builtin(__builtin_arm_set_fpscr) + // Re-enable using built-in when GCC has been fixed + // || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2) + /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */ + __builtin_arm_set_fpscr(fpscr); + #else + __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc", "memory"); + #endif + #else + (void)fpscr; + #endif +} + +/** \brief Get CPSR Register + \return CPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_CPSR(void) +{ + uint32_t result; + __ASM volatile("MRS %0, cpsr" : "=r" (result) ); + return(result); +} + +/** \brief Set CPSR Register + \param [in] cpsr CPSR value to set + */ +__STATIC_FORCEINLINE void __set_CPSR(uint32_t cpsr) +{ +__ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "memory"); +} + +/** \brief Get Mode + \return Processor Mode + */ +__STATIC_FORCEINLINE uint32_t __get_mode(void) +{ + return (__get_CPSR() & 0x1FU); +} + +/** \brief Set Mode + \param [in] mode Mode value to set + */ +__STATIC_FORCEINLINE void __set_mode(uint32_t mode) +{ + __ASM volatile("MSR cpsr_c, %0" : : "r" (mode) : "memory"); +} + +/** \brief Get Stack Pointer + \return Stack Pointer value + */ +__STATIC_FORCEINLINE uint32_t __get_SP(void) +{ + uint32_t result; + __ASM volatile("MOV %0, sp" : "=r" (result) : : "memory"); + return result; +} + +/** \brief Set Stack Pointer + \param [in] stack Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_SP(uint32_t stack) +{ + __ASM volatile("MOV sp, %0" : : "r" (stack) : "memory"); +} + +/** \brief Get USR/SYS Stack Pointer + \return USR/SYS Stack Pointer value + */ +__STATIC_FORCEINLINE uint32_t __get_SP_usr(void) +{ + uint32_t cpsr = __get_CPSR(); + uint32_t result; + __ASM volatile( + "CPS #0x1F \n" + "MOV %0, sp " : "=r"(result) : : "memory" + ); + __set_CPSR(cpsr); + __ISB(); + return result; +} + +/** \brief Set USR/SYS Stack Pointer + \param [in] topOfProcStack USR/SYS Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_SP_usr(uint32_t topOfProcStack) +{ + uint32_t cpsr = __get_CPSR(); + __ASM volatile( + "CPS #0x1F \n" + "MOV sp, %0 " : : "r" (topOfProcStack) : "memory" + ); + __set_CPSR(cpsr); + __ISB(); +} + +/** \brief Get FPEXC + \return Floating Point Exception Control register value + */ +__STATIC_FORCEINLINE uint32_t __get_FPEXC(void) +{ +#if (__FPU_PRESENT == 1) + uint32_t result; + __ASM volatile("VMRS %0, fpexc" : "=r" (result) ); + return(result); +#else + return(0); +#endif +} + +/** \brief Set FPEXC + \param [in] fpexc Floating Point Exception Control value to set + */ +__STATIC_FORCEINLINE void __set_FPEXC(uint32_t fpexc) +{ +#if (__FPU_PRESENT == 1) + __ASM volatile ("VMSR fpexc, %0" : : "r" (fpexc) : "memory"); +#endif +} + +/* + * Include common core functions to access Coprocessor 15 registers + */ + +#define __get_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MRC p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : "=r" (Rt) : : "memory" ) +#define __set_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MCR p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : : "r" (Rt) : "memory" ) +#define __get_CP64(cp, op1, Rt, CRm) __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" ) +#define __set_CP64(cp, op1, Rt, CRm) __ASM volatile("MCRR p" # cp ", " 
# op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" ) + +#include "cmsis_cp15.h" + +/** \brief Enable Floating Point Unit + + Critical section, called from undef handler, so systick is disabled + */ +__STATIC_INLINE void __FPU_Enable(void) +{ + __ASM volatile( + //Permit access to VFP/NEON, registers by modifying CPACR + " MRC p15,0,R1,c1,c0,2 \n" + " ORR R1,R1,#0x00F00000 \n" + " MCR p15,0,R1,c1,c0,2 \n" + + //Ensure that subsequent instructions occur in the context of VFP/NEON access permitted + " ISB \n" + + //Enable VFP/NEON + " VMRS R1,FPEXC \n" + " ORR R1,R1,#0x40000000 \n" + " VMSR FPEXC,R1 \n" + + //Initialise VFP/NEON registers to 0 + " MOV R2,#0 \n" + + //Initialise D16 registers to 0 + " VMOV D0, R2,R2 \n" + " VMOV D1, R2,R2 \n" + " VMOV D2, R2,R2 \n" + " VMOV D3, R2,R2 \n" + " VMOV D4, R2,R2 \n" + " VMOV D5, R2,R2 \n" + " VMOV D6, R2,R2 \n" + " VMOV D7, R2,R2 \n" + " VMOV D8, R2,R2 \n" + " VMOV D9, R2,R2 \n" + " VMOV D10,R2,R2 \n" + " VMOV D11,R2,R2 \n" + " VMOV D12,R2,R2 \n" + " VMOV D13,R2,R2 \n" + " VMOV D14,R2,R2 \n" + " VMOV D15,R2,R2 \n" + +#if (defined(__ARM_NEON) && (__ARM_NEON == 1)) + //Initialise D32 registers to 0 + " VMOV D16,R2,R2 \n" + " VMOV D17,R2,R2 \n" + " VMOV D18,R2,R2 \n" + " VMOV D19,R2,R2 \n" + " VMOV D20,R2,R2 \n" + " VMOV D21,R2,R2 \n" + " VMOV D22,R2,R2 \n" + " VMOV D23,R2,R2 \n" + " VMOV D24,R2,R2 \n" + " VMOV D25,R2,R2 \n" + " VMOV D26,R2,R2 \n" + " VMOV D27,R2,R2 \n" + " VMOV D28,R2,R2 \n" + " VMOV D29,R2,R2 \n" + " VMOV D30,R2,R2 \n" + " VMOV D31,R2,R2 \n" +#endif + + //Initialise FPSCR to a known state + " VMRS R2,FPSCR \n" + " LDR R3,=0x00086060 \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero. + " AND R2,R2,R3 \n" + " VMSR FPSCR,R2 " + ); +} + +#pragma GCC diagnostic pop + +#endif /* __CMSIS_GCC_H */ diff --git a/midi-dials/Drivers/CMSIS/Core_A/Include/cmsis_iccarm.h b/midi-dials/Drivers/CMSIS/Core_A/Include/cmsis_iccarm.h new file mode 100644 index 0000000..c46c397 --- /dev/null +++ b/midi-dials/Drivers/CMSIS/Core_A/Include/cmsis_iccarm.h @@ -0,0 +1,559 @@ +/**************************************************************************//** + * @file cmsis_iccarm.h + * @brief CMSIS compiler ICCARM (IAR Compiler for Arm) header file + * @version V5.0.6 + * @date 02. March 2018 + ******************************************************************************/ + +//------------------------------------------------------------------------------ +// +// Copyright (c) 2017-2018 IAR Systems +// +// Licensed under the Apache License, Version 2.0 (the "License") +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +//------------------------------------------------------------------------------ + + +#ifndef __CMSIS_ICCARM_H__ +#define __CMSIS_ICCARM_H__ + +#ifndef __ICCARM__ + #error This file should only be compiled by ICCARM +#endif + +#pragma system_include + +#define __IAR_FT _Pragma("inline=forced") __intrinsic + +#if (__VER__ >= 8000000) + #define __ICCARM_V8 1 +#else + #define __ICCARM_V8 0 +#endif + +#pragma language=extended + +#ifndef __ALIGNED + #if __ICCARM_V8 + #define __ALIGNED(x) __attribute__((aligned(x))) + #elif (__VER__ >= 7080000) + /* Needs IAR language extensions */ + #define __ALIGNED(x) __attribute__((aligned(x))) + #else + #warning No compiler specific solution for __ALIGNED.__ALIGNED is ignored. + #define __ALIGNED(x) + #endif +#endif + + +/* Define compiler macros for CPU architecture, used in CMSIS 5. + */ +#if __ARM_ARCH_7A__ +/* Macro already defined */ +#else + #if defined(__ARM7A__) + #define __ARM_ARCH_7A__ 1 + #endif +#endif + +#ifndef __ASM + #define __ASM __asm +#endif + +#ifndef __INLINE + #define __INLINE inline +#endif + +#ifndef __NO_RETURN + #if __ICCARM_V8 + #define __NO_RETURN __attribute__((__noreturn__)) + #else + #define __NO_RETURN _Pragma("object_attribute=__noreturn") + #endif +#endif + +#ifndef __PACKED + /* Needs IAR language extensions */ + #if __ICCARM_V8 + #define __PACKED __attribute__((packed, aligned(1))) + #else + #define __PACKED __packed + #endif +#endif + +#ifndef __PACKED_STRUCT + /* Needs IAR language extensions */ + #if __ICCARM_V8 + #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) + #else + #define __PACKED_STRUCT __packed struct + #endif +#endif + +#ifndef __PACKED_UNION + /* Needs IAR language extensions */ + #if __ICCARM_V8 + #define __PACKED_UNION union __attribute__((packed, aligned(1))) + #else + #define __PACKED_UNION __packed union + #endif +#endif + +#ifndef __RESTRICT + #define __RESTRICT __restrict +#endif + +#ifndef __STATIC_INLINE + #define __STATIC_INLINE static inline +#endif + +#ifndef __FORCEINLINE + #define __FORCEINLINE _Pragma("inline=forced") +#endif + +#ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __FORCEINLINE __STATIC_INLINE +#endif + +#ifndef CMSIS_DEPRECATED + #define CMSIS_DEPRECATED __attribute__((deprecated)) +#endif + +#ifndef __UNALIGNED_UINT16_READ + #pragma language=save + #pragma language=extended + __IAR_FT uint16_t __iar_uint16_read(void const *ptr) + { + return *(__packed uint16_t*)(ptr); + } + #pragma language=restore + #define __UNALIGNED_UINT16_READ(PTR) __iar_uint16_read(PTR) +#endif + + +#ifndef __UNALIGNED_UINT16_WRITE + #pragma language=save + #pragma language=extended + __IAR_FT void __iar_uint16_write(void const *ptr, uint16_t val) + { + *(__packed uint16_t*)(ptr) = val;; + } + #pragma language=restore + #define __UNALIGNED_UINT16_WRITE(PTR,VAL) __iar_uint16_write(PTR,VAL) +#endif + +#ifndef __UNALIGNED_UINT32_READ + #pragma language=save + #pragma language=extended + __IAR_FT uint32_t __iar_uint32_read(void const *ptr) + { + return *(__packed uint32_t*)(ptr); + } + #pragma language=restore + #define __UNALIGNED_UINT32_READ(PTR) __iar_uint32_read(PTR) +#endif + +#ifndef __UNALIGNED_UINT32_WRITE + #pragma language=save + #pragma language=extended + __IAR_FT void __iar_uint32_write(void const *ptr, uint32_t val) + { + *(__packed uint32_t*)(ptr) = val;; + } + #pragma language=restore + #define __UNALIGNED_UINT32_WRITE(PTR,VAL) __iar_uint32_write(PTR,VAL) +#endif + +#if 0 +#ifndef __UNALIGNED_UINT32 /* deprecated */ + #pragma language=save + #pragma 
language=extended + __packed struct __iar_u32 { uint32_t v; }; + #pragma language=restore + #define __UNALIGNED_UINT32(PTR) (((struct __iar_u32 *)(PTR))->v) +#endif +#endif + +#ifndef __USED + #if __ICCARM_V8 + #define __USED __attribute__((used)) + #else + #define __USED _Pragma("__root") + #endif +#endif + +#ifndef __WEAK + #if __ICCARM_V8 + #define __WEAK __attribute__((weak)) + #else + #define __WEAK _Pragma("__weak") + #endif +#endif + + +#ifndef __ICCARM_INTRINSICS_VERSION__ + #define __ICCARM_INTRINSICS_VERSION__ 0 +#endif + +#if __ICCARM_INTRINSICS_VERSION__ == 2 + + #if defined(__CLZ) + #undef __CLZ + #endif + #if defined(__REVSH) + #undef __REVSH + #endif + #if defined(__RBIT) + #undef __RBIT + #endif + #if defined(__SSAT) + #undef __SSAT + #endif + #if defined(__USAT) + #undef __USAT + #endif + + #include "iccarm_builtin.h" + + #define __enable_irq __iar_builtin_enable_interrupt + #define __disable_irq __iar_builtin_disable_interrupt + #define __enable_fault_irq __iar_builtin_enable_fiq + #define __disable_fault_irq __iar_builtin_disable_fiq + #define __arm_rsr __iar_builtin_rsr + #define __arm_wsr __iar_builtin_wsr + + #if __FPU_PRESENT + #define __get_FPSCR() (__arm_rsr("FPSCR")) + #else + #define __get_FPSCR() ( 0 ) + #endif + + #define __set_FPSCR(VALUE) (__arm_wsr("FPSCR", VALUE)) + + #define __get_CPSR() (__arm_rsr("CPSR")) + #define __get_mode() (__get_CPSR() & 0x1FU) + + #define __set_CPSR(VALUE) (__arm_wsr("CPSR", (VALUE))) + #define __set_mode(VALUE) (__arm_wsr("CPSR_c", (VALUE))) + + + #define __get_FPEXC() (__arm_rsr("FPEXC")) + #define __set_FPEXC(VALUE) (__arm_wsr("FPEXC", VALUE)) + + #define __get_CP(cp, op1, RT, CRn, CRm, op2) \ + ((RT) = __arm_rsr("p" # cp ":" # op1 ":c" # CRn ":c" # CRm ":" # op2)) + + #define __set_CP(cp, op1, RT, CRn, CRm, op2) \ + (__arm_wsr("p" # cp ":" # op1 ":c" # CRn ":c" # CRm ":" # op2, (RT))) + + #define __get_CP64(cp, op1, Rt, CRm) \ + __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" ) + + #define __set_CP64(cp, op1, Rt, CRm) \ + __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" ) + + #include "cmsis_cp15.h" + + #define __NOP __iar_builtin_no_operation + + #define __CLZ __iar_builtin_CLZ + #define __CLREX __iar_builtin_CLREX + + #define __DMB __iar_builtin_DMB + #define __DSB __iar_builtin_DSB + #define __ISB __iar_builtin_ISB + + #define __LDREXB __iar_builtin_LDREXB + #define __LDREXH __iar_builtin_LDREXH + #define __LDREXW __iar_builtin_LDREX + + #define __RBIT __iar_builtin_RBIT + #define __REV __iar_builtin_REV + #define __REV16 __iar_builtin_REV16 + + __IAR_FT int16_t __REVSH(int16_t val) + { + return (int16_t) __iar_builtin_REVSH(val); + } + + #define __ROR __iar_builtin_ROR + #define __RRX __iar_builtin_RRX + + #define __SEV __iar_builtin_SEV + + #define __SSAT __iar_builtin_SSAT + + #define __STREXB __iar_builtin_STREXB + #define __STREXH __iar_builtin_STREXH + #define __STREXW __iar_builtin_STREX + + #define __USAT __iar_builtin_USAT + + #define __WFE __iar_builtin_WFE + #define __WFI __iar_builtin_WFI + + #define __SADD8 __iar_builtin_SADD8 + #define __QADD8 __iar_builtin_QADD8 + #define __SHADD8 __iar_builtin_SHADD8 + #define __UADD8 __iar_builtin_UADD8 + #define __UQADD8 __iar_builtin_UQADD8 + #define __UHADD8 __iar_builtin_UHADD8 + #define __SSUB8 __iar_builtin_SSUB8 + #define __QSUB8 __iar_builtin_QSUB8 + #define __SHSUB8 __iar_builtin_SHSUB8 + #define __USUB8 __iar_builtin_USUB8 + #define __UQSUB8 __iar_builtin_UQSUB8 + #define __UHSUB8 
__iar_builtin_UHSUB8 + #define __SADD16 __iar_builtin_SADD16 + #define __QADD16 __iar_builtin_QADD16 + #define __SHADD16 __iar_builtin_SHADD16 + #define __UADD16 __iar_builtin_UADD16 + #define __UQADD16 __iar_builtin_UQADD16 + #define __UHADD16 __iar_builtin_UHADD16 + #define __SSUB16 __iar_builtin_SSUB16 + #define __QSUB16 __iar_builtin_QSUB16 + #define __SHSUB16 __iar_builtin_SHSUB16 + #define __USUB16 __iar_builtin_USUB16 + #define __UQSUB16 __iar_builtin_UQSUB16 + #define __UHSUB16 __iar_builtin_UHSUB16 + #define __SASX __iar_builtin_SASX + #define __QASX __iar_builtin_QASX + #define __SHASX __iar_builtin_SHASX + #define __UASX __iar_builtin_UASX + #define __UQASX __iar_builtin_UQASX + #define __UHASX __iar_builtin_UHASX + #define __SSAX __iar_builtin_SSAX + #define __QSAX __iar_builtin_QSAX + #define __SHSAX __iar_builtin_SHSAX + #define __USAX __iar_builtin_USAX + #define __UQSAX __iar_builtin_UQSAX + #define __UHSAX __iar_builtin_UHSAX + #define __USAD8 __iar_builtin_USAD8 + #define __USADA8 __iar_builtin_USADA8 + #define __SSAT16 __iar_builtin_SSAT16 + #define __USAT16 __iar_builtin_USAT16 + #define __UXTB16 __iar_builtin_UXTB16 + #define __UXTAB16 __iar_builtin_UXTAB16 + #define __SXTB16 __iar_builtin_SXTB16 + #define __SXTAB16 __iar_builtin_SXTAB16 + #define __SMUAD __iar_builtin_SMUAD + #define __SMUADX __iar_builtin_SMUADX + #define __SMMLA __iar_builtin_SMMLA + #define __SMLAD __iar_builtin_SMLAD + #define __SMLADX __iar_builtin_SMLADX + #define __SMLALD __iar_builtin_SMLALD + #define __SMLALDX __iar_builtin_SMLALDX + #define __SMUSD __iar_builtin_SMUSD + #define __SMUSDX __iar_builtin_SMUSDX + #define __SMLSD __iar_builtin_SMLSD + #define __SMLSDX __iar_builtin_SMLSDX + #define __SMLSLD __iar_builtin_SMLSLD + #define __SMLSLDX __iar_builtin_SMLSLDX + #define __SEL __iar_builtin_SEL + #define __QADD __iar_builtin_QADD + #define __QSUB __iar_builtin_QSUB + #define __PKHBT __iar_builtin_PKHBT + #define __PKHTB __iar_builtin_PKHTB + +#else /* __ICCARM_INTRINSICS_VERSION__ == 2 */ + + #if !__FPU_PRESENT + #define __get_FPSCR __cmsis_iar_get_FPSR_not_active + #endif + + #ifdef __INTRINSICS_INCLUDED + #error intrinsics.h is already included previously! 
+  #endif
+
+  #include <intrinsics.h>
+
+  #if !__FPU_PRESENT
+  #define __get_FPSCR() (0)
+  #endif
+
+  #pragma diag_suppress=Pe940
+  #pragma diag_suppress=Pe177
+
+  #define __enable_irq        __enable_interrupt
+  #define __disable_irq       __disable_interrupt
+  #define __enable_fault_irq  __enable_fiq
+  #define __disable_fault_irq __disable_fiq
+  #define __NOP               __no_operation
+
+  #define __get_xPSR          __get_PSR
+
+  __IAR_FT void __set_mode(uint32_t mode)
+  {
+    __ASM volatile("MSR  cpsr_c, %0" : : "r" (mode) : "memory");
+  }
+
+  __IAR_FT uint32_t __LDREXW(uint32_t volatile *ptr)
+  {
+    return __LDREX((unsigned long *)ptr);
+  }
+
+  __IAR_FT uint32_t __STREXW(uint32_t value, uint32_t volatile *ptr)
+  {
+    return __STREX(value, (unsigned long *)ptr);
+  }
+
+
+  __IAR_FT uint32_t __RRX(uint32_t value)
+  {
+    uint32_t result;
+    __ASM("RRX %0, %1" : "=r"(result) : "r" (value) : "cc");
+    return(result);
+  }
+
+
+  __IAR_FT uint32_t __ROR(uint32_t op1, uint32_t op2)
+  {
+    /* guard against op2 == 0: shifting a 32-bit value by 32 bits is undefined */
+    op2 %= 32U;
+    if (op2 == 0U)
+    {
+      return op1;
+    }
+    return (op1 >> op2) | (op1 << (32U - op2));
+  }
+
+  __IAR_FT uint32_t __get_FPEXC(void)
+  {
+  #if (__FPU_PRESENT == 1)
+    uint32_t result;
+    __ASM volatile("VMRS %0, fpexc" : "=r" (result) : : "memory");
+    return(result);
+  #else
+    return(0);
+  #endif
+  }
+
+  __IAR_FT void __set_FPEXC(uint32_t fpexc)
+  {
+  #if (__FPU_PRESENT == 1)
+    __ASM volatile ("VMSR fpexc, %0" : : "r" (fpexc) : "memory");
+  #endif
+  }
+
+
+  #define __get_CP(cp, op1, Rt, CRn, CRm, op2) \
+    __ASM volatile("MRC p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : "=r" (Rt) : : "memory" )
+  #define __set_CP(cp, op1, Rt, CRn, CRm, op2) \
+    __ASM volatile("MCR p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : : "r" (Rt) : "memory" )
+  #define __get_CP64(cp, op1, Rt, CRm) \
+    __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" )
+  #define __set_CP64(cp, op1, Rt, CRm) \
+    __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" )
+
+  #include "cmsis_cp15.h"
+
+#endif   /* __ICCARM_INTRINSICS_VERSION__ == 2 */
+
+#define __BKPT(value)    __asm volatile ("BKPT %0" : : "i"(value))
+
+
+__IAR_FT uint32_t __get_SP_usr(void)
+{
+  uint32_t cpsr;
+  uint32_t result;
+  __ASM volatile(
+    "MRS     %0, cpsr   \n"
+    "CPS     #0x1F      \n" // no effect in USR mode
+    "MOV     %1, sp     \n"
+    "MSR     cpsr_c, %2 \n" // no effect in USR mode
+    "ISB" :  "=r"(cpsr), "=r"(result) : "r"(cpsr) : "memory"
+   );
+  return result;
+}
+
+__IAR_FT void __set_SP_usr(uint32_t topOfProcStack)
+{
+  uint32_t cpsr;
+  __ASM volatile(
+    "MRS     %0, cpsr   \n"
+    "CPS     #0x1F      \n" // no effect in USR mode
+    "MOV     sp, %1     \n"
+    "MSR     cpsr_c, %2 \n" // no effect in USR mode
+    "ISB" : "=r"(cpsr) : "r" (topOfProcStack), "r"(cpsr) : "memory"
+   );
+}
+
+#define __get_mode()                (__get_CPSR() & 0x1FU)
+
+__STATIC_INLINE
+void __FPU_Enable(void)
+{
+  __ASM volatile(
+    //Permit access to VFP/NEON, registers by modifying CPACR
+    "        MRC     p15,0,R1,c1,c0,2  \n"
+    "        ORR     R1,R1,#0x00F00000 \n"
+    "        MCR     p15,0,R1,c1,c0,2  \n"
+
+    //Ensure that subsequent instructions occur in the context of VFP/NEON access permitted
+    "        ISB                       \n"
+
+    //Enable VFP/NEON
+    "        VMRS    R1,FPEXC          \n"
+    "        ORR     R1,R1,#0x40000000 \n"
+    "        VMSR    FPEXC,R1          \n"
+
+    //Initialise VFP/NEON registers to 0
+    "        MOV     R2,#0             \n"
+
+    //Initialise D16 registers to 0
+    "        VMOV    D0, R2,R2         \n"
+    "        VMOV    D1, R2,R2         \n"
+    "        VMOV    D2, R2,R2         \n"
+    "        VMOV    D3, R2,R2         \n"
+    "        VMOV    D4, R2,R2         \n"
+    "        VMOV    D5, R2,R2         \n"
+    "        VMOV    D6, R2,R2         \n"
+    "        VMOV    D7, R2,R2         \n"
+    "        VMOV    D8, R2,R2         \n"
+    "        VMOV    D9, R2,R2         \n"
+    "        VMOV    D10,R2,R2         \n"
+    "        VMOV    D11,R2,R2         \n"
+    "        VMOV    D12,R2,R2         \n"
+    "        VMOV    D13,R2,R2         \n"
+    "        VMOV    D14,R2,R2         \n"
+    "        VMOV    D15,R2,R2         \n"
+
+#ifdef __ARM_ADVANCED_SIMD__
+    //Initialise D32 registers to 0
+    "        VMOV    D16,R2,R2         \n"
+    "        VMOV    D17,R2,R2         \n"
+    "        VMOV    D18,R2,R2         \n"
+    "        VMOV    D19,R2,R2         \n"
+    "        VMOV    D20,R2,R2         \n"
+    "        VMOV    D21,R2,R2         \n"
+    "        VMOV    D22,R2,R2         \n"
+    "        VMOV    D23,R2,R2         \n"
+    "        VMOV    D24,R2,R2         \n"
+    "        VMOV    D25,R2,R2         \n"
+    "        VMOV    D26,R2,R2         \n"
+    "        VMOV    D27,R2,R2         \n"
+    "        VMOV    D28,R2,R2         \n"
+    "        VMOV    D29,R2,R2         \n"
+    "        VMOV    D30,R2,R2         \n"
+    "        VMOV    D31,R2,R2         \n"
+#endif
+
+    //Initialise FPSCR to a known state
+    "        VMRS    R2,FPSCR          \n"
+    "        MOV32   R3,#0x00086060    \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.
+    "        AND     R2,R2,R3          \n"
+    "        VMSR    FPSCR,R2          \n");
+}
+
+
+
+#undef __IAR_FT
+#undef __ICCARM_V8
+
+#pragma diag_default=Pe940
+#pragma diag_default=Pe177
+
+#endif /* __CMSIS_ICCARM_H__ */
diff --git a/midi-dials/Drivers/CMSIS/Core_A/Include/core_ca.h b/midi-dials/Drivers/CMSIS/Core_A/Include/core_ca.h
new file mode 100644
index 0000000..c7b451f
--- /dev/null
+++ b/midi-dials/Drivers/CMSIS/Core_A/Include/core_ca.h
@@ -0,0 +1,2614 @@
+/**************************************************************************//**
+ * @file     core_ca.h
+ * @brief    CMSIS Cortex-A Core Peripheral Access Layer Header File
+ * @version  V1.0.1
+ * @date     07. May 2018
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2017 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifdef __cplusplus + extern "C" { +#endif + +#ifndef __CORE_CA_H_GENERIC +#define __CORE_CA_H_GENERIC + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ + +/* CMSIS CA definitions */ +#define __CA_CMSIS_VERSION_MAIN (1U) /*!< \brief [31:16] CMSIS-Core(A) main version */ +#define __CA_CMSIS_VERSION_SUB (1U) /*!< \brief [15:0] CMSIS-Core(A) sub version */ +#define __CA_CMSIS_VERSION ((__CA_CMSIS_VERSION_MAIN << 16U) | \ + __CA_CMSIS_VERSION_SUB ) /*!< \brief CMSIS-Core(A) version number */ + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #if (__FPU_PRESENT == 1) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_PCS_VFP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #if (__FPU_PRESENT == 1) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TMS470__ ) + #if defined __TI_VFP_SUPPORT__ + #if (__FPU_PRESENT == 1) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if (__FPU_PRESENT == 1) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #if (__FPU_PRESENT == 1) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CA_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CA_H_DEPENDANT +#define __CORE_CA_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + + /* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CA_REV + #define __CA_REV 0x0000U + #warning "__CA_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __GIC_PRESENT + #define __GIC_PRESENT 1U + #warning "__GIC_PRESENT not defined in device header file; using default!" 
+ #endif + + #ifndef __TIM_PRESENT + #define __TIM_PRESENT 1U + #warning "__TIM_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __L2C_PRESENT + #define __L2C_PRESENT 0U + #warning "__L2C_PRESENT not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +#ifdef __cplusplus + #define __I volatile /*!< \brief Defines 'read only' permissions */ +#else + #define __I volatile const /*!< \brief Defines 'read only' permissions */ +#endif +#define __O volatile /*!< \brief Defines 'write only' permissions */ +#define __IO volatile /*!< \brief Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*!< \brief Defines 'read only' structure member permissions */ +#define __OM volatile /*!< \brief Defines 'write only' structure member permissions */ +#define __IOM volatile /*!< \brief Defines 'read / write' structure member permissions */ +#define RESERVED(N, T) T RESERVED##N; // placeholder struct members used for "reserved" areas + + /******************************************************************************* + * Register Abstraction + Core Register contain: + - CPSR + - CP15 Registers + - L2C-310 Cache Controller + - Generic Interrupt Controller Distributor + - Generic Interrupt Controller Interface + ******************************************************************************/ + +/* Core Register CPSR */ +typedef union +{ + struct + { + uint32_t M:5; /*!< \brief bit: 0.. 4 Mode field */ + uint32_t T:1; /*!< \brief bit: 5 Thumb execution state bit */ + uint32_t F:1; /*!< \brief bit: 6 FIQ mask bit */ + uint32_t I:1; /*!< \brief bit: 7 IRQ mask bit */ + uint32_t A:1; /*!< \brief bit: 8 Asynchronous abort mask bit */ + uint32_t E:1; /*!< \brief bit: 9 Endianness execution state bit */ + uint32_t IT1:6; /*!< \brief bit: 10..15 If-Then execution state bits 2-7 */ + uint32_t GE:4; /*!< \brief bit: 16..19 Greater than or Equal flags */ + RESERVED(0:4, uint32_t) + uint32_t J:1; /*!< \brief bit: 24 Jazelle bit */ + uint32_t IT0:2; /*!< \brief bit: 25..26 If-Then execution state bits 0-1 */ + uint32_t Q:1; /*!< \brief bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< \brief bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< \brief bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< \brief bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< \brief bit: 31 Negative condition code flag */ + } b; /*!< \brief Structure used for bit access */ + uint32_t w; /*!< \brief Type used for word access */ +} CPSR_Type; + + + +/* CPSR Register Definitions */ +#define CPSR_N_Pos 31U /*!< \brief CPSR: N Position */ +#define CPSR_N_Msk (1UL << CPSR_N_Pos) /*!< \brief CPSR: N Mask */ + +#define CPSR_Z_Pos 30U /*!< \brief CPSR: Z Position */ +#define CPSR_Z_Msk (1UL << CPSR_Z_Pos) /*!< \brief CPSR: Z Mask */ + +#define CPSR_C_Pos 29U /*!< \brief CPSR: C Position */ +#define CPSR_C_Msk (1UL << CPSR_C_Pos) /*!< \brief CPSR: C Mask */ + +#define CPSR_V_Pos 28U /*!< \brief CPSR: V Position */ +#define CPSR_V_Msk (1UL << CPSR_V_Pos) /*!< \brief CPSR: V Mask */ + +#define CPSR_Q_Pos 27U /*!< \brief CPSR: Q Position */ +#define CPSR_Q_Msk (1UL << CPSR_Q_Pos) /*!< \brief CPSR: Q Mask */ + +#define CPSR_IT0_Pos 25U /*!< \brief CPSR: IT0 Position */ +#define CPSR_IT0_Msk (3UL << CPSR_IT0_Pos) /*!< \brief CPSR: IT0 Mask */ + +#define CPSR_J_Pos 24U /*!< \brief CPSR: J Position */ +#define CPSR_J_Msk (1UL << CPSR_J_Pos) 
/*!< \brief CPSR: J Mask */ + +#define CPSR_GE_Pos 16U /*!< \brief CPSR: GE Position */ +#define CPSR_GE_Msk (0xFUL << CPSR_GE_Pos) /*!< \brief CPSR: GE Mask */ + +#define CPSR_IT1_Pos 10U /*!< \brief CPSR: IT1 Position */ +#define CPSR_IT1_Msk (0x3FUL << CPSR_IT1_Pos) /*!< \brief CPSR: IT1 Mask */ + +#define CPSR_E_Pos 9U /*!< \brief CPSR: E Position */ +#define CPSR_E_Msk (1UL << CPSR_E_Pos) /*!< \brief CPSR: E Mask */ + +#define CPSR_A_Pos 8U /*!< \brief CPSR: A Position */ +#define CPSR_A_Msk (1UL << CPSR_A_Pos) /*!< \brief CPSR: A Mask */ + +#define CPSR_I_Pos 7U /*!< \brief CPSR: I Position */ +#define CPSR_I_Msk (1UL << CPSR_I_Pos) /*!< \brief CPSR: I Mask */ + +#define CPSR_F_Pos 6U /*!< \brief CPSR: F Position */ +#define CPSR_F_Msk (1UL << CPSR_F_Pos) /*!< \brief CPSR: F Mask */ + +#define CPSR_T_Pos 5U /*!< \brief CPSR: T Position */ +#define CPSR_T_Msk (1UL << CPSR_T_Pos) /*!< \brief CPSR: T Mask */ + +#define CPSR_M_Pos 0U /*!< \brief CPSR: M Position */ +#define CPSR_M_Msk (0x1FUL << CPSR_M_Pos) /*!< \brief CPSR: M Mask */ + +#define CPSR_M_USR 0x10U /*!< \brief CPSR: M User mode (PL0) */ +#define CPSR_M_FIQ 0x11U /*!< \brief CPSR: M Fast Interrupt mode (PL1) */ +#define CPSR_M_IRQ 0x12U /*!< \brief CPSR: M Interrupt mode (PL1) */ +#define CPSR_M_SVC 0x13U /*!< \brief CPSR: M Supervisor mode (PL1) */ +#define CPSR_M_MON 0x16U /*!< \brief CPSR: M Monitor mode (PL1) */ +#define CPSR_M_ABT 0x17U /*!< \brief CPSR: M Abort mode (PL1) */ +#define CPSR_M_HYP 0x1AU /*!< \brief CPSR: M Hypervisor mode (PL2) */ +#define CPSR_M_UND 0x1BU /*!< \brief CPSR: M Undefined mode (PL1) */ +#define CPSR_M_SYS 0x1FU /*!< \brief CPSR: M System mode (PL1) */ + +/* CP15 Register SCTLR */ +typedef union +{ + struct + { + uint32_t M:1; /*!< \brief bit: 0 MMU enable */ + uint32_t A:1; /*!< \brief bit: 1 Alignment check enable */ + uint32_t C:1; /*!< \brief bit: 2 Cache enable */ + RESERVED(0:2, uint32_t) + uint32_t CP15BEN:1; /*!< \brief bit: 5 CP15 barrier enable */ + RESERVED(1:1, uint32_t) + uint32_t B:1; /*!< \brief bit: 7 Endianness model */ + RESERVED(2:2, uint32_t) + uint32_t SW:1; /*!< \brief bit: 10 SWP and SWPB enable */ + uint32_t Z:1; /*!< \brief bit: 11 Branch prediction enable */ + uint32_t I:1; /*!< \brief bit: 12 Instruction cache enable */ + uint32_t V:1; /*!< \brief bit: 13 Vectors bit */ + uint32_t RR:1; /*!< \brief bit: 14 Round Robin select */ + RESERVED(3:2, uint32_t) + uint32_t HA:1; /*!< \brief bit: 17 Hardware Access flag enable */ + RESERVED(4:1, uint32_t) + uint32_t WXN:1; /*!< \brief bit: 19 Write permission implies XN */ + uint32_t UWXN:1; /*!< \brief bit: 20 Unprivileged write permission implies PL1 XN */ + uint32_t FI:1; /*!< \brief bit: 21 Fast interrupts configuration enable */ + uint32_t U:1; /*!< \brief bit: 22 Alignment model */ + RESERVED(5:1, uint32_t) + uint32_t VE:1; /*!< \brief bit: 24 Interrupt Vectors Enable */ + uint32_t EE:1; /*!< \brief bit: 25 Exception Endianness */ + RESERVED(6:1, uint32_t) + uint32_t NMFI:1; /*!< \brief bit: 27 Non-maskable FIQ (NMFI) support */ + uint32_t TRE:1; /*!< \brief bit: 28 TEX remap enable. 
*/ + uint32_t AFE:1; /*!< \brief bit: 29 Access flag enable */ + uint32_t TE:1; /*!< \brief bit: 30 Thumb Exception enable */ + RESERVED(7:1, uint32_t) + } b; /*!< \brief Structure used for bit access */ + uint32_t w; /*!< \brief Type used for word access */ +} SCTLR_Type; + +#define SCTLR_TE_Pos 30U /*!< \brief SCTLR: TE Position */ +#define SCTLR_TE_Msk (1UL << SCTLR_TE_Pos) /*!< \brief SCTLR: TE Mask */ + +#define SCTLR_AFE_Pos 29U /*!< \brief SCTLR: AFE Position */ +#define SCTLR_AFE_Msk (1UL << SCTLR_AFE_Pos) /*!< \brief SCTLR: AFE Mask */ + +#define SCTLR_TRE_Pos 28U /*!< \brief SCTLR: TRE Position */ +#define SCTLR_TRE_Msk (1UL << SCTLR_TRE_Pos) /*!< \brief SCTLR: TRE Mask */ + +#define SCTLR_NMFI_Pos 27U /*!< \brief SCTLR: NMFI Position */ +#define SCTLR_NMFI_Msk (1UL << SCTLR_NMFI_Pos) /*!< \brief SCTLR: NMFI Mask */ + +#define SCTLR_EE_Pos 25U /*!< \brief SCTLR: EE Position */ +#define SCTLR_EE_Msk (1UL << SCTLR_EE_Pos) /*!< \brief SCTLR: EE Mask */ + +#define SCTLR_VE_Pos 24U /*!< \brief SCTLR: VE Position */ +#define SCTLR_VE_Msk (1UL << SCTLR_VE_Pos) /*!< \brief SCTLR: VE Mask */ + +#define SCTLR_U_Pos 22U /*!< \brief SCTLR: U Position */ +#define SCTLR_U_Msk (1UL << SCTLR_U_Pos) /*!< \brief SCTLR: U Mask */ + +#define SCTLR_FI_Pos 21U /*!< \brief SCTLR: FI Position */ +#define SCTLR_FI_Msk (1UL << SCTLR_FI_Pos) /*!< \brief SCTLR: FI Mask */ + +#define SCTLR_UWXN_Pos 20U /*!< \brief SCTLR: UWXN Position */ +#define SCTLR_UWXN_Msk (1UL << SCTLR_UWXN_Pos) /*!< \brief SCTLR: UWXN Mask */ + +#define SCTLR_WXN_Pos 19U /*!< \brief SCTLR: WXN Position */ +#define SCTLR_WXN_Msk (1UL << SCTLR_WXN_Pos) /*!< \brief SCTLR: WXN Mask */ + +#define SCTLR_HA_Pos 17U /*!< \brief SCTLR: HA Position */ +#define SCTLR_HA_Msk (1UL << SCTLR_HA_Pos) /*!< \brief SCTLR: HA Mask */ + +#define SCTLR_RR_Pos 14U /*!< \brief SCTLR: RR Position */ +#define SCTLR_RR_Msk (1UL << SCTLR_RR_Pos) /*!< \brief SCTLR: RR Mask */ + +#define SCTLR_V_Pos 13U /*!< \brief SCTLR: V Position */ +#define SCTLR_V_Msk (1UL << SCTLR_V_Pos) /*!< \brief SCTLR: V Mask */ + +#define SCTLR_I_Pos 12U /*!< \brief SCTLR: I Position */ +#define SCTLR_I_Msk (1UL << SCTLR_I_Pos) /*!< \brief SCTLR: I Mask */ + +#define SCTLR_Z_Pos 11U /*!< \brief SCTLR: Z Position */ +#define SCTLR_Z_Msk (1UL << SCTLR_Z_Pos) /*!< \brief SCTLR: Z Mask */ + +#define SCTLR_SW_Pos 10U /*!< \brief SCTLR: SW Position */ +#define SCTLR_SW_Msk (1UL << SCTLR_SW_Pos) /*!< \brief SCTLR: SW Mask */ + +#define SCTLR_B_Pos 7U /*!< \brief SCTLR: B Position */ +#define SCTLR_B_Msk (1UL << SCTLR_B_Pos) /*!< \brief SCTLR: B Mask */ + +#define SCTLR_CP15BEN_Pos 5U /*!< \brief SCTLR: CP15BEN Position */ +#define SCTLR_CP15BEN_Msk (1UL << SCTLR_CP15BEN_Pos) /*!< \brief SCTLR: CP15BEN Mask */ + +#define SCTLR_C_Pos 2U /*!< \brief SCTLR: C Position */ +#define SCTLR_C_Msk (1UL << SCTLR_C_Pos) /*!< \brief SCTLR: C Mask */ + +#define SCTLR_A_Pos 1U /*!< \brief SCTLR: A Position */ +#define SCTLR_A_Msk (1UL << SCTLR_A_Pos) /*!< \brief SCTLR: A Mask */ + +#define SCTLR_M_Pos 0U /*!< \brief SCTLR: M Position */ +#define SCTLR_M_Msk (1UL << SCTLR_M_Pos) /*!< \brief SCTLR: M Mask */ + +/* CP15 Register ACTLR */ +typedef union +{ +#if __CORTEX_A == 5 || defined(DOXYGEN) + /** \brief Structure used for bit access on Cortex-A5 */ + struct + { + uint32_t FW:1; /*!< \brief bit: 0 Cache and TLB maintenance broadcast */ + RESERVED(0:5, uint32_t) + uint32_t SMP:1; /*!< \brief bit: 6 Enables coherent requests to the processor */ + uint32_t EXCL:1; /*!< \brief bit: 7 Exclusive L1/L2 cache 
control */ + RESERVED(1:2, uint32_t) + uint32_t DODMBS:1; /*!< \brief bit: 10 Disable optimized data memory barrier behavior */ + uint32_t DWBST:1; /*!< \brief bit: 11 AXI data write bursts to Normal memory */ + uint32_t RADIS:1; /*!< \brief bit: 12 L1 Data Cache read-allocate mode disable */ + uint32_t L1PCTL:2; /*!< \brief bit:13..14 L1 Data prefetch control */ + uint32_t BP:2; /*!< \brief bit:16..15 Branch prediction policy */ + uint32_t RSDIS:1; /*!< \brief bit: 17 Disable return stack operation */ + uint32_t BTDIS:1; /*!< \brief bit: 18 Disable indirect Branch Target Address Cache (BTAC) */ + RESERVED(3:9, uint32_t) + uint32_t DBDI:1; /*!< \brief bit: 28 Disable branch dual issue */ + RESERVED(7:3, uint32_t) + } b; +#endif +#if __CORTEX_A == 7 || defined(DOXYGEN) + /** \brief Structure used for bit access on Cortex-A7 */ + struct + { + RESERVED(0:6, uint32_t) + uint32_t SMP:1; /*!< \brief bit: 6 Enables coherent requests to the processor */ + RESERVED(1:3, uint32_t) + uint32_t DODMBS:1; /*!< \brief bit: 10 Disable optimized data memory barrier behavior */ + uint32_t L2RADIS:1; /*!< \brief bit: 11 L2 Data Cache read-allocate mode disable */ + uint32_t L1RADIS:1; /*!< \brief bit: 12 L1 Data Cache read-allocate mode disable */ + uint32_t L1PCTL:2; /*!< \brief bit:13..14 L1 Data prefetch control */ + uint32_t DDVM:1; /*!< \brief bit: 15 Disable Distributed Virtual Memory (DVM) transactions */ + RESERVED(3:12, uint32_t) + uint32_t DDI:1; /*!< \brief bit: 28 Disable dual issue */ + RESERVED(7:3, uint32_t) + } b; +#endif +#if __CORTEX_A == 9 || defined(DOXYGEN) + /** \brief Structure used for bit access on Cortex-A9 */ + struct + { + uint32_t FW:1; /*!< \brief bit: 0 Cache and TLB maintenance broadcast */ + RESERVED(0:1, uint32_t) + uint32_t L1PE:1; /*!< \brief bit: 2 Dside prefetch */ + uint32_t WFLZM:1; /*!< \brief bit: 3 Cache and TLB maintenance broadcast */ + RESERVED(1:2, uint32_t) + uint32_t SMP:1; /*!< \brief bit: 6 Enables coherent requests to the processor */ + uint32_t EXCL:1; /*!< \brief bit: 7 Exclusive L1/L2 cache control */ + uint32_t AOW:1; /*!< \brief bit: 8 Enable allocation in one cache way only */ + uint32_t PARITY:1; /*!< \brief bit: 9 Support for parity checking, if implemented */ + RESERVED(7:22, uint32_t) + } b; +#endif + uint32_t w; /*!< \brief Type used for word access */ +} ACTLR_Type; + +#define ACTLR_DDI_Pos 28U /*!< \brief ACTLR: DDI Position */ +#define ACTLR_DDI_Msk (1UL << ACTLR_DDI_Pos) /*!< \brief ACTLR: DDI Mask */ + +#define ACTLR_DBDI_Pos 28U /*!< \brief ACTLR: DBDI Position */ +#define ACTLR_DBDI_Msk (1UL << ACTLR_DBDI_Pos) /*!< \brief ACTLR: DBDI Mask */ + +#define ACTLR_BTDIS_Pos 18U /*!< \brief ACTLR: BTDIS Position */ +#define ACTLR_BTDIS_Msk (1UL << ACTLR_BTDIS_Pos) /*!< \brief ACTLR: BTDIS Mask */ + +#define ACTLR_RSDIS_Pos 17U /*!< \brief ACTLR: RSDIS Position */ +#define ACTLR_RSDIS_Msk (1UL << ACTLR_RSDIS_Pos) /*!< \brief ACTLR: RSDIS Mask */ + +#define ACTLR_BP_Pos 15U /*!< \brief ACTLR: BP Position */ +#define ACTLR_BP_Msk (3UL << ACTLR_BP_Pos) /*!< \brief ACTLR: BP Mask */ + +#define ACTLR_DDVM_Pos 15U /*!< \brief ACTLR: DDVM Position */ +#define ACTLR_DDVM_Msk (1UL << ACTLR_DDVM_Pos) /*!< \brief ACTLR: DDVM Mask */ + +#define ACTLR_L1PCTL_Pos 13U /*!< \brief ACTLR: L1PCTL Position */ +#define ACTLR_L1PCTL_Msk (3UL << ACTLR_L1PCTL_Pos) /*!< \brief ACTLR: L1PCTL Mask */ + +#define ACTLR_RADIS_Pos 12U /*!< \brief ACTLR: RADIS Position */ +#define ACTLR_RADIS_Msk (1UL << ACTLR_RADIS_Pos) /*!< \brief ACTLR: RADIS Mask */ + +#define 
ACTLR_L1RADIS_Pos     12U                                     /*!< \brief ACTLR: L1RADIS Position */
+#define ACTLR_L1RADIS_Msk     (1UL << ACTLR_L1RADIS_Pos)      /*!< \brief ACTLR: L1RADIS Mask */
+
+#define ACTLR_DWBST_Pos       11U                             /*!< \brief ACTLR: DWBST Position */
+#define ACTLR_DWBST_Msk       (1UL << ACTLR_DWBST_Pos)        /*!< \brief ACTLR: DWBST Mask */
+
+#define ACTLR_L2RADIS_Pos     11U                             /*!< \brief ACTLR: L2RADIS Position */
+#define ACTLR_L2RADIS_Msk     (1UL << ACTLR_L2RADIS_Pos)      /*!< \brief ACTLR: L2RADIS Mask */
+
+#define ACTLR_DODMBS_Pos      10U                             /*!< \brief ACTLR: DODMBS Position */
+#define ACTLR_DODMBS_Msk      (1UL << ACTLR_DODMBS_Pos)       /*!< \brief ACTLR: DODMBS Mask */
+
+#define ACTLR_PARITY_Pos       9U                             /*!< \brief ACTLR: PARITY Position */
+#define ACTLR_PARITY_Msk      (1UL << ACTLR_PARITY_Pos)       /*!< \brief ACTLR: PARITY Mask */
+
+#define ACTLR_AOW_Pos          8U                             /*!< \brief ACTLR: AOW Position */
+#define ACTLR_AOW_Msk         (1UL << ACTLR_AOW_Pos)          /*!< \brief ACTLR: AOW Mask */
+
+#define ACTLR_EXCL_Pos         7U                             /*!< \brief ACTLR: EXCL Position */
+#define ACTLR_EXCL_Msk        (1UL << ACTLR_EXCL_Pos)         /*!< \brief ACTLR: EXCL Mask */
+
+#define ACTLR_SMP_Pos          6U                             /*!< \brief ACTLR: SMP Position */
+#define ACTLR_SMP_Msk         (1UL << ACTLR_SMP_Pos)          /*!< \brief ACTLR: SMP Mask */
+
+#define ACTLR_WFLZM_Pos        3U                             /*!< \brief ACTLR: WFLZM Position */
+#define ACTLR_WFLZM_Msk       (1UL << ACTLR_WFLZM_Pos)        /*!< \brief ACTLR: WFLZM Mask */
+
+#define ACTLR_L1PE_Pos         2U                             /*!< \brief ACTLR: L1PE Position */
+#define ACTLR_L1PE_Msk        (1UL << ACTLR_L1PE_Pos)         /*!< \brief ACTLR: L1PE Mask */
+
+#define ACTLR_FW_Pos           0U                             /*!< \brief ACTLR: FW Position */
+#define ACTLR_FW_Msk          (1UL << ACTLR_FW_Pos)           /*!< \brief ACTLR: FW Mask */
+
+/* CP15 Register CPACR */
+typedef union
+{
+  struct
+  {
+    uint32_t CP0:2;            /*!< \brief bit:  0..1  Access rights for coprocessor 0 */
+    uint32_t CP1:2;            /*!< \brief bit:  2..3  Access rights for coprocessor 1 */
+    uint32_t CP2:2;            /*!< \brief bit:  4..5  Access rights for coprocessor 2 */
+    uint32_t CP3:2;            /*!< \brief bit:  6..7  Access rights for coprocessor 3 */
+    uint32_t CP4:2;            /*!< \brief bit:  8..9  Access rights for coprocessor 4 */
+    uint32_t CP5:2;            /*!< \brief bit: 10..11  Access rights for coprocessor 5 */
+    uint32_t CP6:2;            /*!< \brief bit: 12..13  Access rights for coprocessor 6 */
+    uint32_t CP7:2;            /*!< \brief bit: 14..15  Access rights for coprocessor 7 */
+    uint32_t CP8:2;            /*!< \brief bit: 16..17  Access rights for coprocessor 8 */
+    uint32_t CP9:2;            /*!< \brief bit: 18..19  Access rights for coprocessor 9 */
+    uint32_t CP10:2;           /*!< \brief bit: 20..21  Access rights for coprocessor 10 */
+    uint32_t CP11:2;           /*!< \brief bit: 22..23  Access rights for coprocessor 11 */
+    uint32_t CP12:2;           /*!< \brief bit: 24..25  Access rights for coprocessor 12 */
+    uint32_t CP13:2;           /*!< \brief bit: 26..27  Access rights for coprocessor 13 */
+    uint32_t TRCDIS:1;         /*!< \brief bit:     28  Disable CP14 access to trace registers */
+    RESERVED(0:1, uint32_t)
+    uint32_t D32DIS:1;         /*!< \brief bit:     30  Disable use of registers D16-D31 of the VFP register file */
+    uint32_t ASEDIS:1;         /*!< \brief bit:     31  Disable Advanced SIMD Functionality */
+  } b;                         /*!< \brief Structure used for bit access */
+  uint32_t w;                  /*!< \brief Type used for word access */
+} CPACR_Type;
+
+#define CPACR_ASEDIS_Pos      31U                             /*!< \brief CPACR: ASEDIS Position */
+#define CPACR_ASEDIS_Msk      (1UL << CPACR_ASEDIS_Pos)       /*!< \brief CPACR: ASEDIS Mask */
+
+#define CPACR_D32DIS_Pos      30U                             /*!< \brief CPACR: D32DIS Position */
+#define CPACR_D32DIS_Msk      (1UL << CPACR_D32DIS_Pos)       /*!< \brief CPACR: D32DIS Mask */
+
+#define CPACR_TRCDIS_Pos      28U                             /*!< \brief CPACR: TRCDIS Position */
+#define CPACR_TRCDIS_Msk      (1UL << CPACR_TRCDIS_Pos)       /*!< \brief CPACR: TRCDIS Mask */
+
+#define CPACR_CP_Pos_(n)      (n*2U)                          /*!< \brief CPACR: CPn Position */
+#define CPACR_CP_Msk_(n)      (3UL << CPACR_CP_Pos_(n))       /*!< \brief CPACR: CPn Mask */
+
+#define CPACR_CP_NA           0U                              /*!< \brief CPACR CPn field: Access denied. */
+#define CPACR_CP_PL1          1U                              /*!< \brief CPACR CPn field: Accessible from PL1 only. */
+#define CPACR_CP_FA           3U                              /*!< \brief CPACR CPn field: Full access. */
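+
+/* Usage sketch (illustration only): before enabling the VFP/NEON unit, CP10
+   and CP11 have to be granted full access, e.g. with the __get_CPACR()/
+   __set_CPACR() accessors provided by cmsis_cp15.h:
+     __set_CPACR(__get_CPACR()
+                 | (CPACR_CP_FA << CPACR_CP_Pos_(10))
+                 | (CPACR_CP_FA << CPACR_CP_Pos_(11)));
+     __ISB();
+   This is the same operation that __FPU_Enable() performs with
+   ORR R1,R1,#0x00F00000. */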
+
+/* CP15 Register DFSR */
+typedef union
+{
+  struct
+  {
+    uint32_t FS0:4;            /*!< \brief bit: 0.. 3  Fault Status bits bit 0-3 */
+    uint32_t Domain:4;         /*!< \brief bit: 4.. 7  Fault on which domain */
+    RESERVED(0:1, uint32_t)
+    uint32_t LPAE:1;           /*!< \brief bit:     9  Large Physical Address Extension */
+    uint32_t FS1:1;            /*!< \brief bit:    10  Fault Status bits bit 4 */
+    uint32_t WnR:1;            /*!< \brief bit:    11  Write not Read bit */
+    uint32_t ExT:1;            /*!< \brief bit:    12  External abort type */
+    uint32_t CM:1;             /*!< \brief bit:    13  Cache maintenance fault */
+    RESERVED(1:18, uint32_t)
+  } s;                         /*!< \brief Structure used for bit access in short format */
+  struct
+  {
+    uint32_t STATUS:6;         /*!< \brief bit: 0.. 5  Fault Status bits */
+    RESERVED(0:3, uint32_t)
+    uint32_t LPAE:1;           /*!< \brief bit:     9  Large Physical Address Extension */
+    RESERVED(1:1, uint32_t)
+    uint32_t WnR:1;            /*!< \brief bit:    11  Write not Read bit */
+    uint32_t ExT:1;            /*!< \brief bit:    12  External abort type */
+    uint32_t CM:1;             /*!< \brief bit:    13  Cache maintenance fault */
+    RESERVED(2:18, uint32_t)
+  } l;                         /*!< \brief Structure used for bit access in long format */
+  uint32_t w;                  /*!< \brief Type used for word access */
+} DFSR_Type;
+
+#define DFSR_CM_Pos           13U                             /*!< \brief DFSR: CM Position */
+#define DFSR_CM_Msk           (1UL << DFSR_CM_Pos)            /*!< \brief DFSR: CM Mask */
+
+#define DFSR_Ext_Pos          12U                             /*!< \brief DFSR: Ext Position */
+#define DFSR_Ext_Msk          (1UL << DFSR_Ext_Pos)           /*!< \brief DFSR: Ext Mask */
+
+#define DFSR_WnR_Pos          11U                             /*!< \brief DFSR: WnR Position */
+#define DFSR_WnR_Msk          (1UL << DFSR_WnR_Pos)           /*!< \brief DFSR: WnR Mask */
+
+#define DFSR_FS1_Pos          10U                             /*!< \brief DFSR: FS1 Position */
+#define DFSR_FS1_Msk          (1UL << DFSR_FS1_Pos)           /*!< \brief DFSR: FS1 Mask */
+
+#define DFSR_LPAE_Pos          9U                             /*!< \brief DFSR: LPAE Position */
+#define DFSR_LPAE_Msk         (1UL << DFSR_LPAE_Pos)          /*!< \brief DFSR: LPAE Mask */
+
+#define DFSR_Domain_Pos        4U                             /*!< \brief DFSR: Domain Position */
+#define DFSR_Domain_Msk       (0xFUL << DFSR_Domain_Pos)      /*!< \brief DFSR: Domain Mask */
+
+#define DFSR_FS0_Pos           0U                             /*!< \brief DFSR: FS0 Position */
+#define DFSR_FS0_Msk          (0xFUL << DFSR_FS0_Pos)         /*!< \brief DFSR: FS0 Mask */
+
+#define DFSR_STATUS_Pos        0U                             /*!< \brief DFSR: STATUS Position */
+#define DFSR_STATUS_Msk       (0x3FUL << DFSR_STATUS_Pos)     /*!< \brief DFSR: STATUS Mask */
+
+/* CP15 Register IFSR */
+typedef union
+{
+  struct
+  {
+    uint32_t FS0:4;            /*!< \brief bit: 0.. 3  Fault Status bits bit 0-3 */
+    RESERVED(0:5, uint32_t)
+    uint32_t LPAE:1;           /*!< \brief bit:     9  Large Physical Address Extension */
+    uint32_t FS1:1;            /*!< \brief bit:    10  Fault Status bits bit 4 */
+    RESERVED(1:1, uint32_t)
+    uint32_t ExT:1;            /*!< \brief bit:    12  External abort type */
+    RESERVED(2:19, uint32_t)
+  } s;                         /*!< \brief Structure used for bit access in short format */
+  struct
+  {
+    uint32_t STATUS:6;         /*!< \brief bit: 0.. 5  Fault Status bits */
+    RESERVED(0:3, uint32_t)
+    uint32_t LPAE:1;           /*!< \brief bit:     9  Large Physical Address Extension */
+    RESERVED(1:2, uint32_t)
+    uint32_t ExT:1;            /*!< \brief bit:    12  External abort type */
+    RESERVED(2:19, uint32_t)
+  } l;                         /*!< \brief Structure used for bit access in long format */
+  uint32_t w;                  /*!< \brief Type used for word access */
+} IFSR_Type;
+
+#define IFSR_ExT_Pos          12U                             /*!< \brief IFSR: ExT Position */
+#define IFSR_ExT_Msk          (1UL << IFSR_ExT_Pos)           /*!< \brief IFSR: ExT Mask */
+
+#define IFSR_FS1_Pos          10U                             /*!< \brief IFSR: FS1 Position */
+#define IFSR_FS1_Msk          (1UL << IFSR_FS1_Pos)           /*!< \brief IFSR: FS1 Mask */
+
+#define IFSR_LPAE_Pos          9U                             /*!< \brief IFSR: LPAE Position */
+#define IFSR_LPAE_Msk         (0x1UL << IFSR_LPAE_Pos)        /*!< \brief IFSR: LPAE Mask */
+
+#define IFSR_FS0_Pos           0U                             /*!< \brief IFSR: FS0 Position */
+#define IFSR_FS0_Msk          (0xFUL << IFSR_FS0_Pos)         /*!< \brief IFSR: FS0 Mask */
+
+#define IFSR_STATUS_Pos        0U                             /*!< \brief IFSR: STATUS Position */
+#define IFSR_STATUS_Msk       (0x3FUL << IFSR_STATUS_Pos)     /*!< \brief IFSR: STATUS Mask */
+
+/* CP15 Register ISR */
+typedef union
+{
+  struct
+  {
+    RESERVED(0:6, uint32_t)
+    uint32_t F:1;              /*!< \brief bit:     6  FIQ pending bit */
+    uint32_t I:1;              /*!< \brief bit:     7  IRQ pending bit */
+    uint32_t A:1;              /*!< \brief bit:     8  External abort pending bit */
+    RESERVED(1:23, uint32_t)
+  } b;                         /*!< \brief Structure used for bit access */
+  uint32_t w;                  /*!< \brief Type used for word access */
+} ISR_Type;
+
+#define ISR_A_Pos              8U                             /*!< \brief ISR: A Position */
+#define ISR_A_Msk             (1UL << ISR_A_Pos)              /*!< \brief ISR: A Mask */
+
+#define ISR_I_Pos              7U                             /*!< \brief ISR: I Position */
+#define ISR_I_Msk             (1UL << ISR_I_Pos)              /*!< \brief ISR: I Mask */
+
+#define ISR_F_Pos              6U                             /*!< \brief ISR: F Position */
+#define ISR_F_Msk             (1UL << ISR_F_Pos)              /*!< \brief ISR: F Mask */
+
+/* DACR Register */
+#define DACR_D_Pos_(n)        (2U*n)                          /*!< \brief DACR: Dn Position */
+#define DACR_D_Msk_(n)        (3UL << DACR_D_Pos_(n))         /*!< \brief DACR: Dn Mask */
+#define DACR_Dn_NOACCESS       0U                             /*!< \brief DACR Dn field: No access */
+#define DACR_Dn_CLIENT         1U                             /*!< \brief DACR Dn field: Client */
+#define DACR_Dn_MANAGER        3U                             /*!< \brief DACR Dn field: Manager */
+
+/**
+ \brief     Mask and shift a bit field value for use in a register bit range.
+            For example, _VAL2FLD(CPSR_M, CPSR_M_USR) evaluates to 0x10U.
+ \param [in] field  Name of the register bit field.
+ \param [in] value  Value of the bit field. This parameter is interpreted as a uint32_t type.
+ \return            Masked and shifted value.
+*/
+#define _VAL2FLD(field, value)    (((uint32_t)(value) << field ## _Pos) & field ## _Msk)
+
+/**
+ \brief     Mask and shift a register value to extract a bit field value.
+ \param [in] field  Name of the register bit field.
+ \param [in] value  Value of register. This parameter is interpreted as a uint32_t type.
+ \return            Masked and shifted bit field value.
+*/
+#define _FLD2VAL(field, value)    (((uint32_t)(value) & field ## _Msk) >> field ## _Pos)
+
+
+/**
+ \brief  Structure type to access the L2C_310 Cache Controller.
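+         The register block is memory-mapped; the L2C_310 access pointer below
+         assumes the device header defines its base address as L2C_310_BASE.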
+*/ +#if (__L2C_PRESENT == 1U) || defined(DOXYGEN) +typedef struct +{ + __IM uint32_t CACHE_ID; /*!< \brief Offset: 0x0000 (R/ ) Cache ID Register */ + __IM uint32_t CACHE_TYPE; /*!< \brief Offset: 0x0004 (R/ ) Cache Type Register */ + RESERVED(0[0x3e], uint32_t) + __IOM uint32_t CONTROL; /*!< \brief Offset: 0x0100 (R/W) Control Register */ + __IOM uint32_t AUX_CNT; /*!< \brief Offset: 0x0104 (R/W) Auxiliary Control */ + RESERVED(1[0x3e], uint32_t) + __IOM uint32_t EVENT_CONTROL; /*!< \brief Offset: 0x0200 (R/W) Event Counter Control */ + __IOM uint32_t EVENT_COUNTER1_CONF; /*!< \brief Offset: 0x0204 (R/W) Event Counter 1 Configuration */ + __IOM uint32_t EVENT_COUNTER0_CONF; /*!< \brief Offset: 0x0208 (R/W) Event Counter 1 Configuration */ + RESERVED(2[0x2], uint32_t) + __IOM uint32_t INTERRUPT_MASK; /*!< \brief Offset: 0x0214 (R/W) Interrupt Mask */ + __IM uint32_t MASKED_INT_STATUS; /*!< \brief Offset: 0x0218 (R/ ) Masked Interrupt Status */ + __IM uint32_t RAW_INT_STATUS; /*!< \brief Offset: 0x021c (R/ ) Raw Interrupt Status */ + __OM uint32_t INTERRUPT_CLEAR; /*!< \brief Offset: 0x0220 ( /W) Interrupt Clear */ + RESERVED(3[0x143], uint32_t) + __IOM uint32_t CACHE_SYNC; /*!< \brief Offset: 0x0730 (R/W) Cache Sync */ + RESERVED(4[0xf], uint32_t) + __IOM uint32_t INV_LINE_PA; /*!< \brief Offset: 0x0770 (R/W) Invalidate Line By PA */ + RESERVED(6[2], uint32_t) + __IOM uint32_t INV_WAY; /*!< \brief Offset: 0x077c (R/W) Invalidate by Way */ + RESERVED(5[0xc], uint32_t) + __IOM uint32_t CLEAN_LINE_PA; /*!< \brief Offset: 0x07b0 (R/W) Clean Line by PA */ + RESERVED(7[1], uint32_t) + __IOM uint32_t CLEAN_LINE_INDEX_WAY; /*!< \brief Offset: 0x07b8 (R/W) Clean Line by Index/Way */ + __IOM uint32_t CLEAN_WAY; /*!< \brief Offset: 0x07bc (R/W) Clean by Way */ + RESERVED(8[0xc], uint32_t) + __IOM uint32_t CLEAN_INV_LINE_PA; /*!< \brief Offset: 0x07f0 (R/W) Clean and Invalidate Line by PA */ + RESERVED(9[1], uint32_t) + __IOM uint32_t CLEAN_INV_LINE_INDEX_WAY; /*!< \brief Offset: 0x07f8 (R/W) Clean and Invalidate Line by Index/Way */ + __IOM uint32_t CLEAN_INV_WAY; /*!< \brief Offset: 0x07fc (R/W) Clean and Invalidate by Way */ + RESERVED(10[0x40], uint32_t) + __IOM uint32_t DATA_LOCK_0_WAY; /*!< \brief Offset: 0x0900 (R/W) Data Lockdown 0 by Way */ + __IOM uint32_t INST_LOCK_0_WAY; /*!< \brief Offset: 0x0904 (R/W) Instruction Lockdown 0 by Way */ + __IOM uint32_t DATA_LOCK_1_WAY; /*!< \brief Offset: 0x0908 (R/W) Data Lockdown 1 by Way */ + __IOM uint32_t INST_LOCK_1_WAY; /*!< \brief Offset: 0x090c (R/W) Instruction Lockdown 1 by Way */ + __IOM uint32_t DATA_LOCK_2_WAY; /*!< \brief Offset: 0x0910 (R/W) Data Lockdown 2 by Way */ + __IOM uint32_t INST_LOCK_2_WAY; /*!< \brief Offset: 0x0914 (R/W) Instruction Lockdown 2 by Way */ + __IOM uint32_t DATA_LOCK_3_WAY; /*!< \brief Offset: 0x0918 (R/W) Data Lockdown 3 by Way */ + __IOM uint32_t INST_LOCK_3_WAY; /*!< \brief Offset: 0x091c (R/W) Instruction Lockdown 3 by Way */ + __IOM uint32_t DATA_LOCK_4_WAY; /*!< \brief Offset: 0x0920 (R/W) Data Lockdown 4 by Way */ + __IOM uint32_t INST_LOCK_4_WAY; /*!< \brief Offset: 0x0924 (R/W) Instruction Lockdown 4 by Way */ + __IOM uint32_t DATA_LOCK_5_WAY; /*!< \brief Offset: 0x0928 (R/W) Data Lockdown 5 by Way */ + __IOM uint32_t INST_LOCK_5_WAY; /*!< \brief Offset: 0x092c (R/W) Instruction Lockdown 5 by Way */ + __IOM uint32_t DATA_LOCK_6_WAY; /*!< \brief Offset: 0x0930 (R/W) Data Lockdown 5 by Way */ + __IOM uint32_t INST_LOCK_6_WAY; /*!< \brief Offset: 0x0934 (R/W) Instruction Lockdown 5 by Way */ + __IOM uint32_t 
DATA_LOCK_7_WAY; /*!< \brief Offset: 0x0938 (R/W) Data Lockdown 6 by Way */ + __IOM uint32_t INST_LOCK_7_WAY; /*!< \brief Offset: 0x093c (R/W) Instruction Lockdown 6 by Way */ + RESERVED(11[0x4], uint32_t) + __IOM uint32_t LOCK_LINE_EN; /*!< \brief Offset: 0x0950 (R/W) Lockdown by Line Enable */ + __IOM uint32_t UNLOCK_ALL_BY_WAY; /*!< \brief Offset: 0x0954 (R/W) Unlock All Lines by Way */ + RESERVED(12[0xaa], uint32_t) + __IOM uint32_t ADDRESS_FILTER_START; /*!< \brief Offset: 0x0c00 (R/W) Address Filtering Start */ + __IOM uint32_t ADDRESS_FILTER_END; /*!< \brief Offset: 0x0c04 (R/W) Address Filtering End */ + RESERVED(13[0xce], uint32_t) + __IOM uint32_t DEBUG_CONTROL; /*!< \brief Offset: 0x0f40 (R/W) Debug Control Register */ +} L2C_310_TypeDef; + +#define L2C_310 ((L2C_310_TypeDef *)L2C_310_BASE) /*!< \brief L2C_310 register set access pointer */ +#endif + +#if (__GIC_PRESENT == 1U) || defined(DOXYGEN) + +/** \brief Structure type to access the Generic Interrupt Controller Distributor (GICD) +*/ +typedef struct +{ + __IOM uint32_t CTLR; /*!< \brief Offset: 0x000 (R/W) Distributor Control Register */ + __IM uint32_t TYPER; /*!< \brief Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IM uint32_t IIDR; /*!< \brief Offset: 0x008 (R/ ) Distributor Implementer Identification Register */ + RESERVED(0, uint32_t) + __IOM uint32_t STATUSR; /*!< \brief Offset: 0x010 (R/W) Error Reporting Status Register, optional */ + RESERVED(1[11], uint32_t) + __OM uint32_t SETSPI_NSR; /*!< \brief Offset: 0x040 ( /W) Set SPI Register */ + RESERVED(2, uint32_t) + __OM uint32_t CLRSPI_NSR; /*!< \brief Offset: 0x048 ( /W) Clear SPI Register */ + RESERVED(3, uint32_t) + __OM uint32_t SETSPI_SR; /*!< \brief Offset: 0x050 ( /W) Set SPI, Secure Register */ + RESERVED(4, uint32_t) + __OM uint32_t CLRSPI_SR; /*!< \brief Offset: 0x058 ( /W) Clear SPI, Secure Register */ + RESERVED(5[9], uint32_t) + __IOM uint32_t IGROUPR[32]; /*!< \brief Offset: 0x080 (R/W) Interrupt Group Registers */ + __IOM uint32_t ISENABLER[32]; /*!< \brief Offset: 0x100 (R/W) Interrupt Set-Enable Registers */ + __IOM uint32_t ICENABLER[32]; /*!< \brief Offset: 0x180 (R/W) Interrupt Clear-Enable Registers */ + __IOM uint32_t ISPENDR[32]; /*!< \brief Offset: 0x200 (R/W) Interrupt Set-Pending Registers */ + __IOM uint32_t ICPENDR[32]; /*!< \brief Offset: 0x280 (R/W) Interrupt Clear-Pending Registers */ + __IOM uint32_t ISACTIVER[32]; /*!< \brief Offset: 0x300 (R/W) Interrupt Set-Active Registers */ + __IOM uint32_t ICACTIVER[32]; /*!< \brief Offset: 0x380 (R/W) Interrupt Clear-Active Registers */ + __IOM uint32_t IPRIORITYR[255]; /*!< \brief Offset: 0x400 (R/W) Interrupt Priority Registers */ + RESERVED(6, uint32_t) + __IOM uint32_t ITARGETSR[255]; /*!< \brief Offset: 0x800 (R/W) Interrupt Targets Registers */ + RESERVED(7, uint32_t) + __IOM uint32_t ICFGR[64]; /*!< \brief Offset: 0xC00 (R/W) Interrupt Configuration Registers */ + __IOM uint32_t IGRPMODR[32]; /*!< \brief Offset: 0xD00 (R/W) Interrupt Group Modifier Registers */ + RESERVED(8[32], uint32_t) + __IOM uint32_t NSACR[64]; /*!< \brief Offset: 0xE00 (R/W) Non-secure Access Control Registers */ + __OM uint32_t SGIR; /*!< \brief Offset: 0xF00 ( /W) Software Generated Interrupt Register */ + RESERVED(9[3], uint32_t) + __IOM uint32_t CPENDSGIR[4]; /*!< \brief Offset: 0xF10 (R/W) SGI Clear-Pending Registers */ + __IOM uint32_t SPENDSGIR[4]; /*!< \brief Offset: 0xF20 (R/W) SGI Set-Pending Registers */ + RESERVED(10[5236], uint32_t) + __IOM uint64_t IROUTER[988]; /*!< \brief Offset: 
0x6100(R/W) Interrupt Routing Registers */ +} GICDistributor_Type; + +#define GICDistributor ((GICDistributor_Type *) GIC_DISTRIBUTOR_BASE ) /*!< \brief GIC Distributor register set access pointer */ + +/** \brief Structure type to access the Generic Interrupt Controller Interface (GICC) +*/ +typedef struct +{ + __IOM uint32_t CTLR; /*!< \brief Offset: 0x000 (R/W) CPU Interface Control Register */ + __IOM uint32_t PMR; /*!< \brief Offset: 0x004 (R/W) Interrupt Priority Mask Register */ + __IOM uint32_t BPR; /*!< \brief Offset: 0x008 (R/W) Binary Point Register */ + __IM uint32_t IAR; /*!< \brief Offset: 0x00C (R/ ) Interrupt Acknowledge Register */ + __OM uint32_t EOIR; /*!< \brief Offset: 0x010 ( /W) End Of Interrupt Register */ + __IM uint32_t RPR; /*!< \brief Offset: 0x014 (R/ ) Running Priority Register */ + __IM uint32_t HPPIR; /*!< \brief Offset: 0x018 (R/ ) Highest Priority Pending Interrupt Register */ + __IOM uint32_t ABPR; /*!< \brief Offset: 0x01C (R/W) Aliased Binary Point Register */ + __IM uint32_t AIAR; /*!< \brief Offset: 0x020 (R/ ) Aliased Interrupt Acknowledge Register */ + __OM uint32_t AEOIR; /*!< \brief Offset: 0x024 ( /W) Aliased End Of Interrupt Register */ + __IM uint32_t AHPPIR; /*!< \brief Offset: 0x028 (R/ ) Aliased Highest Priority Pending Interrupt Register */ + __IOM uint32_t STATUSR; /*!< \brief Offset: 0x02C (R/W) Error Reporting Status Register, optional */ + RESERVED(1[40], uint32_t) + __IOM uint32_t APR[4]; /*!< \brief Offset: 0x0D0 (R/W) Active Priority Register */ + __IOM uint32_t NSAPR[4]; /*!< \brief Offset: 0x0E0 (R/W) Non-secure Active Priority Register */ + RESERVED(2[3], uint32_t) + __IM uint32_t IIDR; /*!< \brief Offset: 0x0FC (R/ ) CPU Interface Identification Register */ + RESERVED(3[960], uint32_t) + __OM uint32_t DIR; /*!< \brief Offset: 0x1000( /W) Deactivate Interrupt Register */ +} GICInterface_Type; + +#define GICInterface ((GICInterface_Type *) GIC_INTERFACE_BASE ) /*!< \brief GIC Interface register set access pointer */ +#endif + +#if (__TIM_PRESENT == 1U) || defined(DOXYGEN) +#if ((__CORTEX_A == 5U) || (__CORTEX_A == 9U)) || defined(DOXYGEN) +/** \brief Structure type to access the Private Timer +*/ +typedef struct +{ + __IOM uint32_t LOAD; //!< \brief Offset: 0x000 (R/W) Private Timer Load Register + __IOM uint32_t COUNTER; //!< \brief Offset: 0x004 (R/W) Private Timer Counter Register + __IOM uint32_t CONTROL; //!< \brief Offset: 0x008 (R/W) Private Timer Control Register + __IOM uint32_t ISR; //!< \brief Offset: 0x00C (R/W) Private Timer Interrupt Status Register + RESERVED(0[4], uint32_t) + __IOM uint32_t WLOAD; //!< \brief Offset: 0x020 (R/W) Watchdog Load Register + __IOM uint32_t WCOUNTER; //!< \brief Offset: 0x024 (R/W) Watchdog Counter Register + __IOM uint32_t WCONTROL; //!< \brief Offset: 0x028 (R/W) Watchdog Control Register + __IOM uint32_t WISR; //!< \brief Offset: 0x02C (R/W) Watchdog Interrupt Status Register + __IOM uint32_t WRESET; //!< \brief Offset: 0x030 (R/W) Watchdog Reset Status Register + __OM uint32_t WDISABLE; //!< \brief Offset: 0x034 ( /W) Watchdog Disable Register +} Timer_Type; +#define PTIM ((Timer_Type *) TIMER_BASE ) /*!< \brief Timer register struct */ +#endif +#endif + + /******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - L1 Cache Functions + - L2C-310 Cache Controller Functions + - PL1 Timer Functions + - GIC Functions + - MMU Functions + 
 ******************************************************************************/
+
+/* ##########################  L1 Cache functions  ################################# */
+
+/** \brief Enable Caches by setting I and C bits in SCTLR register.
+*/
+__STATIC_FORCEINLINE void L1C_EnableCaches(void) {
+  __set_SCTLR( __get_SCTLR() | SCTLR_I_Msk | SCTLR_C_Msk);
+  __ISB();
+}
+
+/** \brief Disable Caches by clearing I and C bits in SCTLR register.
+*/
+__STATIC_FORCEINLINE void L1C_DisableCaches(void) {
+  __set_SCTLR( __get_SCTLR() & (~SCTLR_I_Msk) & (~SCTLR_C_Msk));
+  __ISB();
+}
+
+/** \brief Enable Branch Prediction by setting Z bit in SCTLR register.
+*/
+__STATIC_FORCEINLINE void L1C_EnableBTAC(void) {
+  __set_SCTLR( __get_SCTLR() | SCTLR_Z_Msk);
+  __ISB();
+}
+
+/** \brief Disable Branch Prediction by clearing Z bit in SCTLR register.
+*/
+__STATIC_FORCEINLINE void L1C_DisableBTAC(void) {
+  __set_SCTLR( __get_SCTLR() & (~SCTLR_Z_Msk));
+  __ISB();
+}
+
+/** \brief Invalidate entire branch predictor array
+*/
+__STATIC_FORCEINLINE void L1C_InvalidateBTAC(void) {
+  __set_BPIALL(0);
+  __DSB();     //ensure completion of the invalidation
+  __ISB();     //ensure instruction fetch path sees new state
+}
+
+/** \brief Invalidate the whole instruction cache
+*/
+__STATIC_FORCEINLINE void L1C_InvalidateICacheAll(void) {
+  __set_ICIALLU(0);
+  __DSB();     //ensure completion of the invalidation
+  __ISB();     //ensure instruction fetch path sees new I cache state
+}
+
+/** \brief Clean data cache line by address.
+* \param [in] va Pointer to data to clean the cache for.
+*/
+__STATIC_FORCEINLINE void L1C_CleanDCacheMVA(void *va) {
+  __set_DCCMVAC((uint32_t)va);
+  __DMB();     //ensure the ordering of data cache maintenance operations and their effects
+}
+
+/** \brief Invalidate data cache line by address.
+* \param [in] va Pointer to data to invalidate the cache for.
+*/
+__STATIC_FORCEINLINE void L1C_InvalidateDCacheMVA(void *va) {
+  __set_DCIMVAC((uint32_t)va);
+  __DMB();     //ensure the ordering of data cache maintenance operations and their effects
+}
+
+/** \brief Clean and Invalidate data cache by address.
+* \param [in] va Pointer to data to clean and invalidate the cache for.
+*/
+__STATIC_FORCEINLINE void L1C_CleanInvalidateDCacheMVA(void *va) {
+  __set_DCCIMVAC((uint32_t)va);
+  __DMB();     //ensure the ordering of data cache maintenance operations and their effects
+}
+
+/** \brief Calculate log2 rounded up
+*  - log(0)  => 0
+*  - log(1)  => 0
+*  - log(2)  => 1
+*  - log(3)  => 2
+*  - log(4)  => 2
+*  - log(5)  => 3
+*        :      :
+*  - log(16) => 4
+*  - log(32) => 5
+*        :      :
+* \param [in] n input value parameter
+* \return log2(n)
+*/
+__STATIC_FORCEINLINE uint8_t __log2_up(uint32_t n)
+{
+  if (n < 2U) {
+    return 0U;
+  }
+  uint8_t log = 0U;
+  uint32_t t = n;
+  while(t > 1U)
+  {
+    log++;
+    t >>= 1U;
+  }
+  /* round up when n is not a power of two, so that e.g. __log2_up(6U) yields 3 */
+  if ((n & (n - 1U)) != 0U) { log++; }
+  return log;
+}
+
+/** \brief Apply cache maintenance to given cache level.
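+* \details Editor's summary: selects the level in CSSELR, reads the cache geometry
+*          from CCSIDR, then walks every set and way of that level issuing the
+*          DCISW/DCCSW/DCCISW set/way maintenance operations.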
+* \param [in] level cache level to be maintained +* \param [in] maint 0 - invalidate, 1 - clean, otherwise - invalidate and clean +*/ +__STATIC_FORCEINLINE void __L1C_MaintainDCacheSetWay(uint32_t level, uint32_t maint) +{ + uint32_t Dummy; + uint32_t ccsidr; + uint32_t num_sets; + uint32_t num_ways; + uint32_t shift_way; + uint32_t log2_linesize; + int32_t log2_num_ways; + + Dummy = level << 1U; + /* set csselr, select ccsidr register */ + __set_CSSELR(Dummy); + /* get current ccsidr register */ + ccsidr = __get_CCSIDR(); + num_sets = ((ccsidr & 0x0FFFE000U) >> 13U) + 1U; + num_ways = ((ccsidr & 0x00001FF8U) >> 3U) + 1U; + log2_linesize = (ccsidr & 0x00000007U) + 2U + 2U; + log2_num_ways = __log2_up(num_ways); + if ((log2_num_ways < 0) || (log2_num_ways > 32)) { + return; // FATAL ERROR + } + shift_way = 32U - (uint32_t)log2_num_ways; + for(int32_t way = num_ways-1; way >= 0; way--) + { + for(int32_t set = num_sets-1; set >= 0; set--) + { + Dummy = (level << 1U) | (((uint32_t)set) << log2_linesize) | (((uint32_t)way) << shift_way); + switch (maint) + { + case 0U: __set_DCISW(Dummy); break; + case 1U: __set_DCCSW(Dummy); break; + default: __set_DCCISW(Dummy); break; + } + } + } + __DMB(); +} + +/** \brief Clean and Invalidate the entire data or unified cache +* Generic mechanism for cleaning/invalidating the entire data or unified cache to the point of coherency +* \param [in] op 0 - invalidate, 1 - clean, otherwise - invalidate and clean +*/ +__STATIC_FORCEINLINE void L1C_CleanInvalidateCache(uint32_t op) { + uint32_t clidr; + uint32_t cache_type; + clidr = __get_CLIDR(); + for(uint32_t i = 0U; i<7U; i++) + { + cache_type = (clidr >> i*3U) & 0x7UL; + if ((cache_type >= 2U) && (cache_type <= 4U)) + { + __L1C_MaintainDCacheSetWay(i, op); + } + } +} + +/** \brief Clean and Invalidate the entire data or unified cache +* Generic mechanism for cleaning/invalidating the entire data or unified cache to the point of coherency +* \param [in] op 0 - invalidate, 1 - clean, otherwise - invalidate and clean +* \deprecated Use generic L1C_CleanInvalidateCache instead. +*/ +CMSIS_DEPRECATED +__STATIC_FORCEINLINE void __L1C_CleanInvalidateCache(uint32_t op) { + L1C_CleanInvalidateCache(op); +} + +/** \brief Invalidate the whole data cache. +*/ +__STATIC_FORCEINLINE void L1C_InvalidateDCacheAll(void) { + L1C_CleanInvalidateCache(0); +} + +/** \brief Clean the whole data cache. + */ +__STATIC_FORCEINLINE void L1C_CleanDCacheAll(void) { + L1C_CleanInvalidateCache(1); +} + +/** \brief Clean and invalidate the whole data cache. + */ +__STATIC_FORCEINLINE void L1C_CleanInvalidateDCacheAll(void) { + L1C_CleanInvalidateCache(2); +} + +/* ########################## L2 Cache functions ################################# */ +#if (__L2C_PRESENT == 1U) || defined(DOXYGEN) +/** \brief Cache Sync operation by writing CACHE_SYNC register. +*/ +__STATIC_INLINE void L2C_Sync(void) +{ + L2C_310->CACHE_SYNC = 0x0; +} + +/** \brief Read cache controller cache ID from CACHE_ID register. + * \return L2C_310_TypeDef::CACHE_ID + */ +__STATIC_INLINE int L2C_GetID (void) +{ + return L2C_310->CACHE_ID; +} + +/** \brief Read cache controller cache type from CACHE_TYPE register. 
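+* \details The returned value encodes the cache configuration (e.g. associativity
+*          and way size) reported by the controller.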
+* \return L2C_310_TypeDef::CACHE_TYPE +*/ +__STATIC_INLINE int L2C_GetType (void) +{ + return L2C_310->CACHE_TYPE; +} + +/** \brief Invalidate all cache by way +*/ +__STATIC_INLINE void L2C_InvAllByWay (void) +{ + unsigned int assoc; + + if (L2C_310->AUX_CNT & (1U << 16U)) { + assoc = 16U; + } else { + assoc = 8U; + } + + L2C_310->INV_WAY = (1U << assoc) - 1U; + while(L2C_310->INV_WAY & ((1U << assoc) - 1U)); //poll invalidate + + L2C_Sync(); +} + +/** \brief Clean and Invalidate all cache by way +*/ +__STATIC_INLINE void L2C_CleanInvAllByWay (void) +{ + unsigned int assoc; + + if (L2C_310->AUX_CNT & (1U << 16U)) { + assoc = 16U; + } else { + assoc = 8U; + } + + L2C_310->CLEAN_INV_WAY = (1U << assoc) - 1U; + while(L2C_310->CLEAN_INV_WAY & ((1U << assoc) - 1U)); //poll invalidate + + L2C_Sync(); +} + +/** \brief Enable Level 2 Cache +*/ +__STATIC_INLINE void L2C_Enable(void) +{ + L2C_310->CONTROL = 0; + L2C_310->INTERRUPT_CLEAR = 0x000001FFuL; + L2C_310->DEBUG_CONTROL = 0; + L2C_310->DATA_LOCK_0_WAY = 0; + L2C_310->CACHE_SYNC = 0; + L2C_310->CONTROL = 0x01; + L2C_Sync(); +} + +/** \brief Disable Level 2 Cache +*/ +__STATIC_INLINE void L2C_Disable(void) +{ + L2C_310->CONTROL = 0x00; + L2C_Sync(); +} + +/** \brief Invalidate cache by physical address +* \param [in] pa Pointer to data to invalidate cache for. +*/ +__STATIC_INLINE void L2C_InvPa (void *pa) +{ + L2C_310->INV_LINE_PA = (unsigned int)pa; + L2C_Sync(); +} + +/** \brief Clean cache by physical address +* \param [in] pa Pointer to data to invalidate cache for. +*/ +__STATIC_INLINE void L2C_CleanPa (void *pa) +{ + L2C_310->CLEAN_LINE_PA = (unsigned int)pa; + L2C_Sync(); +} + +/** \brief Clean and invalidate cache by physical address +* \param [in] pa Pointer to data to invalidate cache for. +*/ +__STATIC_INLINE void L2C_CleanInvPa (void *pa) +{ + L2C_310->CLEAN_INV_LINE_PA = (unsigned int)pa; + L2C_Sync(); +} +#endif + +/* ########################## GIC functions ###################################### */ +#if (__GIC_PRESENT == 1U) || defined(DOXYGEN) + +/** \brief Enable the interrupt distributor using the GIC's CTLR register. +*/ +__STATIC_INLINE void GIC_EnableDistributor(void) +{ + GICDistributor->CTLR |= 1U; +} + +/** \brief Disable the interrupt distributor using the GIC's CTLR register. +*/ +__STATIC_INLINE void GIC_DisableDistributor(void) +{ + GICDistributor->CTLR &=~1U; +} + +/** \brief Read the GIC's TYPER register. +* \return GICDistributor_Type::TYPER +*/ +__STATIC_INLINE uint32_t GIC_DistributorInfo(void) +{ + return (GICDistributor->TYPER); +} + +/** \brief Reads the GIC's IIDR register. +* \return GICDistributor_Type::IIDR +*/ +__STATIC_INLINE uint32_t GIC_DistributorImplementer(void) +{ + return (GICDistributor->IIDR); +} + +/** \brief Sets the GIC's ITARGETSR register for the given interrupt. +* \param [in] IRQn Interrupt to be configured. +* \param [in] cpu_target CPU interfaces to assign this interrupt to. +*/ +__STATIC_INLINE void GIC_SetTarget(IRQn_Type IRQn, uint32_t cpu_target) +{ + uint32_t mask = GICDistributor->ITARGETSR[IRQn / 4U] & ~(0xFFUL << ((IRQn % 4U) * 8U)); + GICDistributor->ITARGETSR[IRQn / 4U] = mask | ((cpu_target & 0xFFUL) << ((IRQn % 4U) * 8U)); +} + +/** \brief Read the GIC's ITARGETSR register. +* \param [in] IRQn Interrupt to acquire the configuration for. +* \return GICDistributor_Type::ITARGETSR +*/ +__STATIC_INLINE uint32_t GIC_GetTarget(IRQn_Type IRQn) +{ + return (GICDistributor->ITARGETSR[IRQn / 4U] >> ((IRQn % 4U) * 8U)) & 0xFFUL; +} + +/** \brief Enable the CPU's interrupt interface. 
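+* \details Sets the enable bit (bit 0) in the CPU interface control register.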
+*/
+__STATIC_INLINE void GIC_EnableInterface(void)
+{
+  GICInterface->CTLR |= 1U; //enable interface
+}
+
+/** \brief Disable the CPU's interrupt interface.
+*/
+__STATIC_INLINE void GIC_DisableInterface(void)
+{
+  GICInterface->CTLR &=~1U; //disable interface
+}
+
+/** \brief Read the CPU's IAR register.
+* \return GICInterface_Type::IAR
+*/
+__STATIC_INLINE IRQn_Type GIC_AcknowledgePending(void)
+{
+  return (IRQn_Type)(GICInterface->IAR);
+}
+
+/** \brief Writes the given interrupt number to the CPU's EOIR register.
+* \param [in] IRQn The interrupt to be signaled as finished.
+*/
+__STATIC_INLINE void GIC_EndInterrupt(IRQn_Type IRQn)
+{
+  GICInterface->EOIR = IRQn;
+}
+
+/** \brief Enables the given interrupt using GIC's ISENABLER register.
+* \param [in] IRQn The interrupt to be enabled.
+*/
+__STATIC_INLINE void GIC_EnableIRQ(IRQn_Type IRQn)
+{
+  GICDistributor->ISENABLER[IRQn / 32U] = 1U << (IRQn % 32U);
+}
+
+/** \brief Get interrupt enable status using GIC's ISENABLER register.
+* \param [in] IRQn The interrupt to be queried.
+* \return 0 - interrupt is not enabled, 1 - interrupt is enabled.
+*/
+__STATIC_INLINE uint32_t GIC_GetEnableIRQ(IRQn_Type IRQn)
+{
+  return (GICDistributor->ISENABLER[IRQn / 32U] >> (IRQn % 32U)) & 1UL;
+}
+
+/** \brief Disables the given interrupt using GIC's ICENABLER register.
+* \param [in] IRQn The interrupt to be disabled.
+*/
+__STATIC_INLINE void GIC_DisableIRQ(IRQn_Type IRQn)
+{
+  GICDistributor->ICENABLER[IRQn / 32U] = 1U << (IRQn % 32U);
+}
+
+/** \brief Get interrupt pending status from GIC's ISPENDR register.
+* \param [in] IRQn The interrupt to be queried.
+* \return 0 - interrupt is not pending, 1 - interrupt is pending.
+*/
+__STATIC_INLINE uint32_t GIC_GetPendingIRQ(IRQn_Type IRQn)
+{
+  uint32_t pend;
+
+  if (IRQn >= 16U) {
+    pend = (GICDistributor->ISPENDR[IRQn / 32U] >> (IRQn % 32U)) & 1UL;
+  } else {
+    // INTID 0-15 Software Generated Interrupt
+    pend = (GICDistributor->SPENDSGIR[IRQn / 4U] >> ((IRQn % 4U) * 8U)) & 0xFFUL;
+    // No CPU identification offered
+    if (pend != 0U) {
+      pend = 1U;
+    } else {
+      pend = 0U;
+    }
+  }
+
+  return (pend);
+}
+
+/** \brief Sets the given interrupt as pending using GIC's ISPENDR register.
+* \param [in] IRQn The interrupt to be set pending.
+*/
+__STATIC_INLINE void GIC_SetPendingIRQ(IRQn_Type IRQn)
+{
+  if (IRQn >= 16U) {
+    GICDistributor->ISPENDR[IRQn / 32U] = 1U << (IRQn % 32U);
+  } else {
+    // INTID 0-15 Software Generated Interrupt
+    GICDistributor->SPENDSGIR[IRQn / 4U] = 1U << ((IRQn % 4U) * 8U);
+  }
+}
+
+/** \brief Clears the pending state of the given interrupt using GIC's ICPENDR register.
+* \param [in] IRQn The interrupt whose pending state is to be cleared.
+*/
+__STATIC_INLINE void GIC_ClearPendingIRQ(IRQn_Type IRQn)
+{
+  if (IRQn >= 16U) {
+    GICDistributor->ICPENDR[IRQn / 32U] = 1U << (IRQn % 32U);
+  } else {
+    // INTID 0-15 Software Generated Interrupt
+    GICDistributor->CPENDSGIR[IRQn / 4U] = 1U << ((IRQn % 4U) * 8U);
+  }
+}
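+
+/* Editor's illustration (not part of the original CMSIS sources): a minimal,
+   hypothetical bring-up sequence for one peripheral interrupt using the helpers
+   above. The interrupt ID 37 is a placeholder for a device-specific shared
+   peripheral interrupt (SPI) number; the block is compiled out because it is
+   documentation only. */
+#if 0
+static void GIC_ExampleSetup(void)
+{
+  IRQn_Type irq = (IRQn_Type)37;    /* hypothetical device-specific interrupt ID    */
+  GIC_EnableDistributor();          /* forward interrupts to the CPU interfaces     */
+  GIC_SetTarget(irq, 1U);           /* route the interrupt to CPU interface 0       */
+  GIC_EnableIRQ(irq);               /* set-enable the interrupt in the distributor  */
+  GIC_SetPendingIRQ(irq);           /* optionally trigger it once by software       */
+}
+#endif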
+
+/** \brief Sets the interrupt configuration using GIC's ICFGR register.
+* \param [in] IRQn       The interrupt to be configured.
+* \param [in] int_config Int_config field value. Bit 0: Reserved (0 - N-N model, 1 - 1-N model for some GIC before v1)
+*                        Bit 1: 0 - level sensitive, 1 - edge triggered
+*/
+__STATIC_INLINE void GIC_SetConfiguration(IRQn_Type IRQn, uint32_t int_config)
+{
+  uint32_t icfgr = GICDistributor->ICFGR[IRQn / 16U];
+  uint32_t shift = (IRQn % 16U) << 1U;
+
+  icfgr &= (~(3U << shift));
+  icfgr |= (  int_config << shift);
+
+  GICDistributor->ICFGR[IRQn / 16U] = icfgr;
+}
+
+/** \brief Get the interrupt configuration from the GIC's ICFGR register.
+* \param [in] IRQn Interrupt to acquire the configuration for.
+* \return Int_config field value. Bit 0: Reserved (0 - N-N model, 1 - 1-N model for some GIC before v1)
+*         Bit 1: 0 - level sensitive, 1 - edge triggered
+*/
+__STATIC_INLINE uint32_t GIC_GetConfiguration(IRQn_Type IRQn)
+{
+  // each interrupt occupies a 2-bit field, so the shift amount is (IRQn % 16) * 2
+  return (GICDistributor->ICFGR[IRQn / 16U] >> ((IRQn % 16U) << 1U)) & 3UL;
+}
+
+/** \brief Set the priority for the given interrupt in the GIC's IPRIORITYR register.
+* \param [in] IRQn     The interrupt to be configured.
+* \param [in] priority The priority for the interrupt, lower values denote higher priorities.
+*/
+__STATIC_INLINE void GIC_SetPriority(IRQn_Type IRQn, uint32_t priority)
+{
+  uint32_t mask = GICDistributor->IPRIORITYR[IRQn / 4U] & ~(0xFFUL << ((IRQn % 4U) * 8U));
+  GICDistributor->IPRIORITYR[IRQn / 4U] = mask | ((priority & 0xFFUL) << ((IRQn % 4U) * 8U));
+}
+
+/** \brief Read the current interrupt priority from GIC's IPRIORITYR register.
+* \param [in] IRQn The interrupt to be queried.
+* \return The current priority of the interrupt.
+*/
+__STATIC_INLINE uint32_t GIC_GetPriority(IRQn_Type IRQn)
+{
+  return (GICDistributor->IPRIORITYR[IRQn / 4U] >> ((IRQn % 4U) * 8U)) & 0xFFUL;
+}
+
+/** \brief Set the interrupt priority mask using CPU's PMR register.
+* \param [in] priority Priority mask to be set.
+*/
+__STATIC_INLINE void GIC_SetInterfacePriorityMask(uint32_t priority)
+{
+  GICInterface->PMR = priority & 0xFFUL; //set priority mask
+}
+
+/** \brief Read the current interrupt priority mask from CPU's PMR register.
+* \result GICInterface_Type::PMR
+*/
+__STATIC_INLINE uint32_t GIC_GetInterfacePriorityMask(void)
+{
+  return GICInterface->PMR;
+}
+
+/** \brief Configures the group priority and subpriority split point using CPU's BPR register.
+* \param [in] binary_point Amount of bits used as subpriority.
+*/
+__STATIC_INLINE void GIC_SetBinaryPoint(uint32_t binary_point)
+{
+  GICInterface->BPR = binary_point & 7U; //set binary point
+}
+
+/** \brief Read the current group priority and subpriority split point from CPU's BPR register.
+* \return GICInterface_Type::BPR
+*/
+__STATIC_INLINE uint32_t GIC_GetBinaryPoint(void)
+{
+  return GICInterface->BPR;
+}
+
+/** \brief Get the status for a given interrupt.
+* \param [in] IRQn The interrupt to get status for.
+* \return 0 - not pending/active, 1 - pending, 2 - active, 3 - pending and active
+*/
+__STATIC_INLINE uint32_t GIC_GetIRQStatus(IRQn_Type IRQn)
+{
+  uint32_t pending, active;
+
+  active = ((GICDistributor->ISACTIVER[IRQn / 32U]) >> (IRQn % 32U)) & 1UL;
+  pending = ((GICDistributor->ISPENDR[IRQn / 32U]) >> (IRQn % 32U)) & 1UL;
+
+  return ((active<<1U) | pending);
+}
+
+/** \brief Generate a software interrupt using GIC's SGIR register.
+* \param [in] IRQn        Software interrupt to be generated.
+* \param [in] target_list List of CPUs the software interrupt should be forwarded to.
+* \param [in] filter_list Filter to be applied to determine interrupt receivers.
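+*                        (Editor's note, per the GICv2 architecture: 0 - forward to the
+*                        CPUs listed in target_list, 1 - forward to all CPUs except the
+*                        requesting one, 2 - forward only to the requesting CPU.)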
+*/ +__STATIC_INLINE void GIC_SendSGI(IRQn_Type IRQn, uint32_t target_list, uint32_t filter_list) +{ + GICDistributor->SGIR = ((filter_list & 3U) << 24U) | ((target_list & 0xFFUL) << 16U) | (IRQn & 0x0FUL); +} + +/** \brief Get the interrupt number of the highest interrupt pending from CPU's HPPIR register. +* \return GICInterface_Type::HPPIR +*/ +__STATIC_INLINE uint32_t GIC_GetHighPendingIRQ(void) +{ + return GICInterface->HPPIR; +} + +/** \brief Provides information about the implementer and revision of the CPU interface. +* \return GICInterface_Type::IIDR +*/ +__STATIC_INLINE uint32_t GIC_GetInterfaceId(void) +{ + return GICInterface->IIDR; +} + +/** \brief Set the interrupt group from the GIC's IGROUPR register. +* \param [in] IRQn The interrupt to be queried. +* \param [in] group Interrupt group number: 0 - Group 0, 1 - Group 1 +*/ +__STATIC_INLINE void GIC_SetGroup(IRQn_Type IRQn, uint32_t group) +{ + uint32_t igroupr = GICDistributor->IGROUPR[IRQn / 32U]; + uint32_t shift = (IRQn % 32U); + + igroupr &= (~(1U << shift)); + igroupr |= ( (group & 1U) << shift); + + GICDistributor->IGROUPR[IRQn / 32U] = igroupr; +} +#define GIC_SetSecurity GIC_SetGroup + +/** \brief Get the interrupt group from the GIC's IGROUPR register. +* \param [in] IRQn The interrupt to be queried. +* \return 0 - Group 0, 1 - Group 1 +*/ +__STATIC_INLINE uint32_t GIC_GetGroup(IRQn_Type IRQn) +{ + return (GICDistributor->IGROUPR[IRQn / 32U] >> (IRQn % 32U)) & 1UL; +} +#define GIC_GetSecurity GIC_GetGroup + +/** \brief Initialize the interrupt distributor. +*/ +__STATIC_INLINE void GIC_DistInit(void) +{ + uint32_t i; + uint32_t num_irq = 0U; + uint32_t priority_field; + + //A reset sets all bits in the IGROUPRs corresponding to the SPIs to 0, + //configuring all of the interrupts as Secure. + + //Disable interrupt forwarding + GIC_DisableDistributor(); + //Get the maximum number of interrupts that the GIC supports + num_irq = 32U * ((GIC_DistributorInfo() & 0x1FU) + 1U); + + /* Priority level is implementation defined. + To determine the number of priority bits implemented write 0xFF to an IPRIORITYR + priority field and read back the value stored.*/ + GIC_SetPriority((IRQn_Type)0U, 0xFFU); + priority_field = GIC_GetPriority((IRQn_Type)0U); + + for (i = 32U; i < num_irq; i++) + { + //Disable the SPI interrupt + GIC_DisableIRQ((IRQn_Type)i); + //Set level-sensitive (and N-N model) + GIC_SetConfiguration((IRQn_Type)i, 0U); + //Set priority + GIC_SetPriority((IRQn_Type)i, priority_field/2U); + //Set target list to CPU0 + GIC_SetTarget((IRQn_Type)i, 1U); + } + //Enable distributor + GIC_EnableDistributor(); +} + +/** \brief Initialize the CPU's interrupt interface +*/ +__STATIC_INLINE void GIC_CPUInterfaceInit(void) +{ + uint32_t i; + uint32_t priority_field; + + //A reset sets all bits in the IGROUPRs corresponding to the SPIs to 0, + //configuring all of the interrupts as Secure. + + //Disable interrupt forwarding + GIC_DisableInterface(); + + /* Priority level is implementation defined. 
+ To determine the number of priority bits implemented write 0xFF to an IPRIORITYR + priority field and read back the value stored.*/ + GIC_SetPriority((IRQn_Type)0U, 0xFFU); + priority_field = GIC_GetPriority((IRQn_Type)0U); + + //SGI and PPI + for (i = 0U; i < 32U; i++) + { + if(i > 15U) { + //Set level-sensitive (and N-N model) for PPI + GIC_SetConfiguration((IRQn_Type)i, 0U); + } + //Disable SGI and PPI interrupts + GIC_DisableIRQ((IRQn_Type)i); + //Set priority + GIC_SetPriority((IRQn_Type)i, priority_field/2U); + } + //Enable interface + GIC_EnableInterface(); + //Set binary point to 0 + GIC_SetBinaryPoint(0U); + //Set priority mask + GIC_SetInterfacePriorityMask(0xFFU); +} + +/** \brief Initialize and enable the GIC +*/ +__STATIC_INLINE void GIC_Enable(void) +{ + GIC_DistInit(); + GIC_CPUInterfaceInit(); //per CPU +} +#endif + +/* ########################## Generic Timer functions ############################ */ +#if (__TIM_PRESENT == 1U) || defined(DOXYGEN) + +/* PL1 Physical Timer */ +#if (__CORTEX_A == 7U) || defined(DOXYGEN) + +/** \brief Physical Timer Control register */ +typedef union +{ + struct + { + uint32_t ENABLE:1; /*!< \brief bit: 0 Enables the timer. */ + uint32_t IMASK:1; /*!< \brief bit: 1 Timer output signal mask bit. */ + uint32_t ISTATUS:1; /*!< \brief bit: 2 The status of the timer. */ + RESERVED(0:29, uint32_t) + } b; /*!< \brief Structure used for bit access */ + uint32_t w; /*!< \brief Type used for word access */ +} CNTP_CTL_Type; + +/** \brief Configures the frequency the timer shall run at. +* \param [in] value The timer frequency in Hz. +*/ +__STATIC_INLINE void PL1_SetCounterFrequency(uint32_t value) +{ + __set_CNTFRQ(value); + __ISB(); +} + +/** \brief Sets the reset value of the timer. +* \param [in] value The value the timer is loaded with. +*/ +__STATIC_INLINE void PL1_SetLoadValue(uint32_t value) +{ + __set_CNTP_TVAL(value); + __ISB(); +} + +/** \brief Get the current counter value. +* \return Current counter value. +*/ +__STATIC_INLINE uint32_t PL1_GetCurrentValue(void) +{ + return(__get_CNTP_TVAL()); +} + +/** \brief Get the current physical counter value. +* \return Current physical counter value. +*/ +__STATIC_INLINE uint64_t PL1_GetCurrentPhysicalValue(void) +{ + return(__get_CNTPCT()); +} + +/** \brief Set the physical compare value. +* \param [in] value New physical timer compare value. +*/ +__STATIC_INLINE void PL1_SetPhysicalCompareValue(uint64_t value) +{ + __set_CNTP_CVAL(value); + __ISB(); +} + +/** \brief Get the physical compare value. +* \return Physical compare value. +*/ +__STATIC_INLINE uint64_t PL1_GetPhysicalCompareValue(void) +{ + return(__get_CNTP_CVAL()); +} + +/** \brief Configure the timer by setting the control value. +* \param [in] value New timer control value. +*/ +__STATIC_INLINE void PL1_SetControl(uint32_t value) +{ + __set_CNTP_CTL(value); + __ISB(); +} + +/** \brief Get the control value. +* \return Control value. +*/ +__STATIC_INLINE uint32_t PL1_GetControl(void) +{ + return(__get_CNTP_CTL()); +} +#endif + +/* Private Timer */ +#if ((__CORTEX_A == 5U) || (__CORTEX_A == 9U)) || defined(DOXYGEN) +/** \brief Set the load value to timers LOAD register. +* \param [in] value The load value to be set. +*/ +__STATIC_INLINE void PTIM_SetLoadValue(uint32_t value) +{ + PTIM->LOAD = value; +} + +/** \brief Get the load value from timers LOAD register. +* \return Timer_Type::LOAD +*/ +__STATIC_INLINE uint32_t PTIM_GetLoadValue(void) +{ + return(PTIM->LOAD); +} + +/** \brief Set current counter value from its COUNTER register. 
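+* \param [in] value The counter value to be set.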
+*/
+__STATIC_INLINE void PTIM_SetCurrentValue(uint32_t value)
+{
+  PTIM->COUNTER = value;
+}
+
+/** \brief Get current counter value from timers COUNTER register.
+* \result Timer_Type::COUNTER
+*/
+__STATIC_INLINE uint32_t PTIM_GetCurrentValue(void)
+{
+  return(PTIM->COUNTER);
+}
+
+/** \brief Configure the timer using its CONTROL register.
+* \param [in] value The new configuration value to be set.
+*/
+__STATIC_INLINE void PTIM_SetControl(uint32_t value)
+{
+  PTIM->CONTROL = value;
+}
+
+/** \brief Get the current timer configuration from its CONTROL register.
+* \return Timer_Type::CONTROL
+*/
+__STATIC_INLINE uint32_t PTIM_GetControl(void)
+{
+  return(PTIM->CONTROL);
+}
+
+/** \brief Get the event flag from the timer's ISR register.
+* \return 0 - flag is not set, 1 - flag is set
+*/
+__STATIC_INLINE uint32_t PTIM_GetEventFlag(void)
+{
+  return (PTIM->ISR & 1UL);
+}
+
+/** \brief Clear the event flag in the timer's ISR register.
+*/
+__STATIC_INLINE void PTIM_ClearEventFlag(void)
+{
+  PTIM->ISR = 1;
+}
+#endif
+#endif
+
+/* ##########################  MMU functions  ###################################### */
+
+#define SECTION_DESCRIPTOR      (0x2)
+#define SECTION_MASK            (0xFFFFFFFC)
+
+#define SECTION_TEXCB_MASK      (0xFFFF8FF3)
+#define SECTION_B_SHIFT         (2)
+#define SECTION_C_SHIFT         (3)
+#define SECTION_TEX0_SHIFT      (12)
+#define SECTION_TEX1_SHIFT      (13)
+#define SECTION_TEX2_SHIFT      (14)
+
+#define SECTION_XN_MASK         (0xFFFFFFEF)
+#define SECTION_XN_SHIFT        (4)
+
+#define SECTION_DOMAIN_MASK     (0xFFFFFE1F)
+#define SECTION_DOMAIN_SHIFT    (5)
+
+#define SECTION_P_MASK          (0xFFFFFDFF)
+#define SECTION_P_SHIFT         (9)
+
+#define SECTION_AP_MASK         (0xFFFF73FF)
+#define SECTION_AP_SHIFT        (10)
+#define SECTION_AP2_SHIFT       (15)
+
+#define SECTION_S_MASK          (0xFFFEFFFF)
+#define SECTION_S_SHIFT         (16)
+
+#define SECTION_NG_MASK         (0xFFFDFFFF)
+#define SECTION_NG_SHIFT        (17)
+
+#define SECTION_NS_MASK         (0xFFF7FFFF)
+#define SECTION_NS_SHIFT        (19)
+
+#define PAGE_L1_DESCRIPTOR      (0x1)
+#define PAGE_L1_MASK            (0xFFFFFFFC)
+
+#define PAGE_L2_4K_DESC         (0x2)
+#define PAGE_L2_4K_MASK         (0xFFFFFFFD)
+
+#define PAGE_L2_64K_DESC        (0x1)
+#define PAGE_L2_64K_MASK        (0xFFFFFFFC)
+
+#define PAGE_4K_TEXCB_MASK      (0xFFFFFE33)
+#define PAGE_4K_B_SHIFT         (2)
+#define PAGE_4K_C_SHIFT         (3)
+#define PAGE_4K_TEX0_SHIFT      (6)
+#define PAGE_4K_TEX1_SHIFT      (7)
+#define PAGE_4K_TEX2_SHIFT      (8)
+
+#define PAGE_64K_TEXCB_MASK     (0xFFFF8FF3)
+#define PAGE_64K_B_SHIFT        (2)
+#define PAGE_64K_C_SHIFT        (3)
+#define PAGE_64K_TEX0_SHIFT     (12)
+#define PAGE_64K_TEX1_SHIFT     (13)
+#define PAGE_64K_TEX2_SHIFT     (14)
+
+#define PAGE_TEXCB_MASK         (0xFFFF8FF3)
+#define PAGE_B_SHIFT            (2)
+#define PAGE_C_SHIFT            (3)
+#define PAGE_TEX_SHIFT          (12)
+
+#define PAGE_XN_4K_MASK         (0xFFFFFFFE)
+#define PAGE_XN_4K_SHIFT        (0)
+#define PAGE_XN_64K_MASK        (0xFFFF7FFF)
+#define PAGE_XN_64K_SHIFT       (15)
+
+#define PAGE_DOMAIN_MASK        (0xFFFFFE1F)
+#define PAGE_DOMAIN_SHIFT       (5)
+
+#define PAGE_P_MASK             (0xFFFFFDFF)
+#define PAGE_P_SHIFT            (9)
+
+#define PAGE_AP_MASK            (0xFFFFFDCF)
+#define PAGE_AP_SHIFT           (4)
+#define PAGE_AP2_SHIFT          (9)
+
+#define PAGE_S_MASK             (0xFFFFFBFF)
+#define PAGE_S_SHIFT            (10)
+
+#define PAGE_NG_MASK            (0xFFFFF7FF)
+#define PAGE_NG_SHIFT           (11)
+
+#define PAGE_NS_MASK            (0xFFFFFFF7)
+#define PAGE_NS_SHIFT           (3)
+
+#define OFFSET_1M               (0x00100000)
+#define OFFSET_64K              (0x00010000)
+#define OFFSET_4K               (0x00001000)
+
+#define DESCRIPTOR_FAULT        (0x00000000)
+
+/* Attributes enumerations */
+
+/* Region size attributes */
+typedef enum
+{
+  SECTION,
+  PAGE_4k,
+  PAGE_64k,
+} mmu_region_size_Type;
+
+/*
Region type attributes */ +typedef enum +{ + NORMAL, + DEVICE, + SHARED_DEVICE, + NON_SHARED_DEVICE, + STRONGLY_ORDERED +} mmu_memory_Type; + +/* Region cacheability attributes */ +typedef enum +{ + NON_CACHEABLE, + WB_WA, + WT, + WB_NO_WA, +} mmu_cacheability_Type; + +/* Region parity check attributes */ +typedef enum +{ + ECC_DISABLED, + ECC_ENABLED, +} mmu_ecc_check_Type; + +/* Region execution attributes */ +typedef enum +{ + EXECUTE, + NON_EXECUTE, +} mmu_execute_Type; + +/* Region global attributes */ +typedef enum +{ + GLOBAL, + NON_GLOBAL, +} mmu_global_Type; + +/* Region shareability attributes */ +typedef enum +{ + NON_SHARED, + SHARED, +} mmu_shared_Type; + +/* Region security attributes */ +typedef enum +{ + SECURE, + NON_SECURE, +} mmu_secure_Type; + +/* Region access attributes */ +typedef enum +{ + NO_ACCESS, + RW, + READ, +} mmu_access_Type; + +/* Memory Region definition */ +typedef struct RegionStruct { + mmu_region_size_Type rg_t; + mmu_memory_Type mem_t; + uint8_t domain; + mmu_cacheability_Type inner_norm_t; + mmu_cacheability_Type outer_norm_t; + mmu_ecc_check_Type e_t; + mmu_execute_Type xn_t; + mmu_global_Type g_t; + mmu_secure_Type sec_t; + mmu_access_Type priv_t; + mmu_access_Type user_t; + mmu_shared_Type sh_t; + +} mmu_region_attributes_Type; + +//Following macros define the descriptors and attributes +//Sect_Normal. Outer & inner wb/wa, non-shareable, executable, rw, domain 0 +#define section_normal(descriptor_l1, region) region.rg_t = SECTION; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = WB_WA; \ + region.outer_norm_t = WB_WA; \ + region.mem_t = NORMAL; \ + region.sec_t = SECURE; \ + region.xn_t = EXECUTE; \ + region.priv_t = RW; \ + region.user_t = RW; \ + region.sh_t = NON_SHARED; \ + MMU_GetSectionDescriptor(&descriptor_l1, region); + +//Sect_Normal_NC. Outer & inner non-cacheable, non-shareable, executable, rw, domain 0 +#define section_normal_nc(descriptor_l1, region) region.rg_t = SECTION; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = NON_CACHEABLE; \ + region.outer_norm_t = NON_CACHEABLE; \ + region.mem_t = NORMAL; \ + region.sec_t = SECURE; \ + region.xn_t = EXECUTE; \ + region.priv_t = RW; \ + region.user_t = RW; \ + region.sh_t = NON_SHARED; \ + MMU_GetSectionDescriptor(&descriptor_l1, region); + +//Sect_Normal_Cod. Outer & inner wb/wa, non-shareable, executable, ro, domain 0 +#define section_normal_cod(descriptor_l1, region) region.rg_t = SECTION; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = WB_WA; \ + region.outer_norm_t = WB_WA; \ + region.mem_t = NORMAL; \ + region.sec_t = SECURE; \ + region.xn_t = EXECUTE; \ + region.priv_t = READ; \ + region.user_t = READ; \ + region.sh_t = NON_SHARED; \ + MMU_GetSectionDescriptor(&descriptor_l1, region); + +//Sect_Normal_RO. Sect_Normal_Cod, but not executable +#define section_normal_ro(descriptor_l1, region) region.rg_t = SECTION; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = WB_WA; \ + region.outer_norm_t = WB_WA; \ + region.mem_t = NORMAL; \ + region.sec_t = SECURE; \ + region.xn_t = NON_EXECUTE; \ + region.priv_t = READ; \ + region.user_t = READ; \ + region.sh_t = NON_SHARED; \ + MMU_GetSectionDescriptor(&descriptor_l1, region); + +//Sect_Normal_RW. 
Sect_Normal_Cod, but writeable and not executable +#define section_normal_rw(descriptor_l1, region) region.rg_t = SECTION; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = WB_WA; \ + region.outer_norm_t = WB_WA; \ + region.mem_t = NORMAL; \ + region.sec_t = SECURE; \ + region.xn_t = NON_EXECUTE; \ + region.priv_t = RW; \ + region.user_t = RW; \ + region.sh_t = NON_SHARED; \ + MMU_GetSectionDescriptor(&descriptor_l1, region); +//Sect_SO. Strongly-ordered (therefore shareable), not executable, rw, domain 0, base addr 0 +#define section_so(descriptor_l1, region) region.rg_t = SECTION; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = NON_CACHEABLE; \ + region.outer_norm_t = NON_CACHEABLE; \ + region.mem_t = STRONGLY_ORDERED; \ + region.sec_t = SECURE; \ + region.xn_t = NON_EXECUTE; \ + region.priv_t = RW; \ + region.user_t = RW; \ + region.sh_t = NON_SHARED; \ + MMU_GetSectionDescriptor(&descriptor_l1, region); + +//Sect_Device_RO. Device, non-shareable, non-executable, ro, domain 0, base addr 0 +#define section_device_ro(descriptor_l1, region) region.rg_t = SECTION; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = NON_CACHEABLE; \ + region.outer_norm_t = NON_CACHEABLE; \ + region.mem_t = STRONGLY_ORDERED; \ + region.sec_t = SECURE; \ + region.xn_t = NON_EXECUTE; \ + region.priv_t = READ; \ + region.user_t = READ; \ + region.sh_t = NON_SHARED; \ + MMU_GetSectionDescriptor(&descriptor_l1, region); + +//Sect_Device_RW. Sect_Device_RO, but writeable +#define section_device_rw(descriptor_l1, region) region.rg_t = SECTION; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = NON_CACHEABLE; \ + region.outer_norm_t = NON_CACHEABLE; \ + region.mem_t = STRONGLY_ORDERED; \ + region.sec_t = SECURE; \ + region.xn_t = NON_EXECUTE; \ + region.priv_t = RW; \ + region.user_t = RW; \ + region.sh_t = NON_SHARED; \ + MMU_GetSectionDescriptor(&descriptor_l1, region); +//Page_4k_Device_RW. Shared device, not executable, rw, domain 0 +#define page4k_device_rw(descriptor_l1, descriptor_l2, region) region.rg_t = PAGE_4k; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = NON_CACHEABLE; \ + region.outer_norm_t = NON_CACHEABLE; \ + region.mem_t = SHARED_DEVICE; \ + region.sec_t = SECURE; \ + region.xn_t = NON_EXECUTE; \ + region.priv_t = RW; \ + region.user_t = RW; \ + region.sh_t = NON_SHARED; \ + MMU_GetPageDescriptor(&descriptor_l1, &descriptor_l2, region); + +//Page_64k_Device_RW. Shared device, not executable, rw, domain 0 +#define page64k_device_rw(descriptor_l1, descriptor_l2, region) region.rg_t = PAGE_64k; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = NON_CACHEABLE; \ + region.outer_norm_t = NON_CACHEABLE; \ + region.mem_t = SHARED_DEVICE; \ + region.sec_t = SECURE; \ + region.xn_t = NON_EXECUTE; \ + region.priv_t = RW; \ + region.user_t = RW; \ + region.sh_t = NON_SHARED; \ + MMU_GetPageDescriptor(&descriptor_l1, &descriptor_l2, region); + +/** \brief Set section execution-never attribute + + \param [out] descriptor_l1 L1 descriptor. + \param [in] xn Section execution-never attribute : EXECUTE , NON_EXECUTE. 
+ + \return 0 +*/ +__STATIC_INLINE int MMU_XNSection(uint32_t *descriptor_l1, mmu_execute_Type xn) +{ + *descriptor_l1 &= SECTION_XN_MASK; + *descriptor_l1 |= ((xn & 0x1) << SECTION_XN_SHIFT); + return 0; +} + +/** \brief Set section domain + + \param [out] descriptor_l1 L1 descriptor. + \param [in] domain Section domain + + \return 0 +*/ +__STATIC_INLINE int MMU_DomainSection(uint32_t *descriptor_l1, uint8_t domain) +{ + *descriptor_l1 &= SECTION_DOMAIN_MASK; + *descriptor_l1 |= ((domain & 0xF) << SECTION_DOMAIN_SHIFT); + return 0; +} + +/** \brief Set section parity check + + \param [out] descriptor_l1 L1 descriptor. + \param [in] p_bit Parity check: ECC_DISABLED, ECC_ENABLED + + \return 0 +*/ +__STATIC_INLINE int MMU_PSection(uint32_t *descriptor_l1, mmu_ecc_check_Type p_bit) +{ + *descriptor_l1 &= SECTION_P_MASK; + *descriptor_l1 |= ((p_bit & 0x1) << SECTION_P_SHIFT); + return 0; +} + +/** \brief Set section access privileges + + \param [out] descriptor_l1 L1 descriptor. + \param [in] user User Level Access: NO_ACCESS, RW, READ + \param [in] priv Privilege Level Access: NO_ACCESS, RW, READ + \param [in] afe Access flag enable + + \return 0 +*/ +__STATIC_INLINE int MMU_APSection(uint32_t *descriptor_l1, mmu_access_Type user, mmu_access_Type priv, uint32_t afe) +{ + uint32_t ap = 0; + + if (afe == 0) { //full access + if ((priv == NO_ACCESS) && (user == NO_ACCESS)) { ap = 0x0; } + else if ((priv == RW) && (user == NO_ACCESS)) { ap = 0x1; } + else if ((priv == RW) && (user == READ)) { ap = 0x2; } + else if ((priv == RW) && (user == RW)) { ap = 0x3; } + else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; } + else if ((priv == READ) && (user == READ)) { ap = 0x7; } + } + + else { //Simplified access + if ((priv == RW) && (user == NO_ACCESS)) { ap = 0x1; } + else if ((priv == RW) && (user == RW)) { ap = 0x3; } + else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; } + else if ((priv == READ) && (user == READ)) { ap = 0x7; } + } + + *descriptor_l1 &= SECTION_AP_MASK; + *descriptor_l1 |= (ap & 0x3) << SECTION_AP_SHIFT; + *descriptor_l1 |= ((ap & 0x4)>>2) << SECTION_AP2_SHIFT; + + return 0; +} + +/** \brief Set section shareability + + \param [out] descriptor_l1 L1 descriptor. + \param [in] s_bit Section shareability: NON_SHARED, SHARED + + \return 0 +*/ +__STATIC_INLINE int MMU_SharedSection(uint32_t *descriptor_l1, mmu_shared_Type s_bit) +{ + *descriptor_l1 &= SECTION_S_MASK; + *descriptor_l1 |= ((s_bit & 0x1) << SECTION_S_SHIFT); + return 0; +} + +/** \brief Set section Global attribute + + \param [out] descriptor_l1 L1 descriptor. + \param [in] g_bit Section attribute: GLOBAL, NON_GLOBAL + + \return 0 +*/ +__STATIC_INLINE int MMU_GlobalSection(uint32_t *descriptor_l1, mmu_global_Type g_bit) +{ + *descriptor_l1 &= SECTION_NG_MASK; + *descriptor_l1 |= ((g_bit & 0x1) << SECTION_NG_SHIFT); + return 0; +} + +/** \brief Set section Security attribute + + \param [out] descriptor_l1 L1 descriptor. + \param [in] s_bit Section Security attribute: SECURE, NON_SECURE + + \return 0 +*/ +__STATIC_INLINE int MMU_SecureSection(uint32_t *descriptor_l1, mmu_secure_Type s_bit) +{ + *descriptor_l1 &= SECTION_NS_MASK; + *descriptor_l1 |= ((s_bit & 0x1) << SECTION_NS_SHIFT); + return 0; +} + +/* Page 4k or 64k */ +/** \brief Set 4k/64k page execution-never attribute + + \param [out] descriptor_l2 L2 descriptor. + \param [in] xn Page execution-never attribute : EXECUTE , NON_EXECUTE. 
+ \param [in] page Page size: PAGE_4k, PAGE_64k, + + \return 0 +*/ +__STATIC_INLINE int MMU_XNPage(uint32_t *descriptor_l2, mmu_execute_Type xn, mmu_region_size_Type page) +{ + if (page == PAGE_4k) + { + *descriptor_l2 &= PAGE_XN_4K_MASK; + *descriptor_l2 |= ((xn & 0x1) << PAGE_XN_4K_SHIFT); + } + else + { + *descriptor_l2 &= PAGE_XN_64K_MASK; + *descriptor_l2 |= ((xn & 0x1) << PAGE_XN_64K_SHIFT); + } + return 0; +} + +/** \brief Set 4k/64k page domain + + \param [out] descriptor_l1 L1 descriptor. + \param [in] domain Page domain + + \return 0 +*/ +__STATIC_INLINE int MMU_DomainPage(uint32_t *descriptor_l1, uint8_t domain) +{ + *descriptor_l1 &= PAGE_DOMAIN_MASK; + *descriptor_l1 |= ((domain & 0xf) << PAGE_DOMAIN_SHIFT); + return 0; +} + +/** \brief Set 4k/64k page parity check + + \param [out] descriptor_l1 L1 descriptor. + \param [in] p_bit Parity check: ECC_DISABLED, ECC_ENABLED + + \return 0 +*/ +__STATIC_INLINE int MMU_PPage(uint32_t *descriptor_l1, mmu_ecc_check_Type p_bit) +{ + *descriptor_l1 &= SECTION_P_MASK; + *descriptor_l1 |= ((p_bit & 0x1) << SECTION_P_SHIFT); + return 0; +} + +/** \brief Set 4k/64k page access privileges + + \param [out] descriptor_l2 L2 descriptor. + \param [in] user User Level Access: NO_ACCESS, RW, READ + \param [in] priv Privilege Level Access: NO_ACCESS, RW, READ + \param [in] afe Access flag enable + + \return 0 +*/ +__STATIC_INLINE int MMU_APPage(uint32_t *descriptor_l2, mmu_access_Type user, mmu_access_Type priv, uint32_t afe) +{ + uint32_t ap = 0; + + if (afe == 0) { //full access + if ((priv == NO_ACCESS) && (user == NO_ACCESS)) { ap = 0x0; } + else if ((priv == RW) && (user == NO_ACCESS)) { ap = 0x1; } + else if ((priv == RW) && (user == READ)) { ap = 0x2; } + else if ((priv == RW) && (user == RW)) { ap = 0x3; } + else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; } + else if ((priv == READ) && (user == READ)) { ap = 0x6; } + } + + else { //Simplified access + if ((priv == RW) && (user == NO_ACCESS)) { ap = 0x1; } + else if ((priv == RW) && (user == RW)) { ap = 0x3; } + else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; } + else if ((priv == READ) && (user == READ)) { ap = 0x7; } + } + + *descriptor_l2 &= PAGE_AP_MASK; + *descriptor_l2 |= (ap & 0x3) << PAGE_AP_SHIFT; + *descriptor_l2 |= ((ap & 0x4)>>2) << PAGE_AP2_SHIFT; + + return 0; +} + +/** \brief Set 4k/64k page shareability + + \param [out] descriptor_l2 L2 descriptor. + \param [in] s_bit 4k/64k page shareability: NON_SHARED, SHARED + + \return 0 +*/ +__STATIC_INLINE int MMU_SharedPage(uint32_t *descriptor_l2, mmu_shared_Type s_bit) +{ + *descriptor_l2 &= PAGE_S_MASK; + *descriptor_l2 |= ((s_bit & 0x1) << PAGE_S_SHIFT); + return 0; +} + +/** \brief Set 4k/64k page Global attribute + + \param [out] descriptor_l2 L2 descriptor. + \param [in] g_bit 4k/64k page attribute: GLOBAL, NON_GLOBAL + + \return 0 +*/ +__STATIC_INLINE int MMU_GlobalPage(uint32_t *descriptor_l2, mmu_global_Type g_bit) +{ + *descriptor_l2 &= PAGE_NG_MASK; + *descriptor_l2 |= ((g_bit & 0x1) << PAGE_NG_SHIFT); + return 0; +} + +/** \brief Set 4k/64k page Security attribute + + \param [out] descriptor_l1 L1 descriptor. + \param [in] s_bit 4k/64k page Security attribute: SECURE, NON_SECURE + + \return 0 +*/ +__STATIC_INLINE int MMU_SecurePage(uint32_t *descriptor_l1, mmu_secure_Type s_bit) +{ + *descriptor_l1 &= PAGE_NS_MASK; + *descriptor_l1 |= ((s_bit & 0x1) << PAGE_NS_SHIFT); + return 0; +} + +/** \brief Set Section memory attributes + + \param [out] descriptor_l1 L1 descriptor. 
+  \param [in]  mem            Section memory type: NORMAL, DEVICE, SHARED_DEVICE, NON_SHARED_DEVICE, STRONGLY_ORDERED
+  \param [in]  outer          Outer cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA,
+  \param [in]  inner          Inner cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA,
+
+  \return 0
+*/
+__STATIC_INLINE int MMU_MemorySection(uint32_t *descriptor_l1, mmu_memory_Type mem, mmu_cacheability_Type outer, mmu_cacheability_Type inner)
+{
+  *descriptor_l1 &= SECTION_TEXCB_MASK;
+
+  if (STRONGLY_ORDERED == mem)
+  {
+    return 0;
+  }
+  else if (SHARED_DEVICE == mem)
+  {
+    *descriptor_l1 |= (1 << SECTION_B_SHIFT);
+  }
+  else if (NON_SHARED_DEVICE == mem)
+  {
+    *descriptor_l1 |= (1 << SECTION_TEX1_SHIFT);
+  }
+  else if (NORMAL == mem)
+  {
+    *descriptor_l1 |= 1 << SECTION_TEX2_SHIFT;
+    switch(inner)
+    {
+      case NON_CACHEABLE:
+        break;
+      case WB_WA:
+        *descriptor_l1 |= (1 << SECTION_B_SHIFT);
+        break;
+      case WT:
+        *descriptor_l1 |= 1 << SECTION_C_SHIFT;
+        break;
+      case WB_NO_WA:
+        *descriptor_l1 |= (1 << SECTION_B_SHIFT) | (1 << SECTION_C_SHIFT);
+        break;
+    }
+    switch(outer)
+    {
+      case NON_CACHEABLE:
+        break;
+      case WB_WA:
+        *descriptor_l1 |= (1 << SECTION_TEX0_SHIFT);
+        break;
+      case WT:
+        *descriptor_l1 |= 1 << SECTION_TEX1_SHIFT;
+        break;
+      case WB_NO_WA:
+        *descriptor_l1 |= (1 << SECTION_TEX0_SHIFT) | (1 << SECTION_TEX1_SHIFT);
+        break;
+    }
+  }
+  return 0;
+}
+
+/** \brief  Set 4k/64k page memory attributes
+
+  \param [out] descriptor_l2  L2 descriptor.
+  \param [in]  mem            4k/64k page memory type: NORMAL, DEVICE, SHARED_DEVICE, NON_SHARED_DEVICE, STRONGLY_ORDERED
+  \param [in]  outer          Outer cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA,
+  \param [in]  inner          Inner cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA,
+  \param [in]  page           Page size
+
+  \return 0
+*/
+__STATIC_INLINE int MMU_MemoryPage(uint32_t *descriptor_l2, mmu_memory_Type mem, mmu_cacheability_Type outer, mmu_cacheability_Type inner, mmu_region_size_Type page)
+{
+  *descriptor_l2 &= PAGE_4K_TEXCB_MASK;
+
+  if (page == PAGE_64k)
+  {
+    //same as section
+    MMU_MemorySection(descriptor_l2, mem, outer, inner);
+  }
+  else
+  {
+    if (STRONGLY_ORDERED == mem)
+    {
+      return 0;
+    }
+    else if (SHARED_DEVICE == mem)
+    {
+      *descriptor_l2 |= (1 << PAGE_4K_B_SHIFT);
+    }
+    else if (NON_SHARED_DEVICE == mem)
+    {
+      *descriptor_l2 |= (1 << PAGE_4K_TEX1_SHIFT);
+    }
+    else if (NORMAL == mem)
+    {
+      *descriptor_l2 |= 1 << PAGE_4K_TEX2_SHIFT;
+      switch(inner)
+      {
+        case NON_CACHEABLE:
+          break;
+        case WB_WA:
+          *descriptor_l2 |= (1 << PAGE_4K_B_SHIFT);
+          break;
+        case WT:
+          *descriptor_l2 |= 1 << PAGE_4K_C_SHIFT;
+          break;
+        case WB_NO_WA:
+          *descriptor_l2 |= (1 << PAGE_4K_B_SHIFT) | (1 << PAGE_4K_C_SHIFT);
+          break;
+      }
+      switch(outer)
+      {
+        case NON_CACHEABLE:
+          break;
+        case WB_WA:
+          *descriptor_l2 |= (1 << PAGE_4K_TEX0_SHIFT);
+          break;
+        case WT:
+          *descriptor_l2 |= 1 << PAGE_4K_TEX1_SHIFT;
+          break;
+        case WB_NO_WA:
+          *descriptor_l2 |= (1 << PAGE_4K_TEX0_SHIFT) | (1 << PAGE_4K_TEX1_SHIFT);
+          break;
+      }
+    }
+  }
+
+  return 0;
+}
+
+/** \brief  Create a L1 section descriptor
+
+  \param [out] descriptor  L1 descriptor
+  \param [in]  reg         Section attributes
+
+  \return 0
+*/
+__STATIC_INLINE int MMU_GetSectionDescriptor(uint32_t *descriptor, mmu_region_attributes_Type reg)
+{
+  *descriptor  = 0;
+
+  MMU_MemorySection(descriptor, reg.mem_t, reg.outer_norm_t, reg.inner_norm_t);
+  MMU_XNSection(descriptor,reg.xn_t);
+  MMU_DomainSection(descriptor, reg.domain);
+  MMU_PSection(descriptor, reg.e_t);
+  MMU_APSection(descriptor, reg.priv_t, reg.user_t, 1);
+  MMU_SharedSection(descriptor,reg.sh_t);
+
MMU_GlobalSection(descriptor,reg.g_t); + MMU_SecureSection(descriptor,reg.sec_t); + *descriptor &= SECTION_MASK; + *descriptor |= SECTION_DESCRIPTOR; + + return 0; +} + + +/** \brief Create a L1 and L2 4k/64k page descriptor + + \param [out] descriptor L1 descriptor + \param [out] descriptor2 L2 descriptor + \param [in] reg 4k/64k page attributes + + \return 0 +*/ +__STATIC_INLINE int MMU_GetPageDescriptor(uint32_t *descriptor, uint32_t *descriptor2, mmu_region_attributes_Type reg) +{ + *descriptor = 0; + *descriptor2 = 0; + + switch (reg.rg_t) + { + case PAGE_4k: + MMU_MemoryPage(descriptor2, reg.mem_t, reg.outer_norm_t, reg.inner_norm_t, PAGE_4k); + MMU_XNPage(descriptor2, reg.xn_t, PAGE_4k); + MMU_DomainPage(descriptor, reg.domain); + MMU_PPage(descriptor, reg.e_t); + MMU_APPage(descriptor2, reg.priv_t, reg.user_t, 1); + MMU_SharedPage(descriptor2,reg.sh_t); + MMU_GlobalPage(descriptor2,reg.g_t); + MMU_SecurePage(descriptor,reg.sec_t); + *descriptor &= PAGE_L1_MASK; + *descriptor |= PAGE_L1_DESCRIPTOR; + *descriptor2 &= PAGE_L2_4K_MASK; + *descriptor2 |= PAGE_L2_4K_DESC; + break; + + case PAGE_64k: + MMU_MemoryPage(descriptor2, reg.mem_t, reg.outer_norm_t, reg.inner_norm_t, PAGE_64k); + MMU_XNPage(descriptor2, reg.xn_t, PAGE_64k); + MMU_DomainPage(descriptor, reg.domain); + MMU_PPage(descriptor, reg.e_t); + MMU_APPage(descriptor2, reg.priv_t, reg.user_t, 1); + MMU_SharedPage(descriptor2,reg.sh_t); + MMU_GlobalPage(descriptor2,reg.g_t); + MMU_SecurePage(descriptor,reg.sec_t); + *descriptor &= PAGE_L1_MASK; + *descriptor |= PAGE_L1_DESCRIPTOR; + *descriptor2 &= PAGE_L2_64K_MASK; + *descriptor2 |= PAGE_L2_64K_DESC; + break; + + case SECTION: + //error + break; + } + + return 0; +} + +/** \brief Create a 1MB Section + + \param [in] ttb Translation table base address + \param [in] base_address Section base address + \param [in] count Number of sections to create + \param [in] descriptor_l1 L1 descriptor (region attributes) + +*/ +__STATIC_INLINE void MMU_TTSection(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1) +{ + uint32_t offset; + uint32_t entry; + uint32_t i; + + offset = base_address >> 20; + entry = (base_address & 0xFFF00000) | descriptor_l1; + + //4 bytes aligned + ttb = ttb + offset; + + for (i = 0; i < count; i++ ) + { + //4 bytes aligned + *ttb++ = entry; + entry += OFFSET_1M; + } +} + +/** \brief Create a 4k page entry + + \param [in] ttb L1 table base address + \param [in] base_address 4k base address + \param [in] count Number of 4k pages to create + \param [in] descriptor_l1 L1 descriptor (region attributes) + \param [in] ttb_l2 L2 table base address + \param [in] descriptor_l2 L2 descriptor (region attributes) + +*/ +__STATIC_INLINE void MMU_TTPage4k(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1, uint32_t *ttb_l2, uint32_t descriptor_l2 ) +{ + + uint32_t offset, offset2; + uint32_t entry, entry2; + uint32_t i; + + offset = base_address >> 20; + entry = ((int)ttb_l2 & 0xFFFFFC00) | descriptor_l1; + + //4 bytes aligned + ttb += offset; + //create l1_entry + *ttb = entry; + + offset2 = (base_address & 0xff000) >> 12; + ttb_l2 += offset2; + entry2 = (base_address & 0xFFFFF000) | descriptor_l2; + for (i = 0; i < count; i++ ) + { + //4 bytes aligned + *ttb_l2++ = entry2; + entry2 += OFFSET_4K; + } +} + +/** \brief Create a 64k page entry + + \param [in] ttb L1 table base address + \param [in] base_address 64k base address + \param [in] count Number of 64k pages to create + \param [in] descriptor_l1 L1 descriptor (region 
attributes) + \param [in] ttb_l2 L2 table base address + \param [in] descriptor_l2 L2 descriptor (region attributes) + +*/ +__STATIC_INLINE void MMU_TTPage64k(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1, uint32_t *ttb_l2, uint32_t descriptor_l2 ) +{ + uint32_t offset, offset2; + uint32_t entry, entry2; + uint32_t i,j; + + + offset = base_address >> 20; + entry = ((int)ttb_l2 & 0xFFFFFC00) | descriptor_l1; + + //4 bytes aligned + ttb += offset; + //create l1_entry + *ttb = entry; + + offset2 = (base_address & 0xff000) >> 12; + ttb_l2 += offset2; + entry2 = (base_address & 0xFFFF0000) | descriptor_l2; + for (i = 0; i < count; i++ ) + { + //create 16 entries + for (j = 0; j < 16; j++) + { + //4 bytes aligned + *ttb_l2++ = entry2; + } + entry2 += OFFSET_64K; + } +} + +/** \brief Enable MMU +*/ +__STATIC_INLINE void MMU_Enable(void) +{ + // Set M bit 0 to enable the MMU + // Set AFE bit to enable simplified access permissions model + // Clear TRE bit to disable TEX remap and A bit to disable strict alignment fault checking + __set_SCTLR( (__get_SCTLR() & ~(1 << 28) & ~(1 << 1)) | 1 | (1 << 29)); + __ISB(); +} + +/** \brief Disable MMU +*/ +__STATIC_INLINE void MMU_Disable(void) +{ + // Clear M bit 0 to disable the MMU + __set_SCTLR( __get_SCTLR() & ~1); + __ISB(); +} + +/** \brief Invalidate entire unified TLB +*/ + +__STATIC_INLINE void MMU_InvalidateTLB(void) +{ + __set_TLBIALL(0); + __DSB(); //ensure completion of the invalidation + __ISB(); //ensure instruction fetch path sees new state +} + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CA_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/midi-dials/Drivers/CMSIS/Core_A/Include/irq_ctrl.h b/midi-dials/Drivers/CMSIS/Core_A/Include/irq_ctrl.h new file mode 100644 index 0000000..a6d313d --- /dev/null +++ b/midi-dials/Drivers/CMSIS/Core_A/Include/irq_ctrl.h @@ -0,0 +1,186 @@ +/**************************************************************************//** + * @file irq_ctrl.h + * @brief Interrupt Controller API header file + * @version V1.0.0 + * @date 23. June 2017 + ******************************************************************************/ +/* + * Copyright (c) 2017 ARM Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#if defined ( __ICCARM__ )
+  #pragma system_include         /* treat file as system include file for MISRA check */
+#elif defined (__clang__)
+  #pragma clang system_header    /* treat file as system include file */
+#endif
+
+#ifndef IRQ_CTRL_H_
+#define IRQ_CTRL_H_
+
+#include <stdint.h>
+
+#ifndef IRQHANDLER_T
+#define IRQHANDLER_T
+/// Interrupt handler data type
+typedef void (*IRQHandler_t) (void);
+#endif
+
+#ifndef IRQN_ID_T
+#define IRQN_ID_T
+/// Interrupt ID number data type
+typedef int32_t IRQn_ID_t;
+#endif
+
+/* Interrupt mode bit-masks */
+#define IRQ_MODE_TRIG_Pos           (0U)
+#define IRQ_MODE_TRIG_Msk           (0x07UL /*<< IRQ_MODE_TRIG_Pos*/)
+#define IRQ_MODE_TRIG_LEVEL         (0x00UL /*<< IRQ_MODE_TRIG_Pos*/) ///< Trigger: level triggered interrupt
+#define IRQ_MODE_TRIG_LEVEL_LOW     (0x01UL /*<< IRQ_MODE_TRIG_Pos*/) ///< Trigger: low level triggered interrupt
+#define IRQ_MODE_TRIG_LEVEL_HIGH    (0x02UL /*<< IRQ_MODE_TRIG_Pos*/) ///< Trigger: high level triggered interrupt
+#define IRQ_MODE_TRIG_EDGE          (0x04UL /*<< IRQ_MODE_TRIG_Pos*/) ///< Trigger: edge triggered interrupt
+#define IRQ_MODE_TRIG_EDGE_RISING   (0x05UL /*<< IRQ_MODE_TRIG_Pos*/) ///< Trigger: rising edge triggered interrupt
+#define IRQ_MODE_TRIG_EDGE_FALLING  (0x06UL /*<< IRQ_MODE_TRIG_Pos*/) ///< Trigger: falling edge triggered interrupt
+#define IRQ_MODE_TRIG_EDGE_BOTH     (0x07UL /*<< IRQ_MODE_TRIG_Pos*/) ///< Trigger: rising and falling edge triggered interrupt
+
+#define IRQ_MODE_TYPE_Pos           (3U)
+#define IRQ_MODE_TYPE_Msk           (0x01UL << IRQ_MODE_TYPE_Pos)
+#define IRQ_MODE_TYPE_IRQ           (0x00UL << IRQ_MODE_TYPE_Pos)     ///< Type: interrupt source triggers CPU IRQ line
+#define IRQ_MODE_TYPE_FIQ           (0x01UL << IRQ_MODE_TYPE_Pos)     ///< Type: interrupt source triggers CPU FIQ line
+
+#define IRQ_MODE_DOMAIN_Pos         (4U)
+#define IRQ_MODE_DOMAIN_Msk         (0x01UL << IRQ_MODE_DOMAIN_Pos)
+#define IRQ_MODE_DOMAIN_NONSECURE   (0x00UL << IRQ_MODE_DOMAIN_Pos)   ///< Domain: interrupt is targeting non-secure domain
+#define IRQ_MODE_DOMAIN_SECURE      (0x01UL << IRQ_MODE_DOMAIN_Pos)   ///< Domain: interrupt is targeting secure domain
+
+#define IRQ_MODE_CPU_Pos            (5U)
+#define IRQ_MODE_CPU_Msk            (0xFFUL << IRQ_MODE_CPU_Pos)
+#define IRQ_MODE_CPU_ALL            (0x00UL << IRQ_MODE_CPU_Pos)      ///< CPU: interrupt targets all CPUs
+#define IRQ_MODE_CPU_0              (0x01UL << IRQ_MODE_CPU_Pos)      ///< CPU: interrupt targets CPU 0
+#define IRQ_MODE_CPU_1              (0x02UL << IRQ_MODE_CPU_Pos)      ///< CPU: interrupt targets CPU 1
+#define IRQ_MODE_CPU_2              (0x04UL << IRQ_MODE_CPU_Pos)      ///< CPU: interrupt targets CPU 2
+#define IRQ_MODE_CPU_3              (0x08UL << IRQ_MODE_CPU_Pos)      ///< CPU: interrupt targets CPU 3
+#define IRQ_MODE_CPU_4              (0x10UL << IRQ_MODE_CPU_Pos)      ///< CPU: interrupt targets CPU 4
+#define IRQ_MODE_CPU_5              (0x20UL << IRQ_MODE_CPU_Pos)      ///< CPU: interrupt targets CPU 5
+#define IRQ_MODE_CPU_6              (0x40UL << IRQ_MODE_CPU_Pos)      ///< CPU: interrupt targets CPU 6
+#define IRQ_MODE_CPU_7              (0x80UL << IRQ_MODE_CPU_Pos)      ///< CPU: interrupt targets CPU 7
+
+#define IRQ_MODE_ERROR              (0x80000000UL)                    ///< Bit indicating mode value error
+
+/* Interrupt priority bit-masks */
+#define IRQ_PRIORITY_Msk            (0x0000FFFFUL)                    ///< Interrupt priority value bit-mask
+#define IRQ_PRIORITY_ERROR          (0x80000000UL)                    ///< Bit indicating priority value error
+
+/// Initialize interrupt controller.
+/// \return 0 on success, -1 on error.
+int32_t IRQ_Initialize (void);
+
+/// Register interrupt handler.
+/// \param[in] irqn    interrupt ID number
+/// \param[in] handler interrupt handler function address
+/// \return 0 on success, -1 on error.
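+/// \note Usage sketch (editor's illustration, not from the original source): a handler
+/// is a plain <c>void (*)(void)</c> function, e.g. a hypothetical
+/// <c>void Timer_Handler(void)</c> registered with <c>IRQ_SetHandler(29, Timer_Handler);</c>,
+/// where 29 stands for a device-specific interrupt ID.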
+int32_t IRQ_SetHandler (IRQn_ID_t irqn, IRQHandler_t handler);
+
+/// Get the registered interrupt handler.
+/// \param[in] irqn interrupt ID number
+/// \return registered interrupt handler function address.
+IRQHandler_t IRQ_GetHandler (IRQn_ID_t irqn);
+
+/// Enable interrupt.
+/// \param[in] irqn interrupt ID number
+/// \return 0 on success, -1 on error.
+int32_t IRQ_Enable (IRQn_ID_t irqn);
+
+/// Disable interrupt.
+/// \param[in] irqn interrupt ID number
+/// \return 0 on success, -1 on error.
+int32_t IRQ_Disable (IRQn_ID_t irqn);
+
+/// Get interrupt enable state.
+/// \param[in] irqn interrupt ID number
+/// \return 0 - interrupt is disabled, 1 - interrupt is enabled.
+uint32_t IRQ_GetEnableState (IRQn_ID_t irqn);
+
+/// Configure interrupt request mode.
+/// \param[in] irqn interrupt ID number
+/// \param[in] mode mode configuration
+/// \return 0 on success, -1 on error.
+int32_t IRQ_SetMode (IRQn_ID_t irqn, uint32_t mode);
+
+/// Get interrupt mode configuration.
+/// \param[in] irqn interrupt ID number
+/// \return current interrupt mode configuration with optional IRQ_MODE_ERROR bit set.
+uint32_t IRQ_GetMode (IRQn_ID_t irqn);
+
+/// Get ID number of current interrupt request (IRQ).
+/// \return interrupt ID number.
+IRQn_ID_t IRQ_GetActiveIRQ (void);
+
+/// Get ID number of current fast interrupt request (FIQ).
+/// \return interrupt ID number.
+IRQn_ID_t IRQ_GetActiveFIQ (void);
+
+/// Signal end of interrupt processing.
+/// \param[in] irqn interrupt ID number
+/// \return 0 on success, -1 on error.
+int32_t IRQ_EndOfInterrupt (IRQn_ID_t irqn);
+
+/// Set interrupt pending flag.
+/// \param[in] irqn interrupt ID number
+/// \return 0 on success, -1 on error.
+int32_t IRQ_SetPending (IRQn_ID_t irqn);
+
+/// Get interrupt pending flag.
+/// \param[in] irqn interrupt ID number
+/// \return 0 - interrupt is not pending, 1 - interrupt is pending.
+uint32_t IRQ_GetPending (IRQn_ID_t irqn);
+
+/// Clear interrupt pending flag.
+/// \param[in] irqn interrupt ID number
+/// \return 0 on success, -1 on error.
+int32_t IRQ_ClearPending (IRQn_ID_t irqn);
+
+/// Set interrupt priority value.
+/// \param[in] irqn interrupt ID number
+/// \param[in] priority interrupt priority value
+/// \return 0 on success, -1 on error.
+int32_t IRQ_SetPriority (IRQn_ID_t irqn, uint32_t priority);
+
+/// Get interrupt priority.
+/// \param[in] irqn interrupt ID number
+/// \return current interrupt priority value with optional IRQ_PRIORITY_ERROR bit set.
+uint32_t IRQ_GetPriority (IRQn_ID_t irqn);
+
+/// Set priority masking threshold.
+/// \param[in] priority priority masking threshold value
+/// \return 0 on success, -1 on error.
+int32_t IRQ_SetPriorityMask (uint32_t priority);
+
+/// Get priority masking threshold.
+/// \return current priority masking threshold value with optional IRQ_PRIORITY_ERROR bit set.
+uint32_t IRQ_GetPriorityMask (void);
+
+/// Set priority grouping field split point.
+/// \param[in] bits number of MSB bits included in the group priority field comparison
+/// \return 0 on success, -1 on error.
+int32_t IRQ_SetPriorityGroupBits (uint32_t bits);
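
The declarations above form the complete controller-agnostic API. A hypothetical end-to-end setup, with TIMER0_IRQn_ID (38) and Timer0_Handler standing in for a device-specific interrupt ID and application handler:

    #include "irq_ctrl.h"

    #define TIMER0_IRQn_ID   ((IRQn_ID_t)38)    /* assumed device IRQ number */

    static void Timer0_Handler (void) {
      /* clear the peripheral's interrupt source here */
    }

    void Timer0_Setup (void) {
      IRQ_Initialize();                                  /* once, at startup */
      IRQ_SetHandler (TIMER0_IRQn_ID, Timer0_Handler);
      IRQ_SetPriority(TIMER0_IRQn_ID, 5U);               /* example priority */
      IRQ_SetMode    (TIMER0_IRQn_ID, IRQ_MODE_TRIG_LEVEL | IRQ_MODE_CPU_0);
      IRQ_Enable     (TIMER0_IRQn_ID);
    }

+
+/// Get priority grouping field split point.
+/// \return current number of MSB bits included in the group priority field comparison with
+/// optional IRQ_PRIORITY_ERROR bit set.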
+uint32_t IRQ_GetPriorityGroupBits (void);
+
+#endif // IRQ_CTRL_H_
diff --git a/midi-dials/Drivers/CMSIS/Core_A/Source/irq_ctrl_gic.c b/midi-dials/Drivers/CMSIS/Core_A/Source/irq_ctrl_gic.c
new file mode 100644
index 0000000..cf08a53
--- /dev/null
+++ b/midi-dials/Drivers/CMSIS/Core_A/Source/irq_ctrl_gic.c
@@ -0,0 +1,410 @@
+/**************************************************************************//**
+ * @file irq_ctrl_gic.c
+ * @brief Interrupt controller handling implementation for GIC
+ * @version V1.0.1
+ * @date 9. April 2018
+ ******************************************************************************/
+/*
+ * Copyright (c) 2017 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stddef.h>
+
+#include "RTE_Components.h"
+#include CMSIS_device_header
+
+#include "irq_ctrl.h"
+
+#if defined(__GIC_PRESENT) && (__GIC_PRESENT == 1U)
+
+/// Number of implemented interrupt lines
+#ifndef IRQ_GIC_LINE_COUNT
+#define IRQ_GIC_LINE_COUNT      (1020U)
+#endif
+
+static IRQHandler_t IRQTable[IRQ_GIC_LINE_COUNT] = { 0U };
+static uint32_t     IRQ_ID0;
+
+/// Initialize interrupt controller.
+__WEAK int32_t IRQ_Initialize (void) {
+  uint32_t i;
+
+  for (i = 0U; i < IRQ_GIC_LINE_COUNT; i++) {
+    IRQTable[i] = (IRQHandler_t)NULL;
+  }
+  GIC_Enable();
+  return (0);
+}
+
+
+/// Register interrupt handler.
+__WEAK int32_t IRQ_SetHandler (IRQn_ID_t irqn, IRQHandler_t handler) {
+  int32_t status;
+
+  if ((irqn >= 0) && (irqn < (IRQn_ID_t)IRQ_GIC_LINE_COUNT)) {
+    IRQTable[irqn] = handler;
+    status = 0;
+  } else {
+    status = -1;
+  }
+
+  return (status);
+}
+
+
+/// Get the registered interrupt handler.
+__WEAK IRQHandler_t IRQ_GetHandler (IRQn_ID_t irqn) {
+  IRQHandler_t h;
+
+  // Ignore CPUID field (software generated interrupts)
+  irqn &= 0x3FFU;
+
+  if ((irqn >= 0) && (irqn < (IRQn_ID_t)IRQ_GIC_LINE_COUNT)) {
+    h = IRQTable[irqn];
+  } else {
+    h = (IRQHandler_t)0;
+  }
+
+  return (h);
+}
+
+
+/// Enable interrupt.
+__WEAK int32_t IRQ_Enable (IRQn_ID_t irqn) {
+  int32_t status;
+
+  if ((irqn >= 0) && (irqn < (IRQn_ID_t)IRQ_GIC_LINE_COUNT)) {
+    GIC_EnableIRQ ((IRQn_Type)irqn);
+    status = 0;
+  } else {
+    status = -1;
+  }
+
+  return (status);
+}
+
+
+/// Disable interrupt.
+__WEAK int32_t IRQ_Disable (IRQn_ID_t irqn) {
+  int32_t status;
+
+  if ((irqn >= 0) && (irqn < (IRQn_ID_t)IRQ_GIC_LINE_COUNT)) {
+    GIC_DisableIRQ ((IRQn_Type)irqn);
+    status = 0;
+  } else {
+    status = -1;
+  }
+
+  return (status);
+}
+
+
+/// Get interrupt enable state.
+__WEAK uint32_t IRQ_GetEnableState (IRQn_ID_t irqn) {
+  uint32_t enable;
+
+  if ((irqn >= 0) && (irqn < (IRQn_ID_t)IRQ_GIC_LINE_COUNT)) {
+    enable = GIC_GetEnableIRQ((IRQn_Type)irqn);
+  } else {
+    enable = 0U;
+  }
+
+  return (enable);
+}
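
Since every function in this file is declared __WEAK, an application or RTOS can supply a stronger definition to replace it. A hypothetical override of IRQ_Initialize that also opens the CPU interface priority mask (the 0xFFU mask value is an assumption; GIC_Enable() and GIC_SetInterfacePriorityMask() come from the CMSIS device header, as in this file, and the statically zero-initialized IRQTable still serves the weak IRQ_SetHandler):

    #include "irq_ctrl.h"

    int32_t IRQ_Initialize (void) {
      GIC_Enable();                          /* distributor + CPU interface */
      GIC_SetInterfacePriorityMask(0xFFU);   /* do not mask any priority    */
      return 0;
    }

+
+
+/// Configure interrupt request mode.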
+__WEAK int32_t IRQ_SetMode (IRQn_ID_t irqn, uint32_t mode) { + uint32_t val; + uint8_t cfg; + uint8_t secure; + uint8_t cpu; + int32_t status = 0; + + if ((irqn >= 0) && (irqn < (IRQn_ID_t)IRQ_GIC_LINE_COUNT)) { + // Check triggering mode + val = (mode & IRQ_MODE_TRIG_Msk); + + if (val == IRQ_MODE_TRIG_LEVEL) { + cfg = 0x00U; + } else if (val == IRQ_MODE_TRIG_EDGE) { + cfg = 0x02U; + } else { + cfg = 0x00U; + status = -1; + } + + // Check interrupt type + val = mode & IRQ_MODE_TYPE_Msk; + + if (val != IRQ_MODE_TYPE_IRQ) { + status = -1; + } + + // Check interrupt domain + val = mode & IRQ_MODE_DOMAIN_Msk; + + if (val == IRQ_MODE_DOMAIN_NONSECURE) { + secure = 0U; + } else { + // Check security extensions support + val = GIC_DistributorInfo() & (1UL << 10U); + + if (val != 0U) { + // Security extensions are supported + secure = 1U; + } else { + secure = 0U; + status = -1; + } + } + + // Check interrupt CPU targets + val = mode & IRQ_MODE_CPU_Msk; + + if (val == IRQ_MODE_CPU_ALL) { + cpu = 0xFFU; + } else { + cpu = val >> IRQ_MODE_CPU_Pos; + } + + // Apply configuration if no mode error + if (status == 0) { + GIC_SetConfiguration((IRQn_Type)irqn, cfg); + GIC_SetTarget ((IRQn_Type)irqn, cpu); + + if (secure != 0U) { + GIC_SetGroup ((IRQn_Type)irqn, secure); + } + } + } + + return (status); +} + + +/// Get interrupt mode configuration. +__WEAK uint32_t IRQ_GetMode (IRQn_ID_t irqn) { + uint32_t mode; + uint32_t val; + + if ((irqn >= 0) && (irqn < (IRQn_ID_t)IRQ_GIC_LINE_COUNT)) { + mode = IRQ_MODE_TYPE_IRQ; + + // Get trigger mode + val = GIC_GetConfiguration((IRQn_Type)irqn); + + if ((val & 2U) != 0U) { + // Corresponding interrupt is edge triggered + mode |= IRQ_MODE_TRIG_EDGE; + } else { + // Corresponding interrupt is level triggered + mode |= IRQ_MODE_TRIG_LEVEL; + } + + // Get interrupt CPU targets + mode |= GIC_GetTarget ((IRQn_Type)irqn) << IRQ_MODE_CPU_Pos; + + } else { + mode = IRQ_MODE_ERROR; + } + + return (mode); +} + + +/// Get ID number of current interrupt request (IRQ). +__WEAK IRQn_ID_t IRQ_GetActiveIRQ (void) { + IRQn_ID_t irqn; + uint32_t prio; + + /* Dummy read to avoid GIC 390 errata 801120 */ + GIC_GetHighPendingIRQ(); + + irqn = GIC_AcknowledgePending(); + + __DSB(); + + /* Workaround GIC 390 errata 733075 (GIC-390_Errata_Notice_v6.pdf, 09-Jul-2014) */ + /* The following workaround code is for a single-core system. It would be */ + /* different in a multi-core system. */ + /* If the ID is 0 or 0x3FE or 0x3FF, then the GIC CPU interface may be locked-up */ + /* so unlock it, otherwise service the interrupt as normal. */ + /* Special IDs 1020=0x3FC and 1021=0x3FD are reserved values in GICv1 and GICv2 */ + /* so will not occur here. */ + + if ((irqn == 0) || (irqn >= 0x3FE)) { + /* Unlock the CPU interface with a dummy write to Interrupt Priority Register */ + prio = GIC_GetPriority((IRQn_Type)0); + GIC_SetPriority ((IRQn_Type)0, prio); + + __DSB(); + + if ((irqn == 0U) && ((GIC_GetIRQStatus ((IRQn_Type)irqn) & 1U) != 0U) && (IRQ_ID0 == 0U)) { + /* If the ID is 0, is active and has not been seen before */ + IRQ_ID0 = 1U; + } + /* End of Workaround GIC 390 errata 733075 */ + } + + return (irqn); +} + + +/// Get ID number of current fast interrupt request (FIQ). +__WEAK IRQn_ID_t IRQ_GetActiveFIQ (void) { + return ((IRQn_ID_t)-1); +} + + +/// Signal end of interrupt processing. 
+__WEAK int32_t IRQ_EndOfInterrupt (IRQn_ID_t irqn) {
+  int32_t status;
+  IRQn_Type irq = (IRQn_Type)irqn;
+
+  irqn &= 0x3FFU;
+
+  if ((irqn >= 0) && (irqn < (IRQn_ID_t)IRQ_GIC_LINE_COUNT)) {
+    GIC_EndInterrupt (irq);
+
+    if (irqn == 0) {
+      IRQ_ID0 = 0U;
+    }
+
+    status = 0;
+  } else {
+    status = -1;
+  }
+
+  return (status);
+}
+
+
+/// Set interrupt pending flag.
+__WEAK int32_t IRQ_SetPending (IRQn_ID_t irqn) {
+  int32_t status;
+
+  if ((irqn >= 0) && (irqn < (IRQn_ID_t)IRQ_GIC_LINE_COUNT)) {
+    GIC_SetPendingIRQ ((IRQn_Type)irqn);
+    status = 0;
+  } else {
+    status = -1;
+  }
+
+  return (status);
+}
+
+/// Get interrupt pending flag.
+__WEAK uint32_t IRQ_GetPending (IRQn_ID_t irqn) {
+  uint32_t pending;
+
+  // SGIs (IDs 0..15) are excluded here
+  if ((irqn >= 16) && (irqn < (IRQn_ID_t)IRQ_GIC_LINE_COUNT)) {
+    pending = GIC_GetPendingIRQ ((IRQn_Type)irqn);
+  } else {
+    pending = 0U;
+  }
+
+  return (pending & 1U);
+}
+
+
+/// Clear interrupt pending flag.
+__WEAK int32_t IRQ_ClearPending (IRQn_ID_t irqn) {
+  int32_t status;
+
+  // SGIs (IDs 0..15) are excluded here
+  if ((irqn >= 16) && (irqn < (IRQn_ID_t)IRQ_GIC_LINE_COUNT)) {
+    GIC_ClearPendingIRQ ((IRQn_Type)irqn);
+    status = 0;
+  } else {
+    status = -1;
+  }
+
+  return (status);
+}
+
+
+/// Set interrupt priority value.
+__WEAK int32_t IRQ_SetPriority (IRQn_ID_t irqn, uint32_t priority) {
+  int32_t status;
+
+  if ((irqn >= 0) && (irqn < (IRQn_ID_t)IRQ_GIC_LINE_COUNT)) {
+    GIC_SetPriority ((IRQn_Type)irqn, priority);
+    status = 0;
+  } else {
+    status = -1;
+  }
+
+  return (status);
+}
+
+
+/// Get interrupt priority.
+__WEAK uint32_t IRQ_GetPriority (IRQn_ID_t irqn) {
+  uint32_t priority;
+
+  if ((irqn >= 0) && (irqn < (IRQn_ID_t)IRQ_GIC_LINE_COUNT)) {
+    priority = GIC_GetPriority ((IRQn_Type)irqn);
+  } else {
+    priority = IRQ_PRIORITY_ERROR;
+  }
+
+  return (priority);
+}
+
+
+/// Set priority masking threshold.
+__WEAK int32_t IRQ_SetPriorityMask (uint32_t priority) {
+  GIC_SetInterfacePriorityMask (priority);
+  return (0);
+}
+
+
+/// Get priority masking threshold.
+__WEAK uint32_t IRQ_GetPriorityMask (void) {
+  return GIC_GetInterfacePriorityMask();
+}
+
+
+/// Set priority grouping field split point.
+__WEAK int32_t IRQ_SetPriorityGroupBits (uint32_t bits) {
+  int32_t status;
+
+  if (bits == IRQ_PRIORITY_Msk) {
+    bits = 7U;
+  }
+
+  if (bits < 8U) {
+    GIC_SetBinaryPoint (7U - bits);
+    status = 0;
+  } else {
+    status = -1;
+  }
+
+  return (status);
+}
+
+
+/// Get priority grouping field split point.
+__WEAK uint32_t IRQ_GetPriorityGroupBits (void) {
+  uint32_t bp;
+
+  bp = GIC_GetBinaryPoint() & 0x07U;
+
+  return (7U - bp);
+}
+
+#endif
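
The acknowledge/end-of-interrupt pair implemented above is intended to bracket every serviced interrupt. A hypothetical low-level dispatch skeleton showing the flow; the IRQ_Handler name and its wiring to the IRQ exception vector are device and toolchain specific. Spurious IDs (1020 and above) fall through harmlessly, because IRQ_GetHandler returns NULL and IRQ_EndOfInterrupt range-checks before issuing an EOI:

    #include <stddef.h>
    #include "irq_ctrl.h"

    void IRQ_Handler (void) {
      IRQn_ID_t    irqn    = IRQ_GetActiveIRQ();   /* acknowledge pending IRQ */
      IRQHandler_t handler = IRQ_GetHandler(irqn);

      if (handler != NULL) {
        handler();                                 /* service the device */
      }

      IRQ_EndOfInterrupt(irqn);                    /* priority drop / EOI */
    }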