| 1 | +/** |
| 2 | + * |
| 3 | + * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries. |
| 4 | + * |
| 5 | + * \asf_license_start |
| 6 | + * |
| 7 | + * \page License |
| 8 | + * |
| 9 | + * SPDX-License-Identifier: Apache-2.0 |
| 10 | + * |
| 11 | + * Licensed under the Apache License, Version 2.0 (the "License"); you may |
| 12 | + * not use this file except in compliance with the License. |
| 13 | + * You may obtain a copy of the License at
| 14 | + * |
| 15 | + * http://www.apache.org/licenses/LICENSE-2.0 |
| 16 | + * |
| 17 | + * Unless required by applicable law or agreed to in writing, software |
| 18 | + * distributed under the License is distributed on an AS IS BASIS, WITHOUT |
| 19 | + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 20 | + * See the License for the specific language governing permissions and |
| 21 | + * limitations under the License. |
| 22 | + * |
| 23 | + * \asf_license_stop |
| 24 | + * |
| 25 | + */ |
| 26 | + |
| 27 | +/** @file cpu.h |
| 28 | + * MEC1701 CPU abstractions
| 29 | + */
| 30 | +/** @defgroup cpu cpu
| 31 | + *  @{ */
| 32 | + |
| 33 | + |
| 34 | +#ifndef _CPU_H |
| 35 | +#define _CPU_H |
| 36 | + |
| 37 | +#include <stdint.h> |
| 38 | + |
| 39 | +#ifdef __cplusplus |
| 40 | +extern "C" { |
| 41 | +#endif |
| 42 | + |
| 43 | + |
| 44 | +#ifdef __CC_ARM /* Keil ARM MDK */ |
| 45 | + |
| 46 | +#ifndef UINT8_C |
| 47 | +#define UINT8_C(x) (unsigned char)(x) |
| 48 | +#endif |
| 49 | + |
| 50 | +#ifndef UINT16_C |
| 51 | +#define UINT16_C(x) (unsigned int)(x) |
| 52 | +#endif |
| 53 | + |
| 54 | +#ifndef UINT32_C |
| 55 | +#define UINT32_C(x) (unsigned long)(x) |
| 56 | +#endif |
| 57 | + |
| 58 | +#define USED __attribute__((used)) |
| 59 | +#define WEAK __attribute__((weak)) |
| 60 | +#define INLINE __inline |
| 61 | +#define NORETURN __declspec(noreturn) |
| 62 | +#define PACKED __packed |
| 63 | +#define CPU_GET_INTERRUPT_STATE() __get_PRIMASK() |
| 64 | +#define CPU_SET_INTERRUPT_STATE(x) __set_PRIMASK((x)) |
| 65 | + |
| 66 | +/* |
| 67 | + * The Keil MDK intrinsic __disable_irq() returns the value of PRIMASK
| 68 | + * before disabling interrupts.  A usage sketch follows the macros below.
| 69 | + */
| 70 | +#define CPU_DISABLE_INTERRUPTS() __disable_irq() |
| 71 | +#define CPU_GET_DISABLE_INTERRUPTS(x) {(x) = __disable_irq();} |
| 72 | +#define CPU_RESTORE_INTERRUPTS(x) { if (!(x)) { __enable_irq(); } } |
| 73 | +#define CPU_ENABLE_INTERRUPTS() __enable_irq() |
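+
+/*
+ * Illustrative usage sketch (not part of the original header): a nesting-safe
+ * critical section built from the macros above.  CPU_RESTORE_INTERRUPTS()
+ * re-enables interrupts only if they were enabled when the state was saved.
+ * The function and counter names are hypothetical.
+ */
+static INLINE void cpu_example_atomic_increment(volatile uint32_t *counter)
+{
+uint32_t int_save;
+
+CPU_GET_DISABLE_INTERRUPTS(int_save);  /* save PRIMASK state and mask interrupts */
+(*counter)++;                          /* protected read-modify-write */
+CPU_RESTORE_INTERRUPTS(int_save);      /* restore the saved interrupt state */
+}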
| 74 | + |
| 75 | +#define CPU_NOP() __nop() |
| 76 | +#define CPU_WAIT_FOR_INTR() __wfi() |
| 77 | + |
| 78 | +#define CPU_REV(x) __rev(x) |
| 79 | + |
| 80 | +#define CPU_CLZ(x) __clz(x) |
| 81 | + |
| 82 | +/* |
| 83 | + * The microsecond delay register is implemented in a Normal Data Memory |
| 84 | + * Region. Normal regions have relaxed data ordering semantics. This can |
| 85 | + * cause issues because writes to this register can complete before a |
| 86 | + * previous write to Device or Strongly ordered memory. Please use the |
| 87 | + * inline code after this definition. It uses the Data Synchronization |
| 88 | + * Barrier instruction to ensure all outstanding writes complete before
| 89 | + * the instruction after DSB is executed. |
| 90 | + * #define MEC2016_DELAY_REG *((volatile uint8_t*) MEC2016_DELAY_REG_BASE) |
| 91 | + */ |
| 92 | + |
| 93 | +static INLINE void MICROSEC_DELAY(unsigned char n)
| 94 | +{
| 95 | +volatile unsigned char *pdly_reg;
| 96 | +
| 97 | +pdly_reg = (volatile unsigned char *)0x10000000ul;
| 98 | +
| 99 | +/* Write the delay count, read it back to force the write to complete,
| 100 | + * then issue a full data memory barrier.  Plain C is used here because
| 101 | + * C variable names cannot be referenced inside an assembly string. */
| 102 | +*pdly_reg = n;
| 103 | +n = *pdly_reg;
| 104 | +__dmb(0xFU);  /* armcc barrier intrinsic: full-system DMB */
| 105 | +}
| 106 | + |
| 107 | +#elif defined(__XC32_PART_SUPPORT_VERSION) /* Microchip XC32 compiler (customized GCC) */
| 108 | +
| 109 | +#error "!!! FORCED BUILD ERROR: cpu.h XC32 support has not been implemented !!!"
| 110 | + |
| 111 | +#elif defined(__GNUC__) && defined(__ARM_EABI__) /* GCC for ARM (arm-none-eabi-gcc) */ |
| 112 | + |
| 113 | +#include <stdint.h> |
| 114 | +#include <stddef.h> |
| 115 | + |
| 116 | +#ifndef __always_inline |
| 117 | +#define __always_inline inline __attribute__((always_inline)) |
| 118 | +#endif |
| 119 | + |
| 120 | +static __always_inline void __NOP_THUMB2(void) |
| 121 | +{ |
| 122 | +__asm volatile ("nop"); |
| 123 | +} |
| 124 | + |
| 125 | +#define CPU_NOP() __NOP_THUMB2() |
| 126 | + |
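+/* WFI wrapper: DSB forces outstanding memory accesses to complete and ISB
+ * flushes the pipeline before WFI; the trailing NOPs are padding executed
+ * after wake-up. */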
| 127 | +static __always_inline void __WFI_THUMB2(void) |
| 128 | +{ |
| 129 | +__asm volatile ("dsb"); |
| 130 | +__asm volatile ("isb"); |
| 131 | +__asm volatile ("wfi"); |
| 132 | +__asm volatile ("nop"); |
| 133 | +__asm volatile ("nop"); |
| 134 | +__asm volatile ("nop"); |
| 135 | +} |
| 136 | + |
| 137 | +#define CPU_WAIT_FOR_INTR() __WFI_THUMB2() |
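+
+/*
+ * Illustrative idle-loop sketch (not part of the original header); the
+ * work_pending flag is hypothetical.
+ *
+ *     while (!work_pending) {
+ *         CPU_WAIT_FOR_INTR();
+ *     }
+ */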
| 138 | + |
| 139 | +/* We require the user to have the ARM CMSIS header files available and
| 140 | + * configured; core_cmFunc.h provides inlines for global interrupt control.
| 141 | + * Note that the CMSIS __disable_irq for GCC does not return the current
| 142 | + * PRIMASK value, so a variant that does is provided below. */
| 143 | + |
| 144 | +static __always_inline uint32_t __get_disable_irq(void) |
| 145 | +{ |
| 146 | +uint32_t pri_mask; |
| 147 | + |
| 148 | +__asm volatile ( |
| 149 | +"\tmrs %0, primask\n" |
| 150 | +"\tcpsid i\n" |
| 151 | +"\tdsb\n" |
| 152 | +"\tisb\n" |
| 153 | +: "=r" (pri_mask) :: "memory" |
| 154 | +); |
| 155 | +return pri_mask; |
| 156 | +} |
| 157 | + |
| 158 | +static __always_inline uint32_t __get_primask(void) |
| 159 | +{ |
| 160 | +uint32_t pri_mask; |
| 161 | + |
| 162 | +__asm volatile ( |
| 163 | +"\tmrs %0, primask\n" |
| 164 | +"\tisb\n" |
| 165 | +: "=r" (pri_mask) :: "memory" |
| 166 | +); |
| 167 | +return pri_mask; |
| 168 | +} |
| 169 | + |
| 170 | +static __always_inline void __enable_irqs(void) |
| 171 | +{ |
| 172 | +__asm volatile ("cpsie i" : : : "memory"); |
| 173 | +} |
| 174 | + |
| 175 | +static __always_inline void __disable_irqs(void) |
| 176 | +{ |
| 177 | + __asm volatile ( |
| 178 | +"\tcpsid i\n" |
| 179 | +"\tdsb\n" |
| 180 | +"\tisb\n" : : : "memory"); |
| 181 | +} |
| 182 | + |
| 183 | + |
| 184 | +#define CPU_GET_INTERRUPT_STATE() __get_primask() |
| 185 | +#define CPU_GET_DISABLE_INTERRUPTS(x) {(x) = __get_disable_irq();} |
| 186 | +#define CPU_RESTORE_INTERRUPTS(x) { if (!(x)) { __enable_irqs(); } } |
| 187 | +#define CPU_ENABLE_INTERRUPTS() __enable_irqs() |
| 188 | +#define CPU_DISABLE_INTERRUPTS() __disable_irqs() |
| 189 | + |
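+/*
+ * Illustrative usage sketch (not part of the original header): the saved
+ * PRIMASK value makes these macros safe to nest; the inner region does not
+ * re-enable interrupts because they were already masked by the outer region.
+ * Function and variable names are hypothetical.
+ */
+static __always_inline void cpu_example_nested_critical(volatile uint32_t *a, volatile uint32_t *b)
+{
+uint32_t outer, inner;
+
+CPU_GET_DISABLE_INTERRUPTS(outer);  /* outer save: PRIMASK state before masking */
+(*a)++;
+CPU_GET_DISABLE_INTERRUPTS(inner);  /* inner save: PRIMASK is already set */
+(*b)++;
+CPU_RESTORE_INTERRUPTS(inner);      /* no effect: interrupts stay masked */
+(*a)++;
+CPU_RESTORE_INTERRUPTS(outer);      /* re-enables only if enabled on entry */
+}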
| 190 | + |
| 191 | +static __always_inline uint32_t __REV_THUMB2(uint32_t u32) |
| 192 | +{ |
| 193 | +return __builtin_bswap32(u32); |
| 194 | +} |
| 195 | + |
| 196 | +#define CPU_REV(x) __REV_THUMB2(x) |
| 197 | + |
| 198 | +/* |
| 199 | + * __builtin_clz() may not be available if the user compiles with built-ins disabled.
| 200 | + */ |
| 201 | +#define CPU_CLZ(x) __builtin_clz(x) |
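+
+/*
+ * Illustrative sketch (not part of the original header): find the index of
+ * the most significant set bit with CPU_CLZ() and byte-swap a word with
+ * CPU_REV().  The function names are hypothetical.
+ */
+static __always_inline uint32_t cpu_example_msb_index(uint32_t val)
+{
+/* __builtin_clz() is undefined for 0, so handle zero explicitly */
+return (val != 0ul) ? (31ul - (uint32_t)CPU_CLZ(val)) : 0xFFFFFFFFul;
+}
+
+static __always_inline uint32_t cpu_example_bswap32(uint32_t val)
+{
+return CPU_REV(val);  /* e.g. 0x11223344 -> 0x44332211 */
+}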
| 202 | + |
| 203 | +static inline __attribute__((always_inline, noreturn)) void CPU_JMP(uint32_t addr) |
| 204 | +{ |
| 205 | +addr |= (1ul << 0);  /* set bit 0 so the BX below stays in Thumb state */
| 206 | + |
| 207 | +__asm volatile ( |
| 208 | +"\n\t" |
| 209 | +"\tBX %0 \n" |
| 210 | +"\tNOP \n" |
| 211 | +: /* no outputs */ |
| 212 | +:"r"(addr) |
| 213 | +:); |
| 214 | +while(1); |
| 215 | +} |
| 216 | + |
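+/*
+ * Illustrative usage sketch (not part of the original header): jump to the
+ * reset handler of a second image whose vector table is at the hypothetical
+ * address APP_VECTOR_TABLE.  On Cortex-M the word at offset 4 of the vector
+ * table holds the reset handler address; CPU_JMP() forces bit 0 so the core
+ * remains in Thumb state.
+ *
+ *     uint32_t app_reset = ((volatile uint32_t *)APP_VECTOR_TABLE)[1];
+ *     CPU_JMP(app_reset);
+ */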
| 217 | + |
| 218 | +/* |
| 219 | + * The microsecond delay register is implemented in a Normal Data Memory |
| 220 | + * Region. Normal regions have relaxed data ordering semantics. This can |
| 221 | + * cause issues because writes to this register can complete before a |
| 222 | + * previous write to Device or Strongly ordered memory. Please use the |
| 223 | + * inline code after this definition. It uses the Data Synchronization |
| 224 | + * Barrier instruction to ensure all outstanding writes complete before
| 225 | + * the instruction after DSB is executed. |
| 226 | + * #define MEC2016_DELAY_REG *((volatile uint8_t*) MEC2016_DELAY_REG_BASE) |
| 227 | + */ |
| 228 | + |
| 229 | +static __always_inline void MICROSEC_DELAY(uint8_t n) |
| 230 | +{ |
| 231 | +uint32_t dly_reg_addr = 0x10000000ul; |
| 232 | + |
| 233 | +__asm volatile ( |
| 234 | +"\tstrb %0, [%1]\n" |
| 235 | +"\tldrb %0, [%1]\n" |
| 236 | +"\tadd %0, #0\n" |
| 237 | +"\tdmb\n" |
| 238 | +: "+r" (n)  /* n is overwritten by the read-back, so mark it read-write */
| 239 | +: "r" (dly_reg_addr)
| 240 | +: "memory" |
| 241 | +); |
| 242 | +} |
| 243 | + |
| 244 | +static __always_inline void write_read_back8(volatile uint8_t* addr, uint8_t val) |
| 245 | +{ |
| 246 | +__asm__ __volatile__ ( |
| 247 | +"\n\t" |
| 248 | +"strb %1, [%0] \n\t" |
| 249 | +"ldrb %1, [%0] \n\t" |
| 250 | +: /* No outputs */ |
| 251 | +: "r" (addr), "r" (val) |
| 252 | +: "memory" |
| 253 | +); |
| 254 | +} |
| 255 | + |
| 256 | +static __always_inline void write_read_back16(volatile uint16_t* addr, uint16_t val) |
| 257 | +{ |
| 258 | +__asm__ __volatile__ ( |
| 259 | +"\n\t" |
| 260 | +"strh %1, [%0] \n\t" |
| 261 | +"ldrh %1, [%0] \n\t" |
| 262 | +: /* No outputs */ |
| 263 | +: "r" (addr), "r" (val) |
| 264 | +: "memory" |
| 265 | +); |
| 266 | +} |
| 267 | + |
| 268 | +static __always_inline void write_read_back32(volatile uint32_t* addr, uint32_t val) |
| 269 | +{ |
| 270 | +__asm__ __volatile__ ( |
| 271 | +"\n\t" |
| 272 | +"str %1, [%0] \n\t" |
| 273 | +"ldr %1, [%0] \n\t" |
| 274 | +: /* No outputs */ |
| 275 | +: "r" (addr), "r" (val) |
| 276 | +: "memory" |
| 277 | +); |
| 278 | +} |
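+
+/*
+ * Illustrative usage (not part of the original header): the read-back forces
+ * the write to reach the peripheral before execution continues, which matters
+ * when clearing a write-1-to-clear interrupt status just before re-enabling
+ * that interrupt in the NVIC.  HYP_GIRQ_SRC_ADDR is a hypothetical address.
+ *
+ *     write_read_back32((volatile uint32_t *)HYP_GIRQ_SRC_ADDR, (1ul << 3));
+ */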
| 279 | + |
| 280 | +#else /* Unknown compiler */ |
| 281 | + |
| 282 | +#error "!!! FORCED BUILD ERROR: cpu.h Unknown compiler !!!" |
| 283 | + |
| 284 | +#endif |
| 285 | + |
| 286 | +#endif /* #ifndef _CPU_H */ |
| 287 | +/** @} |
| 288 | + */ |