/**************************************************************************//**
 * @brief    CMSIS Cortex-M Core Function/Instruction Header File
 * @date     02. March 2016
 ******************************************************************************/
/*
 * Copyright (c) 2009-2016 ARM Limited. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __CMSIS_GCC_H
#define __CMSIS_GCC_H
/* ignore some GCC warnings */
#if defined ( __GNUC__ )
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsign-conversion"
#pragma GCC diagnostic ignored "-Wconversion"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif

/* ###########################  Core Function Access  ########################### */
/** \ingroup  CMSIS_Core_FunctionInterface
    \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
  @{
 */

/**
  \brief   Enable IRQ Interrupts
  \details Enables IRQ interrupts by clearing the I-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__attribute__((always_inline)) __STATIC_INLINE void __enable_irq(void)
{
  __ASM volatile ("cpsie i" : : : "memory");
}

/**
  \brief   Disable IRQ Interrupts
  \details Disables IRQ interrupts by setting the I-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__attribute__((always_inline)) __STATIC_INLINE void __disable_irq(void)
{
  __ASM volatile ("cpsid i" : : : "memory");
}

/**
  \brief   Get Control Register
  \details Returns the content of the Control Register.
  \return               Control Register value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_CONTROL(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, control" : "=r" (result) );
  return(result);
}

/**
  \brief   Set Control Register
  \details Writes the given value to the Control Register.
  \param [in]    control  Control Register value to set
 */
__attribute__((always_inline)) __STATIC_INLINE void __set_CONTROL(uint32_t control)
{
  __ASM volatile ("MSR control, %0" : : "r" (control) : "memory");
}

/**
  \brief   Get IPSR Register
  \details Returns the content of the IPSR Register.
  \return               IPSR Register value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_IPSR(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, ipsr" : "=r" (result) );
  return(result);
}

/**
  \brief   Get APSR Register
  \details Returns the content of the APSR Register.
  \return               APSR Register value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_APSR(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, apsr" : "=r" (result) );
  return(result);
}

/**
  \brief   Get xPSR Register
  \details Returns the content of the xPSR Register.
  \return               xPSR Register value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_xPSR(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, xpsr" : "=r" (result) );
  return(result);
}

/**
  \brief   Get Process Stack Pointer
  \details Returns the current value of the Process Stack Pointer (PSP).
  \return               PSP Register value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_PSP(void)
{
  register uint32_t result;

  __ASM volatile ("MRS %0, psp"  : "=r" (result) );
  return(result);
}

/**
  \brief   Set Process Stack Pointer
  \details Assigns the given value to the Process Stack Pointer (PSP).
  \param [in]    topOfProcStack  Process Stack Pointer value to set
 */
__attribute__((always_inline)) __STATIC_INLINE void __set_PSP(uint32_t topOfProcStack)
{
  __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : "sp");
}

/**
  \brief   Get Main Stack Pointer
  \details Returns the current value of the Main Stack Pointer (MSP).
  \return               MSP Register value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_MSP(void)
{
  register uint32_t result;

  __ASM volatile ("MRS %0, msp" : "=r" (result) );
  return(result);
}

/**
  \brief   Set Main Stack Pointer
  \details Assigns the given value to the Main Stack Pointer (MSP).
  \param [in]    topOfMainStack  Main Stack Pointer value to set
 */
__attribute__((always_inline)) __STATIC_INLINE void __set_MSP(uint32_t topOfMainStack)
{
  __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : "sp");
}

/**
  \brief   Get Priority Mask
  \details Returns the current state of the priority mask bit from the Priority Mask Register.
  \return               Priority Mask value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_PRIMASK(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, primask" : "=r" (result) );
  return(result);
}

/**
  \brief   Set Priority Mask
  \details Assigns the given value to the Priority Mask Register.
  \param [in]    priMask  Priority Mask
 */
__attribute__((always_inline)) __STATIC_INLINE void __set_PRIMASK(uint32_t priMask)
{
  __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory");
}

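/*
  Usage sketch (illustrative, not part of this header): a save/restore critical
  section built from the PRIMASK accessors, so that interrupts are only
  re-enabled on exit if they were enabled on entry. Variable names are
  hypothetical.

  \code
  uint32_t primask = __get_PRIMASK();   // remember current mask state
  __disable_irq();                      // enter critical section
  // ... access data shared with interrupt handlers ...
  __set_PRIMASK(primask);               // restore previous state on exit
  \endcode
 */
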
#if       ((defined (__CORTEX_M ) && (__CORTEX_M >= 3U)) || \
           (defined (__CORTEX_SC) && (__CORTEX_SC >= 300U)) )

/**
  \brief   Enable FIQ
  \details Enables FIQ interrupts by clearing the F-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__attribute__((always_inline)) __STATIC_INLINE void __enable_fault_irq(void)
{
  __ASM volatile ("cpsie f" : : : "memory");
}

/**
  \brief   Disable FIQ
  \details Disables FIQ interrupts by setting the F-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__attribute__((always_inline)) __STATIC_INLINE void __disable_fault_irq(void)
{
  __ASM volatile ("cpsid f" : : : "memory");
}

/**
  \brief   Get Base Priority
  \details Returns the current value of the Base Priority register.
  \return               Base Priority register value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_BASEPRI(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, basepri" : "=r" (result) );
  return(result);
}

/**
  \brief   Set Base Priority
  \details Assigns the given value to the Base Priority register.
  \param [in]    value  Base Priority value to set
 */
__attribute__((always_inline)) __STATIC_INLINE void __set_BASEPRI(uint32_t value)
{
  __ASM volatile ("MSR basepri, %0" : : "r" (value) : "memory");
}

/**
  \brief   Set Base Priority with condition
  \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled,
           or the new value increases the BASEPRI priority level.
  \param [in]    value  Base Priority value to set
 */
__attribute__((always_inline)) __STATIC_INLINE void __set_BASEPRI_MAX(uint32_t value)
{
  __ASM volatile ("MSR basepri_max, %0" : : "r" (value) : "memory");
}

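/*
  Usage sketch (illustrative, not part of this header): BASEPRI masks all
  interrupts whose priority value is numerically greater than or equal to the
  written value. The priority lives in the most-significant implemented bits,
  hence the shift by (8 - __NVIC_PRIO_BITS); __NVIC_PRIO_BITS is assumed to
  come from the device header.

  \code
  __set_BASEPRI(5U << (8U - __NVIC_PRIO_BITS));  // mask priority levels 5 and lower-urgency
  // ... region protected from those interrupts ...
  __set_BASEPRI(0U);                             // 0 disables BASEPRI masking
  \endcode
 */
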
/**
  \brief   Get Fault Mask
  \details Returns the current value of the Fault Mask register.
  \return               Fault Mask register value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_FAULTMASK(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, faultmask" : "=r" (result) );
  return(result);
}

/**
  \brief   Set Fault Mask
  \details Assigns the given value to the Fault Mask register.
  \param [in]    faultMask  Fault Mask value to set
 */
__attribute__((always_inline)) __STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask)
{
  __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory");
}

#endif /* ((defined (__CORTEX_M ) && (__CORTEX_M >= 3U)) || \
           (defined (__CORTEX_SC) && (__CORTEX_SC >= 300U)) ) */

#if (defined (__CORTEX_M) && (__CORTEX_M >= 4U))

/**
  \brief   Get FPSCR
  \details Returns the current value of the Floating Point Status/Control register.
  \return               Floating Point Status/Control register value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_FPSCR(void)
{
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
     (defined (__FPU_USED   ) && (__FPU_USED    == 1U)) )
  uint32_t result;

  __ASM volatile ("");                                 /* Empty asm statement works as a scheduling barrier */
  __ASM volatile ("VMRS %0, fpscr" : "=r" (result) );
  __ASM volatile ("");
  return(result);
#else
  return(0);
#endif
}

/**
  \brief   Set FPSCR
  \details Assigns the given value to the Floating Point Status/Control register.
  \param [in]    fpscr  Floating Point Status/Control value to set
 */
__attribute__((always_inline)) __STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
{
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
     (defined (__FPU_USED   ) && (__FPU_USED    == 1U)) )
  __ASM volatile ("");                                 /* Empty asm statement works as a scheduling barrier */
  __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc");
  __ASM volatile ("");
#endif
}

#endif /* (defined (__CORTEX_M) && (__CORTEX_M >= 4U)) */


/*@} end of CMSIS_Core_RegAccFunctions */

/* ##########################  Core Instruction Access  ######################### */
/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface
  Access to dedicated instructions
  @{
*/

/* Define macros for porting to both thumb1 and thumb2.
 * For thumb1, use low register (r0-r7), specified by constraint "l"
 * Otherwise, use general registers, specified by constraint "r" */
#if defined (__thumb__) && !defined (__thumb2__)
#define __CMSIS_GCC_OUT_REG(r) "=l" (r)
#define __CMSIS_GCC_USE_REG(r) "l" (r)
#else
#define __CMSIS_GCC_OUT_REG(r) "=r" (r)
#define __CMSIS_GCC_USE_REG(r) "r" (r)
#endif

/**
  \brief   No Operation
  \details No Operation does nothing. This instruction can be used for code alignment purposes.
 */
__attribute__((always_inline)) __STATIC_INLINE void __NOP(void)
{
  __ASM volatile ("nop");
}

/**
  \brief   Wait For Interrupt
  \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs.
 */
__attribute__((always_inline)) __STATIC_INLINE void __WFI(void)
{
  __ASM volatile ("wfi");
}

/**
  \brief   Wait For Event
  \details Wait For Event is a hint instruction that permits the processor to enter
           a low-power state until one of a number of events occurs.
 */
__attribute__((always_inline)) __STATIC_INLINE void __WFE(void)
{
  __ASM volatile ("wfe");
}

/**
  \brief   Send Event
  \details Send Event is a hint instruction. It causes an event to be signaled to the CPU.
 */
__attribute__((always_inline)) __STATIC_INLINE void __SEV(void)
{
  __ASM volatile ("sev");
}

/**
  \brief   Instruction Synchronization Barrier
  \details Instruction Synchronization Barrier flushes the pipeline in the processor,
           so that all instructions following the ISB are fetched from cache or memory,
           after the instruction has been completed.
 */
__attribute__((always_inline)) __STATIC_INLINE void __ISB(void)
{
  __ASM volatile ("isb 0xF":::"memory");
}

/**
  \brief   Data Synchronization Barrier
  \details Acts as a special kind of Data Memory Barrier.
           It completes when all explicit memory accesses before this instruction complete.
 */
__attribute__((always_inline)) __STATIC_INLINE void __DSB(void)
{
  __ASM volatile ("dsb 0xF":::"memory");
}

/**
  \brief   Data Memory Barrier
  \details Ensures the apparent order of the explicit memory operations before
           and after the instruction, without ensuring their completion.
 */
__attribute__((always_inline)) __STATIC_INLINE void __DMB(void)
{
  __ASM volatile ("dmb 0xF":::"memory");
}

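/*
  Usage sketch (illustrative, not part of this header): __DMB orders a payload
  write ahead of the flag write that signals it, for example towards an
  interrupt handler or another bus master. buffer, payload and data_ready are
  hypothetical shared variables.

  \code
  buffer[0] = payload;   // 1. publish the data
  __DMB();               // 2. ensure the data write is observable first
  data_ready = 1U;       // 3. only then raise the flag
  \endcode
 */
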
/**
  \brief   Reverse byte order (32 bit)
  \details Reverses the byte order in integer value.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __REV(uint32_t value)
{
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
  return __builtin_bswap32(value);
#else
  uint32_t result;

  __ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return(result);
#endif
}

/**
  \brief   Reverse byte order (16 bit)
  \details Reverses the byte order in two unsigned short values.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __REV16(uint32_t value)
{
  uint32_t result;

  __ASM volatile ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return(result);
}

/**
  \brief   Reverse byte order in signed short value
  \details Reverses the byte order in a signed short value with sign extension to integer.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__attribute__((always_inline)) __STATIC_INLINE int32_t __REVSH(int32_t value)
{
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  return (short)__builtin_bswap16(value);
#else
  int32_t result;

  __ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return(result);
#endif
}

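/*
  Worked example (illustrative): the three byte-reversal intrinsics on the same
  input pattern.

  \code
  uint32_t r   = __REV  (0x12345678U);       // 0x78563412 - full 32-bit byte swap
  uint32_t r16 = __REV16(0x12345678U);       // 0x34127856 - bytes swapped within each halfword
  int32_t  rs  = __REVSH((int32_t)0x0080);   // 0xFFFF8000 - halfword byte swap with sign extension
  \endcode
 */
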
/**
  \brief   Rotate Right in unsigned value (32 bit)
  \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits.
  \param [in]    op1  Value to rotate
  \param [in]    op2  Number of Bits to rotate
  \return               Rotated value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
{
  op2 %= 32U;
  if (op2 == 0U)
  {
    return op1;                  /* avoid the undefined behaviour of a 32-bit shift below */
  }
  return (op1 >> op2) | (op1 << (32U - op2));
}

/**
  \brief   Breakpoint
  \details Causes the processor to enter Debug state.
           Debug tools can use this to investigate system state when the instruction at a particular address is reached.
  \param [in]    value  is ignored by the processor.
                 If required, a debugger can use it to store additional information about the breakpoint.
 */
#define __BKPT(value)                       __ASM volatile ("bkpt "#value)

/**
  \brief   Reverse bit order of value
  \details Reverses the bit order of the given value.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __RBIT(uint32_t value)
{
  uint32_t result;

#if ((defined (__CORTEX_M ) && (__CORTEX_M >= 3U)) || \
     (defined (__CORTEX_SC) && (__CORTEX_SC >= 300U)) )
   __ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) );
#else
  int32_t s = 4 /*sizeof(v)*/ * 8 - 1; /* extra shift needed at end */

  result = value;                      /* r will be reversed bits of v; first get LSB of v */
  for (value >>= 1U; value; value >>= 1U)
  {
    result <<= 1U;
    result |= value & 1U;
    s--;
  }
  result <<= s;                        /* shift when v's highest bits are zero */
#endif
  return(result);
}

/**
  \brief   Count leading zeros
  \details Counts the number of leading zeros of a data value.
  \param [in]  value  Value to count the leading zeros
  \return             number of leading zeros in value
 */
#define __CLZ             __builtin_clz

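/*
  Usage sketch (illustrative, not part of this header): deriving the index of
  the most significant set bit from __CLZ. Since __builtin_clz has an undefined
  result for an input of 0, the caller must treat that case separately.

  \code
  uint32_t x   = 0x00010000U;
  uint32_t msb = 31U - (uint32_t)__CLZ(x);   // 16 - bit index of the highest set bit
  \endcode
 */
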
#if ((defined (__CORTEX_M ) && (__CORTEX_M >= 3U)) || \
     (defined (__CORTEX_SC) && (__CORTEX_SC >= 300U)) )

/**
  \brief   LDR Exclusive (8 bit)
  \details Executes an exclusive LDR instruction for 8 bit value.
  \param [in]    addr  Pointer to data
  \return              value of type uint8_t at (*addr)
 */
__attribute__((always_inline)) __STATIC_INLINE uint8_t __LDREXB(volatile uint8_t *addr)
{
    uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
   __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) );
#else
    /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
       accepted by assembler. So has to use following less efficient pattern.
    */
   __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
   return ((uint8_t) result);    /* Add explicit type cast here */
}

/**
  \brief   LDR Exclusive (16 bit)
  \details Executes an exclusive LDR instruction for 16 bit values.
  \param [in]    addr  Pointer to data
  \return              value of type uint16_t at (*addr)
 */
__attribute__((always_inline)) __STATIC_INLINE uint16_t __LDREXH(volatile uint16_t *addr)
{
    uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
   __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) );
#else
    /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
       accepted by assembler. So has to use following less efficient pattern.
    */
   __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
   return ((uint16_t) result);    /* Add explicit type cast here */
}

/**
  \brief   LDR Exclusive (32 bit)
  \details Executes an exclusive LDR instruction for 32 bit values.
  \param [in]    addr  Pointer to data
  \return              value of type uint32_t at (*addr)
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __LDREXW(volatile uint32_t *addr)
{
    uint32_t result;

   __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) );
   return(result);
}

/**
  \brief   STR Exclusive (8 bit)
  \details Executes an exclusive STR instruction for 8 bit values.
  \param [in]  value  Value to store
  \param [in]  addr   Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr)
{
   uint32_t result;

   __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
   return(result);
}

/**
  \brief   STR Exclusive (16 bit)
  \details Executes an exclusive STR instruction for 16 bit values.
  \param [in]  value  Value to store
  \param [in]  addr   Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr)
{
   uint32_t result;

   __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
   return(result);
}

/**
  \brief   STR Exclusive (32 bit)
  \details Executes an exclusive STR instruction for 32 bit values.
  \param [in]  value  Value to store
  \param [in]  addr   Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr)
{
   uint32_t result;

   __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );
   return(result);
}

/**
  \brief   Remove the exclusive lock
  \details Removes the exclusive lock which is created by LDREX.
 */
__attribute__((always_inline)) __STATIC_INLINE void __CLREX(void)
{
  __ASM volatile ("clrex" ::: "memory");
}

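/*
  Usage sketch (illustrative, not part of this header): a lock-free atomic
  increment built from the exclusive access pair. The loop retries whenever the
  store fails because the exclusive monitor was cleared in between (e.g. by a
  context switch that executed CLREX). atomic_increment and counter are
  hypothetical.

  \code
  static volatile uint32_t counter;

  static inline void atomic_increment(void)
  {
    uint32_t v;
    do {
      v = __LDREXW(&counter);                      // exclusive read
    } while (__STREXW(v + 1U, &counter) != 0U);    // retry until exclusive write succeeds
  }
  \endcode
 */
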
/**
  \brief   Signed Saturate
  \details Saturates a signed value.
  \param [in]  value  Value to be saturated
  \param [in]    sat  Bit position to saturate to (1..32)
  \return             Saturated value
 */
#define __SSAT(ARG1,ARG2) \
({                          \
  int32_t __RES, __ARG1 = (ARG1); \
  __ASM ("ssat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })

/**
  \brief   Unsigned Saturate
  \details Saturates an unsigned value.
  \param [in]  value  Value to be saturated
  \param [in]    sat  Bit position to saturate to (0..31)
  \return             Saturated value
 */
#define __USAT(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("usat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })

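/*
  Worked example (illustrative): clamping an accumulator into narrower signed
  and unsigned ranges. The saturation width is encoded as an immediate, so it
  must be a compile-time constant.

  \code
  int32_t  acc = 40000;
  int32_t  s16 = __SSAT(acc, 16);   // 32767 - clamped to [-32768, 32767]
  uint32_t u8  = __USAT(acc, 8);    // 255   - clamped to [0, 255]
  \endcode
 */
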
/**
  \brief   Rotate Right with Extend (32 bit)
  \details Moves each bit of a bitstring right by one bit.
           The carry input is shifted in at the left end of the bitstring.
  \param [in]    value  Value to rotate
  \return               Rotated value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __RRX(uint32_t value)
{
  uint32_t result;

  __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return(result);
}

/**
  \brief   LDRT Unprivileged (8 bit)
  \details Executes an Unprivileged LDRT instruction for 8 bit value.
  \param [in]    ptr  Pointer to data
  \return             value of type uint8_t at (*ptr)
 */
__attribute__((always_inline)) __STATIC_INLINE uint8_t __LDRBT(volatile uint8_t *ptr)
{
    uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
   __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) );
#else
    /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
       accepted by assembler. So has to use following less efficient pattern.
    */
   __ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" );
#endif
   return ((uint8_t) result);    /* Add explicit type cast here */
}

/**
  \brief   LDRT Unprivileged (16 bit)
  \details Executes an Unprivileged LDRT instruction for 16 bit values.
  \param [in]    ptr  Pointer to data
  \return             value of type uint16_t at (*ptr)
 */
__attribute__((always_inline)) __STATIC_INLINE uint16_t __LDRHT(volatile uint16_t *ptr)
{
    uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
   __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) );
#else
    /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
       accepted by assembler. So has to use following less efficient pattern.
    */
   __ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" );
#endif
   return ((uint16_t) result);    /* Add explicit type cast here */
}

/**
  \brief   LDRT Unprivileged (32 bit)
  \details Executes an Unprivileged LDRT instruction for 32 bit values.
  \param [in]    ptr  Pointer to data
  \return             value of type uint32_t at (*ptr)
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __LDRT(volatile uint32_t *ptr)
{
    uint32_t result;

   __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) );
   return(result);
}

/**
  \brief   STRT Unprivileged (8 bit)
  \details Executes an Unprivileged STRT instruction for 8 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
 */
__attribute__((always_inline)) __STATIC_INLINE void __STRBT(uint8_t value, volatile uint8_t *ptr)
{
   __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) );
}

/**
  \brief   STRT Unprivileged (16 bit)
  \details Executes an Unprivileged STRT instruction for 16 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
 */
__attribute__((always_inline)) __STATIC_INLINE void __STRHT(uint16_t value, volatile uint16_t *ptr)
{
   __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) );
}

/**
  \brief   STRT Unprivileged (32 bit)
  \details Executes an Unprivileged STRT instruction for 32 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
 */
__attribute__((always_inline)) __STATIC_INLINE void __STRT(uint32_t value, volatile uint32_t *ptr)
{
   __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) );
}

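/*
  Usage sketch (illustrative, not part of this header): a privileged service
  routine can use the unprivileged load/store intrinsics so that an access made
  on behalf of a task is checked against the task's (unprivileged) MPU
  permissions rather than the handler's. svc_read_byte is a hypothetical
  helper.

  \code
  static inline uint8_t svc_read_byte(volatile uint8_t *user_ptr)
  {
    return __LDRBT(user_ptr);   // faults if unprivileged code may not read here
  }
  \endcode
 */
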
#endif /* ((defined (__CORTEX_M ) && (__CORTEX_M >= 3U)) || \
           (defined (__CORTEX_SC) && (__CORTEX_SC >= 300U)) ) */

/*@}*/ /* end of group CMSIS_Core_InstructionInterface */

/* ###################  Compiler specific Intrinsics  ########################### */
/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
  Access to dedicated SIMD instructions
  @{
*/

#if (defined (__CORTEX_M) && (__CORTEX_M >= 4U))

__attribute__((always_inline)) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

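/*
  Worked example (illustrative): the byte-wise adds operate on four packed
  uint8_t lanes per 32-bit word; the Q variants saturate each lane instead of
  wrapping.

  \code
  uint32_t a   = 0x80FF0102U;
  uint32_t b   = 0x80010304U;
  uint32_t sum = __UADD8 (a, b);   // 0x00000406 - each byte lane wraps modulo 256
  uint32_t sat = __UQADD8(a, b);   // 0xFFFF0406 - each byte lane saturates at 0xFF
  \endcode
 */
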
__attribute__((always_inline)) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

#define __SSAT16(ARG1,ARG2) \
({                          \
  int32_t __RES, __ARG1 = (ARG1); \
  __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })

#define __USAT16(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("usat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })

__attribute__((always_inline)) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __SMUAD  (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

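/*
  Usage sketch (illustrative, not part of this header): __SMLAD is the classic
  FIR/dot-product building block; it multiplies both packed int16_t lane pairs
  and adds the two products to an accumulator in one instruction. dot2 is a
  hypothetical helper; packing the halfwords is the caller's job.

  \code
  static inline int32_t dot2(uint32_t x, uint32_t y, int32_t acc)
  {
    /* acc + (int16_t)x * (int16_t)y + (int16_t)(x >> 16) * (int16_t)(y >> 16) */
    return (int32_t)__SMLAD(x, y, (uint32_t)acc);
  }
  \endcode
 */
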
__attribute__((always_inline)) __STATIC_INLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

__attribute__((always_inline)) __STATIC_INLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __SMUSD  (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

__attribute__((always_inline)) __STATIC_INLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

__attribute__((always_inline)) __STATIC_INLINE uint32_t __SEL  (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE int32_t __QADD( int32_t op1,  int32_t op2)
{
  int32_t result;

  __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__((always_inline)) __STATIC_INLINE int32_t __QSUB( int32_t op1,  int32_t op2)
{
  int32_t result;

  __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

#define __PKHBT(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" (ARG3)  ); \
  __RES; \
 })

#define __PKHTB(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  if (ARG3 == 0) \
    __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2)  ); \
  else \
    __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" (ARG3)  ); \
  __RES; \
 })

__attribute__((always_inline)) __STATIC_INLINE uint32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
{
  int32_t result;

  __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

#endif /* (defined (__CORTEX_M) && (__CORTEX_M >= 4U)) */
/*@} end of group CMSIS_SIMD_intrinsics */


#if defined ( __GNUC__ )
#pragma GCC diagnostic pop
#endif

#endif /* __CMSIS_GCC_H */