/**************************************************************************//**\r
* @file cmsis_armclang.h\r
* @brief CMSIS compiler armclang (Arm Compiler 6) header file\r
- * @version V5.0.4\r
- * @date 10. January 2018\r
+ * @version V5.1.0\r
+ * @date 14. March 2019\r
******************************************************************************/\r
/*\r
- * Copyright (c) 2009-2018 Arm Limited. All rights reserved.\r
+ * Copyright (c) 2009-2019 Arm Limited. All rights reserved.\r
*\r
* SPDX-License-Identifier: Apache-2.0\r
*\r
#ifndef __STATIC_INLINE\r
#define __STATIC_INLINE static __inline\r
#endif\r
-#ifndef __STATIC_FORCEINLINE \r
+#ifndef __STATIC_FORCEINLINE\r
#define __STATIC_FORCEINLINE __attribute__((always_inline)) static __inline\r
-#endif \r
+#endif\r
#ifndef __NO_RETURN\r
#define __NO_RETURN __attribute__((__noreturn__))\r
#endif\r
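/* Usage sketch (illustrative, not part of the header): the attribute macros
   as application code would use them; fault_loop() and add_one() are
   hypothetical helpers. */
__NO_RETURN static void fault_loop(void)
{
  for (;;) { /* spin; __NO_RETURN lets the compiler drop the return path */ }
}

__STATIC_FORCEINLINE uint32_t add_one(uint32_t x)
{
  return x + 1U;   /* forced inline: no out-of-line copy is emitted */
}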
*/\r
__STATIC_FORCEINLINE uint32_t __get_PSP(void)\r
{\r
- register uint32_t result;\r
+ uint32_t result;\r
\r
__ASM volatile ("MRS %0, psp" : "=r" (result) );\r
return(result);\r
*/\r
__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void)\r
{\r
- register uint32_t result;\r
+ uint32_t result;\r
\r
__ASM volatile ("MRS %0, psp_ns" : "=r" (result) );\r
return(result);\r
*/\r
__STATIC_FORCEINLINE uint32_t __get_MSP(void)\r
{\r
- register uint32_t result;\r
+ uint32_t result;\r
\r
__ASM volatile ("MRS %0, msp" : "=r" (result) );\r
return(result);\r
*/\r
__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void)\r
{\r
- register uint32_t result;\r
+ uint32_t result;\r
\r
__ASM volatile ("MRS %0, msp_ns" : "=r" (result) );\r
return(result);\r
*/\r
__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void)\r
{\r
- register uint32_t result;\r
+ uint32_t result;\r
\r
__ASM volatile ("MRS %0, sp_ns" : "=r" (result) );\r
return(result);\r
// without main extensions, the non-secure PSPLIM is RAZ/WI\r
return 0U;\r
#else\r
- register uint32_t result;\r
+ uint32_t result;\r
__ASM volatile ("MRS %0, psplim" : "=r" (result) );\r
return result;\r
#endif\r
// without main extensions, the non-secure PSPLIM is RAZ/WI\r
return 0U;\r
#else\r
- register uint32_t result;\r
+ uint32_t result;\r
__ASM volatile ("MRS %0, psplim_ns" : "=r" (result) );\r
return result;\r
#endif\r
// without main extensions, the non-secure MSPLIM is RAZ/WI\r
return 0U;\r
#else\r
- register uint32_t result;\r
+ uint32_t result;\r
__ASM volatile ("MRS %0, msplim" : "=r" (result) );\r
return result;\r
#endif\r
// without main extensions, the non-secure MSPLIM is RAZ/WI\r
return 0U;\r
#else\r
- register uint32_t result;\r
+ uint32_t result;\r
__ASM volatile ("MRS %0, msplim_ns" : "=r" (result) );\r
return result;\r
#endif\r
#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \\r
(defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */\r
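/* Illustrative sketch (assumes an Armv8-M target; on Baseline parts without
   the Main Extension the limit register reads as zero, as noted above):
   psp_headroom() is a hypothetical helper built on the accessors above. */
__STATIC_FORCEINLINE uint32_t psp_headroom(void)
{
  return __get_PSP() - __get_PSPLIM();   /* bytes left before a stack-limit fault */
}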
\r
-\r
-#if ((defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \\r
- (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) )\r
-\r
/**\r
\brief Get FPSCR\r
\details Returns the current value of the Floating Point Status/Control register.\r
#define __set_FPSCR(x) ((void)(x))\r
#endif\r
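/* Illustrative sketch (assumes an FPU build where __get_FPSCR/__set_FPSCR
   reach the real register; on FPU-less parts they are the 0/no-op stubs
   defined above): clearing the sticky divide-by-zero flag, FPSCR bit 1.
   clear_fp_div_by_zero() is a hypothetical helper. */
__STATIC_FORCEINLINE void clear_fp_div_by_zero(void)
{
  uint32_t fpscr = __get_FPSCR();
  if ((fpscr & (1UL << 1)) != 0U)       /* DZC, FPSCR bit 1 */
  {
    __set_FPSCR(fpscr & ~(1UL << 1));   /* write back with DZC cleared */
  }
}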
\r
-#endif /* ((defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \\r
- (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */\r
-\r
-\r
\r
/*@} end of CMSIS_Core_RegAccFunctions */\r
\r
/* Define macros for porting to both thumb1 and thumb2.
 * For thumb1, use low register (r0-r7), specified by constraint "l"
 * Otherwise, use general registers, specified by constraint "r" */
#if defined (__thumb__) && !defined (__thumb2__)\r
#define __CMSIS_GCC_OUT_REG(r) "=l" (r)\r
+#define __CMSIS_GCC_RW_REG(r) "+l" (r)\r
#define __CMSIS_GCC_USE_REG(r) "l" (r)\r
#else\r
#define __CMSIS_GCC_OUT_REG(r) "=r" (r)\r
+#define __CMSIS_GCC_RW_REG(r) "+r" (r)\r
#define __CMSIS_GCC_USE_REG(r) "r" (r)\r
#endif\r
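/* Illustrative sketch: how the constraint macros are used inside an
   inline-assembly intrinsic. On Thumb-1 targets "+l" keeps the operand in a
   low register (r0-r7), as 16-bit encodings require. __example_rev16() is a
   hypothetical helper, not a CMSIS intrinsic. */
__STATIC_FORCEINLINE uint32_t __example_rev16(uint32_t value)
{
  __ASM volatile ("rev16 %0, %0" : __CMSIS_GCC_RW_REG (value) );
  return value;
}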
\r
so that all instructions following the ISB are fetched from cache or memory,\r
after the instruction has been completed.\r
*/\r
-#define __ISB() __builtin_arm_isb(0xF);\r
+#define __ISB() __builtin_arm_isb(0xF)\r
\r
/**\r
\brief Data Synchronization Barrier\r
\details Acts as a special kind of Data Memory Barrier.\r
It completes when all explicit memory accesses before this instruction complete.\r
*/\r
-#define __DSB() __builtin_arm_dsb(0xF);\r
+#define __DSB() __builtin_arm_dsb(0xF)\r
\r
\r
/**\r
\details Ensures the apparent order of the explicit memory operations before\r
and after the instruction, without ensuring their completion.\r
*/\r
-#define __DMB() __builtin_arm_dmb(0xF);\r
+#define __DMB() __builtin_arm_dmb(0xF)\r
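/* Illustrative sketch (assumes SCB from a CMSIS-Core device header): the
   canonical DSB+ISB pair after relocating the vector table, so that no
   stale fetch uses the old VTOR value. __DMB() would suffice where only
   ordering of data accesses, not their completion, is required. */
static void relocate_vectors(uint32_t table_addr)
{
  SCB->VTOR = table_addr;   /* program the new vector table base */
  __DSB();                  /* wait for the write to complete    */
  __ISB();                  /* refetch subsequent instructions   */
}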
\r
\r
/**\r
\param [in] value Value to count the leading zeros of
\return number of leading zeros in value\r
*/\r
-#define __CLZ (uint8_t)__builtin_clz\r
+__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value)\r
+{\r
+ /* Even though __builtin_clz produces a CLZ instruction on ARM, formally\r
+ __builtin_clz(0) is undefined behaviour, so handle this case specially.\r
+ This guarantees ARM-compatible results if this code happens to be compiled
+ on a non-ARM target, and ensures the compiler doesn't decide to activate any
+ optimisations using the logic "value was passed to __builtin_clz, so it\r
+ is non-zero".\r
+ ARM Compiler 6.10 and possibly earlier will optimise this test away, leaving a\r
+ single CLZ instruction.\r
+ */\r
+ if (value == 0U)\r
+ {\r
+ return 32U;\r
+ }\r
+ return __builtin_clz(value);\r
+}\r
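/* Illustrative use of the guaranteed __CLZ(0) == 32 result: bit_width() is
   a hypothetical helper returning the number of significant bits. */
__STATIC_FORCEINLINE uint32_t bit_width(uint32_t x)
{
  return 32U - (uint32_t)__CLZ(x);   /* 0 for x == 0, 32 for x >= 0x80000000 */
}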
\r
\r
#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \\r
\r
#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1))\r
\r
-__STATIC_FORCEINLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-\r
-__STATIC_FORCEINLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-\r
-__STATIC_FORCEINLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __SASX(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __QASX(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __UASX(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __USAX(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );\r
- return(result);\r
-}\r
-\r
-#define __SSAT16(ARG1,ARG2) \\r
-({ \\r
- int32_t __RES, __ARG1 = (ARG1); \\r
- __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \\r
- __RES; \\r
- })\r
-\r
-#define __USAT16(ARG1,ARG2) \\r
-({ \\r
- uint32_t __RES, __ARG1 = (ARG1); \\r
- __ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \\r
- __RES; \\r
- })\r
-\r
-__STATIC_FORCEINLINE uint32_t __UXTB16(uint32_t op1)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __SXTB16(uint32_t op1)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)\r
-{\r
- union llreg_u{\r
- uint32_t w32[2];\r
- uint64_t w64;\r
- } llr;\r
- llr.w64 = acc;\r
-\r
-#ifndef __ARMEB__ /* Little endian */\r
- __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );\r
-#else /* Big endian */\r
- __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );\r
-#endif\r
-\r
- return(llr.w64);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)\r
-{\r
- union llreg_u{\r
- uint32_t w32[2];\r
- uint64_t w64;\r
- } llr;\r
- llr.w64 = acc;\r
-\r
-#ifndef __ARMEB__ /* Little endian */\r
- __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );\r
-#else /* Big endian */\r
- __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );\r
-#endif\r
-\r
- return(llr.w64);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc)\r
-{\r
- union llreg_u{\r
- uint32_t w32[2];\r
- uint64_t w64;\r
- } llr;\r
- llr.w64 = acc;\r
-\r
-#ifndef __ARMEB__ /* Little endian */\r
- __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );\r
-#else /* Big endian */\r
- __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );\r
-#endif\r
-\r
- return(llr.w64);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc)\r
-{\r
- union llreg_u{\r
- uint32_t w32[2];\r
- uint64_t w64;\r
- } llr;\r
- llr.w64 = acc;\r
-\r
-#ifndef __ARMEB__ /* Little endian */\r
- __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );\r
-#else /* Big endian */\r
- __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );\r
-#endif\r
-\r
- return(llr.w64);\r
-}\r
-\r
-__STATIC_FORCEINLINE uint32_t __SEL (uint32_t op1, uint32_t op2)\r
-{\r
- uint32_t result;\r
-\r
- __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE int32_t __QADD( int32_t op1, int32_t op2)\r
-{\r
- int32_t result;\r
-\r
- __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-__STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2)\r
-{\r
- int32_t result;\r
-\r
- __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );\r
- return(result);\r
-}\r
-\r
-#if 0\r
-#define __PKHBT(ARG1,ARG2,ARG3) \\r
-({ \\r
- uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \\r
- __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \\r
- __RES; \\r
- })\r
-\r
-#define __PKHTB(ARG1,ARG2,ARG3) \\r
-({ \\r
- uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \\r
- if (ARG3 == 0) \\r
- __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \\r
- else \\r
- __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \\r
- __RES; \\r
- })\r
-#endif\r
+#define __SADD8 __builtin_arm_sadd8\r
+#define __QADD8 __builtin_arm_qadd8\r
+#define __SHADD8 __builtin_arm_shadd8\r
+#define __UADD8 __builtin_arm_uadd8\r
+#define __UQADD8 __builtin_arm_uqadd8\r
+#define __UHADD8 __builtin_arm_uhadd8\r
+#define __SSUB8 __builtin_arm_ssub8\r
+#define __QSUB8 __builtin_arm_qsub8\r
+#define __SHSUB8 __builtin_arm_shsub8\r
+#define __USUB8 __builtin_arm_usub8\r
+#define __UQSUB8 __builtin_arm_uqsub8\r
+#define __UHSUB8 __builtin_arm_uhsub8\r
+#define __SADD16 __builtin_arm_sadd16\r
+#define __QADD16 __builtin_arm_qadd16\r
+#define __SHADD16 __builtin_arm_shadd16\r
+#define __UADD16 __builtin_arm_uadd16\r
+#define __UQADD16 __builtin_arm_uqadd16\r
+#define __UHADD16 __builtin_arm_uhadd16\r
+#define __SSUB16 __builtin_arm_ssub16\r
+#define __QSUB16 __builtin_arm_qsub16\r
+#define __SHSUB16 __builtin_arm_shsub16\r
+#define __USUB16 __builtin_arm_usub16\r
+#define __UQSUB16 __builtin_arm_uqsub16\r
+#define __UHSUB16 __builtin_arm_uhsub16\r
+#define __SASX __builtin_arm_sasx\r
+#define __QASX __builtin_arm_qasx\r
+#define __SHASX __builtin_arm_shasx\r
+#define __UASX __builtin_arm_uasx\r
+#define __UQASX __builtin_arm_uqasx\r
+#define __UHASX __builtin_arm_uhasx\r
+#define __SSAX __builtin_arm_ssax\r
+#define __QSAX __builtin_arm_qsax\r
+#define __SHSAX __builtin_arm_shsax\r
+#define __USAX __builtin_arm_usax\r
+#define __UQSAX __builtin_arm_uqsax\r
+#define __UHSAX __builtin_arm_uhsax\r
+#define __USAD8 __builtin_arm_usad8\r
+#define __USADA8 __builtin_arm_usada8\r
+#define __SSAT16 __builtin_arm_ssat16\r
+#define __USAT16 __builtin_arm_usat16\r
+#define __UXTB16 __builtin_arm_uxtb16\r
+#define __UXTAB16 __builtin_arm_uxtab16\r
+#define __SXTB16 __builtin_arm_sxtb16\r
+#define __SXTAB16 __builtin_arm_sxtab16\r
+#define __SMUAD __builtin_arm_smuad\r
+#define __SMUADX __builtin_arm_smuadx\r
+#define __SMLAD __builtin_arm_smlad\r
+#define __SMLADX __builtin_arm_smladx\r
+#define __SMLALD __builtin_arm_smlald\r
+#define __SMLALDX __builtin_arm_smlaldx\r
+#define __SMUSD __builtin_arm_smusd\r
+#define __SMUSDX __builtin_arm_smusdx\r
+#define __SMLSD __builtin_arm_smlsd\r
+#define __SMLSDX __builtin_arm_smlsdx\r
+#define __SMLSLD __builtin_arm_smlsld\r
+#define __SMLSLDX __builtin_arm_smlsldx\r
+#define __SEL __builtin_arm_sel\r
+#define __QADD __builtin_arm_qadd\r
+#define __QSUB __builtin_arm_qsub\r
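/* Worked example (illustrative): the SIMD intrinsics operate per lane.
   __UADD8 adds the four byte lanes independently with modulo-256 wrap and
   no carry between lanes, while __UQADD8 would saturate a lane at 0xFF
   instead of wrapping. uadd8_demo() is a hypothetical helper. */
__STATIC_FORCEINLINE uint32_t uadd8_demo(void)
{
  /* lanes: 0x10+0x01, 0xFF+0x01 (wraps to 0x00), 0x30+0x70, 0x40+0x70 */
  return __UADD8(0x10FF3040U, 0x01017070U);   /* == 0x1100A0B0 */
}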
\r
#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \\r
((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) )\r
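/* Worked example (illustrative): __PKHBT keeps the bottom halfword of its
   first argument and merges in the shifted second argument. pkhbt_demo()
   is a hypothetical helper. */
__STATIC_FORCEINLINE uint32_t pkhbt_demo(void)
{
  /* 0x1234 | ((0x5678 << 16) & 0xFFFF0000) */
  return __PKHBT(0xAAAA1234UL, 0x5678UL, 16);   /* == 0x56781234UL */
}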