/**************************************************************************//**
 * @file     core_cmSimd.h
 * @brief    CMSIS Cortex-M SIMD Header File
 * @date     22. August 2014
 *
 ******************************************************************************/
\r
/* Copyright (c) 2009 - 2014 ARM LIMITED

   All rights reserved.
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
   - Redistributions of source code must retain the above copyright
     notice, this list of conditions and the following disclaimer.
   - Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.
   - Neither the name of ARM nor the names of its contributors may be used
     to endorse or promote products derived from this software without
     specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   POSSIBILITY OF SUCH DAMAGE.
   ---------------------------------------------------------------------------*/
\r
#if defined ( __ICCARM__ )
 #pragma system_include  /* treat file as system include file for MISRA check */
#endif

#ifndef __CORE_CMSIMD_H
#define __CORE_CMSIMD_H

#ifdef __cplusplus
 extern "C" {
#endif
\r
/*******************************************************************************
 *                 Hardware Abstraction Layer
 ******************************************************************************/

/* ###################  Compiler specific Intrinsics  ########################### */
/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
  Access to dedicated SIMD instructions
  @{
*/
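/* Illustrative usage sketch (not part of the original header): each intrinsic
   in this group maps onto one Cortex-M DSP instruction operating lane-wise on
   packed 8-bit or 16-bit values, e.g. saturating byte addition:

     uint32_t a = 0x7F010203U;        // four int8 lanes: 0x7F, 0x01, 0x02, 0x03
     uint32_t b = 0x01010101U;        // four int8 lanes of 0x01
     uint32_t s = __QADD8(a, b);      // 0x7F020304 - top lane saturates at 0x7F
*/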
\r
#if   defined ( __CC_ARM ) /*------------------RealView Compiler -----------------*/
/* ARM armcc specific functions */
#define __SADD8                           __sadd8
#define __QADD8                           __qadd8
#define __SHADD8                          __shadd8
#define __UADD8                           __uadd8
#define __UQADD8                          __uqadd8
#define __UHADD8                          __uhadd8
#define __SSUB8                           __ssub8
#define __QSUB8                           __qsub8
#define __SHSUB8                          __shsub8
#define __USUB8                           __usub8
#define __UQSUB8                          __uqsub8
#define __UHSUB8                          __uhsub8
#define __SADD16                          __sadd16
#define __QADD16                          __qadd16
#define __SHADD16                         __shadd16
#define __UADD16                          __uadd16
#define __UQADD16                         __uqadd16
#define __UHADD16                         __uhadd16
#define __SSUB16                          __ssub16
#define __QSUB16                          __qsub16
#define __SHSUB16                         __shsub16
#define __USUB16                          __usub16
#define __UQSUB16                         __uqsub16
#define __UHSUB16                         __uhsub16
#define __SASX                            __sasx
#define __QASX                            __qasx
#define __SHASX                           __shasx
#define __UASX                            __uasx
#define __UQASX                           __uqasx
#define __UHASX                           __uhasx
#define __SSAX                            __ssax
#define __QSAX                            __qsax
#define __SHSAX                           __shsax
#define __USAX                            __usax
#define __UQSAX                           __uqsax
#define __UHSAX                           __uhsax
#define __USAD8                           __usad8
#define __USADA8                          __usada8
#define __SSAT16                          __ssat16
#define __USAT16                          __usat16
#define __UXTB16                          __uxtb16
#define __UXTAB16                         __uxtab16
#define __SXTB16                          __sxtb16
#define __SXTAB16                         __sxtab16
#define __SMUAD                           __smuad
#define __SMUADX                          __smuadx
#define __SMLAD                           __smlad
#define __SMLADX                          __smladx
#define __SMLALD                          __smlald
#define __SMLALDX                         __smlaldx
#define __SMUSD                           __smusd
#define __SMUSDX                          __smusdx
#define __SMLSD                           __smlsd
#define __SMLSDX                          __smlsdx
#define __SMLSLD                          __smlsld
#define __SMLSLDX                         __smlsldx
#define __SEL                             __sel
#define __QADD                            __qadd
#define __QSUB                            __qsub
\r
#define __PKHBT(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0x0000FFFFUL) |  \
                                           ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL)  )

#define __PKHTB(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0xFFFF0000UL) |  \
                                           ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL)  )
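/* Worked example (illustrative): __PKHBT keeps the bottom halfword of ARG1 and
   packs the shifted ARG2 into the top halfword:
     __PKHBT(0x00001111, 0x00002222, 16) -> 0x22221111
*/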
\r
#define __SMMLA(ARG1,ARG2,ARG3)          ( (int32_t)((((int64_t)(ARG1) * (ARG2)) + \
                                                      ((int64_t)(ARG3) << 32)      ) >> 32))
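/* Worked example (illustrative): __SMMLA returns the high 32 bits of
   op1*op2 + (op3 << 32), i.e. a 32x32->64 multiply-accumulate keeping only the
   top word:
     __SMMLA(0x40000000, 4, 1) = (int32_t)(((int64_t)0x40000000 * 4 +
                                            ((int64_t)1 << 32)) >> 32) = 2
*/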
\r
#elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/
/* GNU gcc specific functions */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;
  __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}
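/* Illustrative example: __USAD8 sums the absolute differences of the four
   byte lanes (a common building block for motion estimation), and __USADA8
   adds that sum to an accumulator:
     __USAD8(0x05020800U, 0x01030200U) = 4 + 1 + 6 + 0 = 11
*/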
\r
#define __SSAT16(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })

#define __USAT16(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })
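/* Note: the "I" operand constraint requires ARG2 to be a compile-time
   constant, because ssat16/usat16 encode the saturation bound as an
   immediate field of the instruction. */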
\r
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1)
{
  uint32_t result;
  __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1)
{
  uint32_t result;
  __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
\r
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
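/* Illustrative example: __SMUAD multiplies the two signed halfword lane pairs
   and adds the products:
     __SMUAD(0x00030002, 0x00050004) = 3*5 + 2*4 = 23
*/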
\r
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;
  __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;
  __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}
\r
__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   // Little endian
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               // Big endian
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   // Little endian
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               // Big endian
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}
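/* The union above splits the 64-bit accumulator into the two 32-bit registers
   that smlald/smlaldx expect; the lane order swaps between little-endian and
   big-endian builds, hence the two __ARMEB__ branches. */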
\r
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;
  __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;
  __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}
\r
__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   // Little endian
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               // Big endian
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   // Little endian
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               // Big endian
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}
\r
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SEL (uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
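/* Illustrative idiom (a sketch, not part of the original header): __SEL picks
   each result byte from op1 or op2 according to the APSR.GE flags set by a
   preceding SIMD operation, so a per-byte unsigned maximum can be written as:
     __USUB8(a, b);                  // sets GE[n] where byte n of a >= b
     uint32_t max4 = __SEL(a, b);    // per-lane max(a, b)
*/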
\r
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
\r
#define __PKHBT(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
  __RES; \
 })

#define __PKHTB(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  if (ARG3 == 0) \
    __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \
  else \
    __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
  __RES; \
 })
\r
__attribute__( ( always_inline ) ) __STATIC_INLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
{
  int32_t result;
  __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}
\r
#elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/
/* IAR iccarm specific functions */
#include <cmsis_iar.h>


#elif defined ( __TMS470__ ) /*---------------- TI CCS Compiler ------------------*/
/* TI CCS specific functions */
#include <cmsis_ccs.h>


#elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/
/* TASKING carm specific functions */
/* not yet supported */


#elif defined ( __CSMC__ ) /*------------------ COSMIC Compiler -------------------*/
/* Cosmic specific functions */
#include <cmsis_csm.h>

#endif
\r
/*@} end of group CMSIS_SIMD_intrinsics */

#ifdef __cplusplus
}
#endif

#endif /* __CORE_CMSIMD_H */
\r