/**************************************************************************//**
 * @file     core_cm4_simd.h
 * @brief    CMSIS Cortex-M4 SIMD Header File
 * @date     25. February 2013
 ******************************************************************************/
/* Copyright (c) 2009 - 2013 ARM LIMITED

   All rights reserved.
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
   - Redistributions of source code must retain the above copyright
     notice, this list of conditions and the following disclaimer.
   - Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.
   - Neither the name of ARM nor the names of its contributors may be used
     to endorse or promote products derived from this software without
     specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   POSSIBILITY OF SUCH DAMAGE.
   ---------------------------------------------------------------------------*/
#ifndef __CORE_CM4_SIMD_H
#define __CORE_CM4_SIMD_H

/*******************************************************************************
 *                Hardware Abstraction Layer
 ******************************************************************************/


/* ###################  Compiler specific Intrinsics  ########################### */
/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
  Access to dedicated SIMD instructions
  @{
*/
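/* Illustrative use (added note, not part of the original header): each
   intrinsic operates on packed sub-words of a 32-bit operand. For example,
   __UADD8 adds four unsigned bytes lane by lane in a single instruction:

     uint32_t a   = 0x04030201UL;
     uint32_t b   = 0x40302010UL;
     uint32_t sum = __UADD8(a, b);   // sum == 0x44332211, no carry between lanes

   The Q/UQ variants saturate each lane instead of wrapping, and the SH/UH
   variants halve each lane's result. */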
#if   defined ( __CC_ARM ) /*------------------RealView Compiler -----------------*/
/* ARM armcc specific functions */

/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
#define __SADD8                           __sadd8
#define __QADD8                           __qadd8
#define __SHADD8                          __shadd8
#define __UADD8                           __uadd8
#define __UQADD8                          __uqadd8
#define __UHADD8                          __uhadd8
#define __SSUB8                           __ssub8
#define __QSUB8                           __qsub8
#define __SHSUB8                          __shsub8
#define __USUB8                           __usub8
#define __UQSUB8                          __uqsub8
#define __UHSUB8                          __uhsub8
#define __SADD16                          __sadd16
#define __QADD16                          __qadd16
#define __SHADD16                         __shadd16
#define __UADD16                          __uadd16
#define __UQADD16                         __uqadd16
#define __UHADD16                         __uhadd16
#define __SSUB16                          __ssub16
#define __QSUB16                          __qsub16
#define __SHSUB16                         __shsub16
#define __USUB16                          __usub16
#define __UQSUB16                         __uqsub16
#define __UHSUB16                         __uhsub16
#define __SASX                            __sasx
#define __QASX                            __qasx
#define __SHASX                           __shasx
#define __UASX                            __uasx
#define __UQASX                           __uqasx
#define __UHASX                           __uhasx
#define __SSAX                            __ssax
#define __QSAX                            __qsax
#define __SHSAX                           __shsax
#define __USAX                            __usax
#define __UQSAX                           __uqsax
#define __UHSAX                           __uhsax
#define __USAD8                           __usad8
#define __USADA8                          __usada8
#define __SSAT16                          __ssat16
#define __USAT16                          __usat16
#define __UXTB16                          __uxtb16
#define __UXTAB16                         __uxtab16
#define __SXTB16                          __sxtb16
#define __SXTAB16                         __sxtab16
#define __SMUAD                           __smuad
#define __SMUADX                          __smuadx
#define __SMLAD                           __smlad
#define __SMLADX                          __smladx
#define __SMLALD                          __smlald
#define __SMLALDX                         __smlaldx
#define __SMUSD                           __smusd
#define __SMUSDX                          __smusdx
#define __SMLSD                           __smlsd
#define __SMLSDX                          __smlsdx
#define __SMLSLD                          __smlsld
#define __SMLSLDX                         __smlsldx
#define __SEL                             __sel
#define __QADD                            __qadd
#define __QSUB                            __qsub
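/* Added note: the pack-halfword macros below mirror the PKHBT/PKHTB
   instructions. PKHBT keeps the bottom halfword of ARG1 and combines it with
   the top halfword of ARG2 shifted left by ARG3; PKHTB keeps the top halfword
   of ARG1 and combines it with the bottom halfword of ARG2 shifted right by
   ARG3. Illustrative values:

     __PKHBT(0x00001111UL, 0x22220000UL, 0)   // -> 0x22221111
*/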
#define __PKHBT(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0x0000FFFFUL) |  \
                                           ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL)  )

#define __PKHTB(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0xFFFF0000UL) |  \
                                           ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL)  )

#define __SMMLA(ARG1,ARG2,ARG3)          ( (int32_t)((((int64_t)(ARG1) * (ARG2)) + \
                                                      ((int64_t)(ARG3) << 32)      ) >> 32))
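/* Added note: __SMMLA yields the most significant 32 bits of
   op1*op2 + (op3 << 32), i.e. a 32x32->64 multiply-accumulate that keeps only
   the top word. Worked example (illustrative): with ARG1 = ARG2 = 0x00010000
   (2^16) and ARG3 = 5, the product is 2^32, so the result is 1 + 5 = 6. */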
/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/



#elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/
/* IAR iccarm specific functions */

/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
#include <cmsis_iar.h>

/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/



#elif defined ( __TMS470__ ) /*---------------- TI CCS Compiler ------------------*/
/* TI CCS specific functions */

/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
#include <cmsis_ccs.h>

/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/

#elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/
/* GNU gcc specific functions */

/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
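/* Added note: each function below wraps one SIMD instruction in GCC extended
   inline assembly. "=r" binds the result to a write-only register operand,
   "r" binds each input to a register, and "volatile" keeps the optimizer from
   discarding the instruction when only its flag side effects (e.g. the GE
   bits later consumed by __SEL) are of interest. */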
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;
  __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}
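/* Added note: __SSAT16/__USAT16 are macros rather than functions because the
   saturation bit position is encoded as an immediate in the instruction; the
   "I" constraint below therefore requires ARG2 to be a compile-time constant.
   The GNU statement-expression form ({ ... }) returns __RES as the macro's
   value. */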
#define __SSAT16(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })

#define __USAT16(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("usat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1)
{
  uint32_t result;
  __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1)
{
  uint32_t result;
  __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUAD  (uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;
  __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;
  __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}
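/* Added note: the 64-bit accumulating macros below split the uint64_t
   accumulator into two 32-bit halves because SMLALD/SMLSLD take the
   accumulator in a register pair. The "0" and "1" matching constraints tie
   the input halves to the same registers as the outputs, and the last line of
   each statement expression re-assembles the 64-bit result. */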
#define __SMLALD(ARG1,ARG2,ARG3) \
({ \
  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
 })

#define __SMLALDX(ARG1,ARG2,ARG3) \
({ \
  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
 })

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSD  (uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;
  __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;
  __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

#define __SMLSLD(ARG1,ARG2,ARG3) \
({ \
  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((ARG3) >> 32), __ARG3_L = (uint32_t)((ARG3) & 0xFFFFFFFFUL); \
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
 })

#define __SMLSLDX(ARG1,ARG2,ARG3) \
({ \
  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((ARG3) >> 32), __ARG3_L = (uint32_t)((ARG3) & 0xFFFFFFFFUL); \
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
 })

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SEL  (uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB(uint32_t op1, uint32_t op2)
{
  uint32_t result;
  __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
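/* Added note: GCC forms of the pack-halfword macros. __PKHTB needs two
   encodings because an immediate ASR amount of 0 would assemble as "asr #32";
   when ARG3 is 0 the instruction is emitted without a shift specifier,
   otherwise with "asr ARG3". In both macros the shift amount must be a
   compile-time constant (the "I" constraint). */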
#define __PKHBT(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" (ARG3)  ); \
  __RES; \
 })

#define __PKHTB(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  if (ARG3 == 0) \
    __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2)  ); \
  else \
    __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" (ARG3)  ); \
  __RES; \
 })

__attribute__( ( always_inline ) ) __STATIC_INLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
{
  int32_t result;
  __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/



#elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/
/* TASKING carm specific functions */

/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
/* not yet supported */
/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/


#endif

/*@} end of group CMSIS_SIMD_intrinsics */


#endif /* __CORE_CM4_SIMD_H */