/*This file is prepared for Doxygen automatic documentation generation.*/
/*! \file *********************************************************************
 *
 * \brief Compiler file for AVR32.
 *
 * This file defines commonly used types and macros.
 *
 * - Compiler:           IAR EWAVR32 and GNU GCC for AVR32
 * - Supported devices:  All AVR32 devices can be used.
 *
 * \author               Atmel Corporation: http://www.atmel.com \n
 *                       Support and FAQ: http://support.atmel.no/
 *
 ******************************************************************************/
\r
/* Copyright (c) 2007, Atmel Corporation All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. The name of ATMEL may not be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY ATMEL ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY AND
 * SPECIFICALLY DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
\r
45 #ifndef _COMPILER_H_
\r
46 #define _COMPILER_H_
\r
48 #if (__GNUC__ && __AVR32__) || (__ICCAVR32__ || __AAVR32__)
\r
49 # include <avr32/io.h>
\r
52 # include <intrinsics.h>
\r
54 #include "preprocessor.h"
\r
57 //_____ D E C L A R A T I O N S ____________________________________________
\r
59 #ifdef __AVR32_ABI_COMPILER__ // Automatically defined when compiling for AVR32, not when assembling.
\r
67 /*! \name Compiler Keywords
\r
69 * Port of some keywords from GNU GCC for AVR32 to IAR Embedded Workbench for Atmel AVR32.
\r
73 #define __inline__ inline
\r
74 #define __volatile__
\r
/*! \name Usual Types
 *
 * Fixed-width integer and floating-point aliases used throughout the AVR32
 * framework.
 *
 * NOTE(review): the stated widths assume the AVR32 ILP32 data model
 * (int == long == 32 bits); U32/S32 are NOT 32-bit on LP64 hosts — confirm
 * before reusing this header off-target.
 */
//! @{
typedef unsigned char           Bool; //!< Boolean.
typedef unsigned char           U8 ;  //!< 8-bit unsigned integer.
typedef unsigned short int      U16;  //!< 16-bit unsigned integer.
typedef unsigned long int       U32;  //!< 32-bit unsigned integer.
typedef unsigned long long int  U64;  //!< 64-bit unsigned integer.
typedef signed char             S8 ;  //!< 8-bit signed integer.
typedef signed short int        S16;  //!< 16-bit signed integer.
typedef signed long int         S32;  //!< 32-bit signed integer.
typedef signed long long int    S64;  //!< 64-bit signed integer.
typedef float                   F32;  //!< 32-bit floating-point number.
typedef double                  F64;  //!< 64-bit floating-point number.
//! @}

/*! \name Status Types
 */
//! @{
typedef Bool                    Status_bool_t;  //!< Boolean status.
typedef U8                      Status_t;       //!< 8-bit-coded status.
//! @}
\r
105 /*! \name Aliasing Aggregate Types
\r
133 //! Union of pointers to 64-, 32-, 16- and 8-bit unsigned integers.
\r
142 //! Union of pointers to volatile 64-, 32-, 16- and 8-bit unsigned integers.
\r
145 volatile U64 *u64ptr;
\r
146 volatile U32 *u32ptr;
\r
147 volatile U16 *u16ptr;
\r
148 volatile U8 *u8ptr ;
\r
151 //! Union of pointers to constant 64-, 32-, 16- and 8-bit unsigned integers.
\r
160 //! Union of pointers to constant volatile 64-, 32-, 16- and 8-bit unsigned integers.
\r
163 const volatile U64 *u64ptr;
\r
164 const volatile U32 *u32ptr;
\r
165 const volatile U16 *u16ptr;
\r
166 const volatile U8 *u8ptr ;
\r
169 //! Structure of pointers to 64-, 32-, 16- and 8-bit unsigned integers.
\r
178 //! Structure of pointers to volatile 64-, 32-, 16- and 8-bit unsigned integers.
\r
181 volatile U64 *u64ptr;
\r
182 volatile U32 *u32ptr;
\r
183 volatile U16 *u16ptr;
\r
184 volatile U8 *u8ptr ;
\r
187 //! Structure of pointers to constant 64-, 32-, 16- and 8-bit unsigned integers.
\r
196 //! Structure of pointers to constant volatile 64-, 32-, 16- and 8-bit unsigned integers.
\r
199 const volatile U64 *u64ptr;
\r
200 const volatile U32 *u32ptr;
\r
201 const volatile U16 *u16ptr;
\r
202 const volatile U8 *u8ptr ;
\r
207 #endif // __AVR32_ABI_COMPILER__
\r
210 //_____ M A C R O S ________________________________________________________
\r
212 /*! \name Usual Constants
\r
234 #ifdef __AVR32_ABI_COMPILER__ // Automatically defined when compiling for AVR32, not when assembling.
\r
/*! \name Bit-Field Handling
 *
 * Generic read/modify macros operating on integer values through bit-masks.
 * The Wr/Clr/Set/Tgl variants take a C lvalue and assign the result back to it.
 */

/*! \brief Reads the bits of a value specified by a given bit-mask.
 *
 * \param value Value to read bits from.
 * \param mask  Bit-mask indicating bits to read.
 *
 * \return Read bits.
 */
#define Rd_bits( value, mask)        ((value) & (mask))

/*! \brief Writes the bits of a C lvalue specified by a given bit-mask.
 *
 * \param lvalue C lvalue to write bits to.
 * \param mask   Bit-mask indicating bits to write.
 * \param bits   Bits to write.
 *
 * \return Resulting value with written bits.
 *
 * \warning Evaluates \a lvalue and \a mask more than once; avoid arguments
 *          with side effects.
 */
#define Wr_bits(lvalue, mask, bits)  ((lvalue) = ((lvalue) & ~(mask)) |\
                                                 ((bits  ) &  (mask)))

/*! \brief Tests the bits of a value specified by a given bit-mask.
 *
 * \param value Value of which to test bits.
 * \param mask  Bit-mask indicating bits to test.
 *
 * \return \c 1 if at least one of the tested bits is set, else \c 0.
 */
#define Tst_bits( value, mask)       (Rd_bits(value, mask) != 0)

/*! \brief Clears the bits of a C lvalue specified by a given bit-mask.
 *
 * \param lvalue C lvalue of which to clear bits.
 * \param mask   Bit-mask indicating bits to clear.
 *
 * \return Resulting value with cleared bits.
 */
#define Clr_bits(lvalue, mask)       ((lvalue) &= ~(mask))

/*! \brief Sets the bits of a C lvalue specified by a given bit-mask.
 *
 * \param lvalue C lvalue of which to set bits.
 * \param mask   Bit-mask indicating bits to set.
 *
 * \return Resulting value with set bits.
 */
#define Set_bits(lvalue, mask)       ((lvalue) |= (mask))

/*! \brief Toggles the bits of a C lvalue specified by a given bit-mask.
 *
 * \param lvalue C lvalue of which to toggle bits.
 * \param mask   Bit-mask indicating bits to toggle.
 *
 * \return Resulting value with toggled bits.
 */
#define Tgl_bits(lvalue, mask)       ((lvalue) ^= (mask))
\r
/*! \brief Reads the bit-field of a value specified by a given bit-mask.
 *
 * The field is extracted and right-justified: the bits selected by \a mask
 * are shifted down by the position of the mask's least significant set bit.
 *
 * \param value Value to read a bit-field from.
 * \param mask  Bit-mask indicating the bit-field to read.
 *
 * \return Read bit-field.
 *
 * \note Relies on the \c ctz macro defined elsewhere in this header; with a
 *       constant \a mask the shift amount folds at compile time.
 */
#define Rd_bitfield( value, mask)           (Rd_bits( value, mask) >> ctz(mask))

/*! \brief Writes the bit-field of a C lvalue specified by a given bit-mask.
 *
 * \param lvalue   C lvalue to write a bit-field to.
 * \param mask     Bit-mask indicating the bit-field to write.
 * \param bitfield Bit-field to write (right-justified; shifted into place).
 *
 * \return Resulting value with written bit-field.
 *
 * \warning Evaluates \a lvalue and \a mask more than once; avoid arguments
 *          with side effects.
 */
#define Wr_bitfield(lvalue, mask, bitfield) (Wr_bits(lvalue, mask, (U32)(bitfield) << ctz(mask)))
\r
318 /*! \brief This macro is used to test fatal errors.
\r
320 * The macro tests if the expression is FALSE. If it is, a fatal error is
\r
321 * detected and the application hangs up.
\r
323 * \param expr Expression to evaluate and supposed to be nonzero.
\r
325 #ifdef _ASSERT_ENABLE_
\r
326 #define Assert(expr) \
\r
328 if (!(expr)) while (TRUE);\
\r
331 #define Assert(expr)
\r
335 /*! \name Zero-Bit Counting
\r
337 * Under AVR32-GCC, __builtin_clz and __builtin_ctz behave like macros when
\r
338 * applied to constant expressions (values known at compile time), so they are
\r
339 * more optimized than the use of the corresponding assembly instructions and
\r
340 * they can be used as constant expressions e.g. to initialize objects having
\r
341 * static storage duration, and like the corresponding assembly instructions
\r
342 * when applied to non-constant expressions (values unknown at compile time), so
\r
343 * they are more optimized than an assembly periphrasis. Hence, clz and ctz
\r
344 * ensure a possible and optimized behavior for both constant and non-constant
\r
349 /*! \brief Counts the leading zero bits of the given value considered as a 32-bit integer.
\r
351 * \param u Value of which to count the leading zero bits.
\r
353 * \return The count of leading zero bits in \a u.
\r
356 #define clz(u) __builtin_clz(u)
\r
358 #define clz(u) __count_leading_zeros(u)
\r
361 /*! \brief Counts the trailing zero bits of the given value considered as a 32-bit integer.
\r
363 * \param u Value of which to count the trailing zero bits.
\r
365 * \return The count of trailing zero bits in \a u.
\r
368 #define ctz(u) __builtin_ctz(u)
\r
370 #define ctz(u) __count_trailing_zeros(u)
\r
/*! \name Alignment
 *
 * Alignment helpers based on the mask (n - 1); \a n must therefore be a
 * power of 2 for these macros to be meaningful.
 */

/*! \brief Tests alignment of the number \a val with the \a n boundary.
 *
 * \param val Input value.
 * \param n   Boundary (power of 2).
 *
 * \return \c 1 if the number \a val is aligned with the \a n boundary, else \c 0.
 */
#define Test_align(val, n     ) (!Tst_bits( val, (n) - 1     )   )

/*! \brief Gets alignment of the number \a val with respect to the \a n boundary.
 *
 * \param val Input value.
 * \param n   Boundary (power of 2).
 *
 * \return Alignment of the number \a val with respect to the \a n boundary
 *         (i.e. \a val modulo \a n).
 */
#define Get_align( val, n     ) (  Rd_bits( val, (n) - 1     )   )

/*! \brief Sets alignment of the lvalue number \a lval to \a alg with respect to the \a n boundary.
 *
 * \param lval Input/output lvalue.
 * \param n    Boundary (power of 2).
 * \param alg  Alignment.
 *
 * \return New value of \a lval resulting from its alignment set to \a alg
 *         with respect to the \a n boundary.
 */
#define Set_align(lval, n, alg) (  Wr_bits(lval, (n) - 1, alg)   )

/*! \brief Aligns the number \a val with the upper \a n boundary.
 *
 * \param val Input value.
 * \param n   Boundary (power of 2).
 *
 * \return Value resulting from the number \a val aligned with the upper \a n boundary.
 */
#define Align_up(  val, n     ) (((val) + ((n) - 1)) & ~((n) - 1))

/*! \brief Aligns the number \a val with the lower \a n boundary.
 *
 * \param val Input value.
 * \param n   Boundary (power of 2).
 *
 * \return Value resulting from the number \a val aligned with the lower \a n boundary.
 */
#define Align_down(val, n     ) ( (val)              & ~((n) - 1))
\r
/*! \name Mathematics
 *
 * The same considerations as for clz and ctz apply here but AVR32-GCC does not
 * provide built-in functions to access the assembly instructions abs, min and
 * max and it does not produce them by itself in most cases, so two sets of
 * macros are defined here:
 *   - Abs, Min and Max to apply to constant expressions (values known at
 *     compile time);
 *   - abs, min and max to apply to non-constant expressions (values unknown at
 *     compile time).
 */

/*! \brief Takes the absolute value of \a a.
 *
 * \param a Input value.
 *
 * \return Absolute value of \a a.
 *
 * \note More optimized if only used with values known at compile time.
 * \warning Evaluates \a a more than once; avoid arguments with side effects.
 */
#define Abs(a)              (((a) <  0 ) ? -(a) : (a))

/*! \brief Takes the minimal value of \a a and \a b.
 *
 * \param a Input value.
 * \param b Input value.
 *
 * \return Minimal value of \a a and \a b.
 *
 * \note More optimized if only used with values known at compile time.
 * \warning Evaluates its arguments more than once; avoid side effects.
 */
#define Min(a, b)           (((a) < (b)) ?  (a) : (b))

/*! \brief Takes the maximal value of \a a and \a b.
 *
 * \param a Input value.
 * \param b Input value.
 *
 * \return Maximal value of \a a and \a b.
 *
 * \note More optimized if only used with values known at compile time.
 * \warning Evaluates its arguments more than once; avoid side effects.
 */
#define Max(a, b)           (((a) > (b)) ?  (a) : (b))
\r
474 /*! \brief Takes the absolute value of \a a.
\r
476 * \param a Input value.
\r
478 * \return Absolute value of \a a.
\r
480 * \note More optimized if only used with values unknown at compile time.
\r
486 int __value = (a);\
\r
487 __asm__ ("abs\t%0" : "+r" (__value) : : "cc");\
\r
492 #define abs(a) Abs(a)
\r
495 /*! \brief Takes the minimal value of \a a and \a b.
\r
497 * \param a Input value.
\r
498 * \param b Input value.
\r
500 * \return Minimal value of \a a and \a b.
\r
502 * \note More optimized if only used with values unknown at compile time.
\r
505 #define min(a, b) \
\r
508 int __value, __arg_a = (a), __arg_b = (b);\
\r
509 __asm__ ("min\t%0, %1, %2" : "=r" (__value) : "r" (__arg_a), "r" (__arg_b));\
\r
514 #define min(a, b) __min(a, b)
\r
517 /*! \brief Takes the maximal value of \a a and \a b.
\r
519 * \param a Input value.
\r
520 * \param b Input value.
\r
522 * \return Maximal value of \a a and \a b.
\r
524 * \note More optimized if only used with values unknown at compile time.
\r
527 #define max(a, b) \
\r
530 int __value, __arg_a = (a), __arg_b = (b);\
\r
531 __asm__ ("max\t%0, %1, %2" : "=r" (__value) : "r" (__arg_a), "r" (__arg_b));\
\r
536 #define max(a, b) __max(a, b)
\r
542 /*! \brief Calls the routine at address \a addr.
\r
544 * It generates a long call opcode.
\r
546 * For example, `Long_call(0x80000000)' generates a software reset on a UC3 if
\r
547 * it is invoked from the CPU supervisor mode.
\r
549 * \param addr Address of the routine to call.
\r
551 * \note It may be used as a long jump opcode in some special cases.
\r
553 #define Long_call(addr) ((*(void (*)(void))(addr))())
\r
555 /*! \brief Resets the CPU by software.
\r
557 * \warning It shall not be called from the CPU application mode.
\r
560 #define Reset_CPU() \
\r
563 __asm__ __volatile__ (\
\r
564 "lddpc r9, 3f\n\t"\
\r
565 "mfsr r8, %[SR]\n\t"\
\r
566 "bfextu r8, r8, %[SR_MX_OFFSET], %[SR_MX_SIZE]\n\t"\
\r
567 "cp.w r8, 0b001\n\t"\
\r
569 "sub r8, pc, $ - 1f\n\t"\
\r
573 "mtsr %[SR], r9\n"\
\r
589 "stdsp sp[0], sp\n\t"\
\r
590 "ldmts sp, sp\n\t"\
\r
592 "lddpc pc, 2f\n\t"\
\r
597 ".word %[RESET_SR]"\
\r
599 : [SR] "i" (AVR32_SR),\
\r
600 [SR_MX_OFFSET] "i" (AVR32_SR_M0_OFFSET),\
\r
601 [SR_MX_SIZE] "i" (AVR32_SR_M0_SIZE + AVR32_SR_M1_SIZE + AVR32_SR_M2_SIZE),\
\r
602 [RESET_SR] "i" (AVR32_SR_GM_MASK | AVR32_SR_EM_MASK | AVR32_SR_M0_MASK)\
\r
607 #define Reset_CPU() \
\r
609 extern void *volatile __program_start;\
\r
610 __asm__ __volatile__ (\
\r
611 "mov r7, LWRD(__program_start)\n\t"\
\r
612 "orh r7, HWRD(__program_start)\n\t"\
\r
613 "mov r9, LWRD("ASTRINGZ(AVR32_SR_GM_MASK | AVR32_SR_EM_MASK | AVR32_SR_M0_MASK)")\n\t"\
\r
614 "orh r9, HWRD("ASTRINGZ(AVR32_SR_GM_MASK | AVR32_SR_EM_MASK | AVR32_SR_M0_MASK)")\n\t"\
\r
615 "mfsr r8, "ASTRINGZ(AVR32_SR)"\n\t"\
\r
616 "bfextu r8, r8, "ASTRINGZ(AVR32_SR_M0_OFFSET)", "ASTRINGZ(AVR32_SR_M0_SIZE + AVR32_SR_M1_SIZE + AVR32_SR_M2_SIZE)"\n\t"\
\r
617 "cp.w r8, 001b\n\t"\
\r
619 "sub r8, pc, -12\n\t"\
\r
622 "mtsr "ASTRINGZ(AVR32_SR)", r9\n\t"\
\r
630 "st.w r0[4], r7\n\t"\
\r
638 "stdsp sp[0], sp\n\t"\
\r
639 "ldmts sp, sp\n\t"\
\r
648 /*! \name System Register Access
\r
652 /*! \brief Gets the value of the \a sysreg system register.
\r
654 * \param sysreg Address of the system register of which to get the value.
\r
656 * \return Value of the \a sysreg system register.
\r
659 #define Get_system_register(sysreg) __builtin_mfsr(sysreg)
\r
661 #define Get_system_register(sysreg) __get_system_register(sysreg)
\r
664 /*! \brief Sets the value of the \a sysreg system register to \a value.
\r
666 * \param sysreg Address of the system register of which to set the value.
\r
667 * \param value Value to set the \a sysreg system register to.
\r
670 #define Set_system_register(sysreg, value) __builtin_mtsr(sysreg, value)
\r
672 #define Set_system_register(sysreg, value) __set_system_register(sysreg, value)
\r
678 /*! \name CPU Status Register Access
\r
682 /*! \brief Tells whether exceptions are globally enabled.
\r
684 * \return \c 1 if exceptions are globally enabled, else \c 0.
\r
686 #define Is_global_exception_enabled() (!Tst_bits(Get_system_register(AVR32_SR), AVR32_SR_EM_MASK))
\r
688 /*! \brief Disables exceptions globally.
\r
691 #define Disable_global_exception() ({__asm__ __volatile__ ("ssrf\t%0" : : "i" (AVR32_SR_EM_OFFSET));})
\r
693 #define Disable_global_exception() (__set_status_flag(AVR32_SR_EM_OFFSET))
\r
696 /*! \brief Enables exceptions globally.
\r
699 #define Enable_global_exception() ({__asm__ __volatile__ ("csrf\t%0" : : "i" (AVR32_SR_EM_OFFSET));})
\r
701 #define Enable_global_exception() (__clear_status_flag(AVR32_SR_EM_OFFSET))
\r
704 /*! \brief Tells whether interrupts are globally enabled.
\r
706 * \return \c 1 if interrupts are globally enabled, else \c 0.
\r
708 #define Is_global_interrupt_enabled() (!Tst_bits(Get_system_register(AVR32_SR), AVR32_SR_GM_MASK))
\r
710 /*! \brief Disables interrupts globally.
\r
713 #define Disable_global_interrupt() ({__asm__ __volatile__ ("ssrf\t%0\n\tnop\n\tnop" : : "i" (AVR32_SR_GM_OFFSET));})
\r
715 #define Disable_global_interrupt() {__asm__ __volatile__ ("ssrf\t"ASTRINGZ(AVR32_SR_GM_OFFSET)"\n\tnop\n\tnop");}
\r
718 /*! \brief Enables interrupts globally.
\r
721 #define Enable_global_interrupt() ({__asm__ __volatile__ ("csrf\t%0" : : "i" (AVR32_SR_GM_OFFSET));})
\r
723 #define Enable_global_interrupt() (__enable_interrupt())
\r
726 /*! \brief Tells whether interrupt level \a int_lev is enabled.
\r
728 * \param int_lev Interrupt level (0 to 3).
\r
730 * \return \c 1 if interrupt level \a int_lev is enabled, else \c 0.
\r
732 #define Is_interrupt_level_enabled(int_lev) (!Tst_bits(Get_system_register(AVR32_SR), TPASTE3(AVR32_SR_I, int_lev, M_MASK)))
\r
734 /*! \brief Disables interrupt level \a int_lev.
\r
736 * \param int_lev Interrupt level to disable (0 to 3).
\r
739 #define Disable_interrupt_level(int_lev) ({__asm__ __volatile__ ("ssrf\t%0\n\tnop\n\tnop" : : "i" (TPASTE3(AVR32_SR_I, int_lev, M_OFFSET)));})
\r
741 #define Disable_interrupt_level(int_lev) {__asm__ __volatile__ ("ssrf\t"ASTRINGZ(TPASTE3(AVR32_SR_I, int_lev, M_OFFSET))"\n\tnop\n\tnop");}
\r
744 /*! \brief Enables interrupt level \a int_lev.
\r
746 * \param int_lev Interrupt level to enable (0 to 3).
\r
749 #define Enable_interrupt_level(int_lev) ({__asm__ __volatile__ ("csrf\t%0" : : "i" (TPASTE3(AVR32_SR_I, int_lev, M_OFFSET)));})
\r
751 #define Enable_interrupt_level(int_lev) (__clear_status_flag(TPASTE3(AVR32_SR_I, int_lev, M_OFFSET)))
\r
757 /*! \name Debug Register Access
\r
761 /*! \brief Gets the value of the \a dbgreg debug register.
\r
763 * \param dbgreg Address of the debug register of which to get the value.
\r
765 * \return Value of the \a dbgreg debug register.
\r
768 #define Get_debug_register(dbgreg) __builtin_mfdr(dbgreg)
\r
770 #define Get_debug_register(dbgreg) __get_debug_register(dbgreg)
\r
773 /*! \brief Sets the value of the \a dbgreg debug register to \a value.
\r
775 * \param dbgreg Address of the debug register of which to set the value.
\r
776 * \param value Value to set the \a dbgreg debug register to.
\r
779 #define Set_debug_register(dbgreg, value) __builtin_mtdr(dbgreg, value)
\r
781 #define Set_debug_register(dbgreg, value) __set_debug_register(dbgreg, value)
\r
786 #endif // __AVR32_ABI_COMPILER__
\r
789 //! Boolean evaluating MCU little endianism.
\r
790 #if (__GNUC__ && __AVR32__) || (__ICCAVR32__ || __AAVR32__)
\r
791 #define LITTLE_ENDIAN_MCU FALSE
\r
794 // Check that MCU endianism is correctly defined.
\r
795 #ifndef LITTLE_ENDIAN_MCU
\r
796 #error YOU MUST define the MCU endianism with LITTLE_ENDIAN_MCU: either FALSE or TRUE
\r
799 //! Boolean evaluating MCU big endianism.
\r
800 #define BIG_ENDIAN_MCU (!LITTLE_ENDIAN_MCU)
\r
803 #ifdef __AVR32_ABI_COMPILER__ // Automatically defined when compiling for AVR32, not when assembling.
\r
805 /*! \name MCU Endianism Handling
\r
809 #if LITTLE_ENDIAN_MCU
\r
811 #define LSB(u16) (((U8 *)&(u16))[0]) //!< Least significant byte of \a u16.
\r
812 #define MSB(u16) (((U8 *)&(u16))[1]) //!< Most significant byte of \a u16.
\r
814 #define LSH(u32) (((U16 *)&(u32))[0]) //!< Least significant half-word of \a u32.
\r
815 #define MSH(u32) (((U16 *)&(u32))[1]) //!< Most significant half-word of \a u32.
\r
816 #define LSB0W(u32) (((U8 *)&(u32))[0]) //!< Least significant byte of 1st rank of \a u32.
\r
817 #define LSB1W(u32) (((U8 *)&(u32))[1]) //!< Least significant byte of 2nd rank of \a u32.
\r
818 #define LSB2W(u32) (((U8 *)&(u32))[2]) //!< Least significant byte of 3rd rank of \a u32.
\r
819 #define LSB3W(u32) (((U8 *)&(u32))[3]) //!< Least significant byte of 4th rank of \a u32.
\r
820 #define MSB3W(u32) LSB0W(u32) //!< Most significant byte of 4th rank of \a u32.
\r
821 #define MSB2W(u32) LSB1W(u32) //!< Most significant byte of 3rd rank of \a u32.
\r
822 #define MSB1W(u32) LSB2W(u32) //!< Most significant byte of 2nd rank of \a u32.
\r
823 #define MSB0W(u32) LSB3W(u32) //!< Most significant byte of 1st rank of \a u32.
\r
825 #define LSW(u64) (((U32 *)&(u64))[0]) //!< Least significant word of \a u64.
\r
826 #define MSW(u64) (((U32 *)&(u64))[1]) //!< Most significant word of \a u64.
\r
827 #define LSH0(u64) (((U16 *)&(u64))[0]) //!< Least significant half-word of 1st rank of \a u64.
\r
828 #define LSH1(u64) (((U16 *)&(u64))[1]) //!< Least significant half-word of 2nd rank of \a u64.
\r
829 #define LSH2(u64) (((U16 *)&(u64))[2]) //!< Least significant half-word of 3rd rank of \a u64.
\r
830 #define LSH3(u64) (((U16 *)&(u64))[3]) //!< Least significant half-word of 4th rank of \a u64.
\r
831 #define MSH3(u64) LSH0(u64) //!< Most significant half-word of 4th rank of \a u64.
\r
832 #define MSH2(u64) LSH1(u64) //!< Most significant half-word of 3rd rank of \a u64.
\r
833 #define MSH1(u64) LSH2(u64) //!< Most significant half-word of 2nd rank of \a u64.
\r
834 #define MSH0(u64) LSH3(u64) //!< Most significant half-word of 1st rank of \a u64.
\r
835 #define LSB0D(u64) (((U8 *)&(u64))[0]) //!< Least significant byte of 1st rank of \a u64.
\r
836 #define LSB1D(u64) (((U8 *)&(u64))[1]) //!< Least significant byte of 2nd rank of \a u64.
\r
837 #define LSB2D(u64) (((U8 *)&(u64))[2]) //!< Least significant byte of 3rd rank of \a u64.
\r
838 #define LSB3D(u64) (((U8 *)&(u64))[3]) //!< Least significant byte of 4th rank of \a u64.
\r
839 #define LSB4D(u64) (((U8 *)&(u64))[4]) //!< Least significant byte of 5th rank of \a u64.
\r
840 #define LSB5D(u64) (((U8 *)&(u64))[5]) //!< Least significant byte of 6th rank of \a u64.
\r
841 #define LSB6D(u64) (((U8 *)&(u64))[6]) //!< Least significant byte of 7th rank of \a u64.
\r
842 #define LSB7D(u64) (((U8 *)&(u64))[7]) //!< Least significant byte of 8th rank of \a u64.
\r
843 #define MSB7D(u64) LSB0D(u64) //!< Most significant byte of 8th rank of \a u64.
\r
844 #define MSB6D(u64) LSB1D(u64) //!< Most significant byte of 7th rank of \a u64.
\r
845 #define MSB5D(u64) LSB2D(u64) //!< Most significant byte of 6th rank of \a u64.
\r
846 #define MSB4D(u64) LSB3D(u64) //!< Most significant byte of 5th rank of \a u64.
\r
847 #define MSB3D(u64) LSB4D(u64) //!< Most significant byte of 4th rank of \a u64.
\r
848 #define MSB2D(u64) LSB5D(u64) //!< Most significant byte of 3rd rank of \a u64.
\r
849 #define MSB1D(u64) LSB6D(u64) //!< Most significant byte of 2nd rank of \a u64.
\r
850 #define MSB0D(u64) LSB7D(u64) //!< Most significant byte of 1st rank of \a u64.
\r
852 #else // BIG_ENDIAN_MCU
\r
854 #define MSB(u16) (((U8 *)&(u16))[0]) //!< Most significant byte of \a u16.
\r
855 #define LSB(u16) (((U8 *)&(u16))[1]) //!< Least significant byte of \a u16.
\r
857 #define MSH(u32) (((U16 *)&(u32))[0]) //!< Most significant half-word of \a u32.
\r
858 #define LSH(u32) (((U16 *)&(u32))[1]) //!< Least significant half-word of \a u32.
\r
859 #define MSB0W(u32) (((U8 *)&(u32))[0]) //!< Most significant byte of 1st rank of \a u32.
\r
860 #define MSB1W(u32) (((U8 *)&(u32))[1]) //!< Most significant byte of 2nd rank of \a u32.
\r
861 #define MSB2W(u32) (((U8 *)&(u32))[2]) //!< Most significant byte of 3rd rank of \a u32.
\r
862 #define MSB3W(u32) (((U8 *)&(u32))[3]) //!< Most significant byte of 4th rank of \a u32.
\r
863 #define LSB3W(u32) MSB0W(u32) //!< Least significant byte of 4th rank of \a u32.
\r
864 #define LSB2W(u32) MSB1W(u32) //!< Least significant byte of 3rd rank of \a u32.
\r
865 #define LSB1W(u32) MSB2W(u32) //!< Least significant byte of 2nd rank of \a u32.
\r
866 #define LSB0W(u32) MSB3W(u32) //!< Least significant byte of 1st rank of \a u32.
\r
868 #define MSW(u64) (((U32 *)&(u64))[0]) //!< Most significant word of \a u64.
\r
869 #define LSW(u64) (((U32 *)&(u64))[1]) //!< Least significant word of \a u64.
\r
870 #define MSH0(u64) (((U16 *)&(u64))[0]) //!< Most significant half-word of 1st rank of \a u64.
\r
871 #define MSH1(u64) (((U16 *)&(u64))[1]) //!< Most significant half-word of 2nd rank of \a u64.
\r
872 #define MSH2(u64) (((U16 *)&(u64))[2]) //!< Most significant half-word of 3rd rank of \a u64.
\r
873 #define MSH3(u64) (((U16 *)&(u64))[3]) //!< Most significant half-word of 4th rank of \a u64.
\r
874 #define LSH3(u64) MSH0(u64) //!< Least significant half-word of 4th rank of \a u64.
\r
875 #define LSH2(u64) MSH1(u64) //!< Least significant half-word of 3rd rank of \a u64.
\r
876 #define LSH1(u64) MSH2(u64) //!< Least significant half-word of 2nd rank of \a u64.
\r
877 #define LSH0(u64) MSH3(u64) //!< Least significant half-word of 1st rank of \a u64.
\r
878 #define MSB0D(u64) (((U8 *)&(u64))[0]) //!< Most significant byte of 1st rank of \a u64.
\r
879 #define MSB1D(u64) (((U8 *)&(u64))[1]) //!< Most significant byte of 2nd rank of \a u64.
\r
880 #define MSB2D(u64) (((U8 *)&(u64))[2]) //!< Most significant byte of 3rd rank of \a u64.
\r
881 #define MSB3D(u64) (((U8 *)&(u64))[3]) //!< Most significant byte of 4th rank of \a u64.
\r
882 #define MSB4D(u64) (((U8 *)&(u64))[4]) //!< Most significant byte of 5th rank of \a u64.
\r
883 #define MSB5D(u64) (((U8 *)&(u64))[5]) //!< Most significant byte of 6th rank of \a u64.
\r
884 #define MSB6D(u64) (((U8 *)&(u64))[6]) //!< Most significant byte of 7th rank of \a u64.
\r
885 #define MSB7D(u64) (((U8 *)&(u64))[7]) //!< Most significant byte of 8th rank of \a u64.
\r
886 #define LSB7D(u64) MSB0D(u64) //!< Least significant byte of 8th rank of \a u64.
\r
887 #define LSB6D(u64) MSB1D(u64) //!< Least significant byte of 7th rank of \a u64.
\r
888 #define LSB5D(u64) MSB2D(u64) //!< Least significant byte of 6th rank of \a u64.
\r
889 #define LSB4D(u64) MSB3D(u64) //!< Least significant byte of 5th rank of \a u64.
\r
890 #define LSB3D(u64) MSB4D(u64) //!< Least significant byte of 4th rank of \a u64.
\r
891 #define LSB2D(u64) MSB5D(u64) //!< Least significant byte of 3rd rank of \a u64.
\r
892 #define LSB1D(u64) MSB6D(u64) //!< Least significant byte of 2nd rank of \a u64.
\r
893 #define LSB0D(u64) MSB7D(u64) //!< Least significant byte of 1st rank of \a u64.
\r
/*! \name Endianism Conversion
 *
 * The same considerations as for clz and ctz apply here but AVR32-GCC's
 * __builtin_bswap_16 and __builtin_bswap_32 do not behave like macros when
 * applied to constant expressions, so two sets of macros are defined here:
 *   - Swap16, Swap32 and Swap64 to apply to constant expressions (values known
 *     at compile time);
 *   - swap16, swap32 and swap64 to apply to non-constant expressions (values
 *     unknown at compile time).
 */

/*! \brief Toggles the endianism of \a u16 (by swapping its bytes).
 *
 * \param u16 U16 of which to toggle the endianism.
 *
 * \return Value resulting from \a u16 with toggled endianism.
 *
 * \note More optimized if only used with values known at compile time.
 * \warning Evaluates \a u16 more than once; avoid arguments with side effects.
 */
#define Swap16(u16) ((U16)(((U16)(u16) >> 8) |\
                           ((U16)(u16) << 8)))

/*! \brief Toggles the endianism of \a u32 (by swapping its bytes).
 *
 * Built from Swap16 applied to each 16-bit half, with the halves exchanged.
 *
 * \param u32 U32 of which to toggle the endianism.
 *
 * \return Value resulting from \a u32 with toggled endianism.
 *
 * \note More optimized if only used with values known at compile time.
 * \warning Evaluates \a u32 more than once; avoid arguments with side effects.
 */
#define Swap32(u32) ((U32)(((U32)Swap16((U32)(u32) >> 16)) |\
                           ((U32)Swap16((U32)(u32)) << 16)))

/*! \brief Toggles the endianism of \a u64 (by swapping its bytes).
 *
 * Built from Swap32 applied to each 32-bit half, with the halves exchanged.
 *
 * \param u64 U64 of which to toggle the endianism.
 *
 * \return Value resulting from \a u64 with toggled endianism.
 *
 * \note More optimized if only used with values known at compile time.
 * \warning Evaluates \a u64 more than once; avoid arguments with side effects.
 */
#define Swap64(u64) ((U64)(((U64)Swap32((U64)(u64) >> 32)) |\
                           ((U64)Swap32((U64)(u64)) << 32)))
\r
945 /*! \brief Toggles the endianism of \a u16 (by swapping its bytes).
\r
947 * \param u16 U16 of which to toggle the endianism.
\r
949 * \return Value resulting from \a u16 with toggled endianism.
\r
951 * \note More optimized if only used with values unknown at compile time.
\r
954 #define swap16(u16) ((U16)__builtin_bswap_16((U16)(u16)))
\r
956 #define swap16(u16) ((U16)__swap_bytes_in_halfwords((U16)(u16)))
\r
959 /*! \brief Toggles the endianism of \a u32 (by swapping its bytes).
\r
961 * \param u32 U32 of which to toggle the endianism.
\r
963 * \return Value resulting from \a u32 with toggled endianism.
\r
965 * \note More optimized if only used with values unknown at compile time.
\r
968 #define swap32(u32) ((U32)__builtin_bswap_32((U32)(u32)))
\r
970 #define swap32(u32) ((U32)__swap_bytes((U32)(u32)))
\r
973 /*! \brief Toggles the endianism of \a u64 (by swapping its bytes).
\r
975 * \param u64 U64 of which to toggle the endianism.
\r
977 * \return Value resulting from \a u64 with toggled endianism.
\r
979 * \note More optimized if only used with values unknown at compile time.
\r
981 #define swap64(u64) ((U64)(((U64)swap32((U64)(u64) >> 32)) |\
\r
982 ((U64)swap32((U64)(u64)) << 32)))
\r
987 /*! \name Target Abstraction
\r
991 #define _GLOBEXT_ extern //!< extern storage-class specifier.
\r
992 #define _CONST_TYPE_ const //!< const type qualifier.
\r
993 #define _MEM_TYPE_SLOW_ //!< Slow memory type.
\r
994 #define _MEM_TYPE_MEDFAST_ //!< Fairly fast memory type.
\r
995 #define _MEM_TYPE_FAST_ //!< Fast memory type.
\r
997 typedef U8 Byte; //!< 8-bit unsigned integer.
\r
999 #define memcmp_ram2ram memcmp //!< Target-specific memcmp of RAM to RAM.
\r
1000 #define memcmp_code2ram memcmp //!< Target-specific memcmp of RAM to NVRAM.
\r
1001 #define memcpy_ram2ram memcpy //!< Target-specific memcpy from RAM to RAM.
\r
1002 #define memcpy_code2ram memcpy //!< Target-specific memcpy from NVRAM to RAM.
\r
1004 #define LSB0(u32) LSB0W(u32) //!< Least significant byte of 1st rank of \a u32.
\r
1005 #define LSB1(u32) LSB1W(u32) //!< Least significant byte of 2nd rank of \a u32.
\r
1006 #define LSB2(u32) LSB2W(u32) //!< Least significant byte of 3rd rank of \a u32.
\r
1007 #define LSB3(u32) LSB3W(u32) //!< Least significant byte of 4th rank of \a u32.
\r
1008 #define MSB3(u32) MSB3W(u32) //!< Most significant byte of 4th rank of \a u32.
\r
1009 #define MSB2(u32) MSB2W(u32) //!< Most significant byte of 3rd rank of \a u32.
\r
1010 #define MSB1(u32) MSB1W(u32) //!< Most significant byte of 2nd rank of \a u32.
\r
1011 #define MSB0(u32) MSB0W(u32) //!< Most significant byte of 1st rank of \a u32.
\r
1015 #endif // __AVR32_ABI_COMPILER__
\r
1018 #endif // _COMPILER_H_
\r