/*******************************************************************************
 * (c) Copyright 2016-2018 Microsemi SoC Products Group. All rights reserved.
 *
 * @author Microsemi SoC Products Group
 * @brief Mi-V soft processor register bit mask and shift constant encodings.
 *
 * SVN $Revision: 9825 $
 * SVN $Date: 2018-03-19 10:31:41 +0530 (Mon, 19 Mar 2018) $
 ******************************************************************************/
#ifndef RISCV_CSR_ENCODING_H
#define RISCV_CSR_ENCODING_H

#define MSTATUS_UIE         0x00000001
#define MSTATUS_SIE         0x00000002
#define MSTATUS_HIE         0x00000004
#define MSTATUS_MIE         0x00000008
#define MSTATUS_UPIE        0x00000010
#define MSTATUS_SPIE        0x00000020
#define MSTATUS_HPIE        0x00000040
#define MSTATUS_MPIE        0x00000080
#define MSTATUS_SPP         0x00000100
#define MSTATUS_HPP         0x00000600
#define MSTATUS_MPP         0x00001800
#define MSTATUS_FS          0x00006000
#define MSTATUS_XS          0x00018000
#define MSTATUS_MPRV        0x00020000
#define MSTATUS_SUM         0x00040000    /* changed in v1.10 */
#define MSTATUS_MXR         0x00080000    /* changed in v1.10 */
#define MSTATUS_TVM         0x00100000    /* changed in v1.10 */
#define MSTATUS_TW          0x00200000    /* changed in v1.10 */
#define MSTATUS_TSR         0x00400000    /* changed in v1.10 */
#define MSTATUS_RES         0x7F800000    /* changed in v1.10 */
#define MSTATUS32_SD        0x80000000
#define MSTATUS64_SD        0x8000000000000000

#define MCAUSE32_CAUSE      0x7FFFFFFF
#define MCAUSE64_CAUSE      0x7FFFFFFFFFFFFFFF
#define MCAUSE32_INT        0x80000000
#define MCAUSE64_INT        0x8000000000000000

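/*
 * Example: on an RV32 hart, an mcause value of 0x80000007 decodes as an
 * interrupt ((mcause & MCAUSE32_INT) != 0) with exception code
 * (mcause & MCAUSE32_CAUSE) == 7, i.e. the machine timer interrupt.
 */
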
#define SSTATUS_UIE         0x00000001
#define SSTATUS_SIE         0x00000002
#define SSTATUS_UPIE        0x00000010
#define SSTATUS_SPIE        0x00000020
#define SSTATUS_SPP         0x00000100
#define SSTATUS_FS          0x00006000
#define SSTATUS_XS          0x00018000
#define SSTATUS_PUM         0x00040000
#define SSTATUS32_SD        0x80000000
#define SSTATUS64_SD        0x8000000000000000

#define MIP_SSIP            (1u << IRQ_S_SOFT)
#define MIP_HSIP            (1u << IRQ_H_SOFT)
#define MIP_MSIP            (1u << IRQ_M_SOFT)
#define MIP_STIP            (1u << IRQ_S_TIMER)
#define MIP_HTIP            (1u << IRQ_H_TIMER)
#define MIP_MTIP            (1u << IRQ_M_TIMER)
#define MIP_SEIP            (1u << IRQ_S_EXT)
#define MIP_HEIP            (1u << IRQ_H_EXT)
#define MIP_MEIP            (1u << IRQ_M_EXT)

#define SIP_SSIP            MIP_SSIP
#define SIP_STIP            MIP_STIP

#define IRQ_S_SOFT          1
#define IRQ_H_SOFT          2
#define IRQ_M_SOFT          3
#define IRQ_S_TIMER         5
#define IRQ_H_TIMER         6
#define IRQ_M_TIMER         7
#define IRQ_S_EXT           9
#define IRQ_H_EXT           10
#define IRQ_M_EXT           11

#define DEFAULT_RSTVEC      0x00001000
#define DEFAULT_NMIVEC      0x00001004
#define DEFAULT_MTVEC       0x00001010
#define CONFIG_STRING_ADDR  0x0000100C
#define EXT_IO_BASE         0x40000000
#define DRAM_BASE           0x80000000

/* Page table entry (PTE) fields */
#define PTE_V               0x001    /* Valid */
#define PTE_TYPE            0x01E    /* Type */
#define PTE_R               0x020    /* Referenced */
#define PTE_D               0x040    /* Dirty */
#define PTE_SOFT            0x380    /* Reserved for software */

#define PTE_TYPE_TABLE        0x00
#define PTE_TYPE_TABLE_GLOBAL 0x02
#define PTE_TYPE_URX_SR       0x04
#define PTE_TYPE_URWX_SRW     0x06
#define PTE_TYPE_UR_SR        0x08
#define PTE_TYPE_URW_SRW      0x0A
#define PTE_TYPE_URX_SRX      0x0C
#define PTE_TYPE_URWX_SRWX    0x0E
#define PTE_TYPE_SR           0x10
#define PTE_TYPE_SRW          0x12
#define PTE_TYPE_SRX          0x14
#define PTE_TYPE_SRWX         0x16
#define PTE_TYPE_SR_GLOBAL    0x18
#define PTE_TYPE_SRW_GLOBAL   0x1A
#define PTE_TYPE_SRX_GLOBAL   0x1C
#define PTE_TYPE_SRWX_GLOBAL  0x1E

#define PTE_PPN_SHIFT       10

#define PTE_TABLE(PTE) ((0x0000000AU >> ((PTE) & 0x1F)) & 1)
#define PTE_UR(PTE)    ((0x0000AAA0U >> ((PTE) & 0x1F)) & 1)
#define PTE_UW(PTE)    ((0x00008880U >> ((PTE) & 0x1F)) & 1)
#define PTE_UX(PTE)    ((0x0000A0A0U >> ((PTE) & 0x1F)) & 1)
#define PTE_SR(PTE)    ((0xAAAAAAA0U >> ((PTE) & 0x1F)) & 1)
#define PTE_SW(PTE)    ((0x88888880U >> ((PTE) & 0x1F)) & 1)
#define PTE_SX(PTE)    ((0xA0A0A000U >> ((PTE) & 0x1F)) & 1)

#define PTE_CHECK_PERM(PTE, SUPERVISOR, STORE, FETCH) \
  ((STORE) ? ((SUPERVISOR) ? PTE_SW(PTE) : PTE_UW(PTE)) : \
   (FETCH) ? ((SUPERVISOR) ? PTE_SX(PTE) : PTE_UX(PTE)) : \
             ((SUPERVISOR) ? PTE_SR(PTE) : PTE_UR(PTE)))

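/*
 * The PTE_* permission macros above index a 32-bit lookup bitmap with a
 * PTE's low five bits (type field plus valid bit), so only valid PTEs
 * (odd indices) can select a set bit. Worked example: for
 * PTE_TYPE_URWX_SRWX | PTE_V == 0x0F, PTE_UW(0x0F) tests bit 15 of
 * 0x00008880U, which is 1, so PTE_CHECK_PERM(0x0F, 0, 1, 0) reports a
 * user-mode store as permitted.
 */
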
#if __riscv_xlen == 64
# define MSTATUS_SD         MSTATUS64_SD
# define SSTATUS_SD         SSTATUS64_SD
# define MCAUSE_INT         MCAUSE64_INT
# define MCAUSE_CAUSE       MCAUSE64_CAUSE
# define RISCV_PGLEVEL_BITS 9
#else
# define MSTATUS_SD         MSTATUS32_SD
# define SSTATUS_SD         SSTATUS32_SD
# define RISCV_PGLEVEL_BITS 10
# define MCAUSE_INT         MCAUSE32_INT
# define MCAUSE_CAUSE       MCAUSE32_CAUSE
#endif

#define RISCV_PGSHIFT 12
#define RISCV_PGSIZE (1 << RISCV_PGSHIFT)

#ifndef __ASSEMBLER__

#if defined(__GNUC__)

#define read_csr(reg) ({ unsigned long __tmp; \
  asm volatile ("csrr %0, " #reg : "=r"(__tmp)); \
  __tmp; })

#define write_csr(reg, val) ({ \
  if (__builtin_constant_p(val) && (unsigned long)(val) < 32) \
    asm volatile ("csrw " #reg ", %0" :: "i"(val)); \
  else \
    asm volatile ("csrw " #reg ", %0" :: "r"(val)); })

#define swap_csr(reg, val) ({ unsigned long __tmp; \
  if (__builtin_constant_p(val) && (unsigned long)(val) < 32) \
    asm volatile ("csrrw %0, " #reg ", %1" : "=r"(__tmp) : "i"(val)); \
  else \
    asm volatile ("csrrw %0, " #reg ", %1" : "=r"(__tmp) : "r"(val)); \
  __tmp; })

#define set_csr(reg, bit) ({ unsigned long __tmp; \
  if (__builtin_constant_p(bit) && (unsigned long)(bit) < 32) \
    asm volatile ("csrrs %0, " #reg ", %1" : "=r"(__tmp) : "i"(bit)); \
  else \
    asm volatile ("csrrs %0, " #reg ", %1" : "=r"(__tmp) : "r"(bit)); \
  __tmp; })

#define clear_csr(reg, bit) ({ unsigned long __tmp; \
  if (__builtin_constant_p(bit) && (unsigned long)(bit) < 32) \
    asm volatile ("csrrc %0, " #reg ", %1" : "=r"(__tmp) : "i"(bit)); \
  else \
    asm volatile ("csrrc %0, " #reg ", %1" : "=r"(__tmp) : "r"(bit)); \
  __tmp; })

#define rdtime()    read_csr(time)
#define rdcycle()   read_csr(cycle)
#define rdinstret() read_csr(instret)

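/*
 * Usage sketch (illustrative only; the function name is hypothetical and
 * not part of this header's API): enable the machine timer interrupt with
 * the mask constants and CSR accessors defined above.
 */
static inline unsigned long example_enable_mtimer_irq(void)
{
    set_csr(mie, MIP_MTIP);        /* unmask the machine timer interrupt */
    set_csr(mstatus, MSTATUS_MIE); /* set the global M-mode interrupt enable */
    return read_csr(mstatus);      /* return the updated mstatus for inspection */
}
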
#ifdef __riscv_atomic

#define MASK(nr)     (1UL << (nr))
#define MASK_NOT(nr) (~(1UL << (nr)))

/*
 * atomic_read - read atomic variable
 * @v: pointer of type int
 *
 * Atomically reads the value of @v.
 */
static inline int atomic_read(const int *v)
{
    return *((volatile int *)(v));
}

/*
 * atomic_set - set atomic variable
 * @v: pointer of type int
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static inline void atomic_set(int *v, int i)
{
    *((volatile int *)(v)) = i;
}

/*
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type int
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, int *v)
{
    __asm__ __volatile__ (
        "amoadd.w zero, %1, %0"
        : "+A" (*v)
        : "r" (i));
}

static inline int atomic_fetch_add(unsigned int mask, int *v)
{
    int out;

    /* rd (%1) receives the value held in memory before the add. */
    __asm__ __volatile__ (
        "amoadd.w %1, %2, %0"
        : "+A" (*v), "=r" (out)
        : "r" (mask));
    return out;
}

/*
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type int
 *
 * Atomically subtracts @i from @v.
 */
static inline void atomic_sub(int i, int *v)
{
    atomic_add(-i, v);
}

static inline int atomic_fetch_sub(unsigned int mask, int *v)
{
    int out;

    /* RISC-V has no amosub instruction: atomically add the negated
       operand instead. rd (%1) receives the old value. */
    __asm__ __volatile__ (
        "amoadd.w %1, %2, %0"
        : "+A" (*v), "=r" (out)
        : "r" (-(int)mask));
    return out;
}

/*
 * atomic_add_return - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type int
 *
 * Atomically adds @i to @v and returns the result.
 */
static inline int atomic_add_return(int i, int *v)
{
    register int c;

    __asm__ __volatile__ (
        "amoadd.w %0, %2, %1"
        : "=r" (c), "+A" (*v)
        : "r" (i));
    /* The AMO returns the old value, so add @i to produce the result. */
    return (c + i);
}

/*
 * atomic_sub_return - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type int
 *
 * Atomically subtracts @i from @v and returns the result.
 */
static inline int atomic_sub_return(int i, int *v)
{
    return atomic_add_return(-i, v);
}

/*
 * atomic_inc - increment atomic variable
 * @v: pointer of type int
 *
 * Atomically increments @v by 1.
 */
static inline void atomic_inc(int *v)
{
    atomic_add(1, v);
}

/*
 * atomic_dec - decrement atomic variable
 * @v: pointer of type int
 *
 * Atomically decrements @v by 1.
 */
static inline void atomic_dec(int *v)
{
    atomic_sub(1, v);
}

static inline int atomic_inc_return(int *v)
{
    return atomic_add_return(1, v);
}

static inline int atomic_dec_return(int *v)
{
    return atomic_sub_return(1, v);
}

/*
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type int
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline int atomic_sub_and_test(int i, int *v)
{
    return (atomic_sub_return(i, v) == 0);
}

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type int
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline int atomic_inc_and_test(int *v)
{
    return (atomic_inc_return(v) == 0);
}

/*
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type int
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline int atomic_dec_and_test(int *v)
{
    return (atomic_dec_return(v) == 0);
}

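/*
 * Usage sketch (illustrative only; the names are hypothetical): a minimal
 * reference count built from the atomics above.
 */
static inline void example_ref_get(int *refcount)
{
    atomic_inc(refcount); /* take a reference */
}

static inline int example_ref_put(int *refcount)
{
    /* Drop a reference; returns 1 when the last reference is released
       and the caller should free the object. */
    return atomic_dec_and_test(refcount);
}
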
/*
 * atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type int
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when the
 * result is greater than or equal to zero.
 */
static inline int atomic_add_negative(int i, int *v)
{
    return (atomic_add_return(i, v) < 0);
}

static inline int atomic_xchg(int *v, int n)
{
    register int c;

    __asm__ __volatile__ (
        "amoswap.w %0, %2, %1"
        : "=r" (c), "+A" (*v)
        : "r" (n));
    return c;
}

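/*
 * Usage sketch (illustrative only; the names are hypothetical): a minimal
 * spinlock built on atomic_xchg(). Note that this sketch omits the .aq/.rl
 * ordering or fences that production locking code would also need.
 */
static inline void example_spin_lock(int *lock)
{
    /* Spin until the previous value was 0, i.e. the lock was free. */
    while (atomic_xchg(lock, 1) != 0)
    {
        ;
    }
}

static inline void example_spin_unlock(int *lock)
{
    atomic_set(lock, 0); /* mark the lock free again */
}
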
/*
 * atomic_and - Atomically clear bits in atomic variable
 * @mask: Mask of the bits to be retained
 * @v: pointer of type int
 *
 * Atomically retains in @v only the bits set in @mask.
 */
static inline void atomic_and(unsigned int mask, int *v)
{
    __asm__ __volatile__ (
        "amoand.w zero, %1, %0"
        : "+A" (*v)
        : "r" (mask));
}

static inline int atomic_fetch_and(unsigned int mask, int *v)
{
    int out;

    /* rd (%1) receives the value held in memory before the AND. */
    __asm__ __volatile__ (
        "amoand.w %1, %2, %0"
        : "+A" (*v), "=r" (out)
        : "r" (mask));
    return out;
}

/*
 * atomic_or - Atomically set bits in atomic variable
 * @mask: Mask of the bits to be set
 * @v: pointer of type int
 *
 * Atomically sets the bits set in @mask in @v.
 */
static inline void atomic_or(unsigned int mask, int *v)
{
    __asm__ __volatile__ (
        "amoor.w zero, %1, %0"
        : "+A" (*v)
        : "r" (mask));
}

static inline int atomic_fetch_or(unsigned int mask, int *v)
{
    int out;

    /* rd (%1) receives the value held in memory before the OR. */
    __asm__ __volatile__ (
        "amoor.w %1, %2, %0"
        : "+A" (*v), "=r" (out)
        : "r" (mask));
    return out;
}

/*
 * atomic_xor - Atomically flip bits in atomic variable
 * @mask: Mask of the bits to be flipped
 * @v: pointer of type int
 *
 * Atomically flips the bits set in @mask in @v.
 */
static inline void atomic_xor(unsigned int mask, int *v)
{
    __asm__ __volatile__ (
        "amoxor.w zero, %1, %0"
        : "+A" (*v)
        : "r" (mask));
}

static inline int atomic_fetch_xor(unsigned int mask, int *v)
{
    int out;

    /* rd (%1) receives the value held in memory before the XOR. */
    __asm__ __volatile__ (
        "amoxor.w %1, %2, %0"
        : "+A" (*v), "=r" (out)
        : "r" (mask));
    return out;
}

/*----------------------------------------------------*/

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
    unsigned long __res, __mask;

    __mask = MASK(nr);
    __asm__ __volatile__ (
        "amoor.w %0, %2, %1"
        : "=r" (__res), "+A" (*addr)
        : "r" (__mask));
    return ((__res & __mask) != 0);
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
    unsigned long __res, __mask;

    __mask = MASK_NOT(nr);
    __asm__ __volatile__ (
        "amoand.w %0, %2, %1"
        : "=r" (__res), "+A" (*addr)
        : "r" (__mask));
    /* __mask has bit nr cleared, so test the old value against the bit
       itself rather than against __mask. */
    return ((__res & MASK(nr)) != 0);
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
    unsigned long __res, __mask;

    __mask = MASK(nr);
    __asm__ __volatile__ (
        "amoxor.w %0, %2, %1"
        : "=r" (__res), "+A" (*addr)
        : "r" (__mask));
    return ((__res & __mask) != 0);
}

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
    __asm__ __volatile__ (
        "amoor.w zero, %1, %0"
        : "+A" (*addr)
        : "r" (MASK(nr)));
}

/*
 * clear_bit - Clear a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
    __asm__ __volatile__ (
        "amoand.w zero, %1, %0"
        : "+A" (*addr)
        : "r" (MASK_NOT(nr)));
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
    __asm__ __volatile__ (
        "amoxor.w zero, %1, %0"
        : "+A" (*addr)
        : "r" (MASK(nr)));
}

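/*
 * Usage sketch (illustrative only; the names are hypothetical): claiming a
 * unit of work exactly once across contexts with the bit operations above.
 */
static inline int example_claim_work(volatile unsigned long *pending, int nr)
{
    /* test_and_set_bit() returns the old bit value, so exactly one caller
       observes 0 and wins the claim. */
    return (test_and_set_bit(nr, pending) == 0);
}
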
#endif /* __riscv_atomic */

#endif /* __GNUC__ */

#endif /* __ASSEMBLER__ */

#endif /* RISCV_CSR_ENCODING_H */