X-Git-Url: https://git.sur5r.net/?a=blobdiff_plain;f=include%2Fasm-mips%2Fio.h;h=3a0f33f204d11c4ff18c180d128e9ad062624d58;hb=1a9eeb78b825bfade31d7606a2fe3b9eca9e35be;hp=9a7aaf7f4b55b01fea96884b35f5de68a4ca0b1f;hpb=6069ff265362ef6239749b5f598b137f407b821e;p=u-boot

diff --git a/include/asm-mips/io.h b/include/asm-mips/io.h
index 9a7aaf7f4b..3a0f33f204 100644
--- a/include/asm-mips/io.h
+++ b/include/asm-mips/io.h
@@ -71,7 +71,21 @@
  * instruction, so the lower 16 bits must be zero. Should be true on
  * on any sane architecture; generic code does not use this assumption.
  */
-extern unsigned long mips_io_port_base;
+extern const unsigned long mips_io_port_base;
+
+/*
+ * Gcc will generate code to load the value of mips_io_port_base after each
+ * function call which may be fairly wasteful in some cases. So we don't
+ * play quite by the book. We tell gcc mips_io_port_base is a long variable
+ * which solves the code generation issue. Now we need to violate the
+ * aliasing rules a little to make initialization possible and finally we
+ * will need the barrier() to fight side effects of the aliasing chat.
+ * This trickery will eventually collapse under gcc's optimizer. Oh well.
+ */
+static inline void set_io_port_base(unsigned long base)
+{
+	* (unsigned long *) &mips_io_port_base = base;
+}
 
 /*
  * Thanks to James van Artsdalen for a better timing-fix than
@@ -106,7 +120,7 @@ extern unsigned long mips_io_port_base;
  */
 extern inline unsigned long virt_to_phys(volatile void * address)
 {
-	return PHYSADDR(address);
+	return CPHYSADDR(address);
 }
 
 extern inline void * phys_to_virt(unsigned long address)
@@ -119,7 +133,7 @@ extern inline void * phys_to_virt(unsigned long address)
  */
 extern inline unsigned long virt_to_bus(volatile void * address)
 {
-	return PHYSADDR(address);
+	return CPHYSADDR(address);
 }
 
 extern inline void * bus_to_virt(unsigned long address)
@@ -203,7 +217,7 @@ extern void iounmap(void *addr);
 #define isa_eth_io_copy_and_sum(a,b,c,d)	eth_copy_and_sum((a),(b),(c),(d))
 
 static inline int check_signature(unsigned long io_addr,
-        const unsigned char *signature, int length)
+	const unsigned char *signature, int length)
 {
 	int retval = 0;
 	do {
@@ -286,15 +300,15 @@ extern inline void __outs##s(unsigned int port, const void * addr, unsigned long
 #define __OUTS2(m) \
 	if (count) \
 	__asm__ __volatile__ ( \
-        ".set\tnoreorder\n\t" \
-        ".set\tnoat\n" \
-        "1:\tl" #m "\t$1,(%0)\n\t" \
-        "subu\t%1,1\n\t" \
-        "s" #m "\t$1,%4(%5)\n\t" \
-        "bne\t$0,%1,1b\n\t" \
-        "addiu\t%0,%6\n\t" \
-        ".set\tat\n\t" \
-        ".set\treorder"
+	".set\tnoreorder\n\t" \
+	".set\tnoat\n" \
+	"1:\tl" #m "\t$1,(%0)\n\t" \
+	"subu\t%1,1\n\t" \
+	"s" #m "\t$1,%4(%5)\n\t" \
+	"bne\t$0,%1,1b\n\t" \
+	"addiu\t%0,%6\n\t" \
+	".set\tat\n\t" \
+	".set\treorder"
 
 #define __OUTS(m,s,i) \
 	__OUTS1(s) __OUTS2(m) \
@@ -447,4 +461,32 @@ extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);
 #define dma_cache_wback(start,size)	_dma_cache_wback(start,size)
 #define dma_cache_inv(start,size)	_dma_cache_inv(start,size)
 
+static inline void sync(void)
+{
+}
+
+/*
+ * Given a physical address and a length, return a virtual address
+ * that can be used to access the memory range with the caching
+ * properties specified by "flags".
+ */
+#define MAP_NOCACHE	(0)
+#define MAP_WRCOMBINE	(0)
+#define MAP_WRBACK	(0)
+#define MAP_WRTHROUGH	(0)
+
+static inline void *
+map_physmem(phys_addr_t paddr, unsigned long len, unsigned long flags)
+{
+	return (void *)paddr;
+}
+
+/*
+ * Take down a mapping set up by map_physmem().
+ */
+static inline void unmap_physmem(void *vaddr, unsigned long flags)
+{
+
+}
+
 #endif /* _ASM_IO_H */
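The map_physmem()/unmap_physmem() pair added at the end of the patch is U-Boot's generic interface for turning a physical address into a CPU pointer; on this MIPS port the mapping is a plain identity cast and the MAP_* flags are all defined to 0 and ignored. A minimal usage sketch follows, not part of the patch itself: EXAMPLE_PHYS_BASE, EXAMPLE_STATUS_OFS and example_read_status() are hypothetical placeholder names invented for illustration.

#include <asm/io.h>

#define EXAMPLE_PHYS_BASE	0x1f000000	/* hypothetical device base, not from the patch */
#define EXAMPLE_STATUS_OFS	0x10		/* hypothetical 32-bit status register offset */

static unsigned int example_read_status(void)
{
	void *regs;
	unsigned int status;

	/* Ask for an uncached mapping of the device's register window. */
	regs = map_physmem(EXAMPLE_PHYS_BASE, 0x100, MAP_NOCACHE);

	/*
	 * Go through the readl() accessor even though, on this MIPS port,
	 * map_physmem() is just a cast and the flags argument is ignored;
	 * this keeps the code portable to architectures with real mappings.
	 */
	status = readl((char *)regs + EXAMPLE_STATUS_OFS);

	unmap_physmem(regs, MAP_NOCACHE);

	return status;
}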