}
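+ /*
+ * Each copy doubles the region with valid ECC: the low half,
+ * initialized above, is written over the next-higher region,
+ * up to 8M; the loop below fills the rest in 8M chunks.
+ */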
/* 8K */
- dma_xfer((uint *)0x2000,0x2000,(uint *)0);
+ dmacpy(0x2000, 0, 0x2000);
/* 16K */
- dma_xfer((uint *)0x4000,0x4000,(uint *)0);
+ dmacpy(0x4000, 0, 0x4000);
/* 32K */
- dma_xfer((uint *)0x8000,0x8000,(uint *)0);
+ dmacpy(0x8000, 0, 0x8000);
/* 64K */
- dma_xfer((uint *)0x10000,0x10000,(uint *)0);
+ dmacpy(0x10000, 0, 0x10000);
/* 128k */
- dma_xfer((uint *)0x20000,0x20000,(uint *)0);
+ dmacpy(0x20000, 0, 0x20000);
/* 256k */
- dma_xfer((uint *)0x40000,0x40000,(uint *)0);
+ dmacpy(0x40000, 0, 0x40000);
/* 512k */
- dma_xfer((uint *)0x80000,0x80000,(uint *)0);
+ dmacpy(0x80000, 0, 0x80000);
/* 1M */
- dma_xfer((uint *)0x100000,0x100000,(uint *)0);
+ dmacpy(0x100000, 0, 0x100000);
/* 2M */
- dma_xfer((uint *)0x200000,0x200000,(uint *)0);
+ dmacpy(0x200000, 0, 0x200000);
/* 4M */
- dma_xfer((uint *)0x400000,0x400000,(uint *)0);
+ dmacpy(0x400000, 0, 0x400000);
- for (i = 1; i < dram_size / 0x800000; i++) {
- dma_xfer((uint *)(0x800000*i),0x800000,(uint *)0);
- }
+ for (i = 1; i < dram_size / 0x800000; i++)
+ dmacpy(0x800000 * i, 0, 0x800000);
/* Enable errors for ECC */
ddr->err_disable = 0x00000000;
}
/* 8K */
- dma_xfer((uint *)0x2000,0x2000,(uint *)0);
+ dmacpy(0x2000, 0, 0x2000);
/* 16K */
- dma_xfer((uint *)0x4000,0x4000,(uint *)0);
+ dmacpy(0x4000, 0, 0x4000);
/* 32K */
- dma_xfer((uint *)0x8000,0x8000,(uint *)0);
+ dmacpy(0x8000, 0, 0x8000);
/* 64K */
- dma_xfer((uint *)0x10000,0x10000,(uint *)0);
+ dmacpy(0x10000, 0, 0x10000);
/* 128k */
- dma_xfer((uint *)0x20000,0x20000,(uint *)0);
+ dmacpy(0x20000, 0, 0x20000);
/* 256k */
- dma_xfer((uint *)0x40000,0x40000,(uint *)0);
+ dmacpy(0x40000, 0, 0x40000);
/* 512k */
- dma_xfer((uint *)0x80000,0x80000,(uint *)0);
+ dmacpy(0x80000, 0, 0x80000);
/* 1M */
- dma_xfer((uint *)0x100000,0x100000,(uint *)0);
+ dmacpy(0x100000, 0, 0x100000);
/* 2M */
- dma_xfer((uint *)0x200000,0x200000,(uint *)0);
+ dmacpy(0x200000, 0, 0x200000);
/* 4M */
- dma_xfer((uint *)0x400000,0x400000,(uint *)0);
+ dmacpy(0x400000, 0, 0x400000);
- for (i = 1; i < dram_size / 0x800000; i++) {
- dma_xfer((uint *)(0x800000*i),0x800000,(uint *)0);
- }
+ for (i = 1; i < dram_size / 0x800000; i++)
+ dmacpy(0x800000 * i, 0, 0x800000);
/* Enable errors for ECC */
ddr->err_disable = 0x00000000;
return status;
}
-int dma_xfer(void *dest, u32 count, void *src)
+int dmacpy(phys_addr_t dest, phys_addr_t src, phys_size_t count)
{
volatile immap_t *immap = (immap_t *)CONFIG_SYS_IMMR;
volatile dma83xx_t *dma = &immap->dma;
/* initialize DMASARn, DMADAR and DMAABCRn */
dma->dmadar0 = swab32((u32)dest);
dma->dmasar0 = swab32((u32)src);
- dma->dmabcr0 = swab32(count);
+ dma->dmabcr0 = swab32((u32)count);
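+ /* register values are byte-swapped: the DMA block's registers are little-endian */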
__asm__ __volatile__ ("sync");
__asm__ __volatile__ ("isync");
#if defined(CONFIG_DDR_ECC) && !defined(CONFIG_ECC_INIT_VIA_DDRC)
extern void dma_init(void);
extern uint dma_check(void);
-extern int dma_xfer(void *dest, uint count, void *src);
+extern int dmacpy(phys_addr_t dest, phys_addr_t src, phys_size_t n);
#endif
#ifndef CONFIG_SYS_READ_SPD
/* Initialise DMA for direct transfer */
dma_init();
/* Start DMA to transfer */
- dma_xfer((uint *)0x2000, 0x2000, (uint *)0); /* 8K */
- dma_xfer((uint *)0x4000, 0x4000, (uint *)0); /* 16K */
- dma_xfer((uint *)0x8000, 0x8000, (uint *)0); /* 32K */
- dma_xfer((uint *)0x10000, 0x10000, (uint *)0); /* 64K */
- dma_xfer((uint *)0x20000, 0x20000, (uint *)0); /* 128K */
- dma_xfer((uint *)0x40000, 0x40000, (uint *)0); /* 256K */
- dma_xfer((uint *)0x80000, 0x80000, (uint *)0); /* 512K */
- dma_xfer((uint *)0x100000, 0x100000, (uint *)0); /* 1M */
- dma_xfer((uint *)0x200000, 0x200000, (uint *)0); /* 2M */
- dma_xfer((uint *)0x400000, 0x400000, (uint *)0); /* 4M */
-
- for (i = 1; i < dram_size / 0x800000; i++) {
- dma_xfer((uint *)(0x800000*i), 0x800000, (uint *)0);
- }
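+ /* Each copy doubles the initialized area; the loop fills the rest */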
+ dmacpy(0x2000, 0, 0x2000); /* 8K */
+ dmacpy(0x4000, 0, 0x4000); /* 16K */
+ dmacpy(0x8000, 0, 0x8000); /* 32K */
+ dmacpy(0x10000, 0, 0x10000); /* 64K */
+ dmacpy(0x20000, 0, 0x20000); /* 128K */
+ dmacpy(0x40000, 0, 0x40000); /* 256K */
+ dmacpy(0x80000, 0, 0x80000); /* 512K */
+ dmacpy(0x100000, 0, 0x100000); /* 1M */
+ dmacpy(0x200000, 0, 0x200000); /* 2M */
+ dmacpy(0x400000, 0, 0x400000); /* 4M */
+
+ for (i = 1; i < dram_size / 0x800000; i++)
+ dmacpy(0x800000 * i, 0, 0x800000);
#endif
t_end = get_tbms();
#if defined(CONFIG_DDR_ECC) && !defined(CONFIG_ECC_INIT_VIA_DDRCONTROLLER)
extern void dma_init(void);
extern uint dma_check(void);
-extern int dma_xfer(void *dest, uint count, void *src);
+extern int dmacpy(phys_addr_t dest, phys_addr_t src, phys_size_t n);
/*
* Initialize all of memory for ECC, then enable errors.
}
}
- dma_xfer((uint *)0x002000, 0x002000, (uint *)0); /* 8K */
- dma_xfer((uint *)0x004000, 0x004000, (uint *)0); /* 16K */
- dma_xfer((uint *)0x008000, 0x008000, (uint *)0); /* 32K */
- dma_xfer((uint *)0x010000, 0x010000, (uint *)0); /* 64K */
- dma_xfer((uint *)0x020000, 0x020000, (uint *)0); /* 128k */
- dma_xfer((uint *)0x040000, 0x040000, (uint *)0); /* 256k */
- dma_xfer((uint *)0x080000, 0x080000, (uint *)0); /* 512k */
- dma_xfer((uint *)0x100000, 0x100000, (uint *)0); /* 1M */
- dma_xfer((uint *)0x200000, 0x200000, (uint *)0); /* 2M */
- dma_xfer((uint *)0x400000, 0x400000, (uint *)0); /* 4M */
-
- for (i = 1; i < dram_size / 0x800000; i++) {
- dma_xfer((uint *)(0x800000*i), 0x800000, (uint *)0);
- }
+ dmacpy(0x002000, 0, 0x2000); /* 8K */
+ dmacpy(0x004000, 0, 0x4000); /* 16K */
+ dmacpy(0x008000, 0, 0x8000); /* 32K */
+ dmacpy(0x010000, 0, 0x10000); /* 64K */
+ dmacpy(0x020000, 0, 0x20000); /* 128K */
+ dmacpy(0x040000, 0, 0x40000); /* 256K */
+ dmacpy(0x080000, 0, 0x80000); /* 512K */
+ dmacpy(0x100000, 0, 0x100000); /* 1M */
+ dmacpy(0x200000, 0, 0x200000); /* 2M */
+ dmacpy(0x400000, 0, 0x400000); /* 4M */
+
+ for (i = 1; i < dram_size / 0x800000; i++)
+ dmacpy(0x800000 * i, 0, 0x800000);
/*
* Enable errors for ECC.
dma_sync();
}
-int dma_xfer(void *dest, uint count, void *src) {
+int dmacpy(phys_addr_t dest, phys_addr_t src, phys_size_t count) {
volatile fsl_dma_t *dma = &dma_base->dma[0];
uint xfer_size;