!= ERROR_OK)
return ret;
- mg_gen_ataid((mg_io_type_drv_info *)buff);
+ mg_gen_ataid((mg_io_type_drv_info *)(void *)buff);
if ((ret = mg_mflash_do_write_sects(buff, 0, 1, mg_vcmd_update_stgdrvinfo))
!= ERROR_OK)
buff[0] = mg_op_mode_snd; /* operation mode */
buff[1] = MG_UNLOCK_OTP_AREA;
buff[2] = 4; /* boot size */
- *((uint32_t *)(buff + 4)) = 0; /* XIP size */
+ *((uint32_t *)(void *)(buff + 4)) = 0; /* XIP size */
if ((ret = mg_mflash_do_write_sects(buff, 0, 1, mg_vcmd_update_xipinfo))
!= ERROR_OK)
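/* A minimal sketch (illustrative only, not part of any driver touched here)
 * of the idiom this patch applies throughout: casting a byte buffer straight
 * to a wider pointer type trips GCC's cast-align warning, while routing the
 * cast through (void *) does not.  The detour only quiets the diagnostic;
 * the storage itself still has to be suitably aligned. */
#include <stdint.h>

static uint8_t example_buf[8] __attribute__((aligned(4)));

static uint32_t example_read_word(void)
{
	/* return *(uint32_t *)example_buf;         -- warns with -Wcast-align */
	return *(uint32_t *)(void *)example_buf;  /* -- compiles quietly */
}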
target_read_memory(target, target_mem_base+SPARE_OFFS, 4, 16, ecc_flash_buffer);
target_read_memory(target, target_mem_base+ECC_OFFS, 4, 8, ecc_hw_buffer);
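/* Each pass below checks the two 24-bit ECC codes for a pair of 256-byte
 * sub-blocks: the controller's codes are packed 8 bytes apart in
 * ecc_hw_buffer, the spare-area copies 16 bytes apart starting at offset 8,
 * and only the low 24 bits of each word carry the code, hence the
 * 0x00ffffff mask. */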
for(i=0;i<idx;i++){
- if( (0x00ffffff&*(uint32_t *)(ecc_hw_buffer+i*8)) != (0x00ffffff&*(uint32_t *)(ecc_flash_buffer+8+i*16)) )
+ if( (0x00ffffff&*(uint32_t *)(void *)(ecc_hw_buffer+i*8)) != (0x00ffffff&*(uint32_t *)(void *)(ecc_flash_buffer+8+i*16)) )
LOG_WARNING("ECC mismatch at 256 bytes size block= %d at page= 0x%" PRIx32,i*2+1,page);
- if( (0x00ffffff&*(uint32_t *)(ecc_hw_buffer+4+i*8)) != (0x00ffffff&*(uint32_t *)(ecc_flash_buffer+12+i*16)) )
+ if( (0x00ffffff&*(uint32_t *)(void *)(ecc_hw_buffer+4+i*8)) != (0x00ffffff&*(uint32_t *)(void *)(ecc_flash_buffer+12+i*16)) )
LOG_WARNING("ECC mismatch at 256 bytes size block= %d at page= 0x%" PRIx32,i*2+2,page);
}
}
// By using prototypes, we can detect what would
// otherwise be casting errors.
- return ((uint32_t *)(((char *)(pCfg)) + pList->struct_offset));
+ return ((uint32_t *)(void *)(((char *)(pCfg)) + pList->struct_offset));
}
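/* Sketch of the table-driven access the cast above serves (all names here
 * are made up for illustration): each table entry records the offset of a
 * uint32_t member, and the accessor rebuilds a typed pointer from the config
 * base plus that offset.  Because the accessor has a fixed uint32_t *
 * prototype, callers expecting a different type get a compile-time
 * diagnostic instead of a silent cast. */
#include <stddef.h>
#include <stdint.h>

struct example_cfg {
	uint32_t chipid;
	uint32_t flash_size;
};

struct example_reg {
	const char *name;
	size_t struct_offset;
};

static const struct example_reg example_regs[] = {
	{ "CHIPID",     offsetof(struct example_cfg, chipid) },
	{ "FLASH_SIZE", offsetof(struct example_cfg, flash_size) },
};

static uint32_t *example_get_reg_ptr(struct example_cfg *cfg,
	const struct example_reg *reg)
{
	/* same shape as the return statement above */
	return (uint32_t *)(void *)((char *)cfg + reg->struct_offset);
}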
// calculate where this one goes.
// it is "possibly" this register.
- pPossible = ((uint32_t *)(((char *)(&(pChip->cfg))) + pReg->struct_offset));
+ pPossible = ((uint32_t *)(void *)(((char *)(&(pChip->cfg))) + pReg->struct_offset));
// well? Is it this register?
if (pPossible == goes_here) {
*/
#define container_of(ptr, type, member) ({ \
const typeof( ((type *)0)->member ) *__mptr = (ptr); \
- (type *)( (char *)__mptr - offsetof(type,member) );})
+ (type *)( (void *) ( (char *)__mptr - offsetof(type,member) ) );})
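/* Typical container_of() use with the macro above (types here are made up
 * for illustration): given a pointer to an embedded member, recover the
 * structure that contains it. */
struct example_node {
	struct example_node *next;
};

struct example_flash_bank {
	unsigned base;
	struct example_node link;	/* embedded member */
};

static struct example_flash_bank *bank_from_link(struct example_node *p)
{
	/* yields the struct example_flash_bank whose .link is *p */
	return container_of(p, struct example_flash_bank, link);
}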
/**
static __inline__ void virtexflip32(jtag_callback_data_t arg)
{
uint8_t *in = (uint8_t *)arg;
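	/* Casting arg (the opaque callback handle) rather than the uint8_t
	 * pointer 'in' sidesteps the increasing-alignment cast that triggered
	 * the warning; the word rewritten in place must still be 32-bit
	 * aligned. */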
- *((uint32_t *)in) = flip_u32(le_to_h_u32(in), 32);
+ *((uint32_t *)arg) = flip_u32(le_to_h_u32(in), 32);
}
static int virtex2_receive_32(struct pld_device *pld_device,
{
uint32_t instr = !arm11_config_memrw_no_increment ? 0xecb05e01 : 0xed905e00;
/** \todo TODO: buffer cast to uint32_t* causes alignment warnings */
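	/* The (void *) detour below only quiets the warning noted above; it
	 * does not make an unaligned buffer safe to access, so callers still
	 * need to pass a 32-bit aligned buffer. */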
- uint32_t *words = (uint32_t *)buffer;
+ uint32_t *words = (uint32_t *)(void *)buffer;
/* LDC p14,c5,[R0],#4 */
/* LDC p14,c5,[R0] */
uint32_t instr = !no_increment ? 0xeca05e01 : 0xed805e00;
/** \todo TODO: buffer cast to uint32_t* causes alignment warnings */
- uint32_t *words = (uint32_t*)buffer;
+ uint32_t *words = (uint32_t*)(void *)buffer;
/* "burst" here just means trusting each instruction executes
* fully before we run the next one: per-word roundtrips, to
for (writecount = 0; writecount < blocksize; writecount++)
{
retval = dap_queue_ap_write(dap, AP_REG_DRW,
- *(uint32_t *) (buffer + 4 * writecount));
+ *(uint32_t *) ((void *) (buffer + 4 * writecount)));
if (retval != ERROR_OK)
break;
}
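	/* If the caller cannot guarantee 32-bit alignment of 'buffer', the cast
	 * can be avoided entirely by assembling each word from bytes, e.g. with
	 * the buf_get_u32() helper used elsewhere in this patch (whether that
	 * is acceptable on this fast path is left open):
	 *
	 *	retval = dap_queue_ap_write(dap, AP_REG_DRW,
	 *			buf_get_u32(buffer + 4 * writecount, 0, 32));
	 */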
static __inline__ void arm7flip32(jtag_callback_data_t arg)
{
uint8_t *in = (uint8_t *)arg;
- *((uint32_t *)in) = flip_u32(le_to_h_u32(in), 32);
+ *((uint32_t *)arg) = flip_u32(le_to_h_u32(in), 32);
}
static __inline__ void arm_le_to_h_u32(jtag_callback_data_t arg)
{
uint8_t *in = (uint8_t *)arg;
- *((uint32_t *)in) = le_to_h_u32(in);
+ *((uint32_t *)arg) = le_to_h_u32(in);
}
switch (size)
{
case 4:
- return avr32_jtag_read_memory32(&ap7k->jtag, address, count, (uint32_t*)buffer);
+ return avr32_jtag_read_memory32(&ap7k->jtag, address, count, (uint32_t*)(void *)buffer);
break;
case 2:
- return avr32_jtag_read_memory16(&ap7k->jtag, address, count, (uint16_t*)buffer);
+ return avr32_jtag_read_memory16(&ap7k->jtag, address, count, (uint16_t*)(void *)buffer);
break;
case 1:
return avr32_jtag_read_memory8(&ap7k->jtag, address, count, buffer);
switch (size)
{
case 4:
- return avr32_jtag_write_memory32(&ap7k->jtag, address, count, (uint32_t*)buffer);
+ return avr32_jtag_write_memory32(&ap7k->jtag, address, count, (uint32_t*)(void *)buffer);
break;
case 2:
- return avr32_jtag_write_memory16(&ap7k->jtag, address, count, (uint16_t*)buffer);
+ return avr32_jtag_write_memory16(&ap7k->jtag, address, count, (uint16_t*)(void *)buffer);
break;
case 1:
return avr32_jtag_write_memory8(&ap7k->jtag, address, count, buffer);
if (addr & 3)
{
retval = avr32_jtag_mwa_read(jtag_info, SLAVE_HSB_UNCACHED,
- addr + i, (uint32_t*)data);
+ addr + i, (uint32_t*)(void *)data);
if (retval != ERROR_OK)
return retval;
for (; i < (count & ~3); i+=4)
{
retval = avr32_jtag_mwa_read(jtag_info, SLAVE_HSB_UNCACHED,
- addr + i, (uint32_t*)data);
+ addr + i, (uint32_t*)(void *)data);
if (retval != ERROR_OK)
return retval;
if (i < count)
{
retval = avr32_jtag_mwa_read(jtag_info, SLAVE_HSB_UNCACHED,
- addr + i, (uint32_t*)data);
+ addr + i, (uint32_t*)(void *)data);
if (retval != ERROR_OK)
return retval;
{
uint8_t *in = (uint8_t *)arg;
- *((uint32_t *)in) = buf_get_u32(in, 0, 32);
+ *((uint32_t *)arg) = buf_get_u32(in, 0, 32);
}
}
retval = mips32_pracc_fastdata_xfer(ejtag_info, source, write_t, address,
- count, (uint32_t*) buffer);
+ count, (uint32_t*) (void *)buffer);
if (retval != ERROR_OK)
{
/* FASTDATA access failed, try normal memory write */
static void xscale_getbuf(jtag_callback_data_t arg)
{
uint8_t *in = (uint8_t *)arg;
- *((uint32_t *)in) = buf_get_u32(in, 0, 32);
+ *((uint32_t *)arg) = buf_get_u32(in, 0, 32);
}
static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)