#define MVPP2_SRC_ADDR_HIGH 0x28
#define MVPP2_PHY_AN_CFG0_REG 0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
-#define MVPP2_MIB_COUNTERS_BASE(port) (0x1000 + ((port) >> 1) * \
- 0x400 + (port) * 0x400)
-#define MVPP2_MIB_LATE_COLLISION 0x7c
-#define MVPP2_ISR_SUM_MASK_REG 0x220c
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
-#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
+#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG 0x0
/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK 0x1
#define MVPP2_PRS_RI_DSA_MASK 0x2
-#define MVPP2_PRS_RI_VLAN_MASK 0xc
-#define MVPP2_PRS_RI_VLAN_NONE ~(BIT(2) | BIT(3))
+#define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3))
+#define MVPP2_PRS_RI_VLAN_NONE 0x0
#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
-#define MVPP2_PRS_RI_L2_CAST_MASK 0x600
-#define MVPP2_PRS_RI_L2_UCAST ~(BIT(9) | BIT(10))
+#define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10))
+#define MVPP2_PRS_RI_L2_UCAST 0x0
#define MVPP2_PRS_RI_L2_MCAST BIT(9)
#define MVPP2_PRS_RI_L2_BCAST BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK 0x800
-#define MVPP2_PRS_RI_L3_PROTO_MASK 0x7000
-#define MVPP2_PRS_RI_L3_UN ~(BIT(12) | BIT(13) | BIT(14))
+#define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14))
+#define MVPP2_PRS_RI_L3_UN 0x0
#define MVPP2_PRS_RI_L3_IP4 BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6 BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
-#define MVPP2_PRS_RI_L3_ADDR_MASK 0x18000
-#define MVPP2_PRS_RI_L3_UCAST ~(BIT(15) | BIT(16))
+#define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16))
+#define MVPP2_PRS_RI_L3_UCAST 0x0
#define MVPP2_PRS_RI_L3_MCAST BIT(15)
#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
/* Tclk value */
u32 tclk;
+ /* HW version */
+ enum { MVPP21, MVPP22 } hw_version;
+
struct mii_dev *bus;
};
u8 packet_offset; /* the offset from the buffer beginning */
u8 phys_txq; /* destination queue ID */
u16 data_size; /* data size of transmitted packet in bytes */
- u32 buf_phys_addr; /* physical addr of transmitted buffer */
+ u32 buf_dma_addr; /* physical addr of transmitted buffer */
u32 buf_cookie; /* cookie for access to TX buffer in tx path */
u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
u32 reserved2; /* reserved (for future use) */
u32 status; /* info about received packet */
u16 reserved1; /* parser_info (for future use, PnC) */
u16 data_size; /* size of received packet in bytes */
- u32 buf_phys_addr; /* physical address of the buffer */
+ u32 buf_dma_addr; /* physical address of the buffer */
u32 buf_cookie; /* cookie for access to RX buffer in rx path */
u16 reserved2; /* gem_port_id (for future use, PON) */
u16 reserved3; /* csum_l4 (for future use, PnC) */
struct mvpp2_tx_desc *descs;
/* DMA address of the Tx DMA descriptors array */
- dma_addr_t descs_phys;
+ dma_addr_t descs_dma;
/* Index of the last Tx DMA descriptor */
int last_desc;
struct mvpp2_rx_desc *descs;
/* DMA address of the RX DMA descriptors array */
- dma_addr_t descs_phys;
+ dma_addr_t descs_dma;
/* Index of the last RX DMA descriptor */
int last_desc;
int pkt_size;
/* BPPE virtual base address */
- u32 *virt_addr;
- /* BPPE physical base address */
- dma_addr_t phys_addr;
+ unsigned long *virt_addr;
+ /* BPPE DMA base address */
+ dma_addr_t dma_addr;
/* Ports using BM pool */
u32 port_map;
int in_use_thresh;
};
-struct mvpp2_buff_hdr {
- u32 next_buff_phys_addr;
- u32 next_buff_virt_addr;
- u16 byte_count;
- u16 info;
- u8 reserved1; /* bm_qset (for future use, BM) */
-};
-
-/* Buffer header info bits */
-#define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff
-#define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
-#define MVPP2_B_HDR_INFO_LAST_OFFS 12
-#define MVPP2_B_HDR_INFO_LAST_MASK BIT(12)
-#define MVPP2_B_HDR_INFO_IS_LAST(info) \
- ((info & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
-
/* Static declarations */
/* Number of RXQs used by single port */
struct mvpp2_tx_desc *aggr_tx_descs;
struct mvpp2_tx_desc *tx_descs;
struct mvpp2_rx_desc *rx_descs;
- u32 *bm_pool[MVPP2_BM_POOLS_NUM];
- u32 *rx_buffer[MVPP2_BM_LONG_BUF_NUM];
+ unsigned long *bm_pool[MVPP2_BM_POOLS_NUM];
+ unsigned long *rx_buffer[MVPP2_BM_LONG_BUF_NUM];
int first_rxq;
};
return readl(priv->base + offset);
}
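+/* Descriptor field accessors. On PPv2.1 the descriptor fields are
+ * read and written directly; routing all accesses through these
+ * helpers keeps the callers independent of the descriptor layout.
+ */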
+static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ dma_addr_t dma_addr)
+{
+ tx_desc->buf_dma_addr = dma_addr;
+}
+
+static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ size_t size)
+{
+ tx_desc->data_size = size;
+}
+
+static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ unsigned int txq)
+{
+ tx_desc->phys_txq = txq;
+}
+
+static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ unsigned int command)
+{
+ tx_desc->command = command;
+}
+
+static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ unsigned int offset)
+{
+ tx_desc->packet_offset = offset;
+}
+
+static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
+ struct mvpp2_rx_desc *rx_desc)
+{
+ return rx_desc->buf_dma_addr;
+}
+
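+/* The cookie field carries the buffer physical address, as stored by
+ * mvpp2_bm_pool_put() via MVPP2_BM_VIRT_RLS_REG
+ */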
+static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
+ struct mvpp2_rx_desc *rx_desc)
+{
+ return rx_desc->buf_cookie;
+}
+
+static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
+ struct mvpp2_rx_desc *rx_desc)
+{
+ return rx_desc->data_size;
+}
+
+static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
+ struct mvpp2_rx_desc *rx_desc)
+{
+ return rx_desc->status;
+}
+
static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
txq_pcpu->txq_get_index++;
u32 val;
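+	/* U-Boot runs with a 1:1 virtual/physical mapping, so the
+	 * pool's virtual base address can double as its DMA address
+	 */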
bm_pool->virt_addr = buffer_loc.bm_pool[bm_pool->id];
- bm_pool->phys_addr = (dma_addr_t)buffer_loc.bm_pool[bm_pool->id];
+ bm_pool->dma_addr = (dma_addr_t)buffer_loc.bm_pool[bm_pool->id];
if (!bm_pool->virt_addr)
return -ENOMEM;
- if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVPP2_BM_POOL_PTR_ALIGN)) {
+ if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
+ MVPP2_BM_POOL_PTR_ALIGN)) {
dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
return -ENOMEM;
}
mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
- bm_pool->phys_addr);
+ bm_pool->dma_addr);
mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
}
/* Get pool number from a BM cookie */
-static inline int mvpp2_bm_cookie_pool_get(u32 cookie)
+static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
{
return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
}
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
- u32 buf_phys_addr, u32 buf_virt_addr)
+ dma_addr_t buf_dma_addr,
+ unsigned long buf_phys_addr)
{
- mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
- mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr);
+	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and is simply
+	 * returned in the "cookie" field of the RX descriptor. Instead
+	 * of storing the virtual address, we store the physical address.
+	 */
+ mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
+ mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
}
/* Refill BM pool */
static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
- u32 phys_addr, u32 cookie)
+ dma_addr_t dma_addr,
+ phys_addr_t phys_addr)
{
int pool = mvpp2_bm_cookie_pool_get(bm);
- mvpp2_bm_pool_put(port, pool, phys_addr, cookie);
+ mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
}
/* Allocate buffers for the pool */
struct mvpp2_bm_pool *bm_pool, int buf_num)
{
int i;
- u32 bm;
if (buf_num < 0 ||
(buf_num + bm_pool->buf_num > bm_pool->size)) {
return 0;
}
- bm = mvpp2_bm_cookie_pool_set(0, bm_pool->id);
for (i = 0; i < buf_num; i++) {
- mvpp2_pool_refill(port, bm, (u32)buffer_loc.rx_buffer[i],
- (u32)buffer_loc.rx_buffer[i]);
+ mvpp2_bm_pool_put(port, bm_pool->id,
+ (dma_addr_t)buffer_loc.rx_buffer[i],
+ (unsigned long)buffer_loc.rx_buffer[i]);
}
/* Update BM driver with number of buffers added to pool */
}
/* Obtain BM cookie information from descriptor */
-static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
+static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
+ struct mvpp2_rx_desc *rx_desc)
{
- int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >>
- MVPP2_RXD_BM_POOL_ID_OFFS;
int cpu = smp_processor_id();
+ int pool;
+
+ pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
+ MVPP2_RXD_BM_POOL_ID_MASK) >>
+ MVPP2_RXD_BM_POOL_ID_OFFS;
return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
{
/* Allocate memory for TX descriptors */
aggr_txq->descs = buffer_loc.aggr_tx_descs;
- aggr_txq->descs_phys = (dma_addr_t)buffer_loc.aggr_tx_descs;
+ aggr_txq->descs_dma = (dma_addr_t)buffer_loc.aggr_tx_descs;
if (!aggr_txq->descs)
return -ENOMEM;
/* Set Tx descriptors queue starting address */
/* indirect access */
mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
- aggr_txq->descs_phys);
+ aggr_txq->descs_dma);
mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
return 0;
/* Allocate memory for RX descriptors */
rxq->descs = buffer_loc.rx_descs;
- rxq->descs_phys = (dma_addr_t)buffer_loc.rx_descs;
+ rxq->descs_dma = (dma_addr_t)buffer_loc.rx_descs;
if (!rxq->descs)
return -ENOMEM;
/* Set Rx descriptors queue starting address - indirect access */
mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
- mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys);
+ mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_dma);
mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);
for (i = 0; i < rx_received; i++) {
struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
- u32 bm = mvpp2_bm_cookie_build(rx_desc);
+ u32 bm = mvpp2_bm_cookie_build(port, rx_desc);
- mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
- rx_desc->buf_cookie);
+ mvpp2_pool_refill(port, bm,
+ mvpp2_rxdesc_dma_addr_get(port, rx_desc),
+ mvpp2_rxdesc_cookie_get(port, rx_desc));
}
mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}
rxq->descs = NULL;
rxq->last_desc = 0;
rxq->next_desc_to_proc = 0;
- rxq->descs_phys = 0;
+ rxq->descs_dma = 0;
/* Clear Rx descriptors queue starting address and size;
* free descriptor number
/* Allocate memory for Tx descriptors */
txq->descs = buffer_loc.tx_descs;
- txq->descs_phys = (dma_addr_t)buffer_loc.tx_descs;
+ txq->descs_dma = (dma_addr_t)buffer_loc.tx_descs;
if (!txq->descs)
return -ENOMEM;
/* Set Tx descriptors queue starting address - indirect access */
mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys);
+ mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma);
mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
MVPP2_TXQ_DESC_SIZE_MASK);
mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
txq->descs = NULL;
txq->last_desc = 0;
txq->next_desc_to_proc = 0;
- txq->descs_phys = 0;
+ txq->descs_dma = 0;
/* Set minimum bandwidth for disabled TXQs */
mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
static void mvpp2_rx_error(struct mvpp2_port *port,
struct mvpp2_rx_desc *rx_desc)
{
- u32 status = rx_desc->status;
+ u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
+ size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
switch (status & MVPP2_RXD_ERR_CODE_MASK) {
case MVPP2_RXD_ERR_CRC:
- netdev_err(port->dev, "bad rx status %08x (crc error), size=%d\n",
- status, rx_desc->data_size);
+ netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
+ status, sz);
break;
case MVPP2_RXD_ERR_OVERRUN:
- netdev_err(port->dev, "bad rx status %08x (overrun error), size=%d\n",
- status, rx_desc->data_size);
+ netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
+ status, sz);
break;
case MVPP2_RXD_ERR_RESOURCE:
- netdev_err(port->dev, "bad rx status %08x (resource error), size=%d\n",
- status, rx_desc->data_size);
+ netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
+ status, sz);
break;
}
}
/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
struct mvpp2_bm_pool *bm_pool,
- u32 bm, u32 phys_addr)
+ u32 bm, dma_addr_t dma_addr)
{
- mvpp2_pool_refill(port, bm, phys_addr, phys_addr);
+ mvpp2_pool_refill(port, bm, dma_addr, (unsigned long)dma_addr);
return 0;
}
struct mvpp2_port *port = dev_get_priv(dev);
struct mvpp2_rx_desc *rx_desc;
struct mvpp2_bm_pool *bm_pool;
- dma_addr_t phys_addr;
+ dma_addr_t dma_addr;
u32 bm, rx_status;
int pool, rx_bytes, err;
int rx_received;
return 0;
rx_desc = mvpp2_rxq_next_desc_get(rxq);
- rx_status = rx_desc->status;
- rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
- phys_addr = rx_desc->buf_phys_addr;
+ rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
+ rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
+ rx_bytes -= MVPP2_MH_SIZE;
+ dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
- bm = mvpp2_bm_cookie_build(rx_desc);
+ bm = mvpp2_bm_cookie_build(port, rx_desc);
pool = mvpp2_bm_cookie_pool_get(bm);
bm_pool = &port->priv->bm_pools[pool];
- /* Check if buffer header is used */
- if (rx_status & MVPP2_RXD_BUF_HDR)
- return 0;
-
/* In case of an error, release the requested buffer pointer
* to the Buffer Manager. This request process is controlled
 * by the hardware, and the information about the buffer is
 * comprised by the RX descriptor.
 */
if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
mvpp2_rx_error(port, rx_desc);
/* Return the buffer to the pool */
- mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
- rx_desc->buf_cookie);
+ mvpp2_pool_refill(port, bm, dma_addr, dma_addr);
return 0;
}
- err = mvpp2_rx_refill(port, bm_pool, bm, phys_addr);
+ err = mvpp2_rx_refill(port, bm_pool, bm, dma_addr);
if (err) {
netdev_err(port->dev, "failed to refill BM pools\n");
return 0;
mvpp2_rxq_status_update(port, rxq->id, 1, 1);
/* give packet to stack - skip on first n bytes */
- data = (u8 *)phys_addr + 2 + 32;
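+	/* skip 2 bytes of Marvell header plus 32 bytes of headroom
+	 * (assumed to match the packet offset programmed into the RXQ)
+	 */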
+ data = (u8 *)dma_addr + 2 + 32;
if (rx_bytes <= 0)
return 0;
/* Get a descriptor for the first part of the packet */
tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
- tx_desc->phys_txq = txq->id;
- tx_desc->data_size = length;
- tx_desc->packet_offset = (u32)packet & MVPP2_TX_DESC_ALIGN;
- tx_desc->buf_phys_addr = (u32)packet & ~MVPP2_TX_DESC_ALIGN;
+ mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
+ mvpp2_txdesc_size_set(port, tx_desc, length);
+ mvpp2_txdesc_offset_set(port, tx_desc,
+ (dma_addr_t)packet & MVPP2_TX_DESC_ALIGN);
+ mvpp2_txdesc_dma_addr_set(port, tx_desc,
+ (dma_addr_t)packet & ~MVPP2_TX_DESC_ALIGN);
/* First and Last descriptor */
- tx_desc->command = MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE
- | MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
+ mvpp2_txdesc_cmd_set(port, tx_desc,
+ MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE
+ | MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC);
/* Flush tx data */
- flush_dcache_range((u32)packet, (u32)packet + length);
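+	/* flush_dcache_range() expects cache-line aligned start and end
+	 * addresses, hence rounding the length up to PKTALIGN
+	 */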
+ flush_dcache_range((unsigned long)packet,
+ (unsigned long)packet + ALIGN(length, PKTALIGN));
/* Enable transmit */
mb();
u32 size = 0;
int i;
+ /* Save hw-version */
+ priv->hw_version = dev_get_driver_data(dev);
+
/*
* U-Boot special buffer handling:
*
/* Align buffer area for descs and rx_buffers to 1MiB */
bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
- mmu_set_region_dcache_behaviour((u32)bd_space, BD_SPACE, DCACHE_OFF);
+ mmu_set_region_dcache_behaviour((unsigned long)bd_space,
+ BD_SPACE, DCACHE_OFF);
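+	/* Descriptor rings, pools and RX buffers are shared with the
+	 * HW; keeping the area uncached avoids explicit cache
+	 * maintenance on each access
+	 */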
buffer_loc.aggr_tx_descs = (struct mvpp2_tx_desc *)bd_space;
size += MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE;
- buffer_loc.tx_descs = (struct mvpp2_tx_desc *)((u32)bd_space + size);
+ buffer_loc.tx_descs =
+ (struct mvpp2_tx_desc *)((unsigned long)bd_space + size);
size += MVPP2_MAX_TXD * MVPP2_DESC_ALIGNED_SIZE;
- buffer_loc.rx_descs = (struct mvpp2_rx_desc *)((u32)bd_space + size);
+ buffer_loc.rx_descs =
+ (struct mvpp2_rx_desc *)((unsigned long)bd_space + size);
size += MVPP2_MAX_RXD * MVPP2_DESC_ALIGNED_SIZE;
for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
- buffer_loc.bm_pool[i] = (u32 *)((u32)bd_space + size);
+ buffer_loc.bm_pool[i] =
+ (unsigned long *)((unsigned long)bd_space + size);
size += MVPP2_BM_POOL_SIZE_MAX * sizeof(u32);
}
for (i = 0; i < MVPP2_BM_LONG_BUF_NUM; i++) {
- buffer_loc.rx_buffer[i] = (u32 *)((u32)bd_space + size);
+ buffer_loc.rx_buffer[i] =
+ (unsigned long *)((unsigned long)bd_space + size);
size += RX_BUFFER_SIZE;
}
}
static const struct udevice_id mvpp2_ids[] = {
- { .compatible = "marvell,armada-375-pp2" },
+ {
+ .compatible = "marvell,armada-375-pp2",
+ .data = MVPP21,
+ },
{ }
};