dma: keystone_nav: generalize driver usage
Author:     Khoronzhuk, Ivan <ivan.khoronzhuk@ti.com>
AuthorDate: Fri, 5 Sep 2014 16:02:48 +0000 (19:02 +0300)
Commit:     Tom Rini <trini@ti.com>
CommitDate: Thu, 23 Oct 2014 15:27:04 +0000 (11:27 -0400)
The keystone_nav driver is a generic driver intended for working with
the queue manager and pktdma of different IPs like NETCP, AIF, FFTC,
etc. Its API therefore should not be named as if it worked with only
one of them; the names should be generic. Names with a netcp_* prefix
rather belong to the drivers/net/keystone_net.c driver. So generalize
this driver's API so that it can be used with different IPs and avoid
confusion with the real NETCP driver.

The current netcp_* functions of the keystone navigator can be used
with other pktdma configurations, not only with NETCP. The API of this
driver is used by the keystone_net driver to work with NETCP, so the
net driver has to be updated as well. For convenience, collect the
pktdma configurations in drivers/dma/keystone_nav_cfg.c.

Acked-by: Vitaly Andrianov <vitalya@ti.com>
Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>
arch/arm/include/asm/ti-common/keystone_nav.h
drivers/dma/Makefile
drivers/dma/keystone_nav.c
drivers/dma/keystone_nav_cfg.c [new file with mode: 0644]
drivers/net/keystone_net.c
include/configs/k2hk_evm.h
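
To make the new calling convention concrete, below is a minimal usage sketch
modelled on the NETCP path in drivers/net/keystone_net.c (see the hunks that
follow); the my_* names and the buffer count, length and rx flow values are
illustrative assumptions, not part of this commit:

#include <common.h>
#include <asm/ti-common/keystone_nav.h>

/* Illustrative RX buffer pool; count, length and flow number are made up */
static u8 my_rx_bufs[32 * 1520] __aligned(16);

static struct rx_buff_desc my_rx_desc = {
	.buff_ptr	= my_rx_bufs,
	.num_buffs	= 32,
	.buff_len	= 1520,
	.rx_flow	= 22,
};

static int my_pktdma_example(u32 *pkt, int len)
{
	u32 *rx_pkt;
	int rx_len;
	void *hd;

	/* The queue manager must be brought up first, as in keystone2_eth_open() */
	if (qm_init())
		return -1;

	/* Bind the generic API to one pktdma instance, here the NETCP one */
	if (ksnav_init(&netcp_pktdma, &my_rx_desc)) {
		qm_close();
		return -1;
	}

	/* Transmit one packet; swinfo2 carries driver-specific data */
	ksnav_send(&netcp_pktdma, pkt, len, 0);

	/* Poll for a received packet, then return its descriptor to the free queue */
	hd = ksnav_recv(&netcp_pktdma, &rx_pkt, &rx_len);
	if (hd)
		ksnav_release_rxhd(&netcp_pktdma, hd);

	ksnav_close(&netcp_pktdma);
	qm_close();

	return 0;
}

Any other pktdma_cfg instance (e.g. for AIF or FFTC) would be driven the same
way once a configuration for it exists.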

diff --git a/arch/arm/include/asm/ti-common/keystone_nav.h b/arch/arm/include/asm/ti-common/keystone_nav.h
index 646c2f3a5697c21434fe1a1e699de3f56175d759..696d8c6fc09edac6c73546f6c804f7548bef8150 100644
--- a/arch/arm/include/asm/ti-common/keystone_nav.h
+++ b/arch/arm/include/asm/ti-common/keystone_nav.h
@@ -169,6 +169,8 @@ struct pktdma_cfg {
        u32                     rx_flow; /* flow that is used for RX */
 };
 
+extern struct pktdma_cfg netcp_pktdma;
+
 /*
  * packet dma user allocates memory for rx buffers
  * and describe it in the following structure
@@ -180,10 +182,10 @@ struct rx_buff_desc {
        u32     rx_flow;
 };
 
-int netcp_close(void);
-int netcp_init(struct rx_buff_desc *rx_buffers);
-int netcp_send(u32 *pkt, int num_bytes, u32 swinfo2);
-void *netcp_recv(u32 **pkt, int *num_bytes);
-void netcp_release_rxhd(void *hd);
+int ksnav_close(struct pktdma_cfg *pktdma);
+int ksnav_init(struct pktdma_cfg *pktdma, struct rx_buff_desc *rx_buffers);
+int ksnav_send(struct pktdma_cfg *pktdma, u32 *pkt, int num_bytes, u32 swinfo2);
+void *ksnav_recv(struct pktdma_cfg *pktdma, u32 **pkt, int *num_bytes);
+void ksnav_release_rxhd(struct pktdma_cfg *pktdma, void *hd);
 
 #endif  /* _KEYSTONE_NAV_H_ */
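
For callers the rename amounts to passing the desired pktdma_cfg explicitly;
for the NETCP case in drivers/net/keystone_net.c (see the hunks further down)
a transmit call changes roughly like this:

	/* before: NETCP instance implied by a driver-internal pointer */
	netcp_send(buffer, num_bytes, slave_port_num << 16);

	/* after: the pktdma instance is selected by the caller */
	ksnav_send(&netcp_pktdma, buffer, num_bytes, slave_port_num << 16);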
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 519dd0d3475f083ec9a8887565651e023ec909da..15b0780b5d9b599b5a90b11a11c3a53d1169516c 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -8,4 +8,4 @@
 obj-$(CONFIG_FSLDMAFEC) += MCD_tasksInit.o MCD_dmaApi.o MCD_tasks.o
 obj-$(CONFIG_APBH_DMA) += apbh_dma.o
 obj-$(CONFIG_FSL_DMA) += fsl_dma.o
-obj-$(CONFIG_TI_KSNAV) += keystone_nav.o
+obj-$(CONFIG_TI_KSNAV) += keystone_nav.o keystone_nav_cfg.o
diff --git a/drivers/dma/keystone_nav.c b/drivers/dma/keystone_nav.c
index d960fbb2663652b757e931307ccc8f0ca0f777da..77707c21098f45ca1c33d42470416717f8851f62 100644
--- a/drivers/dma/keystone_nav.c
+++ b/drivers/dma/keystone_nav.c
@@ -156,35 +156,20 @@ void queue_close(u32 qnum)
 /**
  * DMA API
  */
-struct pktdma_cfg netcp_pktdma = {
-       .global         = (void *)CONFIG_KSNAV_NETCP_PDMA_CTRL_BASE,
-       .tx_ch          = (void *)CONFIG_KSNAV_NETCP_PDMA_TX_BASE,
-       .tx_ch_num      = CONFIG_KSNAV_NETCP_PDMA_TX_CH_NUM,
-       .rx_ch          = (void *)CONFIG_KSNAV_NETCP_PDMA_RX_BASE,
-       .rx_ch_num      = CONFIG_KSNAV_NETCP_PDMA_RX_CH_NUM,
-       .tx_sched       = (u32 *)CONFIG_KSNAV_NETCP_PDMA_SCHED_BASE,
-       .rx_flows       = (void *)CONFIG_KSNAV_NETCP_PDMA_RX_FLOW_BASE,
-       .rx_flow_num    = CONFIG_KSNAV_NETCP_PDMA_RX_FLOW_NUM,
-       .rx_free_q      = CONFIG_KSNAV_NETCP_PDMA_RX_FREE_QUEUE,
-       .rx_rcv_q       = CONFIG_KSNAV_NETCP_PDMA_RX_RCV_QUEUE,
-       .tx_snd_q       = CONFIG_KSNAV_NETCP_PDMA_TX_SND_QUEUE,
-};
-
-struct pktdma_cfg *netcp;
 
-static int netcp_rx_disable(void)
+static int ksnav_rx_disable(struct pktdma_cfg *pktdma)
 {
        u32 j, v, k;
 
-       for (j = 0; j < netcp->rx_ch_num; j++) {
-               v = readl(&netcp->rx_ch[j].cfg_a);
+       for (j = 0; j < pktdma->rx_ch_num; j++) {
+               v = readl(&pktdma->rx_ch[j].cfg_a);
                if (!(v & CPDMA_CHAN_A_ENABLE))
                        continue;
 
-               writel(v | CPDMA_CHAN_A_TDOWN, &netcp->rx_ch[j].cfg_a);
+               writel(v | CPDMA_CHAN_A_TDOWN, &pktdma->rx_ch[j].cfg_a);
                for (k = 0; k < TDOWN_TIMEOUT_COUNT; k++) {
                        udelay(100);
-                       v = readl(&netcp->rx_ch[j].cfg_a);
+                       v = readl(&pktdma->rx_ch[j].cfg_a);
                        if (!(v & CPDMA_CHAN_A_ENABLE))
                                continue;
                }
@@ -192,33 +177,33 @@ static int netcp_rx_disable(void)
        }
 
        /* Clear all of the flow registers */
-       for (j = 0; j < netcp->rx_flow_num; j++) {
-               writel(0, &netcp->rx_flows[j].control);
-               writel(0, &netcp->rx_flows[j].tags);
-               writel(0, &netcp->rx_flows[j].tag_sel);
-               writel(0, &netcp->rx_flows[j].fdq_sel[0]);
-               writel(0, &netcp->rx_flows[j].fdq_sel[1]);
-               writel(0, &netcp->rx_flows[j].thresh[0]);
-               writel(0, &netcp->rx_flows[j].thresh[1]);
-               writel(0, &netcp->rx_flows[j].thresh[2]);
+       for (j = 0; j < pktdma->rx_flow_num; j++) {
+               writel(0, &pktdma->rx_flows[j].control);
+               writel(0, &pktdma->rx_flows[j].tags);
+               writel(0, &pktdma->rx_flows[j].tag_sel);
+               writel(0, &pktdma->rx_flows[j].fdq_sel[0]);
+               writel(0, &pktdma->rx_flows[j].fdq_sel[1]);
+               writel(0, &pktdma->rx_flows[j].thresh[0]);
+               writel(0, &pktdma->rx_flows[j].thresh[1]);
+               writel(0, &pktdma->rx_flows[j].thresh[2]);
        }
 
        return QM_OK;
 }
 
-static int netcp_tx_disable(void)
+static int ksnav_tx_disable(struct pktdma_cfg *pktdma)
 {
        u32 j, v, k;
 
-       for (j = 0; j < netcp->tx_ch_num; j++) {
-               v = readl(&netcp->tx_ch[j].cfg_a);
+       for (j = 0; j < pktdma->tx_ch_num; j++) {
+               v = readl(&pktdma->tx_ch[j].cfg_a);
                if (!(v & CPDMA_CHAN_A_ENABLE))
                        continue;
 
-               writel(v | CPDMA_CHAN_A_TDOWN, &netcp->tx_ch[j].cfg_a);
+               writel(v | CPDMA_CHAN_A_TDOWN, &pktdma->tx_ch[j].cfg_a);
                for (k = 0; k < TDOWN_TIMEOUT_COUNT; k++) {
                        udelay(100);
-                       v = readl(&netcp->tx_ch[j].cfg_a);
+                       v = readl(&pktdma->tx_ch[j].cfg_a);
                        if (!(v & CPDMA_CHAN_A_ENABLE))
                                continue;
                }
@@ -228,19 +213,17 @@ static int netcp_tx_disable(void)
        return QM_OK;
 }
 
-static int _netcp_init(struct pktdma_cfg *netcp_cfg,
-                      struct rx_buff_desc *rx_buffers)
+int ksnav_init(struct pktdma_cfg *pktdma, struct rx_buff_desc *rx_buffers)
 {
        u32 j, v;
        struct qm_host_desc *hd;
        u8 *rx_ptr;
 
-       if (netcp_cfg == NULL || rx_buffers == NULL ||
+       if (pktdma == NULL || rx_buffers == NULL ||
            rx_buffers->buff_ptr == NULL || qm_cfg == NULL)
                return QM_ERR;
 
-       netcp = netcp_cfg;
-       netcp->rx_flow = rx_buffers->rx_flow;
+       pktdma->rx_flow = rx_buffers->rx_flow;
 
        /* init rx queue */
        rx_ptr = rx_buffers->buff_ptr;
@@ -250,69 +233,64 @@ static int _netcp_init(struct pktdma_cfg *netcp_cfg,
                if (hd == NULL)
                        return QM_ERR;
 
-               qm_buff_push(hd, netcp->rx_free_q,
+               qm_buff_push(hd, pktdma->rx_free_q,
                             rx_ptr, rx_buffers->buff_len);
 
                rx_ptr += rx_buffers->buff_len;
        }
 
-       netcp_rx_disable();
+       ksnav_rx_disable(pktdma);
 
        /* configure rx channels */
-       v = CPDMA_REG_VAL_MAKE_RX_FLOW_A(1, 1, 0, 0, 0, 0, 0, netcp->rx_rcv_q);
-       writel(v, &netcp->rx_flows[netcp->rx_flow].control);
-       writel(0, &netcp->rx_flows[netcp->rx_flow].tags);
-       writel(0, &netcp->rx_flows[netcp->rx_flow].tag_sel);
+       v = CPDMA_REG_VAL_MAKE_RX_FLOW_A(1, 1, 0, 0, 0, 0, 0, pktdma->rx_rcv_q);
+       writel(v, &pktdma->rx_flows[pktdma->rx_flow].control);
+       writel(0, &pktdma->rx_flows[pktdma->rx_flow].tags);
+       writel(0, &pktdma->rx_flows[pktdma->rx_flow].tag_sel);
 
-       v = CPDMA_REG_VAL_MAKE_RX_FLOW_D(0, netcp->rx_free_q, 0,
-                                        netcp->rx_free_q);
+       v = CPDMA_REG_VAL_MAKE_RX_FLOW_D(0, pktdma->rx_free_q, 0,
+                                        pktdma->rx_free_q);
 
-       writel(v, &netcp->rx_flows[netcp->rx_flow].fdq_sel[0]);
-       writel(v, &netcp->rx_flows[netcp->rx_flow].fdq_sel[1]);
-       writel(0, &netcp->rx_flows[netcp->rx_flow].thresh[0]);
-       writel(0, &netcp->rx_flows[netcp->rx_flow].thresh[1]);
-       writel(0, &netcp->rx_flows[netcp->rx_flow].thresh[2]);
+       writel(v, &pktdma->rx_flows[pktdma->rx_flow].fdq_sel[0]);
+       writel(v, &pktdma->rx_flows[pktdma->rx_flow].fdq_sel[1]);
+       writel(0, &pktdma->rx_flows[pktdma->rx_flow].thresh[0]);
+       writel(0, &pktdma->rx_flows[pktdma->rx_flow].thresh[1]);
+       writel(0, &pktdma->rx_flows[pktdma->rx_flow].thresh[2]);
 
-       for (j = 0; j < netcp->rx_ch_num; j++)
-               writel(CPDMA_CHAN_A_ENABLE, &netcp->rx_ch[j].cfg_a);
+       for (j = 0; j < pktdma->rx_ch_num; j++)
+               writel(CPDMA_CHAN_A_ENABLE, &pktdma->rx_ch[j].cfg_a);
 
        /* configure tx channels */
        /* Disable loopback in the tx direction */
-       writel(0, &netcp->global->emulation_control);
+       writel(0, &pktdma->global->emulation_control);
 
        /* Set QM base address, only for K2x devices */
-       writel(CONFIG_KSNAV_QM_BASE_ADDRESS, &netcp->global->qm_base_addr[0]);
+       writel(CONFIG_KSNAV_QM_BASE_ADDRESS, &pktdma->global->qm_base_addr[0]);
 
        /* Enable all channels. The current state isn't important */
-       for (j = 0; j < netcp->tx_ch_num; j++)  {
-               writel(0, &netcp->tx_ch[j].cfg_b);
-               writel(CPDMA_CHAN_A_ENABLE, &netcp->tx_ch[j].cfg_a);
+       for (j = 0; j < pktdma->tx_ch_num; j++)  {
+               writel(0, &pktdma->tx_ch[j].cfg_b);
+               writel(CPDMA_CHAN_A_ENABLE, &pktdma->tx_ch[j].cfg_a);
        }
 
        return QM_OK;
 }
 
-int netcp_init(struct rx_buff_desc *rx_buffers)
-{
-       return _netcp_init(&netcp_pktdma, rx_buffers);
-}
-
-int netcp_close(void)
+int ksnav_close(struct pktdma_cfg *pktdma)
 {
-       if (!netcp)
+       if (!pktdma)
                return QM_ERR;
 
-       netcp_tx_disable();
-       netcp_rx_disable();
+       ksnav_tx_disable(pktdma);
+       ksnav_rx_disable(pktdma);
 
-       queue_close(netcp->rx_free_q);
-       queue_close(netcp->rx_rcv_q);
-       queue_close(netcp->tx_snd_q);
+       queue_close(pktdma->rx_free_q);
+       queue_close(pktdma->rx_rcv_q);
+       queue_close(pktdma->tx_snd_q);
 
        return QM_OK;
 }
 
-int netcp_send(u32 *pkt, int num_bytes, u32 swinfo2)
+int ksnav_send(struct pktdma_cfg *pktdma, u32 *pkt, int num_bytes, u32 swinfo2)
 {
        struct qm_host_desc *hd;
 
@@ -324,16 +302,16 @@ int netcp_send(u32 *pkt, int num_bytes, u32 swinfo2)
        hd->swinfo[2]   = swinfo2;
        hd->packet_info = qm_cfg->qpool_num;
 
-       qm_buff_push(hd, netcp->tx_snd_q, pkt, num_bytes);
+       qm_buff_push(hd, pktdma->tx_snd_q, pkt, num_bytes);
 
        return QM_OK;
 }
 
-void *netcp_recv(u32 **pkt, int *num_bytes)
+void *ksnav_recv(struct pktdma_cfg *pktdma, u32 **pkt, int *num_bytes)
 {
        struct qm_host_desc *hd;
 
-       hd = qm_pop(netcp->rx_rcv_q);
+       hd = qm_pop(pktdma->rx_rcv_q);
        if (!hd)
                return NULL;
 
@@ -343,12 +321,12 @@ void *netcp_recv(u32 **pkt, int *num_bytes)
        return hd;
 }
 
-void netcp_release_rxhd(void *hd)
+void ksnav_release_rxhd(struct pktdma_cfg *pktdma, void *hd)
 {
        struct qm_host_desc *_hd = (struct qm_host_desc *)hd;
 
        _hd->buff_len = _hd->orig_buff_len;
        _hd->buff_ptr = _hd->orig_buff_ptr;
 
-       qm_push(_hd, netcp->rx_free_q);
+       qm_push(_hd, pktdma->rx_free_q);
 }
diff --git a/drivers/dma/keystone_nav_cfg.c b/drivers/dma/keystone_nav_cfg.c
new file mode 100644
index 0000000..bdd30a0
--- /dev/null
+++ b/drivers/dma/keystone_nav_cfg.c
@@ -0,0 +1,27 @@
+/*
+ * Multicore Navigator driver for TI Keystone 2 devices.
+ *
+ * (C) Copyright 2012-2014
+ *     Texas Instruments Incorporated, <www.ti.com>
+ *
+ * SPDX-License-Identifier:     GPL-2.0+
+ */
+
+#include <asm/ti-common/keystone_nav.h>
+
+#ifdef CONFIG_KSNAV_PKTDMA_NETCP
+/* NETCP Pktdma */
+struct pktdma_cfg netcp_pktdma = {
+       .global         = (void *)CONFIG_KSNAV_NETCP_PDMA_CTRL_BASE,
+       .tx_ch          = (void *)CONFIG_KSNAV_NETCP_PDMA_TX_BASE,
+       .tx_ch_num      = CONFIG_KSNAV_NETCP_PDMA_TX_CH_NUM,
+       .rx_ch          = (void *)CONFIG_KSNAV_NETCP_PDMA_RX_BASE,
+       .rx_ch_num      = CONFIG_KSNAV_NETCP_PDMA_RX_CH_NUM,
+       .tx_sched       = (u32 *)CONFIG_KSNAV_NETCP_PDMA_SCHED_BASE,
+       .rx_flows       = (void *)CONFIG_KSNAV_NETCP_PDMA_RX_FLOW_BASE,
+       .rx_flow_num    = CONFIG_KSNAV_NETCP_PDMA_RX_FLOW_NUM,
+       .rx_free_q      = CONFIG_KSNAV_NETCP_PDMA_RX_FREE_QUEUE,
+       .rx_rcv_q       = CONFIG_KSNAV_NETCP_PDMA_RX_RCV_QUEUE,
+       .tx_snd_q       = CONFIG_KSNAV_NETCP_PDMA_TX_SND_QUEUE,
+};
+#endif
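
The new file makes it straightforward to add further pktdma instances next to
the NETCP one. A sketch of what an entry for another IP could look like is
shown below; the CONFIG_KSNAV_PKTDMA_AIF guard and all CONFIG_KSNAV_AIF_PDMA_*
symbols are hypothetical and used here only for illustration:

#ifdef CONFIG_KSNAV_PKTDMA_AIF	/* hypothetical option, not added by this commit */
/* AIF Pktdma -- illustrative only; all CONFIG_KSNAV_AIF_PDMA_* values are assumptions */
struct pktdma_cfg aif_pktdma = {
	.global		= (void *)CONFIG_KSNAV_AIF_PDMA_CTRL_BASE,
	.tx_ch		= (void *)CONFIG_KSNAV_AIF_PDMA_TX_BASE,
	.tx_ch_num	= CONFIG_KSNAV_AIF_PDMA_TX_CH_NUM,
	.rx_ch		= (void *)CONFIG_KSNAV_AIF_PDMA_RX_BASE,
	.rx_ch_num	= CONFIG_KSNAV_AIF_PDMA_RX_CH_NUM,
	.tx_sched	= (u32 *)CONFIG_KSNAV_AIF_PDMA_SCHED_BASE,
	.rx_flows	= (void *)CONFIG_KSNAV_AIF_PDMA_RX_FLOW_BASE,
	.rx_flow_num	= CONFIG_KSNAV_AIF_PDMA_RX_FLOW_NUM,
	.rx_free_q	= CONFIG_KSNAV_AIF_PDMA_RX_FREE_QUEUE,
	.rx_rcv_q	= CONFIG_KSNAV_AIF_PDMA_RX_RCV_QUEUE,
	.tx_snd_q	= CONFIG_KSNAV_AIF_PDMA_TX_SND_QUEUE,
};
#endif

A matching extern declaration would go next to netcp_pktdma in keystone_nav.h.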
diff --git a/drivers/net/keystone_net.c b/drivers/net/keystone_net.c
index 1cfe6542d89e07d1600b20c01cf5863091e32f97..66532eef205753544b547f39e0d8e7f492837cb2 100644
--- a/drivers/net/keystone_net.c
+++ b/drivers/net/keystone_net.c
@@ -393,7 +393,8 @@ int32_t cpmac_drv_send(u32 *buffer, int num_bytes, int slave_port_num)
        if (num_bytes < EMAC_MIN_ETHERNET_PKT_SIZE)
                num_bytes = EMAC_MIN_ETHERNET_PKT_SIZE;
 
-       return netcp_send(buffer, num_bytes, (slave_port_num) << 16);
+       return ksnav_send(&netcp_pktdma, buffer,
+                         num_bytes, (slave_port_num) << 16);
 }
 
 /* Eth device open */
@@ -431,7 +432,7 @@ static int keystone2_eth_open(struct eth_device *dev, bd_t *bis)
                printf("ERROR: qm_init()\n");
                return -1;
        }
-       if (netcp_init(&net_rx_buffs)) {
+       if (ksnav_init(&netcp_pktdma, &net_rx_buffs)) {
                qm_close();
                printf("ERROR: netcp_init()\n");
                return -1;
@@ -456,7 +457,7 @@ static int keystone2_eth_open(struct eth_device *dev, bd_t *bis)
 
                link = keystone_get_link_status(dev);
                if (link == 0) {
-                       netcp_close();
+                       ksnav_close(&netcp_pktdma);
                        qm_close();
                        return -1;
                }
@@ -483,7 +484,7 @@ void keystone2_eth_close(struct eth_device *dev)
 
        ethss_stop();
 
-       netcp_close();
+       ksnav_close(&netcp_pktdma);
        qm_close();
 
        emac_open = 0;
@@ -530,13 +531,13 @@ static int keystone2_eth_rcv_packet(struct eth_device *dev)
        int  pkt_size;
        u32  *pkt;
 
-       hd = netcp_recv(&pkt, &pkt_size);
+       hd = ksnav_recv(&netcp_pktdma, &pkt, &pkt_size);
        if (hd == NULL)
                return 0;
 
        NetReceive((uchar *)pkt, pkt_size);
 
-       netcp_release_rxhd(hd);
+       ksnav_release_rxhd(&netcp_pktdma, hd);
 
        return pkt_size;
 }
diff --git a/include/configs/k2hk_evm.h b/include/configs/k2hk_evm.h
index c3139075bc343254c11710f47323c7b2996acfcb..d0c5ff17afdef8e6c3eb7c8e5d6fa271a7ec7b15 100644
--- a/include/configs/k2hk_evm.h
+++ b/include/configs/k2hk_evm.h
@@ -37,5 +37,6 @@
 /* Network */
 #define CONFIG_DRIVER_TI_KEYSTONE_NET
 #define CONFIG_TI_KSNAV
+#define CONFIG_KSNAV_PKTDMA_NETCP
 
 #endif /* __CONFIG_K2HK_EVM_H */