dma: keystone_nav: move keystone_nav driver to drivers/dma/
author    Khoronzhuk, Ivan <ivan.khoronzhuk@ti.com>
          Fri, 5 Sep 2014 16:02:47 +0000 (19:02 +0300)
committer Tom Rini <trini@ti.com>
          Thu, 23 Oct 2014 15:27:04 +0000 (11:27 -0400)
The keystone_nav driver is used by the drivers/net/keystone_net.c
driver to send and receive packets, but it currently lives in the
keystone arch sources, so it belongs in the drivers directory as
well. It is a separate driver that other drivers can also use to
send and receive pktdma packets.

This patch only moves the driver to the appropriate directory and
makes no functional changes.

Acked-by: Murali Karicheri <m-karicheri2@ti.com>
Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>
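
For context, a minimal sketch (not part of this commit) of how a consumer
such as drivers/net/keystone_net.c might drive the relocated API through the
new asm/ti-common/ header. The buffer-pool size, packet size, rx flow number
and the ex_-prefixed names are illustrative assumptions, not code from the
tree:

/*
 * Hypothetical usage sketch of the keystone_nav API after the move.
 * Pool sizes and the rx flow number below are illustrative only.
 */
#include <common.h>
#include <asm/ti-common/keystone_nav.h>

#define EX_PKT_NUM	32	/* illustrative number of rx buffers */
#define EX_PKT_SIZE	1536	/* illustrative rx buffer size */

static u8 ex_rx_buffs[EX_PKT_NUM * EX_PKT_SIZE] __aligned(16);

static int ex_dma_start(void)
{
	struct rx_buff_desc rx_buffs = {
		.buff_ptr	= ex_rx_buffs,
		.num_buffs	= EX_PKT_NUM,
		.buff_len	= EX_PKT_SIZE,
		.rx_flow	= 0,	/* illustrative flow number */
	};

	/* Bring up the queue manager first, then the NETCP packet DMA */
	if (qm_init() != QM_OK)
		return QM_ERR;

	return netcp_init(&rx_buffs);
}

static void ex_poll_once(u32 *tx_pkt, int tx_len)
{
	u32 *rx_pkt;
	int rx_len;
	void *hd;

	/* Queue one packet on the tx send queue */
	netcp_send(tx_pkt, tx_len, 0);

	/* Poll the rx receive queue and recycle the descriptor/buffer */
	hd = netcp_recv(&rx_pkt, &rx_len);
	if (hd)
		netcp_release_rxhd(hd);
}
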
arch/arm/cpu/armv7/keystone/Makefile
arch/arm/cpu/armv7/keystone/keystone_nav.c [deleted file]
arch/arm/include/asm/arch-keystone/keystone_nav.h [deleted file]
arch/arm/include/asm/ti-common/keystone_nav.h [new file with mode: 0644]
drivers/dma/Makefile
drivers/dma/keystone_nav.c [new file with mode: 0644]
drivers/net/keystone_net.c
include/configs/k2hk_evm.h
include/configs/ks2_evm.h

diff --git a/arch/arm/cpu/armv7/keystone/Makefile b/arch/arm/cpu/armv7/keystone/Makefile
index f8519c040355f4202e4c701c3a710c96689051d9..3d8fb70f619ea63448031dd93e5f0f5e125bb439 100644 (file)
@@ -12,7 +12,6 @@ obj-$(CONFIG_SOC_K2HK) += clock-k2hk.o
 obj-$(CONFIG_SOC_K2E) += clock-k2e.o
 obj-y  += cmd_clock.o
 obj-y  += cmd_mon.o
-obj-$(CONFIG_DRIVER_TI_KEYSTONE_NET) += keystone_nav.o
 obj-y  += msmc.o
 obj-$(CONFIG_SPL_BUILD)        += spl.o
 obj-y  += ddr3.o
diff --git a/arch/arm/cpu/armv7/keystone/keystone_nav.c b/arch/arm/cpu/armv7/keystone/keystone_nav.c
deleted file mode 100644 (file)
index 46483b1..0000000
+++ /dev/null
@@ -1,354 +0,0 @@
-/*
- * Multicore Navigator driver for TI Keystone 2 devices.
- *
- * (C) Copyright 2012-2014
- *     Texas Instruments Incorporated, <www.ti.com>
- *
- * SPDX-License-Identifier:     GPL-2.0+
- */
-#include <common.h>
-#include <asm/io.h>
-#include <asm/arch/keystone_nav.h>
-
-struct qm_config qm_memmap = {
-       .stat_cfg       = KS2_QM_QUEUE_STATUS_BASE,
-       .queue          = (void *)KS2_QM_MANAGER_QUEUES_BASE,
-       .mngr_vbusm     = KS2_QM_BASE_ADDRESS,
-       .i_lram         = KS2_QM_LINK_RAM_BASE,
-       .proxy          = (void *)KS2_QM_MANAGER_Q_PROXY_BASE,
-       .status_ram     = KS2_QM_STATUS_RAM_BASE,
-       .mngr_cfg       = (void *)KS2_QM_CONF_BASE,
-       .intd_cfg       = KS2_QM_INTD_CONF_BASE,
-       .desc_mem       = (void *)KS2_QM_DESC_SETUP_BASE,
-       .region_num     = KS2_QM_REGION_NUM,
-       .pdsp_cmd       = KS2_QM_PDSP1_CMD_BASE,
-       .pdsp_ctl       = KS2_QM_PDSP1_CTRL_BASE,
-       .pdsp_iram      = KS2_QM_PDSP1_IRAM_BASE,
-       .qpool_num      = KS2_QM_QPOOL_NUM,
-};
-
-/*
- * We are going to use only one type of descriptors - host packet
- * descriptors. We staticaly allocate memory for them here
- */
-struct qm_host_desc desc_pool[HDESC_NUM] __aligned(sizeof(struct qm_host_desc));
-
-static struct qm_config *qm_cfg;
-
-inline int num_of_desc_to_reg(int num_descr)
-{
-       int j, num;
-
-       for (j = 0, num = 32; j < 15; j++, num *= 2) {
-               if (num_descr <= num)
-                       return j;
-       }
-
-       return 15;
-}
-
-int _qm_init(struct qm_config *cfg)
-{
-       u32 j;
-
-       qm_cfg = cfg;
-
-       qm_cfg->mngr_cfg->link_ram_base0        = qm_cfg->i_lram;
-       qm_cfg->mngr_cfg->link_ram_size0        = HDESC_NUM * 8;
-       qm_cfg->mngr_cfg->link_ram_base1        = 0;
-       qm_cfg->mngr_cfg->link_ram_size1        = 0;
-       qm_cfg->mngr_cfg->link_ram_base2        = 0;
-
-       qm_cfg->desc_mem[0].base_addr = (u32)desc_pool;
-       qm_cfg->desc_mem[0].start_idx = 0;
-       qm_cfg->desc_mem[0].desc_reg_size =
-               (((sizeof(struct qm_host_desc) >> 4) - 1) << 16) |
-               num_of_desc_to_reg(HDESC_NUM);
-
-       memset(desc_pool, 0, sizeof(desc_pool));
-       for (j = 0; j < HDESC_NUM; j++)
-               qm_push(&desc_pool[j], qm_cfg->qpool_num);
-
-       return QM_OK;
-}
-
-int qm_init(void)
-{
-       return _qm_init(&qm_memmap);
-}
-
-void qm_close(void)
-{
-       u32     j;
-
-       if (qm_cfg == NULL)
-               return;
-
-       queue_close(qm_cfg->qpool_num);
-
-       qm_cfg->mngr_cfg->link_ram_base0        = 0;
-       qm_cfg->mngr_cfg->link_ram_size0        = 0;
-       qm_cfg->mngr_cfg->link_ram_base1        = 0;
-       qm_cfg->mngr_cfg->link_ram_size1        = 0;
-       qm_cfg->mngr_cfg->link_ram_base2        = 0;
-
-       for (j = 0; j < qm_cfg->region_num; j++) {
-               qm_cfg->desc_mem[j].base_addr = 0;
-               qm_cfg->desc_mem[j].start_idx = 0;
-               qm_cfg->desc_mem[j].desc_reg_size = 0;
-       }
-
-       qm_cfg = NULL;
-}
-
-void qm_push(struct qm_host_desc *hd, u32 qnum)
-{
-       u32 regd;
-
-       if (!qm_cfg)
-               return;
-
-       cpu_to_bus((u32 *)hd, sizeof(struct qm_host_desc)/4);
-       regd = (u32)hd | ((sizeof(struct qm_host_desc) >> 4) - 1);
-       writel(regd, &qm_cfg->queue[qnum].ptr_size_thresh);
-}
-
-void qm_buff_push(struct qm_host_desc *hd, u32 qnum,
-                   void *buff_ptr, u32 buff_len)
-{
-       hd->orig_buff_len = buff_len;
-       hd->buff_len = buff_len;
-       hd->orig_buff_ptr = (u32)buff_ptr;
-       hd->buff_ptr = (u32)buff_ptr;
-       qm_push(hd, qnum);
-}
-
-struct qm_host_desc *qm_pop(u32 qnum)
-{
-       u32 uhd;
-
-       if (!qm_cfg)
-               return NULL;
-
-       uhd = readl(&qm_cfg->queue[qnum].ptr_size_thresh) & ~0xf;
-       if (uhd)
-               cpu_to_bus((u32 *)uhd, sizeof(struct qm_host_desc)/4);
-
-       return (struct qm_host_desc *)uhd;
-}
-
-struct qm_host_desc *qm_pop_from_free_pool(void)
-{
-       if (!qm_cfg)
-               return NULL;
-
-       return qm_pop(qm_cfg->qpool_num);
-}
-
-void queue_close(u32 qnum)
-{
-       struct qm_host_desc *hd;
-
-       while ((hd = qm_pop(qnum)))
-               ;
-}
-
-/**
- * DMA API
- */
-struct pktdma_cfg netcp_pktdma = {
-       .global         = (void *)KS2_NETCP_PDMA_CTRL_BASE,
-       .tx_ch          = (void *)KS2_NETCP_PDMA_TX_BASE,
-       .tx_ch_num      = KS2_NETCP_PDMA_TX_CH_NUM,
-       .rx_ch          = (void *)KS2_NETCP_PDMA_RX_BASE,
-       .rx_ch_num      = KS2_NETCP_PDMA_RX_CH_NUM,
-       .tx_sched       = (u32 *)KS2_NETCP_PDMA_SCHED_BASE,
-       .rx_flows       = (void *)KS2_NETCP_PDMA_RX_FLOW_BASE,
-       .rx_flow_num    = KS2_NETCP_PDMA_RX_FLOW_NUM,
-       .rx_free_q      = KS2_NETCP_PDMA_RX_FREE_QUEUE,
-       .rx_rcv_q       = KS2_NETCP_PDMA_RX_RCV_QUEUE,
-       .tx_snd_q       = KS2_NETCP_PDMA_TX_SND_QUEUE,
-};
-
-struct pktdma_cfg *netcp;
-
-static int netcp_rx_disable(void)
-{
-       u32 j, v, k;
-
-       for (j = 0; j < netcp->rx_ch_num; j++) {
-               v = readl(&netcp->rx_ch[j].cfg_a);
-               if (!(v & CPDMA_CHAN_A_ENABLE))
-                       continue;
-
-               writel(v | CPDMA_CHAN_A_TDOWN, &netcp->rx_ch[j].cfg_a);
-               for (k = 0; k < TDOWN_TIMEOUT_COUNT; k++) {
-                       udelay(100);
-                       v = readl(&netcp->rx_ch[j].cfg_a);
-                       if (!(v & CPDMA_CHAN_A_ENABLE))
-                               continue;
-               }
-               /* TODO: teardown error on if TDOWN_TIMEOUT_COUNT is reached */
-       }
-
-       /* Clear all of the flow registers */
-       for (j = 0; j < netcp->rx_flow_num; j++) {
-               writel(0, &netcp->rx_flows[j].control);
-               writel(0, &netcp->rx_flows[j].tags);
-               writel(0, &netcp->rx_flows[j].tag_sel);
-               writel(0, &netcp->rx_flows[j].fdq_sel[0]);
-               writel(0, &netcp->rx_flows[j].fdq_sel[1]);
-               writel(0, &netcp->rx_flows[j].thresh[0]);
-               writel(0, &netcp->rx_flows[j].thresh[1]);
-               writel(0, &netcp->rx_flows[j].thresh[2]);
-       }
-
-       return QM_OK;
-}
-
-static int netcp_tx_disable(void)
-{
-       u32 j, v, k;
-
-       for (j = 0; j < netcp->tx_ch_num; j++) {
-               v = readl(&netcp->tx_ch[j].cfg_a);
-               if (!(v & CPDMA_CHAN_A_ENABLE))
-                       continue;
-
-               writel(v | CPDMA_CHAN_A_TDOWN, &netcp->tx_ch[j].cfg_a);
-               for (k = 0; k < TDOWN_TIMEOUT_COUNT; k++) {
-                       udelay(100);
-                       v = readl(&netcp->tx_ch[j].cfg_a);
-                       if (!(v & CPDMA_CHAN_A_ENABLE))
-                               continue;
-               }
-               /* TODO: teardown error on if TDOWN_TIMEOUT_COUNT is reached */
-       }
-
-       return QM_OK;
-}
-
-static int _netcp_init(struct pktdma_cfg *netcp_cfg,
-                      struct rx_buff_desc *rx_buffers)
-{
-       u32 j, v;
-       struct qm_host_desc *hd;
-       u8 *rx_ptr;
-
-       if (netcp_cfg == NULL || rx_buffers == NULL ||
-           rx_buffers->buff_ptr == NULL || qm_cfg == NULL)
-               return QM_ERR;
-
-       netcp = netcp_cfg;
-       netcp->rx_flow = rx_buffers->rx_flow;
-
-       /* init rx queue */
-       rx_ptr = rx_buffers->buff_ptr;
-
-       for (j = 0; j < rx_buffers->num_buffs; j++) {
-               hd = qm_pop(qm_cfg->qpool_num);
-               if (hd == NULL)
-                       return QM_ERR;
-
-               qm_buff_push(hd, netcp->rx_free_q,
-                            rx_ptr, rx_buffers->buff_len);
-
-               rx_ptr += rx_buffers->buff_len;
-       }
-
-       netcp_rx_disable();
-
-       /* configure rx channels */
-       v = CPDMA_REG_VAL_MAKE_RX_FLOW_A(1, 1, 0, 0, 0, 0, 0, netcp->rx_rcv_q);
-       writel(v, &netcp->rx_flows[netcp->rx_flow].control);
-       writel(0, &netcp->rx_flows[netcp->rx_flow].tags);
-       writel(0, &netcp->rx_flows[netcp->rx_flow].tag_sel);
-
-       v = CPDMA_REG_VAL_MAKE_RX_FLOW_D(0, netcp->rx_free_q, 0,
-                                        netcp->rx_free_q);
-
-       writel(v, &netcp->rx_flows[netcp->rx_flow].fdq_sel[0]);
-       writel(v, &netcp->rx_flows[netcp->rx_flow].fdq_sel[1]);
-       writel(0, &netcp->rx_flows[netcp->rx_flow].thresh[0]);
-       writel(0, &netcp->rx_flows[netcp->rx_flow].thresh[1]);
-       writel(0, &netcp->rx_flows[netcp->rx_flow].thresh[2]);
-
-       for (j = 0; j < netcp->rx_ch_num; j++)
-               writel(CPDMA_CHAN_A_ENABLE, &netcp->rx_ch[j].cfg_a);
-
-       /* configure tx channels */
-       /* Disable loopback in the tx direction */
-       writel(0, &netcp->global->emulation_control);
-
-       /* Set QM base address, only for K2x devices */
-       writel(KS2_QM_BASE_ADDRESS, &netcp->global->qm_base_addr[0]);
-
-       /* Enable all channels. The current state isn't important */
-       for (j = 0; j < netcp->tx_ch_num; j++)  {
-               writel(0, &netcp->tx_ch[j].cfg_b);
-               writel(CPDMA_CHAN_A_ENABLE, &netcp->tx_ch[j].cfg_a);
-       }
-
-       return QM_OK;
-}
-
-int netcp_init(struct rx_buff_desc *rx_buffers)
-{
-       return _netcp_init(&netcp_pktdma, rx_buffers);
-}
-
-int netcp_close(void)
-{
-       if (!netcp)
-               return QM_ERR;
-
-       netcp_tx_disable();
-       netcp_rx_disable();
-
-       queue_close(netcp->rx_free_q);
-       queue_close(netcp->rx_rcv_q);
-       queue_close(netcp->tx_snd_q);
-
-       return QM_OK;
-}
-
-int netcp_send(u32 *pkt, int num_bytes, u32 swinfo2)
-{
-       struct qm_host_desc *hd;
-
-       hd = qm_pop(qm_cfg->qpool_num);
-       if (hd == NULL)
-               return QM_ERR;
-
-       hd->desc_info   = num_bytes;
-       hd->swinfo[2]   = swinfo2;
-       hd->packet_info = qm_cfg->qpool_num;
-
-       qm_buff_push(hd, netcp->tx_snd_q, pkt, num_bytes);
-
-       return QM_OK;
-}
-
-void *netcp_recv(u32 **pkt, int *num_bytes)
-{
-       struct qm_host_desc *hd;
-
-       hd = qm_pop(netcp->rx_rcv_q);
-       if (!hd)
-               return NULL;
-
-       *pkt = (u32 *)hd->buff_ptr;
-       *num_bytes = hd->desc_info & 0x3fffff;
-
-       return hd;
-}
-
-void netcp_release_rxhd(void *hd)
-{
-       struct qm_host_desc *_hd = (struct qm_host_desc *)hd;
-
-       _hd->buff_len = _hd->orig_buff_len;
-       _hd->buff_ptr = _hd->orig_buff_ptr;
-
-       qm_push(_hd, netcp->rx_free_q);
-}
diff --git a/arch/arm/include/asm/arch-keystone/keystone_nav.h b/arch/arm/include/asm/arch-keystone/keystone_nav.h
deleted file mode 100644 (file)
index 646c2f3..0000000
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- * Multicore Navigator definitions
- *
- * (C) Copyright 2012-2014
- *     Texas Instruments Incorporated, <www.ti.com>
- *
- * SPDX-License-Identifier:     GPL-2.0+
- */
-
-#ifndef _KEYSTONE_NAV_H_
-#define _KEYSTONE_NAV_H_
-
-#include <asm/arch/hardware.h>
-#include <asm/io.h>
-
-#define QM_OK                    0
-#define QM_ERR                  -1
-#define QM_DESC_TYPE_HOST        0
-#define QM_DESC_PSINFO_IN_DESCR  0
-#define QM_DESC_DEFAULT_DESCINFO    (QM_DESC_TYPE_HOST << 30) | \
-                                       (QM_DESC_PSINFO_IN_DESCR << 22)
-
-/* Packet Info */
-#define QM_DESC_PINFO_EPIB              1
-#define QM_DESC_PINFO_RETURN_OWN        1
-#define QM_DESC_DEFAULT_PINFO           (QM_DESC_PINFO_EPIB << 31) | \
-                                       (QM_DESC_PINFO_RETURN_OWN << 15)
-
-struct qm_cfg_reg {
-       u32     revision;
-       u32     __pad1;
-       u32     divert;
-       u32     link_ram_base0;
-       u32     link_ram_size0;
-       u32     link_ram_base1;
-       u32     link_ram_size1;
-       u32     link_ram_base2;
-       u32     starvation[0];
-};
-
-struct descr_mem_setup_reg {
-       u32     base_addr;
-       u32     start_idx;
-       u32     desc_reg_size;
-       u32     _res0;
-};
-
-struct qm_reg_queue {
-       u32     entry_count;
-       u32     byte_count;
-       u32     packet_size;
-       u32     ptr_size_thresh;
-};
-
-struct qm_config {
-       /* QM module addresses */
-       u32     stat_cfg;       /* status and config            */
-       struct qm_reg_queue *queue;     /* management region    */
-       u32     mngr_vbusm;     /* management region (VBUSM)    */
-       u32     i_lram;         /* internal linking RAM         */
-       struct qm_reg_queue *proxy;
-       u32     status_ram;
-       struct qm_cfg_reg *mngr_cfg;
-                               /* Queue manager config region  */
-       u32     intd_cfg;       /* QMSS INTD config region      */
-       struct  descr_mem_setup_reg *desc_mem;
-                               /* descritor memory setup region*/
-       u32     region_num;
-       u32     pdsp_cmd;       /* PDSP1 command interface      */
-       u32     pdsp_ctl;       /* PDSP1 control registers      */
-       u32     pdsp_iram;
-       /* QM configuration parameters */
-
-       u32     qpool_num;      /* */
-};
-
-struct qm_host_desc {
-       u32 desc_info;
-       u32 tag_info;
-       u32 packet_info;
-       u32 buff_len;
-       u32 buff_ptr;
-       u32 next_bdptr;
-       u32 orig_buff_len;
-       u32 orig_buff_ptr;
-       u32 timestamp;
-       u32 swinfo[3];
-       u32 ps_data[20];
-};
-
-#define HDESC_NUM        256
-
-int    qm_init(void);
-void   qm_close(void);
-void   qm_push(struct qm_host_desc *hd, u32 qnum);
-struct qm_host_desc *qm_pop(u32 qnum);
-
-void   qm_buff_push(struct qm_host_desc *hd, u32 qnum,
-                    void *buff_ptr, u32 buff_len);
-
-struct qm_host_desc *qm_pop_from_free_pool(void);
-void   queue_close(u32 qnum);
-
-/*
- * DMA API
- */
-#define CPDMA_REG_VAL_MAKE_RX_FLOW_A(einfo, psinfo, rxerr, desc, \
-                                    psloc, sopoff, qmgr, qnum) \
-       (((einfo & 1) << 30)  | \
-        ((psinfo & 1) << 29) | \
-        ((rxerr & 1) << 28)  | \
-        ((desc & 3) << 26)   | \
-        ((psloc & 1) << 25)  | \
-        ((sopoff & 0x1ff) << 16) | \
-        ((qmgr & 3) << 12)   | \
-        ((qnum & 0xfff) << 0))
-
-#define CPDMA_REG_VAL_MAKE_RX_FLOW_D(fd0qm, fd0qnum, fd1qm, fd1qnum) \
-       (((fd0qm & 3) << 28)  | \
-        ((fd0qnum & 0xfff) << 16) | \
-        ((fd1qm & 3) << 12)  | \
-        ((fd1qnum & 0xfff) <<  0))
-
-#define CPDMA_CHAN_A_ENABLE ((u32)1 << 31)
-#define CPDMA_CHAN_A_TDOWN  (1 << 30)
-#define TDOWN_TIMEOUT_COUNT  100
-
-struct global_ctl_regs {
-       u32     revision;
-       u32     perf_control;
-       u32     emulation_control;
-       u32     priority_control;
-       u32     qm_base_addr[4];
-};
-
-struct tx_chan_regs {
-       u32     cfg_a;
-       u32     cfg_b;
-       u32     res[6];
-};
-
-struct rx_chan_regs {
-       u32     cfg_a;
-       u32     res[7];
-};
-
-struct rx_flow_regs {
-       u32     control;
-       u32     tags;
-       u32     tag_sel;
-       u32     fdq_sel[2];
-       u32     thresh[3];
-};
-
-struct pktdma_cfg {
-       struct global_ctl_regs  *global;
-       struct tx_chan_regs     *tx_ch;
-       u32                     tx_ch_num;
-       struct rx_chan_regs     *rx_ch;
-       u32                     rx_ch_num;
-       u32                     *tx_sched;
-       struct rx_flow_regs     *rx_flows;
-       u32                     rx_flow_num;
-
-       u32                     rx_free_q;
-       u32                     rx_rcv_q;
-       u32                     tx_snd_q;
-
-       u32                     rx_flow; /* flow that is used for RX */
-};
-
-/*
- * packet dma user allocates memory for rx buffers
- * and describe it in the following structure
- */
-struct rx_buff_desc {
-       u8      *buff_ptr;
-       u32     num_buffs;
-       u32     buff_len;
-       u32     rx_flow;
-};
-
-int netcp_close(void);
-int netcp_init(struct rx_buff_desc *rx_buffers);
-int netcp_send(u32 *pkt, int num_bytes, u32 swinfo2);
-void *netcp_recv(u32 **pkt, int *num_bytes);
-void netcp_release_rxhd(void *hd);
-
-#endif  /* _KEYSTONE_NAV_H_ */
diff --git a/arch/arm/include/asm/ti-common/keystone_nav.h b/arch/arm/include/asm/ti-common/keystone_nav.h
new file mode 100644 (file)
index 0000000..646c2f3
--- /dev/null
@@ -0,0 +1,189 @@
+/*
+ * Multicore Navigator definitions
+ *
+ * (C) Copyright 2012-2014
+ *     Texas Instruments Incorporated, <www.ti.com>
+ *
+ * SPDX-License-Identifier:     GPL-2.0+
+ */
+
+#ifndef _KEYSTONE_NAV_H_
+#define _KEYSTONE_NAV_H_
+
+#include <asm/arch/hardware.h>
+#include <asm/io.h>
+
+#define QM_OK                    0
+#define QM_ERR                  -1
+#define QM_DESC_TYPE_HOST        0
+#define QM_DESC_PSINFO_IN_DESCR  0
+#define QM_DESC_DEFAULT_DESCINFO    (QM_DESC_TYPE_HOST << 30) | \
+                                       (QM_DESC_PSINFO_IN_DESCR << 22)
+
+/* Packet Info */
+#define QM_DESC_PINFO_EPIB              1
+#define QM_DESC_PINFO_RETURN_OWN        1
+#define QM_DESC_DEFAULT_PINFO           (QM_DESC_PINFO_EPIB << 31) | \
+                                       (QM_DESC_PINFO_RETURN_OWN << 15)
+
+struct qm_cfg_reg {
+       u32     revision;
+       u32     __pad1;
+       u32     divert;
+       u32     link_ram_base0;
+       u32     link_ram_size0;
+       u32     link_ram_base1;
+       u32     link_ram_size1;
+       u32     link_ram_base2;
+       u32     starvation[0];
+};
+
+struct descr_mem_setup_reg {
+       u32     base_addr;
+       u32     start_idx;
+       u32     desc_reg_size;
+       u32     _res0;
+};
+
+struct qm_reg_queue {
+       u32     entry_count;
+       u32     byte_count;
+       u32     packet_size;
+       u32     ptr_size_thresh;
+};
+
+struct qm_config {
+       /* QM module addresses */
+       u32     stat_cfg;       /* status and config            */
+       struct qm_reg_queue *queue;     /* management region    */
+       u32     mngr_vbusm;     /* management region (VBUSM)    */
+       u32     i_lram;         /* internal linking RAM         */
+       struct qm_reg_queue *proxy;
+       u32     status_ram;
+       struct qm_cfg_reg *mngr_cfg;
+                               /* Queue manager config region  */
+       u32     intd_cfg;       /* QMSS INTD config region      */
+       struct  descr_mem_setup_reg *desc_mem;
+                               /* descritor memory setup region*/
+       u32     region_num;
+       u32     pdsp_cmd;       /* PDSP1 command interface      */
+       u32     pdsp_ctl;       /* PDSP1 control registers      */
+       u32     pdsp_iram;
+       /* QM configuration parameters */
+
+       u32     qpool_num;      /* */
+};
+
+struct qm_host_desc {
+       u32 desc_info;
+       u32 tag_info;
+       u32 packet_info;
+       u32 buff_len;
+       u32 buff_ptr;
+       u32 next_bdptr;
+       u32 orig_buff_len;
+       u32 orig_buff_ptr;
+       u32 timestamp;
+       u32 swinfo[3];
+       u32 ps_data[20];
+};
+
+#define HDESC_NUM        256
+
+int    qm_init(void);
+void   qm_close(void);
+void   qm_push(struct qm_host_desc *hd, u32 qnum);
+struct qm_host_desc *qm_pop(u32 qnum);
+
+void   qm_buff_push(struct qm_host_desc *hd, u32 qnum,
+                    void *buff_ptr, u32 buff_len);
+
+struct qm_host_desc *qm_pop_from_free_pool(void);
+void   queue_close(u32 qnum);
+
+/*
+ * DMA API
+ */
+#define CPDMA_REG_VAL_MAKE_RX_FLOW_A(einfo, psinfo, rxerr, desc, \
+                                    psloc, sopoff, qmgr, qnum) \
+       (((einfo & 1) << 30)  | \
+        ((psinfo & 1) << 29) | \
+        ((rxerr & 1) << 28)  | \
+        ((desc & 3) << 26)   | \
+        ((psloc & 1) << 25)  | \
+        ((sopoff & 0x1ff) << 16) | \
+        ((qmgr & 3) << 12)   | \
+        ((qnum & 0xfff) << 0))
+
+#define CPDMA_REG_VAL_MAKE_RX_FLOW_D(fd0qm, fd0qnum, fd1qm, fd1qnum) \
+       (((fd0qm & 3) << 28)  | \
+        ((fd0qnum & 0xfff) << 16) | \
+        ((fd1qm & 3) << 12)  | \
+        ((fd1qnum & 0xfff) <<  0))
+
+#define CPDMA_CHAN_A_ENABLE ((u32)1 << 31)
+#define CPDMA_CHAN_A_TDOWN  (1 << 30)
+#define TDOWN_TIMEOUT_COUNT  100
+
+struct global_ctl_regs {
+       u32     revision;
+       u32     perf_control;
+       u32     emulation_control;
+       u32     priority_control;
+       u32     qm_base_addr[4];
+};
+
+struct tx_chan_regs {
+       u32     cfg_a;
+       u32     cfg_b;
+       u32     res[6];
+};
+
+struct rx_chan_regs {
+       u32     cfg_a;
+       u32     res[7];
+};
+
+struct rx_flow_regs {
+       u32     control;
+       u32     tags;
+       u32     tag_sel;
+       u32     fdq_sel[2];
+       u32     thresh[3];
+};
+
+struct pktdma_cfg {
+       struct global_ctl_regs  *global;
+       struct tx_chan_regs     *tx_ch;
+       u32                     tx_ch_num;
+       struct rx_chan_regs     *rx_ch;
+       u32                     rx_ch_num;
+       u32                     *tx_sched;
+       struct rx_flow_regs     *rx_flows;
+       u32                     rx_flow_num;
+
+       u32                     rx_free_q;
+       u32                     rx_rcv_q;
+       u32                     tx_snd_q;
+
+       u32                     rx_flow; /* flow that is used for RX */
+};
+
+/*
+ * packet dma user allocates memory for rx buffers
+ * and describe it in the following structure
+ */
+struct rx_buff_desc {
+       u8      *buff_ptr;
+       u32     num_buffs;
+       u32     buff_len;
+       u32     rx_flow;
+};
+
+int netcp_close(void);
+int netcp_init(struct rx_buff_desc *rx_buffers);
+int netcp_send(u32 *pkt, int num_bytes, u32 swinfo2);
+void *netcp_recv(u32 **pkt, int *num_bytes);
+void netcp_release_rxhd(void *hd);
+
+#endif  /* _KEYSTONE_NAV_H_ */
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index a79c3919ddc7d452a41ca8bc8676b2c97c88ad54..519dd0d3475f083ec9a8887565651e023ec909da 100644 (file)
@@ -8,3 +8,4 @@
 obj-$(CONFIG_FSLDMAFEC) += MCD_tasksInit.o MCD_dmaApi.o MCD_tasks.o
 obj-$(CONFIG_APBH_DMA) += apbh_dma.o
 obj-$(CONFIG_FSL_DMA) += fsl_dma.o
+obj-$(CONFIG_TI_KSNAV) += keystone_nav.o
diff --git a/drivers/dma/keystone_nav.c b/drivers/dma/keystone_nav.c
new file mode 100644 (file)
index 0000000..d960fbb
--- /dev/null
@@ -0,0 +1,354 @@
+/*
+ * Multicore Navigator driver for TI Keystone 2 devices.
+ *
+ * (C) Copyright 2012-2014
+ *     Texas Instruments Incorporated, <www.ti.com>
+ *
+ * SPDX-License-Identifier:     GPL-2.0+
+ */
+#include <common.h>
+#include <asm/io.h>
+#include <asm/ti-common/keystone_nav.h>
+
+struct qm_config qm_memmap = {
+       .stat_cfg       = CONFIG_KSNAV_QM_QUEUE_STATUS_BASE,
+       .queue          = (void *)CONFIG_KSNAV_QM_MANAGER_QUEUES_BASE,
+       .mngr_vbusm     = CONFIG_KSNAV_QM_BASE_ADDRESS,
+       .i_lram         = CONFIG_KSNAV_QM_LINK_RAM_BASE,
+       .proxy          = (void *)CONFIG_KSNAV_QM_MANAGER_Q_PROXY_BASE,
+       .status_ram     = CONFIG_KSNAV_QM_STATUS_RAM_BASE,
+       .mngr_cfg       = (void *)CONFIG_KSNAV_QM_CONF_BASE,
+       .intd_cfg       = CONFIG_KSNAV_QM_INTD_CONF_BASE,
+       .desc_mem       = (void *)CONFIG_KSNAV_QM_DESC_SETUP_BASE,
+       .region_num     = CONFIG_KSNAV_QM_REGION_NUM,
+       .pdsp_cmd       = CONFIG_KSNAV_QM_PDSP1_CMD_BASE,
+       .pdsp_ctl       = CONFIG_KSNAV_QM_PDSP1_CTRL_BASE,
+       .pdsp_iram      = CONFIG_KSNAV_QM_PDSP1_IRAM_BASE,
+       .qpool_num      = CONFIG_KSNAV_QM_QPOOL_NUM,
+};
+
+/*
+ * We are going to use only one type of descriptors - host packet
+ * descriptors. We staticaly allocate memory for them here
+ */
+struct qm_host_desc desc_pool[HDESC_NUM] __aligned(sizeof(struct qm_host_desc));
+
+static struct qm_config *qm_cfg;
+
+inline int num_of_desc_to_reg(int num_descr)
+{
+       int j, num;
+
+       for (j = 0, num = 32; j < 15; j++, num *= 2) {
+               if (num_descr <= num)
+                       return j;
+       }
+
+       return 15;
+}
+
+int _qm_init(struct qm_config *cfg)
+{
+       u32 j;
+
+       qm_cfg = cfg;
+
+       qm_cfg->mngr_cfg->link_ram_base0        = qm_cfg->i_lram;
+       qm_cfg->mngr_cfg->link_ram_size0        = HDESC_NUM * 8;
+       qm_cfg->mngr_cfg->link_ram_base1        = 0;
+       qm_cfg->mngr_cfg->link_ram_size1        = 0;
+       qm_cfg->mngr_cfg->link_ram_base2        = 0;
+
+       qm_cfg->desc_mem[0].base_addr = (u32)desc_pool;
+       qm_cfg->desc_mem[0].start_idx = 0;
+       qm_cfg->desc_mem[0].desc_reg_size =
+               (((sizeof(struct qm_host_desc) >> 4) - 1) << 16) |
+               num_of_desc_to_reg(HDESC_NUM);
+
+       memset(desc_pool, 0, sizeof(desc_pool));
+       for (j = 0; j < HDESC_NUM; j++)
+               qm_push(&desc_pool[j], qm_cfg->qpool_num);
+
+       return QM_OK;
+}
+
+int qm_init(void)
+{
+       return _qm_init(&qm_memmap);
+}
+
+void qm_close(void)
+{
+       u32     j;
+
+       if (qm_cfg == NULL)
+               return;
+
+       queue_close(qm_cfg->qpool_num);
+
+       qm_cfg->mngr_cfg->link_ram_base0        = 0;
+       qm_cfg->mngr_cfg->link_ram_size0        = 0;
+       qm_cfg->mngr_cfg->link_ram_base1        = 0;
+       qm_cfg->mngr_cfg->link_ram_size1        = 0;
+       qm_cfg->mngr_cfg->link_ram_base2        = 0;
+
+       for (j = 0; j < qm_cfg->region_num; j++) {
+               qm_cfg->desc_mem[j].base_addr = 0;
+               qm_cfg->desc_mem[j].start_idx = 0;
+               qm_cfg->desc_mem[j].desc_reg_size = 0;
+       }
+
+       qm_cfg = NULL;
+}
+
+void qm_push(struct qm_host_desc *hd, u32 qnum)
+{
+       u32 regd;
+
+       if (!qm_cfg)
+               return;
+
+       cpu_to_bus((u32 *)hd, sizeof(struct qm_host_desc)/4);
+       regd = (u32)hd | ((sizeof(struct qm_host_desc) >> 4) - 1);
+       writel(regd, &qm_cfg->queue[qnum].ptr_size_thresh);
+}
+
+void qm_buff_push(struct qm_host_desc *hd, u32 qnum,
+                   void *buff_ptr, u32 buff_len)
+{
+       hd->orig_buff_len = buff_len;
+       hd->buff_len = buff_len;
+       hd->orig_buff_ptr = (u32)buff_ptr;
+       hd->buff_ptr = (u32)buff_ptr;
+       qm_push(hd, qnum);
+}
+
+struct qm_host_desc *qm_pop(u32 qnum)
+{
+       u32 uhd;
+
+       if (!qm_cfg)
+               return NULL;
+
+       uhd = readl(&qm_cfg->queue[qnum].ptr_size_thresh) & ~0xf;
+       if (uhd)
+               cpu_to_bus((u32 *)uhd, sizeof(struct qm_host_desc)/4);
+
+       return (struct qm_host_desc *)uhd;
+}
+
+struct qm_host_desc *qm_pop_from_free_pool(void)
+{
+       if (!qm_cfg)
+               return NULL;
+
+       return qm_pop(qm_cfg->qpool_num);
+}
+
+void queue_close(u32 qnum)
+{
+       struct qm_host_desc *hd;
+
+       while ((hd = qm_pop(qnum)))
+               ;
+}
+
+/**
+ * DMA API
+ */
+struct pktdma_cfg netcp_pktdma = {
+       .global         = (void *)CONFIG_KSNAV_NETCP_PDMA_CTRL_BASE,
+       .tx_ch          = (void *)CONFIG_KSNAV_NETCP_PDMA_TX_BASE,
+       .tx_ch_num      = CONFIG_KSNAV_NETCP_PDMA_TX_CH_NUM,
+       .rx_ch          = (void *)CONFIG_KSNAV_NETCP_PDMA_RX_BASE,
+       .rx_ch_num      = CONFIG_KSNAV_NETCP_PDMA_RX_CH_NUM,
+       .tx_sched       = (u32 *)CONFIG_KSNAV_NETCP_PDMA_SCHED_BASE,
+       .rx_flows       = (void *)CONFIG_KSNAV_NETCP_PDMA_RX_FLOW_BASE,
+       .rx_flow_num    = CONFIG_KSNAV_NETCP_PDMA_RX_FLOW_NUM,
+       .rx_free_q      = CONFIG_KSNAV_NETCP_PDMA_RX_FREE_QUEUE,
+       .rx_rcv_q       = CONFIG_KSNAV_NETCP_PDMA_RX_RCV_QUEUE,
+       .tx_snd_q       = CONFIG_KSNAV_NETCP_PDMA_TX_SND_QUEUE,
+};
+
+struct pktdma_cfg *netcp;
+
+static int netcp_rx_disable(void)
+{
+       u32 j, v, k;
+
+       for (j = 0; j < netcp->rx_ch_num; j++) {
+               v = readl(&netcp->rx_ch[j].cfg_a);
+               if (!(v & CPDMA_CHAN_A_ENABLE))
+                       continue;
+
+               writel(v | CPDMA_CHAN_A_TDOWN, &netcp->rx_ch[j].cfg_a);
+               for (k = 0; k < TDOWN_TIMEOUT_COUNT; k++) {
+                       udelay(100);
+                       v = readl(&netcp->rx_ch[j].cfg_a);
+                       if (!(v & CPDMA_CHAN_A_ENABLE))
+                               continue;
+               }
+               /* TODO: teardown error on if TDOWN_TIMEOUT_COUNT is reached */
+       }
+
+       /* Clear all of the flow registers */
+       for (j = 0; j < netcp->rx_flow_num; j++) {
+               writel(0, &netcp->rx_flows[j].control);
+               writel(0, &netcp->rx_flows[j].tags);
+               writel(0, &netcp->rx_flows[j].tag_sel);
+               writel(0, &netcp->rx_flows[j].fdq_sel[0]);
+               writel(0, &netcp->rx_flows[j].fdq_sel[1]);
+               writel(0, &netcp->rx_flows[j].thresh[0]);
+               writel(0, &netcp->rx_flows[j].thresh[1]);
+               writel(0, &netcp->rx_flows[j].thresh[2]);
+       }
+
+       return QM_OK;
+}
+
+static int netcp_tx_disable(void)
+{
+       u32 j, v, k;
+
+       for (j = 0; j < netcp->tx_ch_num; j++) {
+               v = readl(&netcp->tx_ch[j].cfg_a);
+               if (!(v & CPDMA_CHAN_A_ENABLE))
+                       continue;
+
+               writel(v | CPDMA_CHAN_A_TDOWN, &netcp->tx_ch[j].cfg_a);
+               for (k = 0; k < TDOWN_TIMEOUT_COUNT; k++) {
+                       udelay(100);
+                       v = readl(&netcp->tx_ch[j].cfg_a);
+                       if (!(v & CPDMA_CHAN_A_ENABLE))
+                               continue;
+               }
+               /* TODO: teardown error on if TDOWN_TIMEOUT_COUNT is reached */
+       }
+
+       return QM_OK;
+}
+
+static int _netcp_init(struct pktdma_cfg *netcp_cfg,
+                      struct rx_buff_desc *rx_buffers)
+{
+       u32 j, v;
+       struct qm_host_desc *hd;
+       u8 *rx_ptr;
+
+       if (netcp_cfg == NULL || rx_buffers == NULL ||
+           rx_buffers->buff_ptr == NULL || qm_cfg == NULL)
+               return QM_ERR;
+
+       netcp = netcp_cfg;
+       netcp->rx_flow = rx_buffers->rx_flow;
+
+       /* init rx queue */
+       rx_ptr = rx_buffers->buff_ptr;
+
+       for (j = 0; j < rx_buffers->num_buffs; j++) {
+               hd = qm_pop(qm_cfg->qpool_num);
+               if (hd == NULL)
+                       return QM_ERR;
+
+               qm_buff_push(hd, netcp->rx_free_q,
+                            rx_ptr, rx_buffers->buff_len);
+
+               rx_ptr += rx_buffers->buff_len;
+       }
+
+       netcp_rx_disable();
+
+       /* configure rx channels */
+       v = CPDMA_REG_VAL_MAKE_RX_FLOW_A(1, 1, 0, 0, 0, 0, 0, netcp->rx_rcv_q);
+       writel(v, &netcp->rx_flows[netcp->rx_flow].control);
+       writel(0, &netcp->rx_flows[netcp->rx_flow].tags);
+       writel(0, &netcp->rx_flows[netcp->rx_flow].tag_sel);
+
+       v = CPDMA_REG_VAL_MAKE_RX_FLOW_D(0, netcp->rx_free_q, 0,
+                                        netcp->rx_free_q);
+
+       writel(v, &netcp->rx_flows[netcp->rx_flow].fdq_sel[0]);
+       writel(v, &netcp->rx_flows[netcp->rx_flow].fdq_sel[1]);
+       writel(0, &netcp->rx_flows[netcp->rx_flow].thresh[0]);
+       writel(0, &netcp->rx_flows[netcp->rx_flow].thresh[1]);
+       writel(0, &netcp->rx_flows[netcp->rx_flow].thresh[2]);
+
+       for (j = 0; j < netcp->rx_ch_num; j++)
+               writel(CPDMA_CHAN_A_ENABLE, &netcp->rx_ch[j].cfg_a);
+
+       /* configure tx channels */
+       /* Disable loopback in the tx direction */
+       writel(0, &netcp->global->emulation_control);
+
+       /* Set QM base address, only for K2x devices */
+       writel(CONFIG_KSNAV_QM_BASE_ADDRESS, &netcp->global->qm_base_addr[0]);
+
+       /* Enable all channels. The current state isn't important */
+       for (j = 0; j < netcp->tx_ch_num; j++)  {
+               writel(0, &netcp->tx_ch[j].cfg_b);
+               writel(CPDMA_CHAN_A_ENABLE, &netcp->tx_ch[j].cfg_a);
+       }
+
+       return QM_OK;
+}
+
+int netcp_init(struct rx_buff_desc *rx_buffers)
+{
+       return _netcp_init(&netcp_pktdma, rx_buffers);
+}
+
+int netcp_close(void)
+{
+       if (!netcp)
+               return QM_ERR;
+
+       netcp_tx_disable();
+       netcp_rx_disable();
+
+       queue_close(netcp->rx_free_q);
+       queue_close(netcp->rx_rcv_q);
+       queue_close(netcp->tx_snd_q);
+
+       return QM_OK;
+}
+
+int netcp_send(u32 *pkt, int num_bytes, u32 swinfo2)
+{
+       struct qm_host_desc *hd;
+
+       hd = qm_pop(qm_cfg->qpool_num);
+       if (hd == NULL)
+               return QM_ERR;
+
+       hd->desc_info   = num_bytes;
+       hd->swinfo[2]   = swinfo2;
+       hd->packet_info = qm_cfg->qpool_num;
+
+       qm_buff_push(hd, netcp->tx_snd_q, pkt, num_bytes);
+
+       return QM_OK;
+}
+
+void *netcp_recv(u32 **pkt, int *num_bytes)
+{
+       struct qm_host_desc *hd;
+
+       hd = qm_pop(netcp->rx_rcv_q);
+       if (!hd)
+               return NULL;
+
+       *pkt = (u32 *)hd->buff_ptr;
+       *num_bytes = hd->desc_info & 0x3fffff;
+
+       return hd;
+}
+
+void netcp_release_rxhd(void *hd)
+{
+       struct qm_host_desc *_hd = (struct qm_host_desc *)hd;
+
+       _hd->buff_len = _hd->orig_buff_len;
+       _hd->buff_ptr = _hd->orig_buff_ptr;
+
+       qm_push(_hd, netcp->rx_free_q);
+}
diff --git a/drivers/net/keystone_net.c b/drivers/net/keystone_net.c
index d22b722bc53de033f162171eee3da22b766a1db7..1cfe6542d89e07d1600b20c01cf5863091e32f97 100644 (file)
@@ -14,7 +14,7 @@
 #include <malloc.h>
 #include <asm/arch/emac_defs.h>
 #include <asm/arch/psc_defs.h>
-#include <asm/arch/keystone_nav.h>
+#include <asm/ti-common/keystone_nav.h>
 
 unsigned int emac_dbg;
 
diff --git a/include/configs/k2hk_evm.h b/include/configs/k2hk_evm.h
index 8aa616da0e0d7ce717440e4eaa46fb04f71b891a..c3139075bc343254c11710f47323c7b2996acfcb 100644 (file)
@@ -36,5 +36,6 @@
 
 /* Network */
 #define CONFIG_DRIVER_TI_KEYSTONE_NET
+#define CONFIG_TI_KSNAV
 
 #endif /* __CONFIG_K2HK_EVM_H */
diff --git a/include/configs/ks2_evm.h b/include/configs/ks2_evm.h
index 51926f721f17e6305d408dfe2e08d1816e97a5cd..60f8ffd5623a70ba574d67de1ea5161a3e128fef 100644 (file)
 #define CONFIG_SYS_SGMII_LINERATE_MHZ  1250
 #define CONFIG_SYS_SGMII_RATESCALE     2
 
+/* Keyston Navigator Configuration */
+#define CONFIG_KSNAV_QM_BASE_ADDRESS           KS2_QM_BASE_ADDRESS
+#define CONFIG_KSNAV_QM_CONF_BASE              KS2_QM_CONF_BASE
+#define CONFIG_KSNAV_QM_DESC_SETUP_BASE                KS2_QM_DESC_SETUP_BASE
+#define CONFIG_KSNAV_QM_STATUS_RAM_BASE                KS2_QM_STATUS_RAM_BASE
+#define CONFIG_KSNAV_QM_INTD_CONF_BASE         KS2_QM_INTD_CONF_BASE
+#define CONFIG_KSNAV_QM_PDSP1_CMD_BASE         KS2_QM_PDSP1_CMD_BASE
+#define CONFIG_KSNAV_QM_PDSP1_CTRL_BASE                KS2_QM_PDSP1_CTRL_BASE
+#define CONFIG_KSNAV_QM_PDSP1_IRAM_BASE                KS2_QM_PDSP1_IRAM_BASE
+#define CONFIG_KSNAV_QM_MANAGER_QUEUES_BASE    KS2_QM_MANAGER_QUEUES_BASE
+#define CONFIG_KSNAV_QM_MANAGER_Q_PROXY_BASE   KS2_QM_MANAGER_Q_PROXY_BASE
+#define CONFIG_KSNAV_QM_QUEUE_STATUS_BASE      KS2_QM_QUEUE_STATUS_BASE
+#define CONFIG_KSNAV_QM_LINK_RAM_BASE          KS2_QM_LINK_RAM_BASE
+#define CONFIG_KSNAV_QM_REGION_NUM             KS2_QM_REGION_NUM
+#define CONFIG_KSNAV_QM_QPOOL_NUM              KS2_QM_QPOOL_NUM
+
+/* NETCP pktdma */
+#define CONFIG_KSNAV_NETCP_PDMA_CTRL_BASE      KS2_NETCP_PDMA_CTRL_BASE
+#define CONFIG_KSNAV_NETCP_PDMA_TX_BASE                KS2_NETCP_PDMA_TX_BASE
+#define CONFIG_KSNAV_NETCP_PDMA_TX_CH_NUM      KS2_NETCP_PDMA_TX_CH_NUM
+#define CONFIG_KSNAV_NETCP_PDMA_RX_BASE                KS2_NETCP_PDMA_RX_BASE
+#define CONFIG_KSNAV_NETCP_PDMA_RX_CH_NUM      KS2_NETCP_PDMA_RX_CH_NUM
+#define CONFIG_KSNAV_NETCP_PDMA_SCHED_BASE     KS2_NETCP_PDMA_SCHED_BASE
+#define CONFIG_KSNAV_NETCP_PDMA_RX_FLOW_BASE   KS2_NETCP_PDMA_RX_FLOW_BASE
+#define CONFIG_KSNAV_NETCP_PDMA_RX_FLOW_NUM    KS2_NETCP_PDMA_RX_FLOW_NUM
+#define CONFIG_KSNAV_NETCP_PDMA_RX_FREE_QUEUE  KS2_NETCP_PDMA_RX_FREE_QUEUE
+#define CONFIG_KSNAV_NETCP_PDMA_RX_RCV_QUEUE   KS2_NETCP_PDMA_RX_RCV_QUEUE
+#define CONFIG_KSNAV_NETCP_PDMA_TX_SND_QUEUE   KS2_NETCP_PDMA_TX_SND_QUEUE
+
 /* AEMIF */
 #define CONFIG_TI_AEMIF
 #define CONFIG_AEMIF_CNTRL_BASE                KS2_AEMIF_CNTRL_BASE