int error = 0;
#ifdef CONFIG_FSL_MC_ENET
- error = mc_init(bis);
+ error = fsl_mc_ldpaa_init(bis);
#endif
return error;
}
#define _ASM_ARMV8_FSL_LSCH3_CONFIG_
#include <fsl_ddrc_version.h>
+
+#define CONFIG_SYS_PAGE_SIZE 0x10000
#define CONFIG_MP
#define CONFIG_SYS_FSL_OCRAM_BASE 0x18000000 /* initial RAM */
/* Link Definitions */
#ifdef CONFIG_FSL_MC_ENET
fdt_fixup_board_enet(blob);
+ fsl_mc_ldpaa_exit(bd);
#endif
return 0;
# Layerscape MC driver
obj-y += mc.o \
mc_sys.o \
- dpmng.o
+ dpmng.o \
+ dprc.o \
+ dpbp.o \
+ dpni.o
+obj-y += dpio/
--- /dev/null
+/*
+ * Freescale Layerscape MC I/O wrapper
+ *
+ * Copyright (C) 2013-2015 Freescale Semiconductor, Inc.
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+#include <fsl-mc/fsl_mc_sys.h>
+#include <fsl-mc/fsl_mc_cmd.h>
+#include <fsl-mc/fsl_dpbp.h>
+
+int dpbp_open(struct fsl_mc_io *mc_io, int dpbp_id, uint16_t *token)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_OPEN,
+ MC_CMD_PRI_LOW, 0);
+ DPBP_CMD_OPEN(cmd, dpbp_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+
+ return err;
+}
+
+int dpbp_close(struct fsl_mc_io *mc_io, uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLOSE, MC_CMD_PRI_HIGH,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpbp_enable(struct fsl_mc_io *mc_io, uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_ENABLE, MC_CMD_PRI_LOW,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpbp_disable(struct fsl_mc_io *mc_io, uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_DISABLE,
+ MC_CMD_PRI_LOW, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpbp_reset(struct fsl_mc_io *mc_io, uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_RESET,
+ MC_CMD_PRI_LOW, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpbp_get_attributes(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ struct dpbp_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_ATTR,
+ MC_CMD_PRI_LOW, token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPBP_RSP_GET_ATTRIBUTES(cmd, attr);
+
+ return 0;
+}
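/*
 * Illustrative sketch (not part of this patch): one possible way a caller
 * could drive a DPBP object through the wrappers above. "mc_io" is assumed to
 * be an already-initialised MC portal and "dpbp_id" a known object id; the
 * function name is hypothetical.
 */
static int example_dpbp_bringup(struct fsl_mc_io *mc_io, int dpbp_id)
{
	struct dpbp_attr attr;
	uint16_t token;
	int err;

	err = dpbp_open(mc_io, dpbp_id, &token);	/* obtain an authentication token */
	if (err)
		return err;

	err = dpbp_enable(mc_io, token);		/* activate the buffer pool */
	if (err)
		goto out;

	err = dpbp_get_attributes(mc_io, token, &attr);	/* e.g. to learn the bpid */
out:
	dpbp_close(mc_io, token);
	return err;
}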
--- /dev/null
+#
+# Copyright 2014 Freescale Semiconductor, Inc.
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+# Layerscape MC DPIO driver
+obj-y += dpio.o \
+ qbman_portal.o
--- /dev/null
+/*
+ * Copyright (C) 2013-2015 Freescale Semiconductor
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <fsl-mc/fsl_mc_sys.h>
+#include <fsl-mc/fsl_mc_cmd.h>
+#include <fsl-mc/fsl_dpio.h>
+
+int dpio_open(struct fsl_mc_io *mc_io, int dpio_id, uint16_t *token)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_OPEN,
+ MC_CMD_PRI_LOW, 0);
+ DPIO_CMD_OPEN(cmd, dpio_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+
+ return 0;
+}
+
+int dpio_close(struct fsl_mc_io *mc_io, uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLOSE,
+ MC_CMD_PRI_HIGH, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpio_enable(struct fsl_mc_io *mc_io, uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ENABLE,
+ MC_CMD_PRI_LOW, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpio_disable(struct fsl_mc_io *mc_io, uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DISABLE,
+ MC_CMD_PRI_LOW,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpio_reset(struct fsl_mc_io *mc_io, uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_RESET,
+ MC_CMD_PRI_LOW, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpio_get_attributes(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ struct dpio_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_ATTR,
+ MC_CMD_PRI_LOW,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPIO_RSP_GET_ATTR(cmd, attr);
+
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright (C) 2014 Freescale Semiconductor
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include "qbman_portal.h"
+
+/* QBMan portal management command codes */
+#define QBMAN_MC_ACQUIRE 0x30
+#define QBMAN_WQCHAN_CONFIGURE 0x46
+
+/* CINH register offsets */
+#define QBMAN_CINH_SWP_EQAR 0x8c0
+#define QBMAN_CINH_SWP_DCAP 0xac0
+#define QBMAN_CINH_SWP_SDQCR 0xb00
+#define QBMAN_CINH_SWP_RAR 0xcc0
+
+/* CENA register offsets */
+#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6))
+#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6))
+#define QBMAN_CENA_SWP_RCR(n) (0x400 + ((uint32_t)(n) << 6))
+#define QBMAN_CENA_SWP_CR 0x600
+#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((uint32_t)(vb) >> 1))
+#define QBMAN_CENA_SWP_VDQCR 0x780
+
+/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
+#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0xff) >> 6)
+
+/*******************************/
+/* Pre-defined attribute codes */
+/*******************************/
+
+struct qb_attr_code code_generic_verb = QB_CODE(0, 0, 7);
+struct qb_attr_code code_generic_rslt = QB_CODE(0, 8, 8);
+
+/*************************/
+/* SDQCR attribute codes */
+/*************************/
+
+/* we put these here because at least some of them are required by
+ * qbman_swp_init() */
+struct qb_attr_code code_sdqcr_dct = QB_CODE(0, 24, 2);
+struct qb_attr_code code_sdqcr_fc = QB_CODE(0, 29, 1);
+struct qb_attr_code code_sdqcr_tok = QB_CODE(0, 16, 8);
+#define CODE_SDQCR_DQSRC(n) QB_CODE(0, n, 1)
+enum qbman_sdqcr_dct {
+ qbman_sdqcr_dct_null = 0,
+ qbman_sdqcr_dct_prio_ics,
+ qbman_sdqcr_dct_active_ics,
+ qbman_sdqcr_dct_active
+};
+enum qbman_sdqcr_fc {
+ qbman_sdqcr_fc_one = 0,
+ qbman_sdqcr_fc_up_to_3 = 1
+};
+
+/*********************************/
+/* Portal constructor/destructor */
+/*********************************/
+
+/* Software portals should always be in the power-on state when we initialise,
+ * due to the CCSR-based portal reset functionality that MC has. */
+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
+{
+ int ret;
+ struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);
+
+ if (!p)
+ return NULL;
+ p->desc = d;
+#ifdef QBMAN_CHECKING
+ p->mc.check = swp_mc_can_start;
+#endif
+ p->mc.valid_bit = QB_VALID_BIT;
+ p->sdq = 0;
+ qb_attr_code_encode(&code_sdqcr_dct, &p->sdq, qbman_sdqcr_dct_prio_ics);
+ qb_attr_code_encode(&code_sdqcr_fc, &p->sdq, qbman_sdqcr_fc_up_to_3);
+ qb_attr_code_encode(&code_sdqcr_tok, &p->sdq, 0xbb);
+ p->vdq.busy = 0; /* TODO: convert to atomic_t */
+ p->vdq.valid_bit = QB_VALID_BIT;
+ p->dqrr.next_idx = 0;
+ p->dqrr.valid_bit = QB_VALID_BIT;
+ ret = qbman_swp_sys_init(&p->sys, d);
+ if (ret) {
+ free(p);
+ printf("qbman_swp_sys_init() failed %d\n", ret);
+ return NULL;
+ }
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, p->sdq);
+ return p;
+}
+
+/***********************/
+/* Management commands */
+/***********************/
+
+/*
+ * Internal code common to all types of management commands.
+ */
+
+void *qbman_swp_mc_start(struct qbman_swp *p)
+{
+ void *ret;
+#ifdef QBMAN_CHECKING
+ BUG_ON(p->mc.check != swp_mc_can_start);
+#endif
+ ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
+#ifdef QBMAN_CHECKING
+ if (!ret)
+ p->mc.check = swp_mc_can_submit;
+#endif
+ return ret;
+}
+
+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb)
+{
+ uint32_t *v = cmd;
+#ifdef QBMAN_CHECKING
+ BUG_ON(p->mc.check != swp_mc_can_submit);
+#endif
+ lwsync();
+ /* TBD: "|=" is going to hurt performance. Need to move as many fields
+ * out of word zero, and for those that remain, the "OR" needs to occur
+ * at the caller side. This debug check helps to catch cases where the
+ * caller wants to OR but has forgotten to do so. */
+ BUG_ON((*v & cmd_verb) != *v);
+ *v = cmd_verb | p->mc.valid_bit;
+ qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
+ /* TODO: add prefetch support for GPP */
+#ifdef QBMAN_CHECKING
+ p->mc.check = swp_mc_can_poll;
+#endif
+}
+
+void *qbman_swp_mc_result(struct qbman_swp *p)
+{
+ uint32_t *ret, verb;
+#ifdef QBMAN_CHECKING
+ BUG_ON(p->mc.check != swp_mc_can_poll);
+#endif
+ ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
+ /* Remove the valid-bit - command completed iff the rest is non-zero */
+ verb = ret[0] & ~QB_VALID_BIT;
+ if (!verb)
+ return NULL;
+#ifdef QBMAN_CHECKING
+ p->mc.check = swp_mc_can_start;
+#endif
+ p->mc.valid_bit ^= QB_VALID_BIT;
+ return ret;
+}
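/*
 * Illustrative sketch (not part of this patch): the start/complete pattern the
 * helpers above are designed for; qbman_swp_acquire() further down is the real
 * user. The function name is hypothetical and the field encoding is
 * command-specific, so it is only indicated by a comment here.
 */
static inline int example_mc_command(struct qbman_swp *s)
{
	uint32_t *p = qbman_swp_mc_start(s);	/* claim the CR cacheline */

	if (!p)
		return -EBUSY;
	/* ...encode command-specific fields into p[] here... */
	p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_MC_ACQUIRE);
	if (qb_attr_code_decode(&code_generic_rslt, p) != QBMAN_MC_RSLT_OK)
		return -EIO;
	return 0;
}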
+
+/***********/
+/* Enqueue */
+/***********/
+
+/* These should be const, eventually */
+static struct qb_attr_code code_eq_cmd = QB_CODE(0, 0, 2);
+static struct qb_attr_code code_eq_orp_en = QB_CODE(0, 2, 1);
+static struct qb_attr_code code_eq_tgt_id = QB_CODE(2, 0, 24);
+/* static struct qb_attr_code code_eq_tag = QB_CODE(3, 0, 32); */
+static struct qb_attr_code code_eq_qd_en = QB_CODE(0, 4, 1);
+static struct qb_attr_code code_eq_qd_bin = QB_CODE(4, 0, 16);
+static struct qb_attr_code code_eq_qd_pri = QB_CODE(4, 16, 4);
+static struct qb_attr_code code_eq_rsp_stash = QB_CODE(5, 16, 1);
+static struct qb_attr_code code_eq_rsp_lo = QB_CODE(6, 0, 32);
+static struct qb_attr_code code_eq_rsp_hi = QB_CODE(7, 0, 32);
+
+enum qbman_eq_cmd_e {
+ /* No enqueue, primarily for plugging ORP gaps for dropped frames */
+ qbman_eq_cmd_empty,
+ /* DMA an enqueue response once complete */
+ qbman_eq_cmd_respond,
+ /* DMA an enqueue response only if the enqueue fails */
+ qbman_eq_cmd_respond_reject
+};
+
+void qbman_eq_desc_clear(struct qbman_eq_desc *d)
+{
+ memset(d, 0, sizeof(*d));
+}
+
+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
+{
+ uint32_t *cl = qb_cl(d);
+
+ qb_attr_code_encode(&code_eq_orp_en, cl, 0);
+ qb_attr_code_encode(&code_eq_cmd, cl,
+ respond_success ? qbman_eq_cmd_respond :
+ qbman_eq_cmd_respond_reject);
+}
+
+void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
+ dma_addr_t storage_phys,
+ int stash)
+{
+ uint32_t *cl = qb_cl(d);
+
+ qb_attr_code_encode(&code_eq_rsp_lo, cl, lower32(storage_phys));
+ qb_attr_code_encode(&code_eq_rsp_hi, cl, upper32(storage_phys));
+ qb_attr_code_encode(&code_eq_rsp_stash, cl, !!stash);
+}
+
+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
+ uint32_t qd_bin, uint32_t qd_prio)
+{
+ uint32_t *cl = qb_cl(d);
+
+ qb_attr_code_encode(&code_eq_qd_en, cl, 1);
+ qb_attr_code_encode(&code_eq_tgt_id, cl, qdid);
+ qb_attr_code_encode(&code_eq_qd_bin, cl, qd_bin);
+ qb_attr_code_encode(&code_eq_qd_pri, cl, qd_prio);
+}
+
+#define EQAR_IDX(eqar) ((eqar) & 0x7)
+#define EQAR_VB(eqar) ((eqar) & 0x80)
+#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
+
+int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd)
+{
+ uint32_t *p;
+ const uint32_t *cl = qb_cl(d);
+ uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);
+ debug("EQAR=%08x\n", eqar);
+ if (!EQAR_SUCCESS(eqar))
+ return -EBUSY;
+ p = qbman_cena_write_start(&s->sys,
+ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
+ word_copy(&p[1], &cl[1], 7);
+ word_copy(&p[8], fd, sizeof(*fd) >> 2);
+ lwsync();
+ /* Set the verb byte, have to substitute in the valid-bit */
+ p[0] = cl[0] | EQAR_VB(eqar);
+ qbman_cena_write_complete(&s->sys,
+ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)),
+ p);
+ return 0;
+}
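/*
 * Illustrative sketch (not part of this patch): enqueuing one frame via a
 * queuing destination. "swp", "fd", "qdid", "qd_bin" and "qd_prio" are assumed
 * to come from the caller; the function name is hypothetical.
 */
static int example_enqueue(struct qbman_swp *swp, const struct qbman_fd *fd,
			   uint32_t qdid, uint32_t qd_bin, uint32_t qd_prio)
{
	struct qbman_eq_desc ed;

	qbman_eq_desc_clear(&ed);		/* start from a zeroed descriptor */
	qbman_eq_desc_set_no_orp(&ed, 0);	/* no ORP; respond only on rejection */
	qbman_eq_desc_set_qd(&ed, qdid, qd_bin, qd_prio);

	return qbman_swp_enqueue(swp, &ed, fd);	/* -EBUSY if EQCR has no free slot */
}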
+
+/***************************/
+/* Volatile (pull) dequeue */
+/***************************/
+
+/* These should be const, eventually */
+static struct qb_attr_code code_pull_dct = QB_CODE(0, 0, 2);
+static struct qb_attr_code code_pull_dt = QB_CODE(0, 2, 2);
+static struct qb_attr_code code_pull_rls = QB_CODE(0, 4, 1);
+static struct qb_attr_code code_pull_stash = QB_CODE(0, 5, 1);
+static struct qb_attr_code code_pull_numframes = QB_CODE(0, 8, 4);
+static struct qb_attr_code code_pull_token = QB_CODE(0, 16, 8);
+static struct qb_attr_code code_pull_dqsource = QB_CODE(1, 0, 24);
+static struct qb_attr_code code_pull_rsp_lo = QB_CODE(2, 0, 32);
+static struct qb_attr_code code_pull_rsp_hi = QB_CODE(3, 0, 32);
+
+enum qb_pull_dt_e {
+ qb_pull_dt_channel,
+ qb_pull_dt_workqueue,
+ qb_pull_dt_framequeue
+};
+
+void qbman_pull_desc_clear(struct qbman_pull_desc *d)
+{
+ memset(d, 0, sizeof(*d));
+}
+
+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
+ struct ldpaa_dq *storage,
+ dma_addr_t storage_phys,
+ int stash)
+{
+ uint32_t *cl = qb_cl(d);
+
+ /* Squiggle the pointer 'storage' into the extra 2 words of the
+ * descriptor (which aren't copied to the hw command) */
+ *(void **)&cl[4] = storage;
+ if (!storage) {
+ qb_attr_code_encode(&code_pull_rls, cl, 0);
+ return;
+ }
+ qb_attr_code_encode(&code_pull_rls, cl, 1);
+ qb_attr_code_encode(&code_pull_stash, cl, !!stash);
+ qb_attr_code_encode(&code_pull_rsp_lo, cl, lower32(storage_phys));
+ qb_attr_code_encode(&code_pull_rsp_hi, cl, upper32(storage_phys));
+}
+
+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, uint8_t numframes)
+{
+ uint32_t *cl = qb_cl(d);
+
+ BUG_ON(!numframes || (numframes > 16));
+ qb_attr_code_encode(&code_pull_numframes, cl,
+ (uint32_t)(numframes - 1));
+}
+
+void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
+{
+ uint32_t *cl = qb_cl(d);
+
+ qb_attr_code_encode(&code_pull_token, cl, token);
+}
+
+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
+{
+ uint32_t *cl = qb_cl(d);
+
+ qb_attr_code_encode(&code_pull_dct, cl, 1);
+ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_framequeue);
+ qb_attr_code_encode(&code_pull_dqsource, cl, fqid);
+}
+
+int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
+{
+ uint32_t *p;
+ uint32_t *cl = qb_cl(d);
+
+ /* TODO: convert to atomic_t */
+ if (s->vdq.busy)
+ return -EBUSY;
+ s->vdq.busy = 1;
+ s->vdq.storage = *(void **)&cl[4];
+ s->vdq.token = qb_attr_code_decode(&code_pull_token, cl);
+ p = qbman_cena_write_start(&s->sys, QBMAN_CENA_SWP_VDQCR);
+ word_copy(&p[1], &cl[1], 3);
+ lwsync();
+ /* Set the verb byte, have to substitute in the valid-bit */
+ p[0] = cl[0] | s->vdq.valid_bit;
+ s->vdq.valid_bit ^= QB_VALID_BIT;
+ qbman_cena_write_complete(&s->sys, QBMAN_CENA_SWP_VDQCR, p);
+ return 0;
+}
+
+/****************/
+/* Polling DQRR */
+/****************/
+
+static struct qb_attr_code code_dqrr_verb = QB_CODE(0, 0, 8);
+static struct qb_attr_code code_dqrr_response = QB_CODE(0, 0, 7);
+static struct qb_attr_code code_dqrr_stat = QB_CODE(0, 8, 8);
+
+#define QBMAN_DQRR_RESPONSE_DQ 0x60
+#define QBMAN_DQRR_RESPONSE_FQRN 0x21
+#define QBMAN_DQRR_RESPONSE_FQRNI 0x22
+#define QBMAN_DQRR_RESPONSE_FQPN 0x24
+#define QBMAN_DQRR_RESPONSE_FQDAN 0x25
+#define QBMAN_DQRR_RESPONSE_CDAN 0x26
+#define QBMAN_DQRR_RESPONSE_CSCN_MEM 0x27
+#define QBMAN_DQRR_RESPONSE_CGCU 0x28
+#define QBMAN_DQRR_RESPONSE_BPSCN 0x29
+#define QBMAN_DQRR_RESPONSE_CSCN_WQ 0x2a
+
+/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
+ * only once, so repeated calls can return a sequence of DQRR entries, without
+ * requiring they be consumed immediately or in any particular order. */
+const struct ldpaa_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
+{
+ uint32_t verb;
+ uint32_t response_verb;
+ const struct ldpaa_dq *dq = qbman_cena_read(&s->sys,
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+ const uint32_t *p = qb_cl(dq);
+
+ verb = qb_attr_code_decode(&code_dqrr_verb, p);
+ /* If the valid-bit isn't of the expected polarity, nothing there */
+ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
+ qbman_cena_invalidate_prefetch(&s->sys,
+ QBMAN_CENA_SWP_DQRR(
+ s->dqrr.next_idx));
+ return NULL;
+ }
+ /* There's something there. Move "next_idx" attention to the next ring
+ * entry (and prefetch it) before returning what we found. */
+ s->dqrr.next_idx++;
+ s->dqrr.next_idx &= 3; /* Wrap around at 4 */
+ /* TODO: it's possible to do all this without conditionals, optimise it
+ * later. */
+ if (!s->dqrr.next_idx)
+ s->dqrr.valid_bit ^= QB_VALID_BIT;
+ /* VDQCR "no longer busy" hook - if VDQCR shows "busy" and this is a
+ * VDQCR result, mark it as non-busy. */
+ if (s->vdq.busy) {
+ uint32_t flags = ldpaa_dq_flags(dq);
+
+ response_verb = qb_attr_code_decode(&code_dqrr_response, &verb);
+ if ((response_verb == QBMAN_DQRR_RESPONSE_DQ) &&
+ (flags & LDPAA_DQ_STAT_VOLATILE))
+ s->vdq.busy = 0;
+ }
+ qbman_cena_invalidate_prefetch(&s->sys,
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+ return dq;
+}
+
+/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
+void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct ldpaa_dq *dq)
+{
+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
+}
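/*
 * Illustrative sketch (not part of this patch): draining whatever currently
 * sits in DQRR. Each entry returned by qbman_swp_dqrr_next() is handed back
 * with qbman_swp_dqrr_consume() once processed. The function name and the
 * "hand fd to a frame handler" step are placeholders.
 */
static void example_drain_dqrr(struct qbman_swp *swp)
{
	const struct ldpaa_dq *dq;

	while ((dq = qbman_swp_dqrr_next(swp)) != NULL) {
		if (qbman_dq_entry_is_DQ(dq)) {
			const struct dpaa_fd *fd = ldpaa_dq_fd(dq);

			/* hand "fd" to the caller's frame handler here */
			(void)fd;
		}
		qbman_swp_dqrr_consume(swp, dq);
	}
}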
+
+/*********************************/
+/* Polling user-provided storage */
+/*********************************/
+
+void qbman_dq_entry_set_oldtoken(struct ldpaa_dq *dq,
+ unsigned int num_entries,
+ uint8_t oldtoken)
+{
+ memset(dq, oldtoken, num_entries * sizeof(*dq));
+}
+
+int qbman_dq_entry_has_newtoken(struct qbman_swp *s,
+ const struct ldpaa_dq *dq,
+ uint8_t newtoken)
+{
+ /* To avoid converting the little-endian DQ entry to host-endian prior
+ * to us knowing whether there is a valid entry or not (and run the
+ * risk of corrupting the incoming hardware LE write), we detect in
+ * hardware endianness rather than host. This means we need a different
+ * "code" depending on whether we are BE or LE in software, which is
+ * where DQRR_TOK_OFFSET comes in... */
+ static struct qb_attr_code code_dqrr_tok_detect =
+ QB_CODE(0, DQRR_TOK_OFFSET, 8);
+ /* The user trying to poll for a result treats "dq" as const. It is
+ * however the same address that was provided to us non-const in the
+ * first place, for directing hardware DMA to. So we can cast away the
+ * const because it is mutable from our perspective. */
+ uint32_t *p = qb_cl((struct ldpaa_dq *)dq);
+ uint32_t token;
+
+ token = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]);
+ if (token != newtoken)
+ return 0;
+
+ /* Only now do we convert from hardware to host endianness. Also, as we
+ * are returning success, the user has promised not to call us again, so
+ * there's no risk of us converting the endianness twice... */
+ make_le32_n(p, 16);
+
+ /* VDQCR "no longer busy" hook - not quite the same as DQRR, because the
+ * fact "VDQCR" shows busy doesn't mean that the result we're looking at
+ * is from the same command. Eg. we may be looking at our 10th dequeue
+ * result from our first VDQCR command, yet the second dequeue command
+ * could have been kicked off already, after seeing the 1st result. Ie.
+ * the result we're looking at is not necessarily proof that we can
+ * reset "busy". We instead base the decision on whether the current
+ * result is sitting at the first 'storage' location of the busy
+ * command. */
+ if (s->vdq.busy && (s->vdq.storage == dq))
+ s->vdq.busy = 0;
+ return 1;
+}
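/*
 * Illustrative sketch (not part of this patch): a single-frame volatile
 * dequeue into caller-provided storage, polled with the old/new token scheme
 * above. "storage"/"storage_phys" and "fqid" are assumed to come from the
 * caller; 0xaa and 0xbb are arbitrary example token values and the function
 * name is hypothetical.
 */
static int example_pull_one(struct qbman_swp *swp, uint32_t fqid,
			    struct ldpaa_dq *storage, dma_addr_t storage_phys)
{
	struct qbman_pull_desc pd;
	int err;

	qbman_dq_entry_set_oldtoken(storage, 1, 0xaa);	/* pre-mark the result slot */
	qbman_pull_desc_clear(&pd);
	qbman_pull_desc_set_numframes(&pd, 1);
	qbman_pull_desc_set_token(&pd, 0xbb);
	qbman_pull_desc_set_fq(&pd, fqid);
	qbman_pull_desc_set_storage(&pd, storage, storage_phys, 0);

	err = qbman_swp_pull(swp, &pd);
	if (err)
		return err;

	/* busy-wait until the new token shows up in the storage entry */
	while (!qbman_dq_entry_has_newtoken(swp, storage, 0xbb))
		;

	return 0;
}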
+
+/********************************/
+/* Categorising dequeue entries */
+/********************************/
+
+static inline int __qbman_dq_entry_is_x(const struct ldpaa_dq *dq, uint32_t x)
+{
+ const uint32_t *p = qb_cl(dq);
+ uint32_t response_verb = qb_attr_code_decode(&code_dqrr_response, p);
+
+ return response_verb == x;
+}
+
+int qbman_dq_entry_is_DQ(const struct ldpaa_dq *dq)
+{
+ return __qbman_dq_entry_is_x(dq, QBMAN_DQRR_RESPONSE_DQ);
+}
+
+/*********************************/
+/* Parsing frame dequeue results */
+/*********************************/
+
+/* These APIs assume qbman_dq_entry_is_DQ() is TRUE */
+
+uint32_t ldpaa_dq_flags(const struct ldpaa_dq *dq)
+{
+ const uint32_t *p = qb_cl(dq);
+
+ return qb_attr_code_decode(&code_dqrr_stat, p);
+}
+
+const struct dpaa_fd *ldpaa_dq_fd(const struct ldpaa_dq *dq)
+{
+ const uint32_t *p = qb_cl(dq);
+
+ return (const struct dpaa_fd *)&p[8];
+}
+
+/******************/
+/* Buffer release */
+/******************/
+
+/* These should be const, eventually */
+/* static struct qb_attr_code code_release_num = QB_CODE(0, 0, 3); */
+static struct qb_attr_code code_release_set_me = QB_CODE(0, 5, 1);
+static struct qb_attr_code code_release_bpid = QB_CODE(0, 16, 16);
+
+void qbman_release_desc_clear(struct qbman_release_desc *d)
+{
+ uint32_t *cl;
+
+ memset(d, 0, sizeof(*d));
+ cl = qb_cl(d);
+ qb_attr_code_encode(&code_release_set_me, cl, 1);
+}
+
+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint32_t bpid)
+{
+ uint32_t *cl = qb_cl(d);
+
+ qb_attr_code_encode(&code_release_bpid, cl, bpid);
+}
+
+#define RAR_IDX(rar) ((rar) & 0x7)
+#define RAR_VB(rar) ((rar) & 0x80)
+#define RAR_SUCCESS(rar) ((rar) & 0x100)
+
+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
+ const uint64_t *buffers, unsigned int num_buffers)
+{
+ uint32_t *p;
+ const uint32_t *cl = qb_cl(d);
+ uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
+ debug("RAR=%08x\n", rar);
+ if (!RAR_SUCCESS(rar))
+ return -EBUSY;
+ BUG_ON(!num_buffers || (num_buffers > 7));
+ /* Start the release command */
+ p = qbman_cena_write_start(&s->sys,
+ QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
+ /* Copy the caller's buffer pointers to the command */
+ u64_to_le32_copy(&p[2], buffers, num_buffers);
+ lwsync();
+ /* Set the verb byte, have to substitute in the valid-bit and the number
+ * of buffers. */
+ p[0] = cl[0] | RAR_VB(rar) | num_buffers;
+ qbman_cena_write_complete(&s->sys,
+ QBMAN_CENA_SWP_RCR(RAR_IDX(rar)),
+ p);
+ return 0;
+}
+
+/*******************/
+/* Buffer acquires */
+/*******************/
+
+/* These should be const, eventually */
+static struct qb_attr_code code_acquire_bpid = QB_CODE(0, 16, 16);
+static struct qb_attr_code code_acquire_num = QB_CODE(1, 0, 3);
+static struct qb_attr_code code_acquire_r_num = QB_CODE(1, 0, 3);
+
+int qbman_swp_acquire(struct qbman_swp *s, uint32_t bpid, uint64_t *buffers,
+ unsigned int num_buffers)
+{
+ uint32_t *p;
+ uint32_t verb, rslt, num;
+
+ BUG_ON(!num_buffers || (num_buffers > 7));
+
+ /* Start the management command */
+ p = qbman_swp_mc_start(s);
+
+ if (!p)
+ return -EBUSY;
+
+ /* Encode the caller-provided attributes */
+ qb_attr_code_encode(&code_acquire_bpid, p, bpid);
+ qb_attr_code_encode(&code_acquire_num, p, num_buffers);
+
+ /* Complete the management command */
+ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_MC_ACQUIRE);
+
+ /* Decode the outcome */
+ verb = qb_attr_code_decode(&code_generic_verb, p);
+ rslt = qb_attr_code_decode(&code_generic_rslt, p);
+ num = qb_attr_code_decode(&code_acquire_r_num, p);
+ BUG_ON(verb != QBMAN_MC_ACQUIRE);
+
+ /* Determine success or failure */
+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
+ printf("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
+ bpid, rslt);
+ return -EIO;
+ }
+ BUG_ON(num > num_buffers);
+ /* Copy the acquired buffers to the caller's array */
+ u64_from_le32_copy(buffers, &p[2], num);
+ return (int)num;
+}
--- /dev/null
+/*
+ * Copyright (C) 2014 Freescale Semiconductor
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include "qbman_private.h"
+#include <fsl-mc/fsl_qbman_portal.h>
+#include <fsl-mc/fsl_dpaa_fd.h>
+
+/* All QBMan command and result structures use this "valid bit" encoding */
+#define QB_VALID_BIT ((uint32_t)0x80)
+
+/* Management command result codes */
+#define QBMAN_MC_RSLT_OK 0xf0
+
+/* --------------------- */
+/* portal data structure */
+/* --------------------- */
+
+struct qbman_swp {
+ const struct qbman_swp_desc *desc;
+ /* The qbman_sys (ie. arch/OS-specific) support code can put anything it
+ * needs in here. */
+ struct qbman_swp_sys sys;
+ /* Management commands */
+ struct {
+#ifdef QBMAN_CHECKING
+ enum swp_mc_check {
+ swp_mc_can_start, /* call __qbman_swp_mc_start() */
+ swp_mc_can_submit, /* call __qbman_swp_mc_submit() */
+ swp_mc_can_poll, /* call __qbman_swp_mc_result() */
+ } check;
+#endif
+ uint32_t valid_bit; /* 0x00 or 0x80 */
+ } mc;
+ /* Push dequeues */
+ uint32_t sdq;
+ /* Volatile dequeues */
+ struct {
+ /* VDQCR supports a "1 deep pipeline", meaning that if you know
+ * the last-submitted command is already executing in the
+ * hardware (as evidenced by at least 1 valid dequeue result),
+ * you can write another dequeue command to the register, the
+ * hardware will start executing it as soon as the
+ * already-executing command terminates. (This minimises latency
+ * and stalls.) With that in mind, this "busy" variable refers
+ * to whether or not a command can be submitted, not whether or
+ * not a previously-submitted command is still executing. In
+ * other words, once proof is seen that the previously-submitted
+ * command is executing, "vdq" is no longer "busy". TODO:
+ * convert this to "atomic_t" so that it is thread-safe (without
+ * locking). */
+ int busy;
+ uint32_t valid_bit; /* 0x00 or 0x80 */
+ /* We need to determine when vdq is no longer busy. This depends
+ * on whether the "busy" (last-submitted) dequeue command is
+ * targeting DQRR or main-memory, and detection is based on the
+ * presence of the dequeue command's "token" showing up in
+ * dequeue entries in DQRR or main-memory (respectively). Debug
+ * builds will, when submitting vdq commands, verify that the
+ * dequeue result location is not already equal to the command's
+ * token value. */
+ struct ldpaa_dq *storage; /* NULL if DQRR */
+ uint32_t token;
+ } vdq;
+ /* DQRR */
+ struct {
+ uint32_t next_idx;
+ uint32_t valid_bit;
+ } dqrr;
+};
+
+/* -------------------------- */
+/* portal management commands */
+/* -------------------------- */
+
+/* Different management commands all use this common base layer of code to issue
+ * commands and poll for results. The first function returns a pointer to where
+ * the caller should fill in their MC command (though they should ignore the
+ * verb byte), the second function merges in the caller-supplied command verb
+ * (which should not include the valid-bit) and submits the command to
+ * hardware, and the third function checks for a completed response (returns
+ * non-NULL only if the response is complete). */
+void *qbman_swp_mc_start(struct qbman_swp *p);
+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb);
+void *qbman_swp_mc_result(struct qbman_swp *p);
+
+/* Wraps up submit + poll-for-result */
+static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
+ uint32_t cmd_verb)
+{
+ int loopvar;
+
+ qbman_swp_mc_submit(swp, cmd, cmd_verb);
+ DBG_POLL_START(loopvar);
+ do {
+ DBG_POLL_CHECK(loopvar);
+ cmd = qbman_swp_mc_result(swp);
+ } while (!cmd);
+ return cmd;
+}
+
+/* ------------ */
+/* qb_attr_code */
+/* ------------ */
+
+/* This struct locates a sub-field within a QBMan portal (CENA) cacheline which
+ * is either serving as a configuration command or a query result. The
+ * representation is inherently little-endian, as the indexing of the words is
+ * itself little-endian in nature and Layerscape is little-endian for anything
+ * that crosses a word boundary too (64-bit fields are the obvious examples).
+ */
+struct qb_attr_code {
+ unsigned int word; /* which uint32_t[] array member encodes the field */
+ unsigned int lsoffset; /* encoding offset from ls-bit */
+ unsigned int width; /* encoding width. (bool must be 1.) */
+};
+
+/* Macros to define codes */
+#define QB_CODE(a, b, c) { a, b, c}
+
+/* decode a field from a cacheline */
+static inline uint32_t qb_attr_code_decode(const struct qb_attr_code *code,
+ const uint32_t *cacheline)
+{
+ return d32_uint32_t(code->lsoffset, code->width, cacheline[code->word]);
+}
+
+/* encode a field to a cacheline */
+static inline void qb_attr_code_encode(const struct qb_attr_code *code,
+ uint32_t *cacheline, uint32_t val)
+{
+ cacheline[code->word] =
+ r32_uint32_t(code->lsoffset, code->width, cacheline[code->word])
+ | e32_uint32_t(code->lsoffset, code->width, val);
+}
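/*
 * Illustrative sketch (not part of this patch): encoding and then decoding a
 * hypothetical 4-bit field living at bits 11:8 of word 2 of a command
 * cacheline. "code_example" and the function name are made up for the example.
 */
static inline void example_attr_code_usage(void)
{
	static const struct qb_attr_code code_example = QB_CODE(2, 8, 4);
	uint32_t cl[16] = { 0 };	/* one 64-byte cacheline */
	uint32_t val;

	qb_attr_code_encode(&code_example, cl, 0x5);	/* cl[2] becomes 0x500 */
	val = qb_attr_code_decode(&code_example, cl);	/* val == 0x5 */
	(void)val;
}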
+
+/* ---------------------- */
+/* Descriptors/cachelines */
+/* ---------------------- */
+
+/* To avoid needless dynamic allocation, the driver API often gives the caller
+ * a "descriptor" type that the caller can instantiate however they like.
+ * Ultimately though, it is just a cacheline of binary storage (or something
+ * smaller when it is known that the descriptor doesn't need all 64 bytes) for
+ * holding pre-formatted pieces of hardware commands. The performance-critical
+ * code can then copy these descriptors directly into hardware command
+ * registers more efficiently than trying to construct/format commands
+ * on-the-fly. The API user sees the descriptor as an array of 32-bit words in
+ * order for the compiler to know its size, but the internal details are not
+ * exposed. The following macro is used within the driver for converting *any*
+ * descriptor pointer to a usable array pointer. The use of a macro (instead of
+ * an inline) is necessary to work with different descriptor types and to work
+ * correctly with const and non-const inputs (and similarly-qualified outputs).
+ */
+#define qb_cl(d) (&(d)->dont_manipulate_directly[0])
--- /dev/null
+/*
+ * Copyright (C) 2014 Freescale Semiconductor
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <common.h>
+#include <errno.h>
+#include <asm/io.h>
+#include <linux/types.h>
+#include <linux/compat.h>
+#include <malloc.h>
+#include <fsl-mc/fsl_qbman_base.h>
+
+/* Perform extra checking */
+#define QBMAN_CHECKING
+
+/* Any time there is a register interface which we poll on, this provides a
+ * "break after x iterations" scheme for it. It's handy for debugging, eg.
+ * where you don't want millions of lines of log output from a polling loop
+ * that won't terminate, because such things tend to drown out the earlier log output
+ * that might explain what caused the problem. (NB: put ";" after each macro!)
+ * TODO: we should probably remove this once we're done sanitising the
+ * simulator...
+ */
+#define DBG_POLL_START(loopvar) (loopvar = 10)
+#define DBG_POLL_CHECK(loopvar) \
+ do {if (!(loopvar--)) BUG_ON(NULL == "DBG_POLL_CHECK"); } while (0)
+
+/* For CCSR or portal-CINH registers that contain fields at arbitrary offsets
+ * and widths, these macro-generated encode/decode/isolate/remove inlines can
+ * be used.
+ *
+ * Eg. to "d"ecode a 14-bit field out of a register (into a "uint16_t" type),
+ * where the field is located 3 bits "up" from the least-significant bit of the
+ * register (ie. the field location within the 32-bit register corresponds to a
+ * mask of 0x0001fff8), you would do;
+ * uint16_t field = d32_uint16_t(3, 14, reg_value);
+ *
+ * Or to "e"ncode a 1-bit boolean value (input type is "int", zero is FALSE,
+ * non-zero is TRUE, so must convert all non-zero inputs to 1, hence the "!!"
+ * operator) into a register at bit location 0x00080000 (19 bits "in" from the
+ * LS bit), do;
+ * reg_value |= e32_int(19, 1, !!field);
+ *
+ * If you wish to read-modify-write a register, such that you leave the 14-bit
+ * field as-is but have all other fields set to zero, then "i"solate the 14-bit
+ * value using;
+ * reg_value = i32_uint16_t(3, 14, reg_value);
+ *
+ * Alternatively, you could "r"emove the 1-bit boolean field (setting it to
+ * zero) but leaving all other fields as-is;
+ * reg_val = r32_int(19, 1, reg_value);
+ *
+ */
+#define MAKE_MASK32(width) (width == 32 ? 0xffffffff : \
+ (uint32_t)((1 << width) - 1))
+#define DECLARE_CODEC32(t) \
+static inline uint32_t e32_##t(uint32_t lsoffset, uint32_t width, t val) \
+{ \
+ BUG_ON(width > (sizeof(t) * 8)); \
+ return ((uint32_t)val & MAKE_MASK32(width)) << lsoffset; \
+} \
+static inline t d32_##t(uint32_t lsoffset, uint32_t width, uint32_t val) \
+{ \
+ BUG_ON(width > (sizeof(t) * 8)); \
+ return (t)((val >> lsoffset) & MAKE_MASK32(width)); \
+} \
+static inline uint32_t i32_##t(uint32_t lsoffset, uint32_t width, \
+ uint32_t val) \
+{ \
+ BUG_ON(width > (sizeof(t) * 8)); \
+ return e32_##t(lsoffset, width, d32_##t(lsoffset, width, val)); \
+} \
+static inline uint32_t r32_##t(uint32_t lsoffset, uint32_t width, \
+ uint32_t val) \
+{ \
+ BUG_ON(width > (sizeof(t) * 8)); \
+ return ~(MAKE_MASK32(width) << lsoffset) & val; \
+}
+DECLARE_CODEC32(uint32_t)
+DECLARE_CODEC32(uint16_t)
+DECLARE_CODEC32(uint8_t)
+DECLARE_CODEC32(int)
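/*
 * Illustrative sketch (not part of this patch): the worked examples from the
 * comment above, spelled out with the generated helpers. "reg_value" stands in
 * for a value read from some 32-bit register and the function name is made up.
 */
static inline uint32_t example_codec32_usage(uint32_t reg_value, int flag)
{
	uint16_t field = d32_uint16_t(3, 14, reg_value); /* decode bits 16:3 */

	reg_value |= e32_int(19, 1, !!flag);		/* encode a 1-bit boolean */
	reg_value = i32_uint16_t(3, 14, reg_value);	/* keep only the 14-bit field */
	reg_value = r32_int(19, 1, reg_value);		/* clear the 1-bit field */
	(void)field;
	return reg_value;
}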
+
+ /*********************/
+ /* Debugging assists */
+ /*********************/
+
+static inline void __hexdump(unsigned long start, unsigned long end,
+ unsigned long p, size_t sz, const unsigned char *c)
+{
+ while (start < end) {
+ unsigned int pos = 0;
+ char buf[64];
+ int nl = 0;
+
+ pos += sprintf(buf + pos, "%08lx: ", start);
+ do {
+ if ((start < p) || (start >= (p + sz)))
+ pos += sprintf(buf + pos, "..");
+ else
+ pos += sprintf(buf + pos, "%02x", *(c++));
+ if (!(++start & 15)) {
+ buf[pos++] = '\n';
+ nl = 1;
+ } else {
+ nl = 0;
+ if (!(start & 1))
+ buf[pos++] = ' ';
+ if (!(start & 3))
+ buf[pos++] = ' ';
+ }
+ } while (start & 15);
+ if (!nl)
+ buf[pos++] = '\n';
+ buf[pos] = '\0';
+ debug("%s", buf);
+ }
+}
+static inline void hexdump(const void *ptr, size_t sz)
+{
+ unsigned long p = (unsigned long)ptr;
+ unsigned long start = p & ~(unsigned long)15;
+ unsigned long end = (p + sz + 15) & ~(unsigned long)15;
+ const unsigned char *c = ptr;
+
+ __hexdump(start, end, p, sz, c);
+}
+
+#if defined(__BIG_ENDIAN)
+#define DQRR_TOK_OFFSET 0
+#else
+#define DQRR_TOK_OFFSET 24
+#endif
+
+/* Similarly-named functions */
+#define upper32(a) upper_32_bits(a)
+#define lower32(a) lower_32_bits(a)
+
+ /****************/
+ /* arch assists */
+ /****************/
+
+static inline void dcbz(void *ptr)
+{
+ uint32_t *p = ptr;
+ BUG_ON((unsigned long)ptr & 63);
+ p[0] = 0;
+ p[1] = 0;
+ p[2] = 0;
+ p[3] = 0;
+ p[4] = 0;
+ p[5] = 0;
+ p[6] = 0;
+ p[7] = 0;
+ p[8] = 0;
+ p[9] = 0;
+ p[10] = 0;
+ p[11] = 0;
+ p[12] = 0;
+ p[13] = 0;
+ p[14] = 0;
+ p[15] = 0;
+}
+
+#define lwsync()
+
+#include "qbman_sys.h"
--- /dev/null
+/*
+ * Copyright (C) 2014 Freescale Semiconductor
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+/* qbman_sys_decl.h and qbman_sys.h are the two platform-specific files in the
+ * driver. They are only included via qbman_private.h, which is itself a
+ * platform-independent file and is included by all the other driver source.
+ *
+ * qbman_sys_decl.h is included prior to all other declarations and logic, and
+ * it exists to provide compatibility with any linux interfaces our
+ * single-source driver code is dependent on (eg. kmalloc). Ie. this file
+ * provides linux compatibility.
+ *
+ * This qbman_sys.h header, on the other hand, is included *after* any common
+ * and platform-neutral declarations and logic in qbman_private.h, and exists to
+ * implement any platform-specific logic of the qbman driver itself. Ie. it is
+ * *not* to provide linux compatibility.
+ */
+
+/* Trace the 3 different classes of read/write access to QBMan. #undef as
+ * required. */
+#undef QBMAN_CCSR_TRACE
+#undef QBMAN_CINH_TRACE
+#undef QBMAN_CENA_TRACE
+
+/* Temporarily define this to get around the fact that cache enabled mapping is
+ * not working right now. Will remove this once U-Boot can map the
+ * cache-enabled portal memory.
+ */
+#define QBMAN_CINH_ONLY
+
+static inline void word_copy(void *d, const void *s, unsigned int cnt)
+{
+ uint32_t *dd = d;
+ const uint32_t *ss = s;
+
+ while (cnt--)
+ *(dd++) = *(ss++);
+}
+
+/* Currently, the CENA support code expects each 32-bit word to be written in
+ * host order, and these are converted to hardware (little-endian) order on
+ * command submission. However, 64-bit quantities must be written (and read)
+ * as two 32-bit words with the least-significant word first, irrespective of
+ * host endianness. */
+static inline void u64_to_le32_copy(void *d, const uint64_t *s,
+ unsigned int cnt)
+{
+ uint32_t *dd = d;
+ const uint32_t *ss = (const uint32_t *)s;
+
+ while (cnt--) {
+ /* TBD: the toolchain was choking on the use of 64-bit types up
+ * until recently so this works entirely with 32-bit variables.
+ * When 64-bit types become usable again, investigate better
+ * ways of doing this. */
+#if defined(__BIG_ENDIAN)
+ *(dd++) = ss[1];
+ *(dd++) = ss[0];
+ ss += 2;
+#else
+ *(dd++) = *(ss++);
+ *(dd++) = *(ss++);
+#endif
+ }
+}
+static inline void u64_from_le32_copy(uint64_t *d, const void *s,
+ unsigned int cnt)
+{
+ const uint32_t *ss = s;
+ uint32_t *dd = (uint32_t *)d;
+
+ while (cnt--) {
+#if defined(__BIG_ENDIAN)
+ dd[1] = *(ss++);
+ dd[0] = *(ss++);
+ dd += 2;
+#else
+ *(dd++) = *(ss++);
+ *(dd++) = *(ss++);
+#endif
+ }
+}
+
+/* Convert a host-native 32bit value into little endian */
+#if defined(__BIG_ENDIAN)
+static inline uint32_t make_le32(uint32_t val)
+{
+ return ((val & 0xff) << 24) | ((val & 0xff00) << 8) |
+ ((val & 0xff0000) >> 8) | ((val & 0xff000000) >> 24);
+}
+#else
+#define make_le32(val) (val)
+#endif
+static inline void make_le32_n(uint32_t *val, unsigned int num)
+{
+ while (num--) {
+ *val = make_le32(*val);
+ val++;
+ }
+}
+
+ /******************/
+ /* Portal access */
+ /******************/
+struct qbman_swp_sys {
+ /* On GPP, the sys support for qbman_swp is here. The CENA region is
+ * not an mmap() of the real portal registers, but an allocated
+ * place-holder, because the actual writes/reads to/from the portal are
+ * marshalled from these allocated areas using QBMan's "MC access
+ * registers". CINH accesses are atomic so there's no need for a
+ * place-holder. */
+ void *cena;
+ void __iomem *addr_cena;
+ void __iomem *addr_cinh;
+};
+
+/* P_OFFSET is (ACCESS_CMD,0,12) - offset within the portal
+ * C is (ACCESS_CMD,12,1) - is inhibited? (0==CENA, 1==CINH)
+ * SWP_IDX is (ACCESS_CMD,16,10) - Software portal index
+ * P is (ACCESS_CMD,28,1) - (0==special portal, 1==any portal)
+ * T is (ACCESS_CMD,29,1) - Command type (0==READ, 1==WRITE)
+ * E is (ACCESS_CMD,31,1) - Command execute (1 to issue, poll for 0==complete)
+ */
+
+static inline void qbman_cinh_write(struct qbman_swp_sys *s, uint32_t offset,
+ uint32_t val)
+{
+ __raw_writel(val, s->addr_cinh + offset);
+#ifdef QBMAN_CINH_TRACE
+ pr_info("qbman_cinh_write(%p:0x%03x) 0x%08x\n",
+ s->addr_cinh, offset, val);
+#endif
+}
+
+static inline uint32_t qbman_cinh_read(struct qbman_swp_sys *s, uint32_t offset)
+{
+ uint32_t reg = __raw_readl(s->addr_cinh + offset);
+
+#ifdef QBMAN_CINH_TRACE
+ pr_info("qbman_cinh_read(%p:0x%03x) 0x%08x\n",
+ s->addr_cinh, offset, reg);
+#endif
+ return reg;
+}
+
+static inline void *qbman_cena_write_start(struct qbman_swp_sys *s,
+ uint32_t offset)
+{
+ void *shadow = s->cena + offset;
+
+#ifdef QBMAN_CENA_TRACE
+ pr_info("qbman_cena_write_start(%p:0x%03x) %p\n",
+ s->addr_cena, offset, shadow);
+#endif
+ BUG_ON(offset & 63);
+ dcbz(shadow);
+ return shadow;
+}
+
+static inline void qbman_cena_write_complete(struct qbman_swp_sys *s,
+ uint32_t offset, void *cmd)
+{
+ const uint32_t *shadow = cmd;
+ int loop;
+
+#ifdef QBMAN_CENA_TRACE
+ pr_info("qbman_cena_write_complete(%p:0x%03x) %p\n",
+ s->addr_cena, offset, shadow);
+ hexdump(cmd, 64);
+#endif
+ for (loop = 15; loop >= 0; loop--)
+#ifdef QBMAN_CINH_ONLY
+ __raw_writel(shadow[loop], s->addr_cinh +
+ offset + loop * 4);
+#else
+ __raw_writel(shadow[loop], s->addr_cena +
+ offset + loop * 4);
+#endif
+}
+
+static inline void *qbman_cena_read(struct qbman_swp_sys *s, uint32_t offset)
+{
+ uint32_t *shadow = s->cena + offset;
+ unsigned int loop;
+
+#ifdef QBMAN_CENA_TRACE
+ pr_info("qbman_cena_read(%p:0x%03x) %p\n",
+ s->addr_cena, offset, shadow);
+#endif
+
+ for (loop = 0; loop < 16; loop++)
+#ifdef QBMAN_CINH_ONLY
+ shadow[loop] = __raw_readl(s->addr_cinh + offset
+ + loop * 4);
+#else
+ shadow[loop] = __raw_readl(s->addr_cena + offset
+ + loop * 4);
+#endif
+#ifdef QBMAN_CENA_TRACE
+ hexdump(shadow, 64);
+#endif
+ return shadow;
+}
+
+static inline void qbman_cena_invalidate_prefetch(struct qbman_swp_sys *s,
+ uint32_t offset)
+{
+}
+
+ /******************/
+ /* Portal support */
+ /******************/
+
+/* The SWP_CFG portal register is special, in that it is used by the
+ * platform-specific code rather than the platform-independent code in
+ * qbman_portal.c. So use of it is declared locally here. */
+#define QBMAN_CINH_SWP_CFG 0xd00
+
+/* For MC portal use, we always configure with
+ * DQRR_MF is (SWP_CFG,20,3) - DQRR max fill (<- 0x4)
+ * EST is (SWP_CFG,16,3) - EQCR_CI stashing threshold (<- 0x0)
+ * RPM is (SWP_CFG,12,2) - RCR production notification mode (<- 0x3)
+ * DCM is (SWP_CFG,10,2) - DQRR consumption notification mode (<- 0x2)
+ * EPM is (SWP_CFG,8,2) - EQCR production notification mode (<- 0x3)
+ * SD is (SWP_CFG,5,1) - memory stashing drop enable (<- FALSE)
+ * SP is (SWP_CFG,4,1) - memory stashing priority (<- TRUE)
+ * SE is (SWP_CFG,3,1) - memory stashing enable (<- 0x0)
+ * DP is (SWP_CFG,2,1) - dequeue stashing priority (<- TRUE)
+ * DE is (SWP_CFG,1,1) - dequeue stashing enable (<- 0x0)
+ * EP is (SWP_CFG,0,1) - EQCR_CI stashing priority (<- FALSE)
+ */
+static inline uint32_t qbman_set_swp_cfg(uint8_t max_fill, uint8_t wn,
+ uint8_t est, uint8_t rpm, uint8_t dcm,
+ uint8_t epm, int sd, int sp, int se,
+ int dp, int de, int ep)
+{
+ uint32_t reg;
+
+ reg = e32_uint8_t(20, 3, max_fill) | e32_uint8_t(16, 3, est) |
+ e32_uint8_t(12, 2, rpm) | e32_uint8_t(10, 2, dcm) |
+ e32_uint8_t(8, 2, epm) | e32_int(5, 1, sd) |
+ e32_int(4, 1, sp) | e32_int(3, 1, se) | e32_int(2, 1, dp) |
+ e32_int(1, 1, de) | e32_int(0, 1, ep) | e32_uint8_t(14, 1, wn);
+ return reg;
+}
+
+static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
+ const struct qbman_swp_desc *d)
+{
+ uint32_t reg;
+
+ s->addr_cena = d->cena_bar;
+ s->addr_cinh = d->cinh_bar;
+ s->cena = (void *)valloc(CONFIG_SYS_PAGE_SIZE);
+ if (!s->cena) {
+ printf("Could not allocate page for cena shadow\n");
+ return -1;
+ }
+ memset(s->cena, 0x00, CONFIG_SYS_PAGE_SIZE);
+
+#ifdef QBMAN_CHECKING
+ /* We should never be asked to initialise for a portal that isn't in
+ * the power-on state. (Ie. don't forget to reset portals when they are
+ * decommissioned!)
+ */
+ reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
+ BUG_ON(reg);
+#endif
+#ifdef QBMAN_CINH_ONLY
+ reg = qbman_set_swp_cfg(4, 1, 0, 3, 2, 3, 0, 1, 0, 1, 0, 0);
+#else
+ reg = qbman_set_swp_cfg(4, 0, 0, 3, 2, 3, 0, 1, 0, 1, 0, 0);
+#endif
+ qbman_cinh_write(s, QBMAN_CINH_SWP_CFG, reg);
+ reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
+ if (!reg) {
+ printf("The portal is not enabled!\n");
+ free(s->cena);
+ return -1;
+ }
+ return 0;
+}
+
+static inline void qbman_swp_sys_finish(struct qbman_swp_sys *s)
+{
+ free((void *)s->cena);
+}
-/* Copyright 2014 Freescale Semiconductor Inc.
+/* Copyright 2013-2015 Freescale Semiconductor Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*/
return 0;
}
-
-int dpmng_reset_aiop(struct fsl_mc_io *mc_io, int container_id,
- int aiop_tile_id)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPMNG_CMDID_RESET_AIOP,
- MC_CMD_PRI_LOW, 0);
- DPMNG_CMD_RESET_AIOP(cmd, container_id, aiop_tile_id);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-int dpmng_load_aiop(struct fsl_mc_io *mc_io,
- int container_id,
- int aiop_tile_id,
- uint64_t img_iova,
- uint32_t img_size)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPMNG_CMDID_LOAD_AIOP,
- MC_CMD_PRI_LOW,
- 0);
- DPMNG_CMD_LOAD_AIOP(cmd, container_id, aiop_tile_id, img_size,
- img_iova);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-int dpmng_run_aiop(struct fsl_mc_io *mc_io,
- int container_id,
- int aiop_tile_id,
- const struct dpmng_aiop_run_cfg *cfg)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPMNG_CMDID_RUN_AIOP,
- MC_CMD_PRI_LOW,
- 0);
- DPMNG_CMD_RUN_AIOP(cmd, container_id, aiop_tile_id, cfg);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-int dpmng_reset_mc_portal(struct fsl_mc_io *mc_io)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPMNG_CMDID_RESET_MC_PORTAL,
- MC_CMD_PRI_LOW,
- 0);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
--- /dev/null
+/*
+ * Copyright (C) 2013-2015 Freescale Semiconductor
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <fsl-mc/fsl_mc_sys.h>
+#include <fsl-mc/fsl_mc_cmd.h>
+#include <fsl-mc/fsl_dpni.h>
+
+int dpni_open(struct fsl_mc_io *mc_io, int dpni_id, uint16_t *token)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN,
+ MC_CMD_PRI_LOW, 0);
+ DPNI_CMD_OPEN(cmd, dpni_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+
+ return 0;
+}
+
+int dpni_close(struct fsl_mc_io *mc_io, uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE,
+ MC_CMD_PRI_HIGH, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_set_pools(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ const struct dpni_pools_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS,
+ MC_CMD_PRI_LOW,
+ token);
+ DPNI_CMD_SET_POOLS(cmd, cfg);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_enable(struct fsl_mc_io *mc_io, uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE,
+ MC_CMD_PRI_LOW, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_disable(struct fsl_mc_io *mc_io, uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE,
+ MC_CMD_PRI_LOW,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_reset(struct fsl_mc_io *mc_io, uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET,
+ MC_CMD_PRI_LOW, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_attributes(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ struct dpni_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR,
+ MC_CMD_PRI_LOW,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_ATTR(cmd, attr);
+
+ return 0;
+}
+
+int dpni_get_rx_buffer_layout(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ struct dpni_buffer_layout *layout)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_BUFFER_LAYOUT,
+ MC_CMD_PRI_LOW, token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_RX_BUFFER_LAYOUT(cmd, layout);
+
+ return 0;
+}
+
+int dpni_set_rx_buffer_layout(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ const struct dpni_buffer_layout *layout)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_BUFFER_LAYOUT,
+ MC_CMD_PRI_LOW, token);
+ DPNI_CMD_SET_RX_BUFFER_LAYOUT(cmd, layout);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_tx_buffer_layout(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ struct dpni_buffer_layout *layout)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_BUFFER_LAYOUT,
+ MC_CMD_PRI_LOW, token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_TX_BUFFER_LAYOUT(cmd, layout);
+
+ return 0;
+}
+
+int dpni_set_tx_buffer_layout(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ const struct dpni_buffer_layout *layout)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_BUFFER_LAYOUT,
+ MC_CMD_PRI_LOW, token);
+ DPNI_CMD_SET_TX_BUFFER_LAYOUT(cmd, layout);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_tx_conf_buffer_layout(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ struct dpni_buffer_layout *layout)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_CONF_BUFFER_LAYOUT,
+ MC_CMD_PRI_LOW, token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_TX_CONF_BUFFER_LAYOUT(cmd, layout);
+
+ return 0;
+}
+
+int dpni_set_tx_conf_buffer_layout(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ const struct dpni_buffer_layout *layout)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF_BUFFER_LAYOUT,
+ MC_CMD_PRI_LOW, token);
+ DPNI_CMD_SET_TX_CONF_BUFFER_LAYOUT(cmd, layout);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_qdid(struct fsl_mc_io *mc_io, uint16_t token, uint16_t *qdid)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID,
+ MC_CMD_PRI_LOW,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_QDID(cmd, *qdid);
+
+ return 0;
+}
+
+int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ uint16_t *data_offset)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET,
+ MC_CMD_PRI_LOW, token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_TX_DATA_OFFSET(cmd, *data_offset);
+
+ return 0;
+}
+
+int dpni_get_counter(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ enum dpni_counter counter,
+ uint64_t *value)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_COUNTER,
+ MC_CMD_PRI_LOW, token);
+ DPNI_CMD_GET_COUNTER(cmd, counter);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_COUNTER(cmd, *value);
+
+ return 0;
+}
+
+int dpni_set_counter(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ enum dpni_counter counter,
+ uint64_t value)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_COUNTER,
+ MC_CMD_PRI_LOW, token);
+ DPNI_CMD_SET_COUNTER(cmd, counter, value);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ struct dpni_link_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG,
+ MC_CMD_PRI_LOW, token);
+ DPNI_CMD_SET_LINK_CFG(cmd, cfg);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_link_state(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ struct dpni_link_state *state)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE,
+ MC_CMD_PRI_LOW, token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_LINK_STATE(cmd, state);
+
+ return 0;
+}
+
+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ const uint8_t mac_addr[6])
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC,
+ MC_CMD_PRI_LOW, token);
+ DPNI_CMD_SET_PRIMARY_MAC_ADDR(cmd, mac_addr);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ uint8_t mac_addr[6])
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC,
+ MC_CMD_PRI_LOW, token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_PRIMARY_MAC_ADDR(cmd, mac_addr);
+
+ return 0;
+}
+
+int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ const uint8_t mac_addr[6])
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR,
+ MC_CMD_PRI_LOW, token);
+ DPNI_CMD_ADD_MAC_ADDR(cmd, mac_addr);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ const uint8_t mac_addr[6])
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR,
+ MC_CMD_PRI_LOW, token);
+ DPNI_CMD_REMOVE_MAC_ADDR(cmd, mac_addr);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_set_tx_flow(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ uint16_t *flow_id,
+ const struct dpni_tx_flow_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_FLOW,
+ MC_CMD_PRI_LOW, token);
+ DPNI_CMD_SET_TX_FLOW(cmd, *flow_id, cfg);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_SET_TX_FLOW(cmd, *flow_id);
+
+ return 0;
+}
+
+int dpni_get_tx_flow(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ uint16_t flow_id,
+ struct dpni_tx_flow_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_FLOW,
+ MC_CMD_PRI_LOW, token);
+ DPNI_CMD_GET_TX_FLOW(cmd, flow_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_TX_FLOW(cmd, attr);
+
+ return 0;
+}
+
+int dpni_set_rx_flow(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ uint8_t tc_id,
+ uint16_t flow_id,
+ const struct dpni_queue_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FLOW,
+ MC_CMD_PRI_LOW, token);
+ DPNI_CMD_SET_RX_FLOW(cmd, tc_id, flow_id, cfg);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_rx_flow(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ uint8_t tc_id,
+ uint16_t flow_id,
+ struct dpni_queue_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_FLOW,
+ MC_CMD_PRI_LOW, token);
+ DPNI_CMD_GET_RX_FLOW(cmd, tc_id, flow_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_RX_FLOW(cmd, attr);
+
+ return 0;
+}
--- /dev/null
+/*
+ * Freescale Layerscape MC I/O wrapper
+ *
+ * Copyright (C) 2013-2015 Freescale Semiconductor, Inc.
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <fsl-mc/fsl_mc_sys.h>
+#include <fsl-mc/fsl_mc_cmd.h>
+#include <fsl-mc/fsl_dprc.h>
+
+int dprc_get_container_id(struct fsl_mc_io *mc_io, int *container_id)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONT_ID,
+ MC_CMD_PRI_LOW, 0);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPRC_RSP_GET_CONTAINER_ID(cmd, *container_id);
+
+ return 0;
+}
+
+int dprc_open(struct fsl_mc_io *mc_io, int container_id, uint16_t *token)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_OPEN, MC_CMD_PRI_LOW,
+ 0);
+ DPRC_CMD_OPEN(cmd, container_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+
+ return 0;
+}
+
+int dprc_close(struct fsl_mc_io *mc_io, uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLOSE, MC_CMD_PRI_HIGH,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dprc_reset_container(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ int child_container_id)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_RESET_CONT,
+ MC_CMD_PRI_LOW, token);
+ DPRC_CMD_RESET_CONTAINER(cmd, child_container_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dprc_get_attributes(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ struct dprc_attributes *attr)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_ATTR,
+ MC_CMD_PRI_LOW,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPRC_RSP_GET_ATTRIBUTES(cmd, attr);
+
+ return 0;
+}
+
+int dprc_get_obj_count(struct fsl_mc_io *mc_io, uint16_t token, int *obj_count)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_COUNT,
+ MC_CMD_PRI_LOW, token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPRC_RSP_GET_OBJ_COUNT(cmd, *obj_count);
+
+ return 0;
+}
+
+int dprc_get_obj(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ int obj_index,
+ struct dprc_obj_desc *obj_desc)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ,
+ MC_CMD_PRI_LOW,
+ token);
+ DPRC_CMD_GET_OBJ(cmd, obj_index);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPRC_RSP_GET_OBJ(cmd, obj_desc);
+
+ return 0;
+}
+
+int dprc_get_res_count(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ char *type,
+ int *res_count)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ *res_count = 0;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_COUNT,
+ MC_CMD_PRI_LOW, token);
+ DPRC_CMD_GET_RES_COUNT(cmd, type);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPRC_RSP_GET_RES_COUNT(cmd, *res_count);
+
+ return 0;
+}
+
+int dprc_get_res_ids(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ char *type,
+ struct dprc_res_ids_range_desc *range_desc)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_IDS,
+ MC_CMD_PRI_LOW, token);
+ DPRC_CMD_GET_RES_IDS(cmd, range_desc, type);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPRC_RSP_GET_RES_IDS(cmd, range_desc);
+
+ return 0;
+}
+
+int dprc_get_obj_region(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ char *obj_type,
+ int obj_id,
+ uint8_t region_index,
+ struct dprc_region_desc *region_desc)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG,
+ MC_CMD_PRI_LOW, token);
+ DPRC_CMD_GET_OBJ_REGION(cmd, obj_type, obj_id, region_index);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPRC_RSP_GET_OBJ_REGION(cmd, region_desc);
+
+ return 0;
+}
+
+int dprc_connect(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ const struct dprc_endpoint *endpoint1,
+ const struct dprc_endpoint *endpoint2)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CONNECT,
+ MC_CMD_PRI_LOW,
+ token);
+ DPRC_CMD_CONNECT(cmd, endpoint1, endpoint2);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dprc_disconnect(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ const struct dprc_endpoint *endpoint)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_DISCONNECT,
+ MC_CMD_PRI_LOW,
+ token);
+ DPRC_CMD_DISCONNECT(cmd, endpoint);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dprc_get_connection(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ const struct dprc_endpoint *endpoint1,
+ struct dprc_endpoint *endpoint2,
+ int *state)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONNECTION,
+ MC_CMD_PRI_LOW,
+ token);
+ DPRC_CMD_GET_CONNECTION(cmd, endpoint1);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPRC_RSP_GET_CONNECTION(cmd, endpoint2, *state);
+
+ return 0;
+}
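As a usage illustration of the DPRC commands above (a sketch, not part of this patch), enumerating the objects of an already-opened container looks roughly like this:

#include <common.h>
#include <fsl-mc/fsl_mc_sys.h>
#include <fsl-mc/fsl_dprc.h>

static int example_list_container(struct fsl_mc_io *mc_io, uint16_t dprc_token)
{
        struct dprc_obj_desc obj_desc;
        int i, err, obj_count = 0;

        err = dprc_get_obj_count(mc_io, dprc_token, &obj_count);
        if (err)
                return err;

        for (i = 0; i < obj_count; i++) {
                err = dprc_get_obj(mc_io, dprc_token, i, &obj_desc);
                if (err)
                        return err;
                printf("obj %d: type %s, id %d\n", i, obj_desc.type,
                       obj_desc.id);
        }

        return 0;
}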
-/* Copyright 2014 Freescale Semiconductor Inc.
+/* Copyright 2013-2015 Freescale Semiconductor Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*/
/* Command IDs */
#define DPMNG_CMDID_GET_VERSION 0x831
-#define DPMNG_CMDID_RESET_AIOP 0x832
-#define DPMNG_CMDID_LOAD_AIOP 0x833
-#define DPMNG_CMDID_RUN_AIOP 0x834
-#define DPMNG_CMDID_RESET_MC_PORTAL 0x835
/* cmd, param, offset, width, type, arg_name */
#define DPMNG_RSP_GET_VERSION(cmd, mc_ver_info) \
MC_RSP_OP(cmd, 1, 0, 32, uint32_t, mc_ver_info->minor); \
} while (0)
-/* cmd, param, offset, width, type, arg_name */
-#define DPMNG_CMD_RESET_AIOP(cmd, container_id, aiop_tile_id) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 32, int, aiop_tile_id); \
- MC_CMD_OP(cmd, 0, 32, 32, int, container_id); \
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPMNG_CMD_LOAD_AIOP(cmd, container_id, aiop_tile_id, img_size, \
- img_iova) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 32, int, aiop_tile_id); \
- MC_CMD_OP(cmd, 0, 32, 32, int, container_id); \
- MC_CMD_OP(cmd, 1, 0, 32, uint32_t, img_size); \
- MC_CMD_OP(cmd, 2, 0, 64, uint64_t, img_iova); \
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPMNG_CMD_RUN_AIOP(cmd, container_id, aiop_tile_id, cfg) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 32, int, aiop_tile_id); \
- MC_CMD_OP(cmd, 0, 32, 32, int, container_id); \
- MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->cores_mask); \
- MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options); \
-} while (0)
-
#endif /* __FSL_DPMNG_CMD_H */
#include <asm/io.h>
#include <fsl-mc/fsl_mc.h>
#include <fsl-mc/fsl_mc_sys.h>
+#include <fsl-mc/fsl_mc_private.h>
#include <fsl-mc/fsl_dpmng.h>
#include <fsl_debug_server.h>
+#include <fsl-mc/fsl_dprc.h>
+#include <fsl-mc/fsl_dpio.h>
+#include <fsl-mc/fsl_qbman_portal.h>
DECLARE_GLOBAL_DATA_PTR;
static int mc_boot_status;
+struct fsl_mc_io *dflt_mc_io = NULL;
+uint16_t dflt_dprc_handle = 0;
+struct fsl_dpbp_obj *dflt_dpbp = NULL;
+struct fsl_dpio_obj *dflt_dpio = NULL;
+uint16_t dflt_dpio_handle = 0;
/**
* Copying MC firmware or DPL image to DDR
return 0;
}
-int mc_init(bd_t *bis)
+int mc_init(void)
{
int error = 0;
int timeout = 200000;
+ int portal_id = 0;
struct mc_ccsr_registers __iomem *mc_ccsr_regs = MC_CCSR_BASE_ADDR;
u64 mc_ram_addr;
u64 mc_dpl_offset;
int dpl_size;
const void *raw_image_addr;
size_t raw_image_size = 0;
- struct fsl_mc_io mc_io;
- int portal_id;
struct mc_version mc_ver_info;
/*
portal_id = 0;
/*
- * Check that the MC firmware is responding portal commands:
+ * Initialize the global default MC portal
+ * and check that the MC firmware is responding to portal commands:
*/
- mc_io.mmio_regs = SOC_MC_PORTAL_ADDR(portal_id);
+ dflt_mc_io = (struct fsl_mc_io *)malloc(sizeof(struct fsl_mc_io));
+ if (!dflt_mc_io) {
+ printf(" No memory: malloc() failed\n");
+ return -ENOMEM;
+ }
+
+ dflt_mc_io->mmio_regs = SOC_MC_PORTAL_ADDR(portal_id);
debug("Checking access to MC portal of root DPRC container (portal_id %d, portal physical addr %p)\n",
- portal_id, mc_io.mmio_regs);
+ portal_id, dflt_mc_io->mmio_regs);
- error = mc_get_version(&mc_io, &mc_ver_info);
+ error = mc_get_version(dflt_mc_io, &mc_ver_info);
if (error != 0) {
printf("fsl-mc: ERROR: Firmware version check failed (error: %d)\n",
error);
{
return CONFIG_SYS_LS_MC_DRAM_BLOCK_MIN_SIZE;
}
+
+int dpio_init(struct dprc_obj_desc obj_desc)
+{
+ struct qbman_swp_desc p_des;
+ struct dpio_attr attr;
+ int err = 0;
+
+ dflt_dpio = (struct fsl_dpio_obj *)malloc(sizeof(struct fsl_dpio_obj));
+ if (!dflt_dpio) {
+ printf(" No memory: malloc() failed\n");
+ return -ENOMEM;
+ }
+
+ dflt_dpio->dpio_id = obj_desc.id;
+
+ err = dpio_open(dflt_mc_io, obj_desc.id, &dflt_dpio_handle);
+ if (err) {
+ printf("dpio_open() failed\n");
+ goto err_open;
+ }
+
+ err = dpio_get_attributes(dflt_mc_io, dflt_dpio_handle, &attr);
+ if (err) {
+ printf("dpio_get_attributes() failed %d\n", err);
+ goto err_get_attr;
+ }
+
+ err = dpio_enable(dflt_mc_io, dflt_dpio_handle);
+ if (err) {
+ printf("dpio_enable() failed %d\n", err);
+ goto err_get_enable;
+ }
+ debug("ce_paddr=0x%llx, ci_paddr=0x%llx, portalid=%d, prios=%d\n",
+ attr.qbman_portal_ce_paddr,
+ attr.qbman_portal_ci_paddr,
+ attr.qbman_portal_id,
+ attr.num_priorities);
+
+ p_des.cena_bar = (void *)attr.qbman_portal_ce_paddr;
+ p_des.cinh_bar = (void *)attr.qbman_portal_ci_paddr;
+
+ dflt_dpio->sw_portal = qbman_swp_init(&p_des);
+ if (dflt_dpio->sw_portal == NULL) {
+ printf("qbman_swp_init() failed\n");
+ err = -ENODEV;
+ goto err_get_swp_init;
+ }
+ return 0;
+
+err_get_swp_init:
+err_get_enable:
+ dpio_disable(dflt_mc_io, dflt_dpio_handle);
+err_get_attr:
+ dpio_close(dflt_mc_io, dflt_dpio_handle);
+err_open:
+ free(dflt_dpio);
+ return err;
+}
+
+int dpbp_init(struct dprc_obj_desc obj_desc)
+{
+ dflt_dpbp = (struct fsl_dpbp_obj *)malloc(sizeof(struct fsl_dpbp_obj));
+ if (!dflt_dpbp) {
+ printf(" No memory: malloc() failed\n");
+ return -ENOMEM;
+ }
+ dflt_dpbp->dpbp_attr.id = obj_desc.id;
+
+ return 0;
+}
+
+int dprc_init_container_obj(struct dprc_obj_desc obj_desc)
+{
+ int error = 0;
+
+ if (!strcmp(obj_desc.type, "dpbp")) {
+ if (!dflt_dpbp) {
+ error = dpbp_init(obj_desc);
+ if (error < 0)
+ printf("dpbp_init failed\n");
+ }
+ } else if (!strcmp(obj_desc.type, "dpio")) {
+ if (!dflt_dpio) {
+ error = dpio_init(obj_desc);
+ if (error < 0)
+ printf("dpio_init failed\n");
+ }
+ }
+
+ return error;
+}
+
+int dprc_scan_container_obj(uint16_t dprc_handle, char *obj_type, int i)
+{
+ int error = 0;
+ struct dprc_obj_desc obj_desc;
+
+ memset((void *)&obj_desc, 0x00, sizeof(struct dprc_obj_desc));
+
+ error = dprc_get_obj(dflt_mc_io, dprc_handle,
+ i, &obj_desc);
+ if (error < 0) {
+ printf("dprc_get_obj(i=%d) failed: %d\n",
+ i, error);
+ return error;
+ }
+
+ if (!strcmp(obj_desc.type, obj_type)) {
+ debug("Discovered object: type %s, id %d, req %s\n",
+ obj_desc.type, obj_desc.id, obj_type);
+
+ error = dprc_init_container_obj(obj_desc);
+ if (error < 0) {
+ printf("dprc_init_container_obj(i=%d) failed: %d\n",
+ i, error);
+ return error;
+ }
+ }
+
+ return error;
+}
+
+int fsl_mc_ldpaa_init(bd_t *bis)
+{
+ int i, error = 0;
+ int dprc_opened = 0, container_id;
+ int num_child_objects = 0;
+
+ error = mc_init();
+ if (error < 0)
+ goto error;
+
+ error = dprc_get_container_id(dflt_mc_io, &container_id);
+ if (error < 0) {
+ printf("dprc_get_container_id() failed: %d\n", error);
+ goto error;
+ }
+
+ debug("fsl-mc: Container id=0x%x\n", container_id);
+
+ error = dprc_open(dflt_mc_io, container_id, &dflt_dprc_handle);
+ if (error < 0) {
+ printf("dprc_open() failed: %d\n", error);
+ goto error;
+ }
+ dprc_opened = true;
+
+ error = dprc_get_obj_count(dflt_mc_io,
+ dflt_dprc_handle,
+ &num_child_objects);
+ if (error < 0) {
+ printf("dprc_get_obj_count() failed: %d\n", error);
+ goto error;
+ }
+ debug("Total child in container %d = %d\n", container_id,
+ num_child_objects);
+
+ if (num_child_objects != 0) {
+ /*
+ * Discover objects currently in the DPRC container in the MC:
+ */
+ for (i = 0; i < num_child_objects; i++)
+ error = dprc_scan_container_obj(dflt_dprc_handle,
+ "dpbp", i);
+
+ for (i = 0; i < num_child_objects; i++)
+ error = dprc_scan_container_obj(dflt_dprc_handle,
+ "dpio", i);
+
+ for (i = 0; i < num_child_objects; i++)
+ error = dprc_scan_container_obj(dflt_dprc_handle,
+ "dpni", i);
+ }
+error:
+ if (dprc_opened)
+ dprc_close(dflt_mc_io, dflt_dprc_handle);
+
+ return error;
+}
+
+void fsl_mc_ldpaa_exit(bd_t *bis)
+{
+ int err;
+
+ err = dpio_disable(dflt_mc_io, dflt_dpio_handle);
+ if (err < 0) {
+ printf("dpio_disable() failed: %d\n", err);
+ return;
+ }
+ err = dpio_reset(dflt_mc_io, dflt_dpio_handle);
+ if (err < 0) {
+ printf("dpio_reset() failed: %d\n", err);
+ return;
+ }
+ err = dpio_close(dflt_mc_io, dflt_dpio_handle);
+ if (err < 0) {
+ printf("dpio_close() failed: %d\n", err);
+ return;
+ }
+
+ free(dflt_dpio);
+ free(dflt_dpbp);
+ free(dflt_mc_io);
+}
/*
* Freescale Layerscape MC I/O wrapper
*
- * Copyright (C) 2014 Freescale Semiconductor, Inc.
+ * Copyright (C) 2013-2015 Freescale Semiconductor, Inc.
* Author: German Rivera <German.Rivera@freescale.com>
*
* SPDX-License-Identifier: GPL-2.0+
struct mc_command *cmd)
{
enum mc_cmd_status status;
- int timeout = 2000;
+ int timeout = 6000;
mc_write_command(mc_io->mmio_regs, cmd);
if (status != MC_CMD_STATUS_OK) {
printf("Error: MC command failed (portal: %p, obj handle: %#x, command: %#x, status: %#x)\n",
mc_io->mmio_regs,
- (unsigned int)MC_CMD_HDR_READ_AUTHID(cmd->header),
+ (unsigned int)MC_CMD_HDR_READ_TOKEN(cmd->header),
(unsigned int)MC_CMD_HDR_READ_CMDID(cmd->header),
(unsigned int)status);
--- /dev/null
+/*
+ * Copyright (C) 2014 Freescale Semiconductor
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+#ifndef __FSL_DPAA_FD_H
+#define __FSL_DPAA_FD_H
+
+/* Place-holder for FDs, we represent it via the simplest form that we need for
+ * now. Different overlays may be needed to support different options, etc. (It
+ * is impractical to define One True Struct, because the resulting encoding
+ * routines (lots of read-modify-writes) would be worst-case performance whether
+ * or not circumstances required them.) */
+struct dpaa_fd {
+ union {
+ u32 words[8];
+ struct dpaa_fd_simple {
+ u32 addr_lo;
+ u32 addr_hi;
+ u32 len;
+ /* offset in the MS 16 bits, BPID in the LS 16 bits */
+ u32 bpid_offset;
+ u32 frc; /* frame context */
+ /* "err", "va", "cbmt", "asal", [...] */
+ u32 ctrl;
+ /* flow context */
+ u32 flc_lo;
+ u32 flc_hi;
+ } simple;
+ };
+};
+
+enum dpaa_fd_format {
+ dpaa_fd_single = 0,
+ dpaa_fd_list,
+ dpaa_fd_sg
+};
+
+static inline u64 ldpaa_fd_get_addr(const struct dpaa_fd *fd)
+{
+ return (u64)((((uint64_t)fd->simple.addr_hi) << 32)
+ + fd->simple.addr_lo);
+}
+
+static inline void ldpaa_fd_set_addr(struct dpaa_fd *fd, u64 addr)
+{
+ fd->simple.addr_hi = upper_32_bits(addr);
+ fd->simple.addr_lo = lower_32_bits(addr);
+}
+
+static inline u32 ldpaa_fd_get_len(const struct dpaa_fd *fd)
+{
+ return fd->simple.len;
+}
+
+static inline void ldpaa_fd_set_len(struct dpaa_fd *fd, u32 len)
+{
+ fd->simple.len = len;
+}
+
+static inline uint16_t ldpaa_fd_get_offset(const struct dpaa_fd *fd)
+{
+ return (uint16_t)(fd->simple.bpid_offset >> 16) & 0x0FFF;
+}
+
+static inline void ldpaa_fd_set_offset(struct dpaa_fd *fd, uint16_t offset)
+{
+ fd->simple.bpid_offset &= 0xF000FFFF;
+ fd->simple.bpid_offset |= (u32)offset << 16;
+}
+
+static inline uint16_t ldpaa_fd_get_bpid(const struct dpaa_fd *fd)
+{
+ return (uint16_t)(fd->simple.bpid_offset & 0xFFFF);
+}
+
+static inline void ldpaa_fd_set_bpid(struct dpaa_fd *fd, uint16_t bpid)
+{
+ fd->simple.bpid_offset &= 0xFFFF0000;
+ fd->simple.bpid_offset |= (u32)bpid;
+}
+
+/* When frames are dequeued, the FDs show up inside "dequeue" result structures
+ * (if at all, not all dequeue results contain valid FDs). This structure type
+ * is intentionally defined without internal detail, and the only reason it
+ * isn't declared opaquely (without size) is to allow the user to provide
+ * suitably-sized (and aligned) memory for these entries. */
+struct ldpaa_dq {
+ uint32_t dont_manipulate_directly[16];
+};
+
+/* Parsing frame dequeue results */
+#define LDPAA_DQ_STAT_FQEMPTY 0x80
+#define LDPAA_DQ_STAT_HELDACTIVE 0x40
+#define LDPAA_DQ_STAT_FORCEELIGIBLE 0x20
+#define LDPAA_DQ_STAT_VALIDFRAME 0x10
+#define LDPAA_DQ_STAT_ODPVALID 0x04
+#define LDPAA_DQ_STAT_VOLATILE 0x02
+#define LDPAA_DQ_STAT_EXPIRED 0x01
+uint32_t ldpaa_dq_flags(const struct ldpaa_dq *);
+static inline int ldpaa_dq_is_pull(const struct ldpaa_dq *dq)
+{
+ return (int)(ldpaa_dq_flags(dq) & LDPAA_DQ_STAT_VOLATILE);
+}
+static inline int ldpaa_dq_is_pull_complete(
+ const struct ldpaa_dq *dq)
+{
+ return (int)(ldpaa_dq_flags(dq) & LDPAA_DQ_STAT_EXPIRED);
+}
+/* seqnum/odpid are valid only if VALIDFRAME and ODPVALID flags are TRUE */
+uint16_t ldpaa_dq_seqnum(const struct ldpaa_dq *);
+uint16_t ldpaa_dq_odpid(const struct ldpaa_dq *);
+uint32_t ldpaa_dq_fqid(const struct ldpaa_dq *);
+uint32_t ldpaa_dq_byte_count(const struct ldpaa_dq *);
+uint32_t ldpaa_dq_frame_count(const struct ldpaa_dq *);
+uint32_t ldpaa_dq_fqd_ctx_hi(const struct ldpaa_dq *);
+uint32_t ldpaa_dq_fqd_ctx_lo(const struct ldpaa_dq *);
+/* get the Frame Descriptor */
+const struct dpaa_fd *ldpaa_dq_fd(const struct ldpaa_dq *);
+
+#endif /* __FSL_DPAA_FD_H */
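A short sketch of how the accessors above compose when filling in a simple frame descriptor (illustrative only; the include path is assumed to follow the fsl-mc convention used by the other headers in this patch):

#include <common.h>
#include <fsl-mc/fsl_dpaa_fd.h>

/* Describe one contiguous buffer of 'len' bytes taken from pool 'bpid'. */
static void example_build_fd(struct dpaa_fd *fd, u64 buf_addr, u32 len,
                             uint16_t bpid, uint16_t offset)
{
        memset(fd, 0, sizeof(*fd));
        ldpaa_fd_set_addr(fd, buf_addr);
        ldpaa_fd_set_offset(fd, offset);        /* data offset in the buffer */
        ldpaa_fd_set_bpid(fd, bpid);            /* pool to release back into */
        ldpaa_fd_set_len(fd, len);
}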
--- /dev/null
+/*
+ * Freescale Layerscape MC I/O wrapper
+ *
+ * Copyright (C) 2013-2015 Freescale Semiconductor, Inc.
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+/*!
+ * @file fsl_dpbp.h
+ * @brief Data Path Buffer Pool API
+ */
+#ifndef __FSL_DPBP_H
+#define __FSL_DPBP_H
+
+/* DPBP Version */
+#define DPBP_VER_MAJOR 2
+#define DPBP_VER_MINOR 0
+
+/* Command IDs */
+#define DPBP_CMDID_CLOSE 0x800
+#define DPBP_CMDID_OPEN 0x804
+
+#define DPBP_CMDID_ENABLE 0x002
+#define DPBP_CMDID_DISABLE 0x003
+#define DPBP_CMDID_GET_ATTR 0x004
+#define DPBP_CMDID_RESET 0x005
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPBP_CMD_OPEN(cmd, dpbp_id) \
+ MC_CMD_OP(cmd, 0, 0, 32, int, dpbp_id)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPBP_RSP_GET_ATTRIBUTES(cmd, attr) \
+do { \
+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, attr->bpid); \
+ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\
+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
+} while (0)
+
+/* Data Path Buffer Pool API
+ * Contains initialization APIs and runtime control APIs for DPBP
+ */
+
+struct fsl_mc_io;
+
+/**
+ * dpbp_open() - Open a control session for the specified object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dpbp_id: DPBP unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpbp_create function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_open(struct fsl_mc_io *mc_io, int dpbp_id, uint16_t *token);
+
+/**
+ * dpbp_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPBP object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_close(struct fsl_mc_io *mc_io, uint16_t token);
+
+/**
+ * dpbp_enable() - Enable the DPBP.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPBP object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+
+int dpbp_enable(struct fsl_mc_io *mc_io, uint16_t token);
+
+/**
+ * dpbp_disable() - Disable the DPBP.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPBP object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_disable(struct fsl_mc_io *mc_io, uint16_t token);
+
+/**
+ * dpbp_reset() - Reset the DPBP, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPBP object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_reset(struct fsl_mc_io *mc_io, uint16_t token);
+
+/**
+ * struct dpbp_attr - Structure representing DPBP attributes
+ * @id: DPBP object ID
+ * @version: DPBP version
+ * @bpid: Hardware buffer pool ID; should be used as an argument in
+ * acquire/release operations on buffers
+ */
+struct dpbp_attr {
+ int id;
+ /**
+ * struct version - Structure representing DPBP version
+ * @major: DPBP major version
+ * @minor: DPBP minor version
+ */
+ struct {
+ uint16_t major;
+ uint16_t minor;
+ } version;
+ uint16_t bpid;
+};
+
+
+/**
+ * dpbp_get_attributes - Retrieve DPBP attributes.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPBP object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_get_attributes(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ struct dpbp_attr *attr);
+
+/** @} */
+
+#endif /* __FSL_DPBP_H */
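Putting the DPBP commands together, a typical caller-side sequence looks roughly like this (a sketch, assuming the MC portal has already been initialized):

#include <common.h>
#include <fsl-mc/fsl_mc_sys.h>
#include <fsl-mc/fsl_dpbp.h>

static int example_bring_up_dpbp(struct fsl_mc_io *mc_io, int dpbp_id,
                                 uint16_t *bpid)
{
        struct dpbp_attr attr;
        uint16_t token;
        int err;

        err = dpbp_open(mc_io, dpbp_id, &token);
        if (err)
                return err;

        err = dpbp_enable(mc_io, token);
        if (err)
                goto out_close;

        err = dpbp_get_attributes(mc_io, token, &attr);
        if (err)
                goto out_disable;

        *bpid = attr.bpid;      /* hardware pool ID for acquire/release */
        return dpbp_close(mc_io, token);

out_disable:
        dpbp_disable(mc_io, token);
out_close:
        dpbp_close(mc_io, token);
        return err;
}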
--- /dev/null
+/*
+ * Copyright (C) 2013-2015 Freescale Semiconductor
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#ifndef _FSL_DPIO_H
+#define _FSL_DPIO_H
+
+/* DPIO Version */
+#define DPIO_VER_MAJOR 2
+#define DPIO_VER_MINOR 1
+
+/* Command IDs */
+#define DPIO_CMDID_CLOSE 0x800
+#define DPIO_CMDID_OPEN 0x803
+
+#define DPIO_CMDID_ENABLE 0x002
+#define DPIO_CMDID_DISABLE 0x003
+#define DPIO_CMDID_GET_ATTR 0x004
+#define DPIO_CMDID_RESET 0x005
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPIO_CMD_OPEN(cmd, dpio_id) \
+ MC_CMD_OP(cmd, 0, 0, 32, int, dpio_id)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPIO_RSP_GET_ATTR(cmd, attr) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\
+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->qbman_portal_id);\
+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, attr->num_priorities);\
+ MC_RSP_OP(cmd, 0, 56, 4, enum dpio_channel_mode, attr->channel_mode);\
+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->qbman_portal_ce_paddr);\
+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, attr->qbman_portal_ci_paddr);\
+ MC_RSP_OP(cmd, 3, 0, 16, uint16_t, attr->version.major);\
+ MC_RSP_OP(cmd, 3, 16, 16, uint16_t, attr->version.minor);\
+} while (0)
+
+/* Data Path I/O Portal API
+ * Contains initialization APIs and runtime control APIs for DPIO
+ */
+
+struct fsl_mc_io;
+/**
+ * dpio_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dpio_id: DPIO unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpio_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_open(struct fsl_mc_io *mc_io, int dpio_id, uint16_t *token);
+
+/**
+ * dpio_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPIO object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_close(struct fsl_mc_io *mc_io, uint16_t token);
+
+/**
+ * enum dpio_channel_mode - DPIO notification channel mode
+ * @DPIO_NO_CHANNEL: No support for notification channel
+ * @DPIO_LOCAL_CHANNEL: Notifications on data availability can be received by a
+ * dedicated channel in the DPIO; user should point the queue's
+ * destination in the relevant interface to this DPIO
+ */
+enum dpio_channel_mode {
+ DPIO_NO_CHANNEL = 0,
+ DPIO_LOCAL_CHANNEL = 1,
+};
+
+/**
+ * dpio_enable() - Enable the DPIO, allow I/O portal operations.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpio_enable(struct fsl_mc_io *mc_io, uint16_t token);
+
+/**
+ * dpio_disable() - Disable the DPIO, stop any I/O portal operation.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpio_disable(struct fsl_mc_io *mc_io, uint16_t token);
+
+/**
+ * dpio_reset() - Reset the DPIO, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_reset(struct fsl_mc_io *mc_io, uint16_t token);
+
+/**
+ * struct dpio_attr - Structure representing DPIO attributes
+ * @id: DPIO object ID
+ * @version: DPIO version
+ * @qbman_portal_ce_paddr: Physical address of the software portal
+ * cache-enabled area
+ * @qbman_portal_ci_paddr: Physical address of the software portal
+ * cache-inhibited area
+ * @qbman_portal_id: Software portal ID
+ * @channel_mode: Notification channel mode
+ * @num_priorities: Number of priorities for the notification channel (1-8);
+ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
+ */
+struct dpio_attr {
+ int id;
+ /**
+ * struct version - DPIO version
+ * @major: DPIO major version
+ * @minor: DPIO minor version
+ */
+ struct {
+ uint16_t major;
+ uint16_t minor;
+ } version;
+ uint64_t qbman_portal_ce_paddr;
+ uint64_t qbman_portal_ci_paddr;
+ uint16_t qbman_portal_id;
+ enum dpio_channel_mode channel_mode;
+ uint8_t num_priorities;
+};
+
+/**
+ * dpio_get_attributes() - Retrieve DPIO attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPIO object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpio_get_attributes(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ struct dpio_attr *attr);
+
+#endif /* _FSL_DPIO_H */
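The corresponding DPIO bring-up mirrors the dpio_init() routine added earlier in this patch; a condensed sketch (illustrative only):

#include <common.h>
#include <fsl-mc/fsl_mc_sys.h>
#include <fsl-mc/fsl_dpio.h>

static int example_bring_up_dpio(struct fsl_mc_io *mc_io, int dpio_id,
                                 uint16_t *token)
{
        struct dpio_attr attr;
        int err;

        err = dpio_open(mc_io, dpio_id, token);
        if (err)
                return err;

        err = dpio_get_attributes(mc_io, *token, &attr);
        if (err)
                goto out_close;

        err = dpio_enable(mc_io, *token);
        if (err)
                goto out_close;

        debug("QBMan portal %d: ce=0x%llx ci=0x%llx\n", attr.qbman_portal_id,
              attr.qbman_portal_ce_paddr, attr.qbman_portal_ci_paddr);
        return 0;

out_close:
        dpio_close(mc_io, *token);
        return err;
}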
-/* Copyright 2014 Freescale Semiconductor Inc.
+/* Copyright 2013-2015 Freescale Semiconductor Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*/
-/*!
- * @file fsl_dpmng.h
- * @brief Management Complex General API
- */
-
#ifndef __FSL_DPMNG_H
#define __FSL_DPMNG_H
-/*!
- * @Group grp_dpmng Management Complex General API
- *
- * @brief Contains general API for the Management Complex firmware
- * @{
+/* Management Complex General API
+ * Contains general API for the Management Complex firmware
*/
struct fsl_mc_io;
/**
- * @brief Management Complex firmware version information
+ * Management Complex firmware version information
*/
-#define MC_VER_MAJOR 4
+#define MC_VER_MAJOR 6
#define MC_VER_MINOR 0
+/**
+ * struct mc_version - Management Complex firmware version information
+ * @major: Major version number: incremented on API compatibility changes
+ * @minor: Minor version number: incremented on API additions (that are
+ * backward compatible); reset when major version is incremented
+ * @revision: Internal revision number: incremented on implementation changes
+ * and/or bug fixes that have no impact on API
+ */
struct mc_version {
uint32_t major;
- /*!< Major version number: incremented on API compatibility changes */
uint32_t minor;
- /*!< Minor version number: incremented on API additions (that are
- * backward compatible); reset when major version is incremented
- */
uint32_t revision;
- /*!< Internal revision number: incremented on implementation changes
- * and/or bug fixes that have no impact on API
- */
};
/**
- * @brief Retrieves the Management Complex firmware version information
- *
- * @param[in] mc_io Pointer to opaque I/O object
- * @param[out] mc_ver_info Pointer to version information structure
+ * mc_get_version() - Retrieves the Management Complex firmware
+ * version information
+ * @mc_io: Pointer to opaque I/O object
+ * @mc_ver_info: Returned version information structure
*
- * @returns '0' on Success; Error code otherwise.
+ * Return: '0' on Success; Error code otherwise.
*/
int mc_get_version(struct fsl_mc_io *mc_io, struct mc_version *mc_ver_info);
-/**
- * @brief Resets an AIOP tile
- *
- * @param[in] mc_io Pointer to opaque I/O object
- * @param[in] container_id AIOP container ID
- * @param[in] aiop_tile_id AIOP tile ID to reset
- *
- * @returns '0' on Success; Error code otherwise.
- */
-int dpmng_reset_aiop(struct fsl_mc_io *mc_io,
- int container_id,
- int aiop_tile_id);
-
-/**
- * @brief Loads an image to AIOP tile
- *
- * @param[in] mc_io Pointer to opaque I/O object
- * @param[in] container_id AIOP container ID
- * @param[in] aiop_tile_id AIOP tile ID to reset
- * @param[in] img_iova I/O virtual address of AIOP ELF image
- * @param[in] img_size Size of AIOP ELF image in memory (in bytes)
- *
- * @returns '0' on Success; Error code otherwise.
- */
-int dpmng_load_aiop(struct fsl_mc_io *mc_io,
- int container_id,
- int aiop_tile_id,
- uint64_t img_iova,
- uint32_t img_size);
-
-/**
- * @brief AIOP run configuration
- */
-struct dpmng_aiop_run_cfg {
- uint32_t cores_mask;
- /*!< Mask of AIOP cores to run (core 0 in most significant bit) */
- uint64_t options;
- /*!< Execution options (currently none defined) */
-};
-
-/**
- * @brief Starts AIOP tile execution
- *
- * @param[in] mc_io Pointer to MC portal's I/O object
- * @param[in] container_id AIOP container ID
- * @param[in] aiop_tile_id AIOP tile ID to reset
- * @param[in] cfg AIOP run configuration
- *
- * @returns '0' on Success; Error code otherwise.
- */
-int dpmng_run_aiop(struct fsl_mc_io *mc_io,
- int container_id,
- int aiop_tile_id,
- const struct dpmng_aiop_run_cfg *cfg);
-
-/**
- * @brief Resets MC portal
- *
- * This function closes all object handles (tokens) that are currently
- * open in the MC portal on which the command is submitted. This allows
- * cleanup of stale handles that belong to non-functional user processes.
- *
- * @param[in] mc_io Pointer to MC portal's I/O object
- *
- * @returns '0' on Success; Error code otherwise.
- */
-int dpmng_reset_mc_portal(struct fsl_mc_io *mc_io);
-
-/** @} */
-
#endif /* __FSL_DPMNG_H */
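A minimal version-check sketch using mc_get_version(), similar to what mc_init() does earlier in this patch (illustrative only):

#include <common.h>
#include <fsl-mc/fsl_mc_sys.h>
#include <fsl-mc/fsl_dpmng.h>

static int example_check_mc_version(struct fsl_mc_io *mc_io)
{
        struct mc_version ver;
        int err;

        err = mc_get_version(mc_io, &ver);
        if (err)
                return err;

        if (ver.major != MC_VER_MAJOR)
                printf("fsl-mc: expected MC major version %d, found %d.%d.%d\n",
                       MC_VER_MAJOR, ver.major, ver.minor, ver.revision);

        return 0;
}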
--- /dev/null
+/*
+ * Copyright (C) 2013-2015 Freescale Semiconductor
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+#ifndef _FSL_DPNI_H
+#define _FSL_DPNI_H
+
+/* DPNI Version */
+#define DPNI_VER_MAJOR 4
+#define DPNI_VER_MINOR 0
+
+/* Command IDs */
+#define DPNI_CMDID_OPEN 0x801
+#define DPNI_CMDID_CLOSE 0x800
+
+#define DPNI_CMDID_ENABLE 0x002
+#define DPNI_CMDID_DISABLE 0x003
+#define DPNI_CMDID_GET_ATTR 0x004
+#define DPNI_CMDID_RESET 0x005
+
+#define DPNI_CMDID_SET_POOLS 0x200
+#define DPNI_CMDID_GET_RX_BUFFER_LAYOUT 0x201
+#define DPNI_CMDID_SET_RX_BUFFER_LAYOUT 0x202
+#define DPNI_CMDID_GET_TX_BUFFER_LAYOUT 0x203
+#define DPNI_CMDID_SET_TX_BUFFER_LAYOUT 0x204
+#define DPNI_CMDID_SET_TX_CONF_BUFFER_LAYOUT 0x205
+#define DPNI_CMDID_GET_TX_CONF_BUFFER_LAYOUT 0x206
+
+#define DPNI_CMDID_GET_QDID 0x210
+#define DPNI_CMDID_GET_TX_DATA_OFFSET 0x212
+#define DPNI_CMDID_GET_COUNTER 0x213
+#define DPNI_CMDID_SET_COUNTER 0x214
+#define DPNI_CMDID_GET_LINK_STATE 0x215
+#define DPNI_CMDID_SET_LINK_CFG 0x21A
+
+#define DPNI_CMDID_SET_PRIM_MAC 0x224
+#define DPNI_CMDID_GET_PRIM_MAC 0x225
+#define DPNI_CMDID_ADD_MAC_ADDR 0x226
+#define DPNI_CMDID_REMOVE_MAC_ADDR 0x227
+
+#define DPNI_CMDID_SET_TX_FLOW 0x236
+#define DPNI_CMDID_GET_TX_FLOW 0x237
+#define DPNI_CMDID_SET_RX_FLOW 0x238
+#define DPNI_CMDID_GET_RX_FLOW 0x239
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_OPEN(cmd, dpni_id) \
+ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_SET_POOLS(cmd, cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_dpbp); \
+ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->pools[0].dpbp_id); \
+ MC_CMD_OP(cmd, 4, 32, 16, uint16_t, cfg->pools[0].buffer_size);\
+ MC_CMD_OP(cmd, 1, 0, 32, int, cfg->pools[1].dpbp_id); \
+ MC_CMD_OP(cmd, 4, 48, 16, uint16_t, cfg->pools[1].buffer_size);\
+ MC_CMD_OP(cmd, 1, 32, 32, int, cfg->pools[2].dpbp_id); \
+ MC_CMD_OP(cmd, 5, 0, 16, uint16_t, cfg->pools[2].buffer_size);\
+ MC_CMD_OP(cmd, 2, 0, 32, int, cfg->pools[3].dpbp_id); \
+ MC_CMD_OP(cmd, 5, 16, 16, uint16_t, cfg->pools[3].buffer_size);\
+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->pools[4].dpbp_id); \
+ MC_CMD_OP(cmd, 5, 32, 16, uint16_t, cfg->pools[4].buffer_size);\
+ MC_CMD_OP(cmd, 3, 0, 32, int, cfg->pools[5].dpbp_id); \
+ MC_CMD_OP(cmd, 5, 48, 16, uint16_t, cfg->pools[5].buffer_size);\
+ MC_CMD_OP(cmd, 3, 32, 32, int, cfg->pools[6].dpbp_id); \
+ MC_CMD_OP(cmd, 6, 0, 16, uint16_t, cfg->pools[6].buffer_size);\
+ MC_CMD_OP(cmd, 4, 0, 32, int, cfg->pools[7].dpbp_id); \
+ MC_CMD_OP(cmd, 6, 16, 16, uint16_t, cfg->pools[7].buffer_size);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_RSP_GET_ATTR(cmd, attr) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\
+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->max_tcs); \
+ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, attr->max_senders); \
+ MC_RSP_OP(cmd, 0, 48, 8, enum net_prot, attr->start_hdr); \
+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->options); \
+ MC_RSP_OP(cmd, 2, 0, 8, uint8_t, attr->max_unicast_filters); \
+ MC_RSP_OP(cmd, 2, 8, 8, uint8_t, attr->max_multicast_filters);\
+ MC_RSP_OP(cmd, 2, 16, 8, uint8_t, attr->max_vlan_filters); \
+ MC_RSP_OP(cmd, 2, 24, 8, uint8_t, attr->max_qos_entries); \
+ MC_RSP_OP(cmd, 2, 32, 8, uint8_t, attr->max_qos_key_size); \
+ MC_RSP_OP(cmd, 2, 40, 8, uint8_t, attr->max_dist_key_size); \
+ MC_RSP_OP(cmd, 3, 0, 8, uint8_t, attr->max_dist_per_tc[0]); \
+ MC_RSP_OP(cmd, 3, 8, 8, uint8_t, attr->max_dist_per_tc[1]); \
+ MC_RSP_OP(cmd, 3, 16, 8, uint8_t, attr->max_dist_per_tc[2]); \
+ MC_RSP_OP(cmd, 3, 24, 8, uint8_t, attr->max_dist_per_tc[3]); \
+ MC_RSP_OP(cmd, 3, 32, 8, uint8_t, attr->max_dist_per_tc[4]); \
+ MC_RSP_OP(cmd, 3, 40, 8, uint8_t, attr->max_dist_per_tc[5]); \
+ MC_RSP_OP(cmd, 3, 48, 8, uint8_t, attr->max_dist_per_tc[6]); \
+ MC_RSP_OP(cmd, 3, 56, 8, uint8_t, attr->max_dist_per_tc[7]); \
+ MC_RSP_OP(cmd, 4, 0, 16, uint16_t, \
+ attr->ipr_cfg.max_reass_frm_size); \
+ MC_RSP_OP(cmd, 4, 16, 16, uint16_t, \
+ attr->ipr_cfg.min_frag_size_ipv4); \
+ MC_RSP_OP(cmd, 4, 32, 16, uint16_t, \
+ attr->ipr_cfg.min_frag_size_ipv6); \
+ MC_RSP_OP(cmd, 5, 0, 16, uint16_t, \
+ attr->ipr_cfg.max_open_frames_ipv4); \
+ MC_RSP_OP(cmd, 5, 16, 16, uint16_t, \
+ attr->ipr_cfg.max_open_frames_ipv6); \
+ MC_RSP_OP(cmd, 5, 32, 16, uint16_t, attr->version.major);\
+ MC_RSP_OP(cmd, 5, 48, 16, uint16_t, attr->version.minor);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_RSP_GET_RX_BUFFER_LAYOUT(cmd, layout) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \
+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \
+ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \
+ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \
+ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \
+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \
+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_SET_RX_BUFFER_LAYOUT(cmd, layout) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \
+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \
+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, layout->options); \
+ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \
+ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \
+ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \
+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \
+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_RSP_GET_TX_BUFFER_LAYOUT(cmd, layout) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \
+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \
+ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \
+ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \
+ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \
+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \
+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_SET_TX_BUFFER_LAYOUT(cmd, layout) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \
+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \
+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, layout->options); \
+ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \
+ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \
+ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \
+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \
+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_RSP_GET_TX_CONF_BUFFER_LAYOUT(cmd, layout) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \
+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \
+ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \
+ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \
+ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \
+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \
+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_SET_TX_CONF_BUFFER_LAYOUT(cmd, layout) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \
+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \
+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, layout->options); \
+ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \
+ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \
+ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \
+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \
+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_RSP_GET_QDID(cmd, qdid) \
+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, qdid)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_RSP_GET_TX_DATA_OFFSET(cmd, data_offset) \
+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, data_offset)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_GET_COUNTER(cmd, counter) \
+ MC_CMD_OP(cmd, 0, 0, 16, enum dpni_counter, counter)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_RSP_GET_COUNTER(cmd, value) \
+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, value)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_SET_COUNTER(cmd, counter, value) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 16, enum dpni_counter, counter); \
+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, value); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_SET_LINK_CFG(cmd, cfg) \
+do { \
+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->rate);\
+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_RSP_GET_LINK_STATE(cmd, state) \
+do { \
+ MC_RSP_OP(cmd, 0, 32, 1, int, state->up);\
+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, state->rate);\
+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, state->options);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_SET_PRIMARY_MAC_ADDR(cmd, mac_addr) \
+do { \
+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_RSP_GET_PRIMARY_MAC_ADDR(cmd, mac_addr) \
+do { \
+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
+ MC_RSP_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
+ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
+ MC_RSP_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_ADD_MAC_ADDR(cmd, mac_addr) \
+do { \
+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_REMOVE_MAC_ADDR(cmd, mac_addr) \
+do { \
+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_SET_TX_FLOW(cmd, flow_id, cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 32, int, \
+ cfg->conf_err_cfg.queue_cfg.dest_cfg.dest_id);\
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, \
+ cfg->conf_err_cfg.queue_cfg.dest_cfg.priority);\
+ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, \
+ cfg->conf_err_cfg.queue_cfg.dest_cfg.dest_type);\
+ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->conf_err_cfg.errors_only);\
+ MC_CMD_OP(cmd, 0, 43, 1, int, cfg->l3_chksum_gen);\
+ MC_CMD_OP(cmd, 0, 44, 1, int, cfg->l4_chksum_gen);\
+ MC_CMD_OP(cmd, 0, 45, 1, int, \
+ cfg->conf_err_cfg.use_default_queue);\
+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id);\
+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, \
+ cfg->conf_err_cfg.queue_cfg.user_ctx);\
+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
+ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, \
+ cfg->conf_err_cfg.queue_cfg.options);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_RSP_SET_TX_FLOW(cmd, flow_id) \
+ MC_RSP_OP(cmd, 0, 48, 16, uint16_t, flow_id)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_GET_TX_FLOW(cmd, flow_id) \
+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_RSP_GET_TX_FLOW(cmd, attr) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 32, int, \
+ attr->conf_err_attr.queue_attr.dest_cfg.dest_id);\
+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, \
+ attr->conf_err_attr.queue_attr.dest_cfg.priority);\
+ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, \
+ attr->conf_err_attr.queue_attr.dest_cfg.dest_type);\
+ MC_RSP_OP(cmd, 0, 42, 1, int, attr->conf_err_attr.errors_only);\
+ MC_RSP_OP(cmd, 0, 43, 1, int, attr->l3_chksum_gen);\
+ MC_RSP_OP(cmd, 0, 44, 1, int, attr->l4_chksum_gen);\
+ MC_RSP_OP(cmd, 0, 45, 1, int, \
+ attr->conf_err_attr.use_default_queue);\
+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, \
+ attr->conf_err_attr.queue_attr.user_ctx);\
+ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, \
+ attr->conf_err_attr.queue_attr.fqid);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_SET_RX_FLOW(cmd, tc_id, flow_id, cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority);\
+ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, cfg->dest_cfg.dest_type);\
+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \
+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \
+ MC_CMD_OP(cmd, 2, 16, 8, uint8_t, tc_id); \
+ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->options); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_GET_RX_FLOW(cmd, tc_id, flow_id) \
+do { \
+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \
+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_RSP_GET_RX_FLOW(cmd, attr) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id); \
+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
+ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, attr->dest_cfg.dest_type); \
+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx); \
+ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, attr->fqid); \
+} while (0)
+
+enum net_prot {
+ NET_PROT_NONE = 0,
+ NET_PROT_PAYLOAD,
+ NET_PROT_ETH,
+ NET_PROT_VLAN,
+ NET_PROT_IPV4,
+ NET_PROT_IPV6,
+ NET_PROT_IP,
+ NET_PROT_TCP,
+ NET_PROT_UDP,
+ NET_PROT_UDP_LITE,
+ NET_PROT_IPHC,
+ NET_PROT_SCTP,
+ NET_PROT_SCTP_CHUNK_DATA,
+ NET_PROT_PPPOE,
+ NET_PROT_PPP,
+ NET_PROT_PPPMUX,
+ NET_PROT_PPPMUX_SUBFRM,
+ NET_PROT_L2TPV2,
+ NET_PROT_L2TPV3_CTRL,
+ NET_PROT_L2TPV3_SESS,
+ NET_PROT_LLC,
+ NET_PROT_LLC_SNAP,
+ NET_PROT_NLPID,
+ NET_PROT_SNAP,
+ NET_PROT_MPLS,
+ NET_PROT_IPSEC_AH,
+ NET_PROT_IPSEC_ESP,
+ NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
+ NET_PROT_MACSEC,
+ NET_PROT_GRE,
+ NET_PROT_MINENCAP,
+ NET_PROT_DCCP,
+ NET_PROT_ICMP,
+ NET_PROT_IGMP,
+ NET_PROT_ARP,
+ NET_PROT_CAPWAP_DATA,
+ NET_PROT_CAPWAP_CTRL,
+ NET_PROT_RFC2684,
+ NET_PROT_ICMPV6,
+ NET_PROT_FCOE,
+ NET_PROT_FIP,
+ NET_PROT_ISCSI,
+ NET_PROT_GTP,
+ NET_PROT_USER_DEFINED_L2,
+ NET_PROT_USER_DEFINED_L3,
+ NET_PROT_USER_DEFINED_L4,
+ NET_PROT_USER_DEFINED_L5,
+ NET_PROT_USER_DEFINED_SHIM1,
+ NET_PROT_USER_DEFINED_SHIM2,
+
+ NET_PROT_DUMMY_LAST
+};
+
+/* Data Path Network Interface API
+ * Contains initialization APIs and runtime control APIs for DPNI
+ */
+
+struct fsl_mc_io;
+
+/* General DPNI macros */
+
+/* Maximum number of traffic classes */
+#define DPNI_MAX_TC 8
+/* Maximum number of buffer pools per DPNI */
+#define DPNI_MAX_DPBP 8
+
+/* All traffic classes considered; see dpni_set_rx_flow() */
+#define DPNI_ALL_TCS (uint8_t)(-1)
+/* All flows within traffic class considered; see dpni_set_rx_flow() */
+#define DPNI_ALL_TC_FLOWS (uint16_t)(-1)
+/* Generate new flow ID; see dpni_set_tx_flow() */
+#define DPNI_NEW_FLOW_ID (uint16_t)(-1)
+
+/**
+ * dpni_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dpni_id: DPNI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpni_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_open(struct fsl_mc_io *mc_io, int dpni_id, uint16_t *token);
+
+/**
+ * dpni_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPNI object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_close(struct fsl_mc_io *mc_io, uint16_t token);
+
+/**
+ * struct dpni_ipr_cfg - Structure representing IP reassembly configuration
+ * @max_reass_frm_size: Maximum size of the reassembled frame
+ * @min_frag_size_ipv4: Minimum fragment size of IPv4 fragments
+ * @min_frag_size_ipv6: Minimum fragment size of IPv6 fragments
+ * @max_open_frames_ipv4: Maximum concurrent IPv4 packets in reassembly process
+ * @max_open_frames_ipv6: Maximum concurrent IPv6 packets in reassembly process
+ */
+struct dpni_ipr_cfg {
+ uint16_t max_reass_frm_size;
+ uint16_t min_frag_size_ipv4;
+ uint16_t min_frag_size_ipv6;
+ uint16_t max_open_frames_ipv4;
+ uint16_t max_open_frames_ipv6;
+};
+
+/**
+ * struct dpni_pools_cfg - Structure representing buffer pools configuration
+ * @num_dpbp: Number of DPBPs
+ * @pools: Array of buffer pools parameters; The number of valid entries
+ * must match 'num_dpbp' value
+ */
+struct dpni_pools_cfg {
+ uint8_t num_dpbp;
+ /**
+ * struct pools - Buffer pools parameters
+ * @dpbp_id: DPBP object ID
+ * @buffer_size: Buffer size
+ */
+ struct {
+ int dpbp_id;
+ uint16_t buffer_size;
+ } pools[DPNI_MAX_DPBP];
+};
+
+/**
+ * dpni_set_pools() - Set buffer pools configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPNI object
+ * @cfg: Buffer pools configuration
+ *
+ * This function is mandatory for DPNI operation.
+ * warning: Allowed only when DPNI is disabled
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_pools(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ const struct dpni_pools_cfg *cfg);
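For illustration, the pools configuration is typically filled in from a DPBP's attributes before the DPNI is enabled (a sketch; the buffer size below is an arbitrary example value):

#include <common.h>
#include <fsl-mc/fsl_mc_sys.h>
#include <fsl-mc/fsl_dpbp.h>
#include <fsl-mc/fsl_dpni.h>

static int example_attach_pool(struct fsl_mc_io *mc_io, uint16_t dpni_token,
                               const struct dpbp_attr *dpbp_attr)
{
        struct dpni_pools_cfg pools = { 0 };

        pools.num_dpbp = 1;
        pools.pools[0].dpbp_id = dpbp_attr->id;
        pools.pools[0].buffer_size = 2048;      /* bytes, example value */

        /* allowed only while the DPNI is disabled */
        return dpni_set_pools(mc_io, dpni_token, &pools);
}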
+
+/**
+ * dpni_enable() - Enable the DPNI, allow sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_enable(struct fsl_mc_io *mc_io, uint16_t token);
+
+/**
+ * dpni_disable() - Disable the DPNI, stop sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_disable(struct fsl_mc_io *mc_io, uint16_t token);
+
+
+/**
+ * dpni_reset() - Reset the DPNI, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_reset(struct fsl_mc_io *mc_io, uint16_t token);
+
+/**
+ * struct dpni_attr - Structure representing DPNI attributes
+ * @id: DPNI object ID
+ * @version: DPNI version
+ * @start_hdr: Indicates the packet starting header for parsing
+ * @options: Mask of available options; reflects the value as was given in
+ * object's creation
+ * @max_senders: Maximum number of different senders; used as the number
+ * of dedicated Tx flows;
+ * @max_tcs: Maximum number of traffic classes (for both Tx and Rx)
+ * @max_dist_per_tc: Maximum distribution size per Rx traffic class;
+ * Set to the required value minus 1
+ * @max_unicast_filters: Maximum number of unicast filters
+ * @max_multicast_filters: Maximum number of multicast filters
+ * @max_vlan_filters: Maximum number of VLAN filters
+ * @max_qos_entries: if 'max_tcs > 1', declares the maximum entries in QoS table
+ * @max_qos_key_size: Maximum key size for the QoS look-up
+ * @max_dist_key_size: Maximum key size for the distribution look-up
+ * @ipr_cfg: IP reassembly configuration
+ */
+struct dpni_attr {
+ int id;
+ /**
+ * struct version - DPNI version
+ * @major: DPNI major version
+ * @minor: DPNI minor version
+ */
+ struct {
+ uint16_t major;
+ uint16_t minor;
+ } version;
+ enum net_prot start_hdr;
+ uint64_t options;
+ uint8_t max_senders;
+ uint8_t max_tcs;
+ uint8_t max_dist_per_tc[DPNI_MAX_TC];
+ uint8_t max_unicast_filters;
+ uint8_t max_multicast_filters;
+ uint8_t max_vlan_filters;
+ uint8_t max_qos_entries;
+ uint8_t max_qos_key_size;
+ uint8_t max_dist_key_size;
+ struct dpni_ipr_cfg ipr_cfg;
+};
+/**
+ * dpni_get_attributes() - Retrieve DPNI attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPNI object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_attributes(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ struct dpni_attr *attr);
+
+/* DPNI buffer layout modification options */
+
+/* Select to modify the time-stamp setting */
+#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001
+/* Select to modify the parser-result setting; not applicable for Tx */
+#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002
+/* Select to modify the frame-status setting */
+#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004
+/* Select to modify the private-data-size setting */
+#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008
+/* Select to modify the data-alignment setting */
+#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010
+/* Select to modify the data-head-room setting */
+#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020
+/* Select to modify the data-tail-room setting */
+#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040
+
+/**
+ * struct dpni_buffer_layout - Structure representing DPNI buffer layout
+ * @options: Flags representing the suggested modifications to the buffer
+ * layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_<X>' flags
+ * @pass_timestamp: Pass timestamp value
+ * @pass_parser_result: Pass parser results
+ * @pass_frame_status: Pass frame status
+ * @private_data_size: Size kept for private data (in bytes)
+ * @data_align: Data alignment
+ * @data_head_room: Data head room
+ * @data_tail_room: Data tail room
+ */
+struct dpni_buffer_layout {
+ uint32_t options;
+ int pass_timestamp;
+ int pass_parser_result;
+ int pass_frame_status;
+ uint16_t private_data_size;
+ uint16_t data_align;
+ uint16_t data_head_room;
+ uint16_t data_tail_room;
+};
+
+/**
+ * dpni_get_rx_buffer_layout() - Retrieve Rx buffer layout attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPNI object
+ * @layout: Returns buffer layout attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_rx_buffer_layout(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ struct dpni_buffer_layout *layout);
+/**
+ * dpni_set_rx_buffer_layout() - Set Rx buffer layout configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPNI object
+ * @layout: Buffer layout configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * @warning Allowed only when DPNI is disabled
+ */
+int dpni_set_rx_buffer_layout(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ const struct dpni_buffer_layout *layout);
+
+/**
+ * dpni_get_tx_buffer_layout() - Retrieve Tx buffer layout attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPNI object
+ * @layout: Returns buffer layout attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_tx_buffer_layout(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ struct dpni_buffer_layout *layout);
+
+/**
+ * dpni_set_tx_buffer_layout() - Set Tx buffer layout configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPNI object
+ * @layout: Buffer layout configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * @warning Allowed only when DPNI is disabled
+ */
+int dpni_set_tx_buffer_layout(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ const struct dpni_buffer_layout *layout);
+/**
+ * dpni_get_tx_conf_buffer_layout() - Retrieve Tx confirmation buffer layout
+ * attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPNI object
+ * @layout: Returns buffer layout attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_tx_conf_buffer_layout(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ struct dpni_buffer_layout *layout);
+/**
+ * dpni_set_tx_conf_buffer_layout() - Set Tx confirmation buffer layout
+ * configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPNI object
+ * @layout: Buffer layout configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * @warning Allowed only when DPNI is disabled
+ */
+int dpni_set_tx_conf_buffer_layout(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ const struct dpni_buffer_layout *layout);
+/**
+ * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be
+ * used for enqueue operations
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPNI object
+ * @qdid: Returned virtual QDID value that should be used as an argument
+ * in all enqueue operations
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_qdid(struct fsl_mc_io *mc_io, uint16_t token, uint16_t *qdid);
+
+/**
+ * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPNI object
+ * @data_offset: Tx data offset (from start of buffer)
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ uint16_t *data_offset);
+
+/**
+ * enum dpni_counter - DPNI counter types
+ * @DPNI_CNT_ING_FRAME: Counts ingress frames
+ * @DPNI_CNT_ING_BYTE: Counts ingress bytes
+ * @DPNI_CNT_ING_FRAME_DROP: Counts ingress frames dropped due to explicit
+ * 'drop' setting
+ * @DPNI_CNT_ING_FRAME_DISCARD: Counts ingress frames discarded due to errors
+ * @DPNI_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
+ * @DPNI_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
+ * @DPNI_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
+ * @DPNI_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
+ * @DPNI_CNT_EGR_FRAME: Counts egress frames
+ * @DPNI_CNT_EGR_BYTE: Counts egress bytes
+ * @DPNI_CNT_EGR_FRAME_DISCARD: Counts egress frames discarded due to errors
+ */
+enum dpni_counter {
+ DPNI_CNT_ING_FRAME = 0x0,
+ DPNI_CNT_ING_BYTE = 0x1,
+ DPNI_CNT_ING_FRAME_DROP = 0x2,
+ DPNI_CNT_ING_FRAME_DISCARD = 0x3,
+ DPNI_CNT_ING_MCAST_FRAME = 0x4,
+ DPNI_CNT_ING_MCAST_BYTE = 0x5,
+ DPNI_CNT_ING_BCAST_FRAME = 0x6,
+ DPNI_CNT_ING_BCAST_BYTES = 0x7,
+ DPNI_CNT_EGR_FRAME = 0x8,
+ DPNI_CNT_EGR_BYTE = 0x9,
+ DPNI_CNT_EGR_FRAME_DISCARD = 0xa
+};
+
+/**
+ * dpni_get_counter() - Read a specific DPNI counter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPNI object
+ * @counter: The requested counter
+ * @value: Returned counter's current value
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_counter(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ enum dpni_counter counter,
+ uint64_t *value);
+
+/**
+ * dpni_set_counter() - Set (or clear) a specific DPNI counter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPNI object
+ * @counter: The requested counter
+ * @value: New counter value; typically pass '0' for resetting
+ * the counter.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_counter(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ enum dpni_counter counter,
+ uint64_t value);
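
A small sketch of the counter API (illustrative only); 'dpni_token' stands for the token of an already opened DPNI object:

static int example_read_and_clear(struct fsl_mc_io *mc_io, uint16_t dpni_token)
{
	uint64_t frames = 0;
	int err;

	err = dpni_get_counter(mc_io, dpni_token, DPNI_CNT_ING_FRAME, &frames);
	if (err)
		return err;
	printf("ingress frames: %llu\n", (unsigned long long)frames);

	/* Writing zero resets the counter */
	return dpni_set_counter(mc_io, dpni_token, DPNI_CNT_ING_FRAME, 0);
}
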
+/**
+ * struct dpni_link_cfg - Structure representing DPNI link configuration
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
+ */
+struct dpni_link_cfg {
+ uint64_t rate;
+ uint64_t options;
+};
+
+/**
+ * dpni_set_link_cfg() - set the link configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPNI object
+ * @cfg: Link configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ struct dpni_link_cfg *cfg);
+
+/**
+ * struct dpni_link_state - Structure representing DPNI link state
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
+ * @up: Link state; '0' for down, '1' for up
+ */
+struct dpni_link_state {
+ uint64_t rate;
+ uint64_t options;
+ int up;
+};
+
+/**
+ * dpni_get_link_state() - Return the link state (either up or down)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPNI object
+ * @state: Returned link state;
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_link_state(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ struct dpni_link_state *state);
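
Illustrative sketch of the link API; the rate units and option flags are whatever the MC firmware expects, and 'dpni_token' is an assumed handle:

static int example_link(struct fsl_mc_io *mc_io, uint16_t dpni_token)
{
	struct dpni_link_cfg cfg = { .rate = 1000, .options = 0 };
	struct dpni_link_state state = { 0 };
	int err;

	err = dpni_set_link_cfg(mc_io, dpni_token, &cfg);
	if (err)
		return err;

	err = dpni_get_link_state(mc_io, dpni_token, &state);
	if (!err && !state.up)
		printf("DPNI link is down\n");
	return err;
}
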
+
+/**
+ * dpni_set_primary_mac_addr() - Set the primary MAC address
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address to set as primary address
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ const uint8_t mac_addr[6]);
+/**
+ * dpni_get_primary_mac_addr() - Get the primary MAC address
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPNI object
+ * @mac_addr: Returned MAC address
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ uint8_t mac_addr[6]);
+/**
+ * dpni_add_mac_addr() - Add MAC address filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address to add
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ const uint8_t mac_addr[6]);
+
+/**
+ * dpni_remove_mac_addr() - Remove MAC address filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ const uint8_t mac_addr[6]);
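
Usage sketch for the MAC filtering calls (the addresses are placeholders):

static int example_mac(struct fsl_mc_io *mc_io, uint16_t dpni_token)
{
	static const uint8_t primary[6] = { 0x00, 0x04, 0x9f, 0x00, 0x00, 0x01 };
	static const uint8_t extra[6]   = { 0x00, 0x04, 0x9f, 0x00, 0x00, 0x02 };
	int err;

	err = dpni_set_primary_mac_addr(mc_io, dpni_token, primary);
	if (err)
		return err;
	/* Accept a second unicast address in addition to the primary one */
	return dpni_add_mac_addr(mc_io, dpni_token, extra);
}
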
+
+/**
+ * enum dpni_dest - DPNI destination types
+ * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
+ * does not generate FQDAN notifications; user is expected to
+ * dequeue from the queue based on polling or other user-defined
+ * method
+ * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ * notifications to the specified DPIO; user is expected to dequeue
+ * from the queue only after notification is received
+ * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ * FQDAN notifications, but is connected to the specified DPCON
+ * object; user is expected to dequeue from the DPCON channel
+ */
+enum dpni_dest {
+ DPNI_DEST_NONE = 0,
+ DPNI_DEST_DPIO = 1,
+ DPNI_DEST_DPCON = 2
+};
+
+/**
+ * struct dpni_dest_cfg - Structure representing DPNI destination parameters
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
+ * are 0-1 or 0-7, depending on the number of priorities in that
+ * channel; not relevant for 'DPNI_DEST_NONE' option
+ */
+struct dpni_dest_cfg {
+ enum dpni_dest dest_type;
+ int dest_id;
+ uint8_t priority;
+};
+
+/* DPNI queue modification options */
+
+/* Select to modify the user's context associated with the queue */
+#define DPNI_QUEUE_OPT_USER_CTX 0x00000001
+/* Select to modify the queue's destination */
+#define DPNI_QUEUE_OPT_DEST 0x00000002
+
+/**
+ * struct dpni_queue_cfg - Structure representing queue configuration
+ * @options: Flags representing the suggested modifications to the queue;
+ * Use any combination of 'DPNI_QUEUE_OPT_<X>' flags
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame; valid only if 'DPNI_QUEUE_OPT_USER_CTX'
+ * is contained in 'options'
+ * @dest_cfg: Queue destination parameters;
+ * valid only if 'DPNI_QUEUE_OPT_DEST' is contained in 'options'
+ */
+struct dpni_queue_cfg {
+ uint32_t options;
+ uint64_t user_ctx;
+ struct dpni_dest_cfg dest_cfg;
+};
+
+/**
+ * struct dpni_queue_attr - Structure representing queue attributes
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame
+ * @dest_cfg: Queue destination configuration
+ * @fqid: Virtual fqid value to be used for dequeue operations
+ */
+struct dpni_queue_attr {
+ uint64_t user_ctx;
+ struct dpni_dest_cfg dest_cfg;
+ uint32_t fqid;
+};
+
+/* DPNI Tx flow modification options */
+
+/* Select to modify the settings for dedicated Tx confirmation/error */
+#define DPNI_TX_FLOW_OPT_TX_CONF_ERROR 0x00000001
+/* Select to modify the Tx confirmation and/or error setting */
+#define DPNI_TX_FLOW_OPT_ONLY_TX_ERROR 0x00000002
+/* Select to modify the queue configuration */
+#define DPNI_TX_FLOW_OPT_QUEUE 0x00000004
+/* Select to modify the L3 checksum generation setting */
+#define DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN 0x00000010
+/* Select to modify the L4 checksum generation setting */
+#define DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN 0x00000020
+
+/**
+ * struct dpni_tx_flow_cfg - Structure representing Tx flow configuration
+ * @options: Flags representing the suggested modifications to the Tx flow;
+ * Use any combination of 'DPNI_TX_FLOW_OPT_<X>' flags
+ * @conf_err_cfg: Tx confirmation and error configuration; these settings are
+ * ignored if 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' was set at
+ * DPNI creation
+ * @l3_chksum_gen: Set to '1' to enable L3 checksum generation; '0' to disable;
+ * valid only if 'DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN' is contained in
+ * 'options'
+ * @l4_chksum_gen: Set to '1' to enable L4 checksum generation; '0' to disable;
+ * valid only if 'DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN' is contained in
+ * 'options'
+ */
+struct dpni_tx_flow_cfg {
+ uint32_t options;
+ /**
+ * struct conf_err_cfg - Tx confirmation and error configuration
+ * @use_default_queue: Set to '1' to use the common (default) Tx
+ * confirmation and error queue; Set to '0' to use the
+ * private Tx confirmation and error queue; valid only if
+ * 'DPNI_TX_FLOW_OPT_TX_CONF_ERROR' is contained in
+ * 'options'
+ * @errors_only: Set to '1' to report back only error frames;
+ * Set to '0' to confirm transmission/error for all
+ * transmitted frames;
+ * valid only if 'DPNI_TX_FLOW_OPT_ONLY_TX_ERROR' is
+ * contained in 'options' and 'use_default_queue = 0';
+ * @queue_cfg: Queue configuration; valid only if
+ * 'DPNI_TX_FLOW_OPT_QUEUE' is contained in 'options'
+ */
+ struct {
+ int use_default_queue;
+ int errors_only;
+ struct dpni_queue_cfg queue_cfg;
+ } conf_err_cfg;
+ int l3_chksum_gen;
+ int l4_chksum_gen;
+};
+
+/**
+ * dpni_set_tx_flow() - Set Tx flow configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPNI object
+ * @flow_id: Provides (or returns) the sender's flow ID;
+ * for each new sender set (*flow_id) to
+ * 'DPNI_NEW_FLOW_ID' to generate a new flow_id;
+ * this ID should be used as the QDBIN argument
+ * in enqueue operations
+ * @cfg: Tx flow configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_tx_flow(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ uint16_t *flow_id,
+ const struct dpni_tx_flow_cfg *cfg);
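
Illustrative sketch: creating a new Tx flow with checksum offload enabled. 'DPNI_NEW_FLOW_ID' is assumed to be defined earlier in this header; on success '*flow_id' carries the value to pass as the QDBIN in enqueue operations.

static int example_tx_flow(struct fsl_mc_io *mc_io, uint16_t dpni_token,
			   uint16_t *flow_id)
{
	struct dpni_tx_flow_cfg cfg = { 0 };

	cfg.options = DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN |
		      DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN;
	cfg.l3_chksum_gen = 1;
	cfg.l4_chksum_gen = 1;

	*flow_id = DPNI_NEW_FLOW_ID;	/* ask the MC to allocate a flow ID */
	return dpni_set_tx_flow(mc_io, dpni_token, flow_id, &cfg);
}
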
+
+/**
+ * struct dpni_tx_flow_attr - Structure representing Tx flow attributes
+ * @conf_err_attr: Tx confirmation and error attributes
+ * @l3_chksum_gen: '1' if L3 checksum generation is enabled; '0' if disabled
+ * @l4_chksum_gen: '1' if L4 checksum generation is enabled; '0' if disabled
+ */
+struct dpni_tx_flow_attr {
+ /**
+ * struct conf_err_attr - Tx confirmation and error attributes
+ * @use_default_queue: '1' if using common (default) Tx confirmation and
+ * error queue;
+ * '0' if using private Tx confirmation and error
+ * queue
+ * @errors_only: '1' if only error frames are reported back; '0' if all
+ * transmitted frames are confirmed
+ * @queue_attr: Queue attributes
+ */
+ struct {
+ int use_default_queue;
+ int errors_only;
+ struct dpni_queue_attr queue_attr;
+ } conf_err_attr;
+ int l3_chksum_gen;
+ int l4_chksum_gen;
+};
+
+/**
+ * dpni_get_tx_flow() - Get Tx flow attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPNI object
+ * @flow_id: The sender's flow ID, as returned by the
+ * dpni_set_tx_flow() function
+ * @attr: Returned Tx flow attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_tx_flow(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ uint16_t flow_id,
+ struct dpni_tx_flow_attr *attr);
+
+/**
+ * dpni_set_rx_flow() - Set Rx flow configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPNI object
+ * @tc_id: Traffic class selection (0-7);
+ * use 'DPNI_ALL_TCS' to set all TCs and all flows
+ * @flow_id: Rx flow ID within the traffic class; use
+ * 'DPNI_ALL_TC_FLOWS' to set all flows within
+ * this tc_id; ignored if tc_id is set to
+ * 'DPNI_ALL_TCS';
+ * @cfg: Rx flow configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_rx_flow(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ uint8_t tc_id,
+ uint16_t flow_id,
+ const struct dpni_queue_cfg *cfg);
+
+/**
+ * dpni_get_rx_flow() - Get Rx flow attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPNI object
+ * @tc_id: Traffic class selection (0-7)
+ * @flow_id: Rx flow id within the traffic class
+ * @attr: Returned Rx flow attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_rx_flow(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ uint8_t tc_id,
+ uint16_t flow_id,
+ struct dpni_queue_attr *attr);
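
Illustrative sketch: set a user context on one Rx flow of traffic class 0 and read back the virtual FQID that pull dequeues should target.

static int example_rx_flow(struct fsl_mc_io *mc_io, uint16_t dpni_token,
			   uint32_t *fqid)
{
	struct dpni_queue_cfg cfg = { 0 };
	struct dpni_queue_attr attr = { 0 };
	int err;

	cfg.options = DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_ctx = 0xabcd0000;	/* echoed back in each dequeued frame */

	err = dpni_set_rx_flow(mc_io, dpni_token, 0, 0, &cfg);
	if (err)
		return err;

	err = dpni_get_rx_flow(mc_io, dpni_token, 0, 0, &attr);
	if (!err)
		*fqid = attr.fqid;
	return err;
}
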
+
+#endif /* _FSL_DPNI_H */
--- /dev/null
+/*
+ * Freescale Layerscape MC I/O wrapper
+ *
+ * Copyright (C) 2013-2015 Freescale Semiconductor, Inc.
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+#ifndef _FSL_DPRC_H
+#define _FSL_DPRC_H
+
+/* DPRC Version */
+#define DPRC_VER_MAJOR 2
+#define DPRC_VER_MINOR 0
+
+/* Command IDs */
+#define DPRC_CMDID_CLOSE 0x800
+#define DPRC_CMDID_OPEN 0x805
+
+#define DPRC_CMDID_GET_ATTR 0x004
+#define DPRC_CMDID_RESET_CONT 0x005
+
+#define DPRC_CMDID_GET_CONT_ID 0x830
+#define DPRC_CMDID_GET_OBJ_COUNT 0x159
+#define DPRC_CMDID_GET_OBJ 0x15A
+#define DPRC_CMDID_GET_RES_COUNT 0x15B
+#define DPRC_CMDID_GET_RES_IDS 0x15C
+#define DPRC_CMDID_GET_OBJ_REG 0x15E
+
+#define DPRC_CMDID_CONNECT 0x167
+#define DPRC_CMDID_DISCONNECT 0x168
+#define DPRC_CMDID_GET_CONNECTION 0x16C
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPRC_RSP_GET_CONTAINER_ID(cmd, container_id) \
+ MC_RSP_OP(cmd, 0, 0, 32, int, container_id)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPRC_CMD_OPEN(cmd, container_id) \
+ MC_CMD_OP(cmd, 0, 0, 32, int, container_id)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPRC_CMD_RESET_CONTAINER(cmd, child_container_id) \
+ MC_CMD_OP(cmd, 0, 0, 32, int, child_container_id)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPRC_RSP_GET_ATTRIBUTES(cmd, attr) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->container_id); \
+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->icid); \
+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->options);\
+ MC_RSP_OP(cmd, 1, 32, 32, int, attr->portal_id); \
+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, attr->version.major);\
+ MC_RSP_OP(cmd, 2, 16, 16, uint16_t, attr->version.minor);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPRC_RSP_GET_OBJ_COUNT(cmd, obj_count) \
+ MC_RSP_OP(cmd, 0, 32, 32, int, obj_count)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPRC_CMD_GET_OBJ(cmd, obj_index) \
+ MC_CMD_OP(cmd, 0, 0, 32, int, obj_index)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPRC_RSP_GET_OBJ(cmd, obj_desc) \
+do { \
+ MC_RSP_OP(cmd, 0, 32, 32, int, obj_desc->id); \
+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, obj_desc->vendor); \
+ MC_RSP_OP(cmd, 1, 16, 8, uint8_t, obj_desc->irq_count); \
+ MC_RSP_OP(cmd, 1, 24, 8, uint8_t, obj_desc->region_count); \
+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, obj_desc->state);\
+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, obj_desc->ver_major);\
+ MC_RSP_OP(cmd, 2, 16, 16, uint16_t, obj_desc->ver_minor);\
+ MC_RSP_OP(cmd, 3, 0, 8, char, obj_desc->type[0]);\
+ MC_RSP_OP(cmd, 3, 8, 8, char, obj_desc->type[1]);\
+ MC_RSP_OP(cmd, 3, 16, 8, char, obj_desc->type[2]);\
+ MC_RSP_OP(cmd, 3, 24, 8, char, obj_desc->type[3]);\
+ MC_RSP_OP(cmd, 3, 32, 8, char, obj_desc->type[4]);\
+ MC_RSP_OP(cmd, 3, 40, 8, char, obj_desc->type[5]);\
+ MC_RSP_OP(cmd, 3, 48, 8, char, obj_desc->type[6]);\
+ MC_RSP_OP(cmd, 3, 56, 8, char, obj_desc->type[7]);\
+ MC_RSP_OP(cmd, 4, 0, 8, char, obj_desc->type[8]);\
+ MC_RSP_OP(cmd, 4, 8, 8, char, obj_desc->type[9]);\
+ MC_RSP_OP(cmd, 4, 16, 8, char, obj_desc->type[10]);\
+ MC_RSP_OP(cmd, 4, 24, 8, char, obj_desc->type[11]);\
+ MC_RSP_OP(cmd, 4, 32, 8, char, obj_desc->type[12]);\
+ MC_RSP_OP(cmd, 4, 40, 8, char, obj_desc->type[13]);\
+ MC_RSP_OP(cmd, 4, 48, 8, char, obj_desc->type[14]);\
+ MC_RSP_OP(cmd, 4, 56, 8, char, obj_desc->type[15]);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPRC_CMD_GET_RES_COUNT(cmd, type) \
+do { \
+ MC_CMD_OP(cmd, 1, 0, 8, char, type[0]);\
+ MC_CMD_OP(cmd, 1, 8, 8, char, type[1]);\
+ MC_CMD_OP(cmd, 1, 16, 8, char, type[2]);\
+ MC_CMD_OP(cmd, 1, 24, 8, char, type[3]);\
+ MC_CMD_OP(cmd, 1, 32, 8, char, type[4]);\
+ MC_CMD_OP(cmd, 1, 40, 8, char, type[5]);\
+ MC_CMD_OP(cmd, 1, 48, 8, char, type[6]);\
+ MC_CMD_OP(cmd, 1, 56, 8, char, type[7]);\
+ MC_CMD_OP(cmd, 2, 0, 8, char, type[8]);\
+ MC_CMD_OP(cmd, 2, 8, 8, char, type[9]);\
+ MC_CMD_OP(cmd, 2, 16, 8, char, type[10]);\
+ MC_CMD_OP(cmd, 2, 24, 8, char, type[11]);\
+ MC_CMD_OP(cmd, 2, 32, 8, char, type[12]);\
+ MC_CMD_OP(cmd, 2, 40, 8, char, type[13]);\
+ MC_CMD_OP(cmd, 2, 48, 8, char, type[14]);\
+ MC_CMD_OP(cmd, 2, 56, 8, char, type[15]);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPRC_RSP_GET_RES_COUNT(cmd, res_count) \
+ MC_RSP_OP(cmd, 0, 0, 32, int, res_count)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPRC_CMD_GET_RES_IDS(cmd, range_desc, type) \
+do { \
+ MC_CMD_OP(cmd, 0, 42, 7, enum dprc_iter_status, \
+ range_desc->iter_status); \
+ MC_CMD_OP(cmd, 1, 0, 32, int, range_desc->base_id); \
+ MC_CMD_OP(cmd, 1, 32, 32, int, range_desc->last_id);\
+ MC_CMD_OP(cmd, 2, 0, 8, char, type[0]);\
+ MC_CMD_OP(cmd, 2, 8, 8, char, type[1]);\
+ MC_CMD_OP(cmd, 2, 16, 8, char, type[2]);\
+ MC_CMD_OP(cmd, 2, 24, 8, char, type[3]);\
+ MC_CMD_OP(cmd, 2, 32, 8, char, type[4]);\
+ MC_CMD_OP(cmd, 2, 40, 8, char, type[5]);\
+ MC_CMD_OP(cmd, 2, 48, 8, char, type[6]);\
+ MC_CMD_OP(cmd, 2, 56, 8, char, type[7]);\
+ MC_CMD_OP(cmd, 3, 0, 8, char, type[8]);\
+ MC_CMD_OP(cmd, 3, 8, 8, char, type[9]);\
+ MC_CMD_OP(cmd, 3, 16, 8, char, type[10]);\
+ MC_CMD_OP(cmd, 3, 24, 8, char, type[11]);\
+ MC_CMD_OP(cmd, 3, 32, 8, char, type[12]);\
+ MC_CMD_OP(cmd, 3, 40, 8, char, type[13]);\
+ MC_CMD_OP(cmd, 3, 48, 8, char, type[14]);\
+ MC_CMD_OP(cmd, 3, 56, 8, char, type[15]);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPRC_RSP_GET_RES_IDS(cmd, range_desc) \
+do { \
+ MC_RSP_OP(cmd, 0, 42, 7, enum dprc_iter_status, \
+ range_desc->iter_status);\
+ MC_RSP_OP(cmd, 1, 0, 32, int, range_desc->base_id); \
+ MC_RSP_OP(cmd, 1, 32, 32, int, range_desc->last_id);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPRC_CMD_GET_OBJ_REGION(cmd, obj_type, obj_id, region_index) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 32, int, obj_id); \
+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, region_index);\
+ MC_CMD_OP(cmd, 3, 0, 8, char, obj_type[0]);\
+ MC_CMD_OP(cmd, 3, 8, 8, char, obj_type[1]);\
+ MC_CMD_OP(cmd, 3, 16, 8, char, obj_type[2]);\
+ MC_CMD_OP(cmd, 3, 24, 8, char, obj_type[3]);\
+ MC_CMD_OP(cmd, 3, 32, 8, char, obj_type[4]);\
+ MC_CMD_OP(cmd, 3, 40, 8, char, obj_type[5]);\
+ MC_CMD_OP(cmd, 3, 48, 8, char, obj_type[6]);\
+ MC_CMD_OP(cmd, 3, 56, 8, char, obj_type[7]);\
+ MC_CMD_OP(cmd, 4, 0, 8, char, obj_type[8]);\
+ MC_CMD_OP(cmd, 4, 8, 8, char, obj_type[9]);\
+ MC_CMD_OP(cmd, 4, 16, 8, char, obj_type[10]);\
+ MC_CMD_OP(cmd, 4, 24, 8, char, obj_type[11]);\
+ MC_CMD_OP(cmd, 4, 32, 8, char, obj_type[12]);\
+ MC_CMD_OP(cmd, 4, 40, 8, char, obj_type[13]);\
+ MC_CMD_OP(cmd, 4, 48, 8, char, obj_type[14]);\
+ MC_CMD_OP(cmd, 4, 56, 8, char, obj_type[15]);\
+} while (0)
+
+/* param, offset, width, type, arg_name */
+#define DPRC_RSP_GET_OBJ_REGION(cmd, region_desc) \
+do { \
+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, region_desc->base_paddr);\
+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, region_desc->size); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPRC_CMD_CONNECT(cmd, endpoint1, endpoint2) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 32, int, endpoint1->id); \
+ MC_CMD_OP(cmd, 0, 32, 32, int, endpoint1->interface_id); \
+ MC_CMD_OP(cmd, 1, 0, 32, int, endpoint2->id); \
+ MC_CMD_OP(cmd, 1, 32, 32, int, endpoint2->interface_id); \
+ MC_CMD_OP(cmd, 2, 0, 8, char, endpoint1->type[0]); \
+ MC_CMD_OP(cmd, 2, 8, 8, char, endpoint1->type[1]); \
+ MC_CMD_OP(cmd, 2, 16, 8, char, endpoint1->type[2]); \
+ MC_CMD_OP(cmd, 2, 24, 8, char, endpoint1->type[3]); \
+ MC_CMD_OP(cmd, 2, 32, 8, char, endpoint1->type[4]); \
+ MC_CMD_OP(cmd, 2, 40, 8, char, endpoint1->type[5]); \
+ MC_CMD_OP(cmd, 2, 48, 8, char, endpoint1->type[6]); \
+ MC_CMD_OP(cmd, 2, 56, 8, char, endpoint1->type[7]); \
+ MC_CMD_OP(cmd, 3, 0, 8, char, endpoint1->type[8]); \
+ MC_CMD_OP(cmd, 3, 8, 8, char, endpoint1->type[9]); \
+ MC_CMD_OP(cmd, 3, 16, 8, char, endpoint1->type[10]); \
+ MC_CMD_OP(cmd, 3, 24, 8, char, endpoint1->type[11]); \
+ MC_CMD_OP(cmd, 3, 32, 8, char, endpoint1->type[12]); \
+ MC_CMD_OP(cmd, 3, 40, 8, char, endpoint1->type[13]); \
+ MC_CMD_OP(cmd, 3, 48, 8, char, endpoint1->type[14]); \
+ MC_CMD_OP(cmd, 3, 56, 8, char, endpoint1->type[15]); \
+ MC_CMD_OP(cmd, 5, 0, 8, char, endpoint2->type[0]); \
+ MC_CMD_OP(cmd, 5, 8, 8, char, endpoint2->type[1]); \
+ MC_CMD_OP(cmd, 5, 16, 8, char, endpoint2->type[2]); \
+ MC_CMD_OP(cmd, 5, 24, 8, char, endpoint2->type[3]); \
+ MC_CMD_OP(cmd, 5, 32, 8, char, endpoint2->type[4]); \
+ MC_CMD_OP(cmd, 5, 40, 8, char, endpoint2->type[5]); \
+ MC_CMD_OP(cmd, 5, 48, 8, char, endpoint2->type[6]); \
+ MC_CMD_OP(cmd, 5, 56, 8, char, endpoint2->type[7]); \
+ MC_CMD_OP(cmd, 6, 0, 8, char, endpoint2->type[8]); \
+ MC_CMD_OP(cmd, 6, 8, 8, char, endpoint2->type[9]); \
+ MC_CMD_OP(cmd, 6, 16, 8, char, endpoint2->type[10]); \
+ MC_CMD_OP(cmd, 6, 24, 8, char, endpoint2->type[11]); \
+ MC_CMD_OP(cmd, 6, 32, 8, char, endpoint2->type[12]); \
+ MC_CMD_OP(cmd, 6, 40, 8, char, endpoint2->type[13]); \
+ MC_CMD_OP(cmd, 6, 48, 8, char, endpoint2->type[14]); \
+ MC_CMD_OP(cmd, 6, 56, 8, char, endpoint2->type[15]); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPRC_CMD_DISCONNECT(cmd, endpoint) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 32, int, endpoint->id); \
+ MC_CMD_OP(cmd, 0, 32, 32, int, endpoint->interface_id); \
+ MC_CMD_OP(cmd, 1, 0, 8, char, endpoint->type[0]); \
+ MC_CMD_OP(cmd, 1, 8, 8, char, endpoint->type[1]); \
+ MC_CMD_OP(cmd, 1, 16, 8, char, endpoint->type[2]); \
+ MC_CMD_OP(cmd, 1, 24, 8, char, endpoint->type[3]); \
+ MC_CMD_OP(cmd, 1, 32, 8, char, endpoint->type[4]); \
+ MC_CMD_OP(cmd, 1, 40, 8, char, endpoint->type[5]); \
+ MC_CMD_OP(cmd, 1, 48, 8, char, endpoint->type[6]); \
+ MC_CMD_OP(cmd, 1, 56, 8, char, endpoint->type[7]); \
+ MC_CMD_OP(cmd, 2, 0, 8, char, endpoint->type[8]); \
+ MC_CMD_OP(cmd, 2, 8, 8, char, endpoint->type[9]); \
+ MC_CMD_OP(cmd, 2, 16, 8, char, endpoint->type[10]); \
+ MC_CMD_OP(cmd, 2, 24, 8, char, endpoint->type[11]); \
+ MC_CMD_OP(cmd, 2, 32, 8, char, endpoint->type[12]); \
+ MC_CMD_OP(cmd, 2, 40, 8, char, endpoint->type[13]); \
+ MC_CMD_OP(cmd, 2, 48, 8, char, endpoint->type[14]); \
+ MC_CMD_OP(cmd, 2, 56, 8, char, endpoint->type[15]); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPRC_CMD_GET_CONNECTION(cmd, endpoint1) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 32, int, endpoint1->id); \
+ MC_CMD_OP(cmd, 0, 32, 32, int, endpoint1->interface_id); \
+ MC_CMD_OP(cmd, 1, 0, 8, char, endpoint1->type[0]); \
+ MC_CMD_OP(cmd, 1, 8, 8, char, endpoint1->type[1]); \
+ MC_CMD_OP(cmd, 1, 16, 8, char, endpoint1->type[2]); \
+ MC_CMD_OP(cmd, 1, 24, 8, char, endpoint1->type[3]); \
+ MC_CMD_OP(cmd, 1, 32, 8, char, endpoint1->type[4]); \
+ MC_CMD_OP(cmd, 1, 40, 8, char, endpoint1->type[5]); \
+ MC_CMD_OP(cmd, 1, 48, 8, char, endpoint1->type[6]); \
+ MC_CMD_OP(cmd, 1, 56, 8, char, endpoint1->type[7]); \
+ MC_CMD_OP(cmd, 2, 0, 8, char, endpoint1->type[8]); \
+ MC_CMD_OP(cmd, 2, 8, 8, char, endpoint1->type[9]); \
+ MC_CMD_OP(cmd, 2, 16, 8, char, endpoint1->type[10]); \
+ MC_CMD_OP(cmd, 2, 24, 8, char, endpoint1->type[11]); \
+ MC_CMD_OP(cmd, 2, 32, 8, char, endpoint1->type[12]); \
+ MC_CMD_OP(cmd, 2, 40, 8, char, endpoint1->type[13]); \
+ MC_CMD_OP(cmd, 2, 48, 8, char, endpoint1->type[14]); \
+ MC_CMD_OP(cmd, 2, 56, 8, char, endpoint1->type[15]); \
+} while (0)
+
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPRC_RSP_GET_CONNECTION(cmd, endpoint2, state) \
+do { \
+ MC_RSP_OP(cmd, 3, 0, 32, int, endpoint2->id); \
+ MC_RSP_OP(cmd, 3, 32, 32, int, endpoint2->interface_id); \
+ MC_RSP_OP(cmd, 4, 0, 8, char, endpoint2->type[0]); \
+ MC_RSP_OP(cmd, 4, 8, 8, char, endpoint2->type[1]); \
+ MC_RSP_OP(cmd, 4, 16, 8, char, endpoint2->type[2]); \
+ MC_RSP_OP(cmd, 4, 24, 8, char, endpoint2->type[3]); \
+ MC_RSP_OP(cmd, 4, 32, 8, char, endpoint2->type[4]); \
+ MC_RSP_OP(cmd, 4, 40, 8, char, endpoint2->type[5]); \
+ MC_RSP_OP(cmd, 4, 48, 8, char, endpoint2->type[6]); \
+ MC_RSP_OP(cmd, 4, 56, 8, char, endpoint2->type[7]); \
+ MC_RSP_OP(cmd, 5, 0, 8, char, endpoint2->type[8]); \
+ MC_RSP_OP(cmd, 5, 8, 8, char, endpoint2->type[9]); \
+ MC_RSP_OP(cmd, 5, 16, 8, char, endpoint2->type[10]); \
+ MC_RSP_OP(cmd, 5, 24, 8, char, endpoint2->type[11]); \
+ MC_RSP_OP(cmd, 5, 32, 8, char, endpoint2->type[12]); \
+ MC_RSP_OP(cmd, 5, 40, 8, char, endpoint2->type[13]); \
+ MC_RSP_OP(cmd, 5, 48, 8, char, endpoint2->type[14]); \
+ MC_RSP_OP(cmd, 5, 56, 8, char, endpoint2->type[15]); \
+ MC_RSP_OP(cmd, 6, 0, 32, int, state); \
+} while (0)
+
+/* Data Path Resource Container API
+ * Contains DPRC API for managing and querying DPAA resources
+ */
+struct fsl_mc_io;
+
+/**
+ * Set this value as the icid value in the dprc_cfg structure when creating a
+ * container, in case the ICID is not selected by the user and should be
+ * allocated by the DPRC from the pool of ICIDs.
+ */
+#define DPRC_GET_ICID_FROM_POOL (uint16_t)(~(0))
+
+/**
+ * Set this value as the portal_id value in the dprc_cfg structure when creating
+ * container, in case the portal ID is not specifically selected by the
+ * user and should be allocated by the DPRC from the pool of portal ids.
+ */
+#define DPRC_GET_PORTAL_ID_FROM_POOL (int)(~(0))
+
+/**
+ * dprc_get_container_id() - Get container ID associated with a given portal.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @container_id: Returned container ID
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_container_id(struct fsl_mc_io *mc_io, int *container_id);
+
+/**
+ * dprc_open() - Open DPRC object for use
+ * @mc_io: Pointer to MC portal's I/O object
+ * @container_id: Container ID to open
+ * @token: Returned token of DPRC object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * @warning Required before any operation on the object.
+ */
+int dprc_open(struct fsl_mc_io *mc_io, int container_id, uint16_t *token);
+
+/**
+ * dprc_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPRC object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_close(struct fsl_mc_io *mc_io, uint16_t token);
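
Usage sketch of the open/close pattern for the root container behind a portal (illustrative only; 'mc_io' would typically be the default MC I/O object):

static int example_with_root_dprc(struct fsl_mc_io *mc_io)
{
	int container_id;
	uint16_t dprc_token;
	int err;

	err = dprc_get_container_id(mc_io, &container_id);
	if (err)
		return err;

	err = dprc_open(mc_io, container_id, &dprc_token);
	if (err)
		return err;

	/* ... query or configure the container here ... */

	return dprc_close(mc_io, dprc_token);
}
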
+
+/**
+ * Container general options
+ *
+ * These options may be selected at container creation by the container creator
+ * and can be retrieved using dprc_get_attributes()
+ */
+
+/* Spawn Policy Option allowed - Indicates that the new container is allowed
+ * to spawn and have its own child containers.
+ */
+#define DPRC_CFG_OPT_SPAWN_ALLOWED 0x00000001
+
+/* General Container allocation policy - Indicates that the new container is
+ * allowed to allocate requested resources from its parent container; if not
+ * set, the container is only allowed to use resources in its own pools; Note
+ * that this is a container's global policy, but the parent container may
+ * override it and set specific quota per resource type.
+ */
+#define DPRC_CFG_OPT_ALLOC_ALLOWED 0x00000002
+
+/* Object initialization allowed - software context associated with this
+ * container is allowed to invoke object initialization operations.
+ */
+#define DPRC_CFG_OPT_OBJ_CREATE_ALLOWED 0x00000004
+
+/* Topology change allowed - software context associated with this
+ * container is allowed to invoke topology operations, such as attach/detach
+ * of network objects.
+ */
+#define DPRC_CFG_OPT_TOPOLOGY_CHANGES_ALLOWED 0x00000008
+
+/* IOMMU bypass - indicates whether objects of this container are permitted
+ * to bypass the IOMMU.
+ */
+#define DPRC_CFG_OPT_IOMMU_BYPASS 0x00000010
+
+/* AIOP - Indicates that container belongs to AIOP. */
+#define DPRC_CFG_OPT_AIOP 0x00000020
+
+/**
+ * struct dprc_cfg - Container configuration options
+ * @icid: Container's ICID; if set to 'DPRC_GET_ICID_FROM_POOL', a free
+ * ICID value is allocated by the DPRC
+ * @portal_id: Portal ID; if set to 'DPRC_GET_PORTAL_ID_FROM_POOL', a free
+ * portal ID is allocated by the DPRC
+ * @options: Combination of 'DPRC_CFG_OPT_<X>' options
+ */
+struct dprc_cfg {
+ uint16_t icid;
+ int portal_id;
+ uint64_t options;
+};
+
+/**
+ * dprc_reset_container() - Reset child container.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPRC object
+ * @child_container_id: ID of the container to reset
+ *
+ * In case a software context crashes or becomes non-responsive, the parent
+ * may wish to reset its resources container before the software context is
+ * restarted.
+ *
+ * This routine informs all objects assigned to the child container that the
+ * container is being reset, so they may perform any cleanup operations that are
+ * needed. All object handles that were owned by the child container shall be
+ * closed.
+ *
+ * Note that such request may be submitted even if the child software context
+ * has not crashed, but the resulting object cleanup operations will not be
+ * aware of that.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_reset_container(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ int child_container_id);
+
+/**
+ * struct dprc_attributes - Container attributes
+ * @container_id: Container's ID
+ * @icid: Container's ICID
+ * @portal_id: Container's portal ID
+ * @options: Container's options as set at container's creation
+ * @version: DPRC version
+ */
+struct dprc_attributes {
+ int container_id;
+ uint16_t icid;
+ int portal_id;
+ uint64_t options;
+ /**
+ * struct version - DPRC version
+ * @major: DPRC major version
+ * @minor: DPRC minor version
+ */
+ struct {
+ uint16_t major;
+ uint16_t minor;
+ } version;
+};
+
+/**
+ * dprc_get_attributes() - Obtains container attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPRC object
+ * @attributes: Returned container attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_attributes(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ struct dprc_attributes *attributes);
+
+/**
+ * dprc_get_obj_count() - Obtains the number of objects in the DPRC
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPRC object
+ * @obj_count: Number of objects assigned to the DPRC
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_obj_count(struct fsl_mc_io *mc_io, uint16_t token, int *obj_count);
+
+/* Objects Attributes Flags */
+
+/* Opened state - Indicates that an object is open by at least one owner */
+#define DPRC_OBJ_STATE_OPEN 0x00000001
+/* Plugged state - Indicates that the object is plugged */
+#define DPRC_OBJ_STATE_PLUGGED 0x00000002
+
+/**
+ * struct dprc_obj_desc - Object descriptor, returned from dprc_get_obj()
+ * @type: Type of object: NULL terminated string
+ * @id: ID of logical object resource
+ * @vendor: Object vendor identifier
+ * @ver_major: Major version number
+ * @ver_minor: Minor version number
+ * @irq_count: Number of interrupts supported by the object
+ * @region_count: Number of mappable regions supported by the object
+ * @state: Object state: combination of DPRC_OBJ_STATE_ states
+ */
+struct dprc_obj_desc {
+ char type[16];
+ int id;
+ uint16_t vendor;
+ uint16_t ver_major;
+ uint16_t ver_minor;
+ uint8_t irq_count;
+ uint8_t region_count;
+ uint32_t state;
+};
+
+/**
+ * dprc_get_obj() - Get general information on an object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPRC object
+ * @obj_index: Index of the object to be queried (< obj_count)
+ * @obj_desc: Returns the requested object descriptor
+ *
+ * The object descriptors are retrieved one by one by incrementing
+ * obj_index up to (not including) the value of obj_count returned
+ * from dprc_get_obj_count(). dprc_get_obj_count() must
+ * be called prior to dprc_get_obj().
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_obj(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ int obj_index,
+ struct dprc_obj_desc *obj_desc);
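
The enumeration protocol above can be used as follows (illustrative sketch; 'dprc_token' comes from a prior dprc_open()):

static void example_list_objects(struct fsl_mc_io *mc_io, uint16_t dprc_token)
{
	struct dprc_obj_desc desc;
	int obj_count, i;

	if (dprc_get_obj_count(mc_io, dprc_token, &obj_count))
		return;

	for (i = 0; i < obj_count; i++) {
		if (dprc_get_obj(mc_io, dprc_token, i, &desc))
			continue;
		printf("%s.%d ver %d.%d %s\n", desc.type, desc.id,
		       desc.ver_major, desc.ver_minor,
		       (desc.state & DPRC_OBJ_STATE_PLUGGED) ?
		       "plugged" : "unplugged");
	}
}
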
+
+/**
+ * dprc_get_res_count() - Obtains the number of free resources that are assigned
+ * to this container, by pool type
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPRC object
+ * @type: pool type
+ * @res_count: Returned number of free resources of the given
+ * resource type that are assigned to this DPRC
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_res_count(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ char *type,
+ int *res_count);
+
+/**
+ * enum dprc_iter_status - Iteration status
+ * @DPRC_ITER_STATUS_FIRST: Perform first iteration
+ * @DPRC_ITER_STATUS_MORE: Indicates more/next iteration is needed
+ * @DPRC_ITER_STATUS_LAST: Indicates last iteration
+ */
+enum dprc_iter_status {
+ DPRC_ITER_STATUS_FIRST = 0,
+ DPRC_ITER_STATUS_MORE = 1,
+ DPRC_ITER_STATUS_LAST = 2
+};
+
+/**
+ * struct dprc_res_ids_range_desc - Resource ID range descriptor
+ * @base_id: Base resource ID of this range
+ * @last_id: Last resource ID of this range
+ * @iter_status: Iteration status - should be set to DPRC_ITER_STATUS_FIRST at
+ * first iteration; while the returned marker is DPRC_ITER_STATUS_MORE,
+ * additional iterations are needed, until the returned marker is
+ * DPRC_ITER_STATUS_LAST
+ */
+struct dprc_res_ids_range_desc {
+ int base_id;
+ int last_id;
+ enum dprc_iter_status iter_status;
+};
+
+/**
+ * dprc_get_res_ids() - Obtains IDs of free resources in the container
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPRC object
+ * @type: pool type
+ * @range_desc: range descriptor
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_res_ids(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ char *type,
+ struct dprc_res_ids_range_desc *range_desc);
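
Illustrative sketch of the iteration protocol described for iter_status; the pool-type string is a placeholder supplied by the caller:

static void example_walk_free_ids(struct fsl_mc_io *mc_io, uint16_t dprc_token,
				  char *pool_type)
{
	struct dprc_res_ids_range_desc range = { 0 };

	range.iter_status = DPRC_ITER_STATUS_FIRST;
	do {
		if (dprc_get_res_ids(mc_io, dprc_token, pool_type, &range))
			break;
		printf("free %s IDs: %d..%d\n", pool_type,
		       range.base_id, range.last_id);
	} while (range.iter_status == DPRC_ITER_STATUS_MORE);
}
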
+
+/**
+ * struct dprc_region_desc - Mappable region descriptor
+ * @base_paddr: Region base physical address
+ * @size: Region size (in bytes)
+ */
+struct dprc_region_desc {
+ uint64_t base_paddr;
+ uint32_t size;
+};
+
+/**
+ * dprc_get_obj_region() - Get region information for a specified object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPRC object
+ * @obj_type: Object type as returned in dprc_get_obj()
+ * @obj_id: Unique object instance as returned in dprc_get_obj()
+ * @region_index: The specific region to query
+ * @region_desc: Returns the requested region descriptor
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_obj_region(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ char *obj_type,
+ int obj_id,
+ uint8_t region_index,
+ struct dprc_region_desc *region_desc);
+/**
+ * struct dprc_endpoint - Endpoint description for link connect/disconnect
+ * operations
+ * @type: Endpoint object type: NULL terminated string
+ * @id: Endpoint object ID
+ * @interface_id: Interface ID; should be set for endpoints with multiple
+ * interfaces ("dpsw", "dpdmux"); for others, always set to 0
+ */
+struct dprc_endpoint {
+ char type[16];
+ int id;
+ int interface_id;
+};
+
+/**
+ * dprc_connect() - Connect two endpoints to create a network link between them
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPRC object
+ * @endpoint1: Endpoint 1 configuration parameters
+ * @endpoint2: Endpoint 2 configuration parameters
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_connect(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ const struct dprc_endpoint *endpoint1,
+ const struct dprc_endpoint *endpoint2);
+
+/**
+ * dprc_disconnect() - Disconnect one endpoint to remove its network connection
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPRC object
+ * @endpoint: Endpoint configuration parameters
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_disconnect(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ const struct dprc_endpoint *endpoint);
+
+/**
+ * dprc_get_connection() - Get connected endpoint and link status if connection
+ *			exists.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @token: Token of DPRC object
+ * @endpoint1: Endpoint 1 configuration parameters
+ * @endpoint2: Returned endpoint 2 configuration parameters
+ * @state: Returned link state: 1 - link is up, 0 - link is down
+ *
+ * Return: '0' on Success; -ENAVAIL if connection does not exist.
+ */
+int dprc_get_connection(struct fsl_mc_io *mc_io,
+ uint16_t token,
+ const struct dprc_endpoint *endpoint1,
+ struct dprc_endpoint *endpoint2,
+ int *state);
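
Illustrative sketch: connect a DPNI endpoint to a DPMAC endpoint and query the resulting link; the object IDs below are placeholders.

static int example_connect(struct fsl_mc_io *mc_io, uint16_t dprc_token)
{
	struct dprc_endpoint dpni_ep = { .type = "dpni", .id = 0 };
	struct dprc_endpoint dpmac_ep = { .type = "dpmac", .id = 1 };
	struct dprc_endpoint peer;
	int link_up = 0;
	int err;

	err = dprc_connect(mc_io, dprc_token, &dpni_ep, &dpmac_ep);
	if (err)
		return err;

	err = dprc_get_connection(mc_io, dprc_token, &dpni_ep, &peer, &link_up);
	if (!err)
		printf("connected to %s.%d, link %s\n", peer.type, peer.id,
		       link_up ? "up" : "down");
	return err;
}
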
+
+#endif /* _FSL_DPRC_H */
u32 reg_error[];
};
-int mc_init(bd_t *bis);
-
int get_mc_boot_status(void);
unsigned long mc_get_dram_block_size(void);
+int fsl_mc_ldpaa_init(bd_t *bis);
+void fsl_mc_ldpaa_exit(bd_t *bis);
#endif
-/* Copyright 2014 Freescale Semiconductor Inc.
+/* Copyright 2013-2015 Freescale Semiconductor Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*/
#define MC_CMD_HDR_CMDID_O 52 /* Command ID field offset */
#define MC_CMD_HDR_CMDID_S 12 /* Command ID field size */
-#define MC_CMD_HDR_AUTHID_O 38 /* Authentication ID field offset */
-#define MC_CMD_HDR_AUTHID_S 10 /* Authentication ID field size */
#define MC_CMD_HDR_STATUS_O 16 /* Status field offset */
+#define MC_CMD_HDR_TOKEN_O 38 /* Token field offset */
+#define MC_CMD_HDR_TOKEN_S 10 /* Token field size */
#define MC_CMD_HDR_STATUS_S 8 /* Status field size*/
#define MC_CMD_HDR_PRI_O 15 /* Priority field offset */
#define MC_CMD_HDR_PRI_S 1 /* Priority field size */
((enum mc_cmd_status)u64_dec((_hdr), \
MC_CMD_HDR_STATUS_O, MC_CMD_HDR_STATUS_S))
-#define MC_CMD_HDR_READ_AUTHID(_hdr) \
- ((uint16_t)u64_dec((_hdr), MC_CMD_HDR_AUTHID_O, MC_CMD_HDR_AUTHID_S))
+#define MC_CMD_HDR_READ_TOKEN(_hdr) \
+ ((uint16_t)u64_dec((_hdr), MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S))
#define MC_CMD_PRI_LOW 0 /*!< Low Priority command indication */
#define MC_CMD_PRI_HIGH 1 /*!< High Priority command indication */
+#define MC_EXT_OP(_ext, _param, _offset, _width, _type, _arg) \
+ ((_ext)[_param] |= u64_enc((_offset), (_width), _arg))
+
#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \
((_cmd).params[_param] |= u64_enc((_offset), (_width), _arg))
static inline uint64_t mc_encode_cmd_header(uint16_t cmd_id,
uint8_t priority,
- uint16_t auth_id)
+ uint16_t token)
{
uint64_t hdr;
hdr = u64_enc(MC_CMD_HDR_CMDID_O, MC_CMD_HDR_CMDID_S, cmd_id);
- hdr |= u64_enc(MC_CMD_HDR_AUTHID_O, MC_CMD_HDR_AUTHID_S, auth_id);
+ hdr |= u64_enc(MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S, token);
hdr |= u64_enc(MC_CMD_HDR_PRI_O, MC_CMD_HDR_PRI_S, priority);
hdr |= u64_enc(MC_CMD_HDR_STATUS_O, MC_CMD_HDR_STATUS_S,
MC_CMD_STATUS_READY);
--- /dev/null
+/*
+ * Copyright (C) 2014 Freescale Semiconductor
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#ifndef _FSL_MC_PRIVATE_H_
+#define _FSL_MC_PRIVATE_H_
+
+#include <errno.h>
+#include <malloc.h>
+#include <asm/io.h>
+#include <linux/compat.h>
+#include <linux/types.h>
+#include <linux/stringify.h>
+
+#include <fsl-mc/fsl_mc_sys.h>
+#include <fsl-mc/fsl_mc_cmd.h>
+#include <fsl-mc/fsl_dpbp.h>
+
+extern struct fsl_mc_io *dflt_mc_io;
+
+/**
+ * struct fsl_dpbp_obj - DPBP structure
+ * @dpbp_handle: DPBP object handle
+ * @dpbp_attr: DPBP attributes
+ */
+struct fsl_dpbp_obj {
+ uint16_t dpbp_handle;
+ struct dpbp_attr dpbp_attr;
+};
+
+extern struct fsl_dpbp_obj *dflt_dpbp;
+
+/**
+ * struct fsl_dpio_obj - DPIO structure
+ * @dpio_id: DPIO ID
+ * @sw_portal: QBMan software portal object
+ */
+struct fsl_dpio_obj {
+ int dpio_id;
+ struct qbman_swp *sw_portal; /* SW portal object */
+};
+
+extern struct fsl_dpio_obj *dflt_dpio;
+
+int mc_init(void);
+#endif /* _FSL_MC_PRIVATE_H_ */
--- /dev/null
+/*
+ * Copyright (C) 2014 Freescale Semiconductor
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#ifndef _FSL_QBMAN_BASE_H
+#define _FSL_QBMAN_BASE_H
+
+/* Descriptor for a QBMan instance on the SoC. On partitions/targets that do not
+ * control this QBMan instance, these values may simply be place-holders. The
+ * idea is simply that we be able to distinguish between them, eg. so that SWP
+ * descriptors can identify which QBMan instance they belong to. */
+struct qbman_block_desc {
+ void *ccsr_reg_bar; /* CCSR register map */
+ int irq_rerr; /* Recoverable error interrupt line */
+ int irq_nrerr; /* Non-recoverable error interrupt line */
+};
+
+/* Descriptor for a QBMan software portal, expressed in terms that make sense to
+ * the user context. Ie. on MC, this information is likely to be true-physical,
+ * and instantiated statically at compile-time. On GPP, this information is
+ * likely to be obtained via "discovery" over a partition's "layerscape bus"
+ * (ie. in response to a MC portal command), and would take into account any
+ * virtualisation of the GPP user's address space and/or interrupt numbering. */
+struct qbman_swp_desc {
+ const struct qbman_block_desc *block; /* The QBMan instance */
+ void *cena_bar; /* Cache-enabled portal register map */
+ void *cinh_bar; /* Cache-inhibited portal register map */
+};
+
+/* Driver object for managing a QBMan portal */
+struct qbman_swp;
+
+/* Place-holder for FDs; we represent it via the simplest form that we need for
+ * now. Different overlays may be needed to support different options, etc. (It
+ * is impractical to define One True Struct, because the resulting encoding
+ * routines (lots of read-modify-writes) would be worst-case performance whether
+ * or not circumstances required them.)
+ *
+ * Note, as with all data-structures exchanged between software and hardware (be
+ * they located in the portal register map or DMA'd to and from main-memory),
+ * the driver ensures that the caller of the driver API sees the data-structures
+ * in host-endianness. "struct qbman_fd" is no exception. The 32-bit words
+ * contained within this structure are represented in host-endianness, even if
+ * hardware always treats them as little-endian. As such, if any of these fields
+ * are interpreted in a binary (rather than numerical) fashion by hardware
+ * blocks (eg. accelerators), then the user should be careful. We illustrate
+ * with an example;
+ *
+ * Suppose the desired behaviour of an accelerator is controlled by the "frc"
+ * field of the FDs that are sent to it. Suppose also that the behaviour desired
+ * by the user corresponds to an "frc" value which is expressed as the literal
+ * sequence of bytes 0xfe, 0xed, 0xab, and 0xba. So "frc" should be the 32-bit
+ * value in which 0xfe is the first byte and 0xba is the last byte, and as
+ * hardware is little-endian, this amounts to a 32-bit "value" of 0xbaabedfe. If
+ * the software is little-endian also, this can simply be achieved by setting
+ * frc=0xbaabedfe. On the other hand, if software is big-endian, it should set
+ * frc=0xfeedabba! The best way of avoiding trouble with this sort of thing is
+ * to treat the 32-bit words as numerical values, in which the offset of a field
+ * from the beginning of the first byte (as required or generated by hardware)
+ * is numerically encoded by a left-shift (ie. by raising the field to a
+ * corresponding power of 2). Ie. in the current example, software could set
+ * "frc" in the following way, and it would work correctly on both little-endian
+ * and big-endian operation;
+ * fd.frc = (0xfe << 0) | (0xed << 8) | (0xab << 16) | (0xba << 24);
+ */
+struct qbman_fd {
+ union {
+ uint32_t words[8];
+ struct qbman_fd_simple {
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+ uint32_t len;
+ /* offset in the MS 16 bits, BPID in the LS 16 bits */
+ uint32_t bpid_offset;
+ uint32_t frc; /* frame context */
+ /* "err", "va", "cbmt", "asal", [...] */
+ uint32_t ctrl;
+ /* flow context */
+ uint32_t flc_lo;
+ uint32_t flc_hi;
+ } simple;
+ };
+};
+
+#endif /* !_FSL_QBMAN_BASE_H */
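
A sketch of how the endianness advice above translates to code when filling a simple-format FD: fields packed into one 32-bit word (such as bpid_offset) are built with numeric shifts, so the result is correct on both little- and big-endian hosts. The helper name is illustrative, and the usual string helpers (memset) are assumed to be available.

static void example_fd_fill(struct qbman_fd *fd, uint64_t buf_phys,
			    uint32_t len, uint16_t bpid, uint16_t offset)
{
	memset(fd, 0, sizeof(*fd));
	fd->simple.addr_lo = (uint32_t)buf_phys;
	fd->simple.addr_hi = (uint32_t)(buf_phys >> 32);
	fd->simple.len = len;
	/* offset in the upper 16 bits, buffer pool ID in the lower 16 bits */
	fd->simple.bpid_offset = ((uint32_t)offset << 16) | bpid;
}
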
--- /dev/null
+/*
+ * Copyright (C) 2014 Freescale Semiconductor
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#ifndef _FSL_QBMAN_PORTAL_H
+#define _FSL_QBMAN_PORTAL_H
+
+#include <fsl-mc/fsl_qbman_base.h>
+
+/* Create a functional object representing the given QBMan portal
+ * descriptor. */
+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *);
+
+ /************/
+ /* Dequeues */
+ /************/
+
+/* See the QBMan driver API documentation for details on the dequeue
+ * mechanisms. NB: the use of a 'ldpaa_' prefix for this type is because it is
+ * primarily used by the "DPIO" layer that sits above (and hides) the QBMan
+ * driver. The structure is defined in the DPIO interface, but to avoid circular
+ * dependencies we just pre/re-declare it here opaquely. */
+struct ldpaa_dq;
+
+
+/* ------------------- */
+/* Pull-mode dequeuing */
+/* ------------------- */
+
+struct qbman_pull_desc {
+ uint32_t dont_manipulate_directly[6];
+};
+
+/* Clear the contents of a descriptor to default/starting state. */
+void qbman_pull_desc_clear(struct qbman_pull_desc *);
+/* If not called, or if called with 'storage' as NULL, the resulting pull dequeues
+ * will produce results to DQRR. If 'storage' is non-NULL, then results are
+ * produced to the given memory location (using the physical/DMA address which
+ * the caller provides in 'storage_phys'), and 'stash' controls whether or not
+ * those writes to main-memory express a cache-warming attribute. */
+void qbman_pull_desc_set_storage(struct qbman_pull_desc *,
+ struct ldpaa_dq *storage,
+ dma_addr_t storage_phys,
+ int stash);
+/* numframes must be between 1 and 16, inclusive */
+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *, uint8_t numframes);
+/* token is the value that shows up in the dequeue results that can be used to
+ * detect when the results have been published, and is not really used when
+ * dequeue results go to DQRR. The easiest technique is to zero result "storage"
+ * before issuing a pull dequeue, and use any non-zero 'token' value. */
+void qbman_pull_desc_set_token(struct qbman_pull_desc *, uint8_t token);
+/* Exactly one of the following descriptor "actions" should be set. (Calling any
+ * one of these will replace the effect of any prior call to one of these.)
+ * - pull dequeue from the given frame queue (FQ)
+ * - pull dequeue from any FQ in the given work queue (WQ)
+ * - pull dequeue from any FQ in any WQ in the given channel
+ */
+void qbman_pull_desc_set_fq(struct qbman_pull_desc *, uint32_t fqid);
+
+/* Issue the pull dequeue command */
+int qbman_swp_pull(struct qbman_swp *, struct qbman_pull_desc *);
+
+/* -------------------------------- */
+/* Polling DQRR for dequeue results */
+/* -------------------------------- */
+
+/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
+ * only once, so repeated calls can return a sequence of DQRR entries, without
+ * requiring they be consumed immediately or in any particular order. */
+const struct ldpaa_dq *qbman_swp_dqrr_next(struct qbman_swp *);
+/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
+void qbman_swp_dqrr_consume(struct qbman_swp *, const struct ldpaa_dq *);
+
+/* ------------------------------------------------- */
+/* Polling user-provided storage for dequeue results */
+/* ------------------------------------------------- */
+
+/* Only used for user-provided storage of dequeue results, not DQRR. Prior to
+ * being used, the storage must set "oldtoken", so that the driver notices when
+ * hardware has filled it in with results using a "newtoken". NB, for efficiency
+ * purposes, the driver will perform any required endianness conversion to
+ * ensure that the user's dequeue result storage is in host-endian format
+ * (whether or not that is the same as the little-endian format that hardware
+ * DMA'd to the user's storage). As such, once the user has called
+ * qbman_dq_entry_has_newtoken() and been returned a valid dequeue result, they
+ * should not call it again on the same memory location (except of course if
+ * another dequeue command has been executed to produce a new result to that
+ * location).
+ */
+void qbman_dq_entry_set_oldtoken(struct ldpaa_dq *,
+ unsigned int num_entries,
+ uint8_t oldtoken);
+int qbman_dq_entry_has_newtoken(struct qbman_swp *,
+ const struct ldpaa_dq *,
+ uint8_t newtoken);
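
Putting the pull-dequeue and token-polling pieces together (illustrative sketch; 'swp' is a portal from qbman_swp_init(), 'storage'/'storage_phys' point at one caller-allocated, DMA-able dequeue entry, and 'fqid' could come from, e.g., dpni_get_rx_flow()):

static const struct ldpaa_dq *example_pull_one(struct qbman_swp *swp,
					       struct ldpaa_dq *storage,
					       dma_addr_t storage_phys,
					       uint32_t fqid)
{
	struct qbman_pull_desc pulldesc;

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_storage(&pulldesc, storage, storage_phys, 1);
	qbman_pull_desc_set_numframes(&pulldesc, 1);
	qbman_pull_desc_set_token(&pulldesc, 0xab);	/* any non-zero value */
	qbman_pull_desc_set_fq(&pulldesc, fqid);

	/* Zero the storage token before issuing the pull */
	qbman_dq_entry_set_oldtoken(storage, 1, 0);
	if (qbman_swp_pull(swp, &pulldesc))
		return NULL;

	/* Busy-wait until hardware publishes the result */
	while (!qbman_dq_entry_has_newtoken(swp, storage, 0xab))
		;

	return qbman_dq_entry_is_DQ(storage) ? storage : NULL;
}
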
+
+/* -------------------------------------------------------- */
+/* Parsing dequeue entries (DQRR and user-provided storage) */
+/* -------------------------------------------------------- */
+
+/* DQRR entries may contain non-dequeue results, ie. notifications */
+int qbman_dq_entry_is_DQ(const struct ldpaa_dq *);
+
+ /************/
+ /* Enqueues */
+ /************/
+
+struct qbman_eq_desc {
+ uint32_t dont_manipulate_directly[8];
+};
+
+
+/* Clear the contents of a descriptor to default/starting state. */
+void qbman_eq_desc_clear(struct qbman_eq_desc *);
+/* Exactly one of the following descriptor "actions" should be set. (Calling
+ * any one of these will replace the effect of any prior call to one of these.)
+ * - enqueue without order-restoration
+ * - enqueue with order-restoration
+ * - fill a hole in the order-restoration sequence, without any enqueue
+ * - advance NESN (Next Expected Sequence Number), without any enqueue
+ * 'respond_success' indicates whether an enqueue response should be DMA'd
+ * after success (otherwise a response is DMA'd only after failure).
+ * 'incomplete' indicates that other fragments of the same 'seqnum' are yet to
+ * be enqueued.
+ */
+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *, int respond_success);
+void qbman_eq_desc_set_response(struct qbman_eq_desc *,
+ dma_addr_t storage_phys,
+ int stash);
+/* token is the value that shows up in an enqueue response that can be used to
+ * detect when the results have been published. The easiest technique is to zero
+ * result "storage" before issuing an enqueue, and use any non-zero 'token'
+ * value. */
+void qbman_eq_desc_set_token(struct qbman_eq_desc *, uint8_t token);
+/* Exactly one of the following descriptor "targets" should be set. (Calling any
+ * one of these will replace the effect of any prior call to one of these.)
+ * - enqueue to a frame queue
+ * - enqueue to a queuing destination
+ * Note that none of these will have any effect if the "action" type has been
+ * set to "orp_hole" or "orp_nesn".
+ */
+void qbman_eq_desc_set_fq(struct qbman_eq_desc *, uint32_t fqid);
+void qbman_eq_desc_set_qd(struct qbman_eq_desc *, uint32_t qdid,
+ uint32_t qd_bin, uint32_t qd_prio);
+
+/* Issue an enqueue command. ('fd' should only be NULL if the "action" of the
+ * descriptor is "orp_hole" or "orp_nesn".) */
+int qbman_swp_enqueue(struct qbman_swp *, const struct qbman_eq_desc *,
+ const struct qbman_fd *fd);
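
Illustrative sketch of an enqueue through a queuing destination, the path a DPNI Tx flow would use; 'qdid' and 'flow_id' are assumed to come from dpni_get_qdid() and dpni_set_tx_flow():

static int example_enqueue_qd(struct qbman_swp *swp, const struct qbman_fd *fd,
			      uint32_t qdid, uint16_t flow_id)
{
	struct qbman_eq_desc ed;

	qbman_eq_desc_clear(&ed);
	/* No order restoration; DMA a response only on failure */
	qbman_eq_desc_set_no_orp(&ed, 0);
	/* Enqueue bin (QDBIN) = Tx flow ID, priority 0 within the QD */
	qbman_eq_desc_set_qd(&ed, qdid, flow_id, 0);

	return qbman_swp_enqueue(swp, &ed, fd);
}
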
+
+ /*******************/
+ /* Buffer releases */
+ /*******************/
+
+struct qbman_release_desc {
+ uint32_t dont_manipulate_directly[1];
+};
+
+/* Clear the contents of a descriptor to default/starting state. */
+void qbman_release_desc_clear(struct qbman_release_desc *);
+/* Set the ID of the buffer pool to release to */
+void qbman_release_desc_set_bpid(struct qbman_release_desc *, uint32_t bpid);
+/* Issue a release command. 'num_buffers' must be less than 8. */
+int qbman_swp_release(struct qbman_swp *, const struct qbman_release_desc *,
+ const uint64_t *buffers, unsigned int num_buffers);
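
Illustrative sketch: seed a buffer pool with a handful of free buffers ('num_buffers' stays below 8, per the constraint above); 'bpid' and the buffer addresses are assumed to be provided by the caller.

static int example_seed_pool(struct qbman_swp *swp, uint32_t bpid,
			     const uint64_t bufs[7])
{
	struct qbman_release_desc rd;

	qbman_release_desc_clear(&rd);
	qbman_release_desc_set_bpid(&rd, bpid);
	/* Release 7 buffer addresses to the pool in one command */
	return qbman_swp_release(swp, &rd, bufs, 7);
}
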
+
+ /*******************/
+ /* Buffer acquires */
+ /*******************/
+
+int qbman_swp_acquire(struct qbman_swp *, uint32_t bpid, uint64_t *buffers,
+ unsigned int num_buffers);
+#endif /* !_FSL_QBMAN_PORTAL_H */