/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * U-Boot version:
 * Copyright (C) 2016 Stefan Roese <sr@denx.de>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <common.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <net.h>
#include <config.h>
#include <malloc.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <phy.h>
#include <miiphy.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include <linux/compat.h>
#include <linux/mbus.h>

DECLARE_GLOBAL_DATA_PTR;
/* Some linux -> U-Boot compatibility stuff */
#define netdev_err(dev, fmt, args...)		\
	printf(fmt, ##args)
#define netdev_warn(dev, fmt, args...)		\
	printf(fmt, ##args)
#define netdev_info(dev, fmt, args...)		\
	printf(fmt, ##args)
#define netdev_dbg(dev, fmt, args...)		\
	printf(fmt, ##args)

#define ETH_ALEN	6		/* Octets in one ethernet addr */

#define __verify_pcpu_ptr(ptr)						\
do {									\
	const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;	\
	(void)__vpp_verify;						\
} while (0)

#define VERIFY_PERCPU_PTR(__p)						\
({									\
	__verify_pcpu_ptr(__p);						\
	(typeof(*(__p)) __kernel __force *)(__p);			\
})

#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
#define smp_processor_id()	0
#define num_present_cpus()	1
#define for_each_present_cpu(cpu)			\
	for ((cpu) = 0; (cpu) < 1; (cpu)++)
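/*
 * With the stubs above, Linux per-CPU constructs collapse to plain
 * single-CPU accesses in this U-Boot port. A minimal sketch (port->stats
 * being any __percpu pointer declared later in this file):
 *
 *	struct mvpp2_pcpu_stats *s =
 *		per_cpu_ptr(port->stats, smp_processor_id());
 *
 * simply evaluates to port->stats itself, since only CPU 0 exists here.
 */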
#define NET_SKB_PAD	max(32, MVPP2_CPU_D_CACHE_LINE_SIZE)

#define CONFIG_NR_CPUS		1
#define ETH_HLEN		ETHER_HDR_SIZE	/* Total octets in header */
#define MTU			1500

/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
#define WRAP			(2 + ETH_HLEN + 4 + 32)
#define RX_BUFFER_SIZE		(ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN))
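/*
 * Worked example, assuming the defaults above: WRAP = 2 + 14 + 4 + 32 = 52
 * bytes, so RX_BUFFER_SIZE = ALIGN(1500 + 52, ARCH_DMA_MINALIGN), i.e.
 * 1600 bytes with the common 64-byte ARCH_DMA_MINALIGN.
 */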
#define MVPP2_SMI_TIMEOUT	10000
/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)	(0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG		0x60
#define MVPP2_RX_FIFO_INIT_REG			0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port)			(0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)	(((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK	BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool)		(0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET		5
#define MVPP2_RXQ_CONFIG_REG(rxq)		(0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK		0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK		BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS		20
#define MVPP21_RXQ_POOL_SHORT_MASK		0x700000
#define MVPP22_RXQ_POOL_SHORT_MASK		0xf00000
#define MVPP2_RXQ_POOL_LONG_OFFS		24
#define MVPP21_RXQ_POOL_LONG_MASK		0x7000000
#define MVPP22_RXQ_POOL_LONG_MASK		0xf000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS		28
#define MVPP2_RXQ_PACKET_OFFSET_MASK		0x70000000
#define MVPP2_RXQ_DISABLE_MASK			BIT(31)
/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG		0x1000
#define MVPP2_PRS_PORT_LU_MAX			0xf
#define MVPP2_PRS_PORT_LU_MASK(port)		(0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val)	((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port)		(0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port)		(0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port)		(0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port)		(0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG			0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx)		(0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK			BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG			0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx)		(0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG			0x1230
#define MVPP2_PRS_TCAM_EN_MASK			BIT(0)
/* Classifier Registers */
#define MVPP2_CLS_MODE_REG			0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK		BIT(0)
#define MVPP2_CLS_PORT_WAY_REG			0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port)		(1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG			0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS		6
#define MVPP2_CLS_LKP_TBL_REG			0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK		0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK	BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG		0x1820
#define MVPP2_CLS_FLOW_TBL0_REG			0x1824
#define MVPP2_CLS_FLOW_TBL1_REG			0x1828
#define MVPP2_CLS_FLOW_TBL2_REG			0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS		3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK		0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port)		(0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG		0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << (port))
/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG			0x2040
#define MVPP2_RXQ_DESC_ADDR_REG			0x2044
#define MVPP22_DESC_ADDR_OFFS			8
#define MVPP2_RXQ_DESC_SIZE_REG			0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET		0
#define MVPP2_RXQ_NUM_NEW_OFFSET		16
#define MVPP2_RXQ_STATUS_REG(rxq)		(0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK			0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET		16
#define MVPP2_RXQ_NON_OCCUPIED_MASK		0x3fff0000
#define MVPP2_RXQ_THRESH_REG			0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET		0
#define MVPP2_OCCUPIED_THRESH_MASK		0x3fff
#define MVPP2_RXQ_INDEX_REG			0x2050
#define MVPP2_TXQ_NUM_REG			0x2080
#define MVPP2_TXQ_DESC_ADDR_REG			0x2084
#define MVPP2_TXQ_DESC_SIZE_REG			0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG		0x2090
#define MVPP2_TXQ_THRESH_REG			0x2094
#define MVPP2_TRANSMITTED_THRESH_OFFSET		16
#define MVPP2_TRANSMITTED_THRESH_MASK		0x3fff0000
#define MVPP2_TXQ_INDEX_REG			0x2098
#define MVPP2_TXQ_PREF_BUF_REG			0x209c
#define MVPP2_PREF_BUF_PTR(desc)		((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4			(BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16			(BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val)		((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK			BIT(31)
#define MVPP2_TXQ_PENDING_REG			0x20a0
#define MVPP2_TXQ_PENDING_MASK			0x3fff
#define MVPP2_TXQ_INT_STATUS_REG		0x20a4
#define MVPP2_TXQ_SENT_REG(txq)			(0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET		16
#define MVPP2_TRANSMITTED_COUNT_MASK		0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG			0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET		16
#define MVPP2_TXQ_RSVD_RSLT_REG			0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK		0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG			0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET		16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS		8
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)		(0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK		0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)		(0x21c0 + 4 * (cpu))
/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w)			(0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w)			(0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w)			(0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE			0x4060
/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)		(0x5200 + 4 * (rxq))
#define MVPP2_ISR_RXQ_GROUP_REG(rxq)		(0x5400 + 4 * (rxq))
#define MVPP2_ISR_ENABLE_REG(port)		(0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port)		(0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK		BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port)		(0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG		0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG		0x55b0
/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool)		(0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK		0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool)		(0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK			0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK		0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK		0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK		0x7ff
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK	BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool)		(0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK			BIT(0)
#define MVPP2_BM_STOP_MASK			BIT(1)
#define MVPP2_BM_STATE_MASK			BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS		8
#define MVPP2_BM_LOW_THRESH_MASK		0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val)		((val) << \
						MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS		16
#define MVPP2_BM_HIGH_THRESH_MASK		0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val)		((val) << \
						MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool)		(0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK		BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK		BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK		BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK			BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK		BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool)		(0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool)		(0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK		BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG			0x6440
#define MVPP2_BM_ADDR_HIGH_ALLOC		0x6444
#define MVPP2_BM_ADDR_HIGH_PHYS_MASK		0xff
#define MVPP2_BM_ADDR_HIGH_VIRT_MASK		0xff00
#define MVPP2_BM_ADDR_HIGH_VIRT_SHIFT		8
#define MVPP2_BM_PHY_RLS_REG(pool)		(0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK		BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK		BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK		BIT(2)
#define MVPP2_BM_VIRT_RLS_REG			0x64c0
#define MVPP21_BM_MC_RLS_REG			0x64c4
#define MVPP2_BM_MC_ID_MASK			0xfff
#define MVPP2_BM_FORCE_RELEASE_MASK		BIT(12)
#define MVPP22_BM_ADDR_HIGH_RLS_REG		0x64c4
#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK	0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK	0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT	8
#define MVPP22_BM_MC_RLS_REG			0x64d4
/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG		0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG		0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK		0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET		8
#define MVPP2_TXP_SCHED_CMD_1_REG		0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG		0x8018
#define MVPP2_TXP_SCHED_MTU_REG			0x801c
#define MVPP2_TXP_MTU_MAX			0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG		0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG		0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX		0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q)		(0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX		0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX		0xffffffff
/* TX general registers */
#define MVPP2_TX_SNOOP_REG			0x8800
#define MVPP2_TX_PORT_FLUSH_REG			0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port)		(1 << (port))
/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE			0x24
#define MVPP2_SRC_ADDR_HIGH			0x28
#define MVPP2_PHY_AN_CFG0_REG			0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK		BIT(7)
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT		0x27
/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG			0x0
#define MVPP2_GMAC_PORT_EN_MASK			BIT(0)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS		2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK		0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK		BIT(15)
#define MVPP2_GMAC_CTRL_1_REG			0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK		BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK		BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT		6
#define MVPP2_GMAC_PCS_LB_EN_MASK		BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS			7
#define MVPP2_GMAC_CTRL_2_REG			0x8
#define MVPP2_GMAC_INBAND_AN_MASK		BIT(0)
#define MVPP2_GMAC_PCS_ENABLE_MASK		BIT(3)
#define MVPP2_GMAC_PORT_RGMII_MASK		BIT(4)
#define MVPP2_GMAC_PORT_RESET_MASK		BIT(6)
#define MVPP2_GMAC_AUTONEG_CONFIG		0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN		BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS		BIT(1)
#define MVPP2_GMAC_CONFIG_MII_SPEED		BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED		BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN			BIT(7)
#define MVPP2_GMAC_FC_ADV_EN			BIT(9)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX		BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN			BIT(13)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG		0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS		6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK	0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)	(((v) << 6) & \
					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)

#define MVPP22_SMI_MISC_CFG_REG			0x1204
#define MVPP22_SMI_POLLING_EN			BIT(10)

#define MVPP22_PORT_BASE			0x30e00
#define MVPP22_PORT_OFFSET			0x1000

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff
/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
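/*
 * Example: with a 16-entry ring (last_desc = 15), indices advance
 * 0, 1, ..., 15 and then wrap back to 0, traversing the ring without
 * any modulo operation.
 */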
/* SMI: 0xc0054 -> offset 0x54 to lms_base */
#define MVPP2_SMI			0x0054

/* SMI register fields */
#define MVPP2_SMI_DATA_OFFS		0	/* Data */
#define MVPP2_SMI_DATA_MASK		(0xffff << MVPP2_SMI_DATA_OFFS)
#define MVPP2_SMI_DEV_ADDR_OFFS		16	/* PHY device address */
#define MVPP2_SMI_REG_ADDR_OFFS		21	/* PHY device reg addr*/
#define MVPP2_SMI_OPCODE_OFFS		26	/* Write/Read opcode */
#define MVPP2_SMI_OPCODE_READ		(1 << MVPP2_SMI_OPCODE_OFFS)
#define MVPP2_SMI_READ_VALID		(1 << 27)	/* Read Valid */
#define MVPP2_SMI_BUSY			(1 << 28)	/* Busy */

#define MVPP2_PHY_ADDR_MASK		0x1f
#define MVPP2_PHY_REG_MASK		0x1f
/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH	15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS	1000000UL
#define MVPP2_RX_COAL_PKTS		32
#define MVPP2_RX_COAL_USEC		100
/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow the IP header to be aligned on a 4 byte
 * boundary automatically: the hardware skips those two bytes on its
 * own.
 */
#define MVPP2_MH_SIZE			2
#define MVPP2_ETH_TYPE_LEN		2
#define MVPP2_PPPOE_HDR_SIZE		8
#define MVPP2_VLAN_TAG_LEN		4
/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE		0xfffa

#define MVPP2_CPU_D_CACHE_LINE_SIZE	32
#define MVPP2_TX_CSUM_MAX_SIZE		9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC	1000

#define MVPP2_TX_MTU_MAX		0x7ffff
/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT			16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS			4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ			8

/* Maximum number of RXQs used by single port */
#define MVPP2_MAX_RXQ			8

/* Default number of TXQs in use */
#define MVPP2_DEFAULT_TXQ		1

/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ		1
#define CONFIG_MV_ETH_RXQ		8	/* increment by 8 */
/* Total number of RXQs available to all ports */
#define MVPP2_RXQ_TOTAL_NUM	(MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD		16

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD		16

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK	64

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE	256

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE	32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN	(MVPP2_DESC_ALIGNED_SIZE - 1)
/* RX FIFO constants */
#define MVPP2_RX_FIFO_PORT_DATA_SIZE	0x2000
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE	0x80
#define MVPP2_RX_FIFO_PORT_MIN_PKT	0x80
/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	0

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)

#define MVPP2_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size)	((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_BIT_TO_BYTE(bit)		((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE		16

/* Port flags */
#define MVPP2_F_LOOPBACK		BIT(0)
/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH = 1,
	MVPP2_TAG_TYPE_DSA = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};
/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE	256
#define MVPP2_PRS_TCAM_WORDS		6
#define MVPP2_PRS_SRAM_WORDS		4
#define MVPP2_PRS_FLOW_ID_SIZE		64
#define MVPP2_PRS_FLOW_ID_MASK		0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID	1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT	BIT(5)
#define MVPP2_PRS_IPV4_HEAD		0x40
#define MVPP2_PRS_IPV4_HEAD_MASK	0xf0
#define MVPP2_PRS_IPV4_MC		0xe0
#define MVPP2_PRS_IPV4_MC_MASK		0xf0
#define MVPP2_PRS_IPV4_BC_MASK		0xff
#define MVPP2_PRS_IPV4_IHL		0x5
#define MVPP2_PRS_IPV4_IHL_MASK		0xf
#define MVPP2_PRS_IPV6_MC		0xff
#define MVPP2_PRS_IPV6_MC_MASK		0xff
#define MVPP2_PRS_IPV6_HOP_MASK		0xff
#define MVPP2_PRS_TCAM_PROTO_MASK	0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L	0x3f
#define MVPP2_PRS_DBL_VLANS_MAX		100
/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS			8
#define MVPP2_PRS_PORT_MASK			0xff
#define MVPP2_PRS_LU_MASK			0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs)		\
	(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)	\
	(((offs) * 2) - ((offs) % 2) + 2)
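/*
 * Worked example of the byte swizzle above: each 32-bit TCAM word holds two
 * data bytes in its low half and their two enable bytes in its high half.
 * So offs 0 -> data byte 0 / enable byte 2, offs 1 -> byte 1 / enable 3,
 * offs 2 -> byte 4 / enable 6, offs 3 -> byte 5 / enable 7, and so on.
 */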
#define MVPP2_PRS_TCAM_AI_BYTE			16
#define MVPP2_PRS_TCAM_PORT_BYTE		17
#define MVPP2_PRS_TCAM_LU_BYTE			20
#define MVPP2_PRS_TCAM_EN_OFFS(offs)		((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD			5
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL		0
#define MVPP2_PE_FIRST_FREE_TID		1
#define MVPP2_PE_LAST_FREE_TID		(MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6		(MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL		(MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE		(MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL		(MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 1)
/* Sram structure
 * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS			0
#define MVPP2_PRS_SRAM_RI_WORD			0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS		32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD		1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS		32
#define MVPP2_PRS_SRAM_SHIFT_OFFS		64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT		72
#define MVPP2_PRS_SRAM_UDF_OFFS			73
#define MVPP2_PRS_SRAM_UDF_BITS			8
#define MVPP2_PRS_SRAM_UDF_MASK			0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT		81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS		82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK		0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3		1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4		4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD		1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS		87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS		2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK		0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD		0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS		89
#define MVPP2_PRS_SRAM_AI_OFFS			90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS		98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS		8
#define MVPP2_PRS_SRAM_AI_MASK			0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS		106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK		0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT		110
#define MVPP2_PRS_SRAM_LU_GEN_BIT		111
/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK		0x1
#define MVPP2_PRS_RI_DSA_MASK			0x2
#define MVPP2_PRS_RI_VLAN_MASK			(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE			0x0
#define MVPP2_PRS_RI_VLAN_SINGLE		BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE		BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE		(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK		0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC		BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK		(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST			0x0
#define MVPP2_PRS_RI_L2_MCAST			BIT(9)
#define MVPP2_PRS_RI_L2_BCAST			BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK			0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK		(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN			0x0
#define MVPP2_PRS_RI_L3_IP4			BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT			BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER		(BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6			BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT			(BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP			(BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK		(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST			0x0
#define MVPP2_PRS_RI_L3_MCAST			BIT(15)
#define MVPP2_PRS_RI_L3_BCAST			(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK		0x20000
#define MVPP2_PRS_RI_UDF3_MASK			0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL		BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK		0x1c00000
#define MVPP2_PRS_RI_L4_TCP			BIT(22)
#define MVPP2_PRS_RI_L4_UDP			BIT(23)
#define MVPP2_PRS_RI_L4_OTHER			(BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK			0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE		BIT(29)
#define MVPP2_PRS_RI_DROP_MASK			0x80000000
/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT		BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT		BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT		BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI		0
#define MVPP2_PRS_DBL_VLAN_AI_BIT		BIT(7)

/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED		true
#define MVPP2_PRS_UNTAGGED		false
#define MVPP2_PRS_EDSA			true
#define MVPP2_PRS_DSA			false
/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};

/* Lookup ID */
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};
/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE	512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
#define MVPP2_CLS_LKP_TBL_SIZE		64
/* BM constants */
#define MVPP2_BM_POOLS_NUM		1
#define MVPP2_BM_LONG_BUF_NUM		16
#define MVPP2_BM_SHORT_BUF_NUM		16
#define MVPP2_BM_POOL_SIZE_MAX		(16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN		128
#define MVPP2_BM_SWF_LONG_POOL(port)	0
/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS	8
#define MVPP2_BM_COOKIE_CPU_OFFS	24

/* BM short pool packet size
 * These values ensure that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE		MVPP2_RX_MAX_PKT_SIZE(512)

enum mvpp2_bm_type {
	MVPP2_BM_FREE,
	MVPP2_BM_SWF_LONG,
	MVPP2_BM_SWF_SHORT
};
/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *base;
	void __iomem *lms_base;
	void __iomem *iface_base;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* Tclk value */
	u32 tclk;

	/* HW version */
	enum { MVPP21, MVPP22 } hw_version;
};
struct mvpp2_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};
struct mvpp2_port {
	u8 id;

	/* Index of the port from the "group of ports" complex point
	 * of view
	 */
	int gop_id;

	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;

	u32 pending_cause_rx;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	struct mvpp2_pcpu_stats __percpu *stats;

	struct phy_device *phy_dev;
	phy_interface_t phy_interface;

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;

	u8 dev_addr[ETH_ALEN];
};
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVPP2_TXD_L3_OFF_SHIFT		0
#define MVPP2_TXD_IP_HLEN_SHIFT		8
#define MVPP2_TXD_L4_CSUM_FRAG		BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT		BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
#define MVPP2_TXD_L4_UDP		BIT(24)
#define MVPP2_TXD_L3_IP6		BIT(26)
#define MVPP2_TXD_L_DESC		BIT(28)
#define MVPP2_TXD_F_DESC		BIT(29)

#define MVPP2_RXD_ERR_SUMMARY		BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK		(BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC		0x0
#define MVPP2_RXD_ERR_OVERRUN		BIT(13)
#define MVPP2_RXD_ERR_RESOURCE		(BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS	16
#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC		BIT(21)
#define MVPP2_RXD_L4_CSUM_OK		BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
#define MVPP2_RXD_L4_TCP		BIT(25)
#define MVPP2_RXD_L4_UDP		BIT(26)
#define MVPP2_RXD_L3_IP4		BIT(28)
#define MVPP2_RXD_L3_IP6		BIT(30)
#define MVPP2_RXD_BUF_HDR		BIT(31)
/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8  packet_offset;	/* the offset from the buffer beginning	*/
	u8  phys_txq;		/* destination queue ID			*/
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_dma_addr;	/* physical addr of transmitted buffer	*/
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use)		*/
};
/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
	u32 status;		/* info about received packet		*/
	u16 reserved1;		/* parser_info (for future use, PnC)	*/
	u16 data_size;		/* size of received packet in bytes	*/
	u32 buf_dma_addr;	/* physical address of the buffer	*/
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
	u8  reserved4;		/* bm_qset (for future use, BM)		*/
	u8  reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC)	*/
	u32 reserved7;		/* flow_id (for future use, PnC)	*/
	u32 reserved8;
};
/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
	u32 command;
	u8  packet_offset;
	u8  phys_txq;
	u16 data_size;
	u64 reserved1;
	u64 buf_dma_addr_ptp;
	u64 buf_cookie_misc;
};

/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
	u32 status;
	u16 reserved1;
	u16 data_size;
	u32 reserved2;
	u32 reserved3;
	u64 buf_dma_addr_key_hash;
	u64 buf_cookie_misc;
};
/* Opaque type used by the driver to manipulate the HW TX and RX
 * descriptors
 */
struct mvpp2_tx_desc {
	union {
		struct mvpp21_tx_desc pp21;
		struct mvpp22_tx_desc pp22;
	};
};

struct mvpp2_rx_desc {
	union {
		struct mvpp21_rx_desc pp21;
		struct mvpp22_rx_desc pp22;
	};
};
/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;
};
struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};
struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};
union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};
struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	unsigned char udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};
struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};
struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;
	/* Packet size */
	int pkt_size;

	/* BPPE virtual base address */
	unsigned long *virt_addr;
	/* BPPE DMA base address */
	dma_addr_t dma_addr;

	/* Ports using BM pool */
	u32 port_map;

	/* Occupied buffers indicator */
	int in_use_thresh;
};
/* Static declarations */

/* Number of RXQs used by single port */
static int rxq_number = MVPP2_DEFAULT_RXQ;
/* Number of TXQs used by single port */
static int txq_number = MVPP2_DEFAULT_TXQ;

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"
/*
 * U-Boot internal data, mostly uncached buffers for descriptors and data
 */
struct buffer_location {
	struct mvpp2_tx_desc *aggr_tx_descs;
	struct mvpp2_tx_desc *tx_descs;
	struct mvpp2_rx_desc *rx_descs;
	unsigned long *bm_pool[MVPP2_BM_POOLS_NUM];
	unsigned long *rx_buffer[MVPP2_BM_LONG_BUF_NUM];
};
/*
 * All 4 interfaces use the same global buffer, since only one interface
 * can be enabled at once
 */
static struct buffer_location buffer_loc;
/*
 * Page table entries are set to 1MB, or multiples of 1MB (not < 1MB).
 * The driver uses fewer buffer descriptors, so 1MB of BD space suffices.
 */
#define BD_SPACE	(1 << 20)
/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->base + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->base + offset);
}
static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = dma_addr;
	} else {
		u64 val = (u64)dma_addr;

		tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
	}
}
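/*
 * Note: on PPv2.2 the 64-bit buf_dma_addr_ptp field packs the buffer DMA
 * address into bits 40:0 alongside PTP information in the upper bits,
 * which is why only GENMASK_ULL(40, 0) is updated above while the rest
 * of the field is preserved.
 */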
static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  int size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = size;
	else
		tx_desc->pp22.data_size = size;
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = command;
	else
		tx_desc->pp22.command = command;
}

static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc,
				    unsigned int offset)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.packet_offset = offset;
	else
		tx_desc->pp22.packet_offset = offset;
}
static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_dma_addr;
	else
		return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_cookie;
	else
		return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.data_size;
	else
		return rx_desc->pp22.data_size;
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.status;
	else
		return rx_desc->pp22.status;
}
static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}
/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
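/*
 * Worked example: the first MVPP2_MAX_TCONT (16) egress "ports" belong to
 * the PON T-CONTs, so Ethernet port 0 is physical egress port 16, and its
 * TXQ 0 maps to physical TXQ (16 + 0) * 8 + 0 = 128.
 */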
/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}
/* Read tcam entry from hw */
static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			      MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}
/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}

/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}
/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}

/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}
/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}

/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}
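/*
 * The port map is stored inverted in the enable byte: a cleared enable bit
 * means "match this port". E.g. port_map_set(pe, MVPP2_PRS_PORT_MASK)
 * writes 0x00 to the enable byte (entry matches all ports), while
 * port_map_set(pe, 0) writes 0xff (entry matches no port).
 */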
/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}

/* Get byte of data and its enable bits from tcam sw entry */
static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char *byte,
					 unsigned char *enable)
{
	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}

/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}
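/*
 * Example: matching ARP (ethertype 0x0806) at offset 0 programs data byte 0
 * to 0x08 and data byte 1 to 0x06, each with a fully-enabled 0xff mask,
 * i.e. the ethertype is matched in network (big-endian) byte order.
 */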
/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
}

/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
}

/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}
/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;
	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}

/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_en_off = ai_off + 1;
	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

	bits = (pe->sram.byte[ai_off] >> ai_shift) |
	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));

	return bits;
}
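/*
 * The 8 AI bits start at SRAM bit 90, i.e. they straddle a byte boundary
 * (ai_off = 11, ai_shift = 2): bits [7:2] of sram byte 11 and bits [1:0]
 * of byte 12. The shift/OR above reassembles them into a single byte.
 */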
/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}
/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
		(unsigned char)shift;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
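/*
 * Typical use: mvpp2_prs_mh_init() below calls
 * mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
 *			    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD)
 * so that the next lookup iteration starts right after the two-byte
 * Marvell header.
 */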
/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] |=
				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
		(8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
			     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
/* Find parser flow entry */
static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);

	/* Go through all entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		bits = mvpp2_prs_sram_ai_get(pe);

		/* Sram store classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return pe;
	}
	kfree(pe);

	return NULL;
}
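/*
 * Note: the entry returned by mvpp2_prs_flow_find() is heap-allocated with
 * kzalloc(), so the caller is responsible for kfree()ing it when done.
 */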
/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}
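/*
 * Free-range example: dynamic entries are allocated between
 * MVPP2_PE_FIRST_FREE_TID (1) and MVPP2_PE_LAST_FREE_TID (SIZE - 31),
 * while the fixed default entries occupy the top of the 256-entry TCAM
 * (see the MVPP2_PE_* definitions above).
 */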
/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set port to promiscuous mode */
static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	/* Promiscuous mode - Accept unknown packets */
	if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Accept multicast */
static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
				    bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char da_mc;

	/* Ethernet multicast address first byte is
	 * 0x01 for IPv4 and 0x33 for IPv6
	 */
	da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;

	if (priv->prs_shadow[index].valid) {
		/* Entry exists - update port only */
		pe.index = index;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = index;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Update tcam entry data first byte */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Parser per-port initialization */
static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
				   int lu_max, int offset)
{
	u32 val;

	/* Set lookup ID */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
	val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);

	/* Set maximum number of loops for packet received from port */
	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);

	/* Set initial offset for packet header extraction for the first
	 * searching loop
	 */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
}
/* Default flow entries initialization for all ports */
static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table and hw entry */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_hw_write(priv, &pe);
	}
}
/* Set default entry for Marvell Header field */
static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));

	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);
}
/* Set default entries (place holder) for promiscuous, non-promiscuous and
 * multicast MAC addresses
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));

	/* Non-promiscuous mode for all ports - DROP unknown packets */
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	/* place holders only - no ports */
	mvpp2_prs_mac_drop_all_set(priv, 0, false);
	mvpp2_prs_mac_promisc_set(priv, 0, false);
	mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_ALL, false);
	mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_IP6, false);
}
/* Match basic ethertypes */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_PPP_SES);

	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				 MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_ARP);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				 MVPP2_PRS_RI_CPU_CODE_MASK |
				 MVPP2_PRS_RI_UDF3_MASK);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				MVPP2_PRS_RI_CPU_CODE_MASK |
				MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_IP);
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	/* Clear tcam data before updating */
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD,
				     MVPP2_PRS_IPV4_HEAD_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_IPV6);

	/* Skip DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;
1978 /* Unmask all ports */
1979 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1981	/* Generate flow in the next iteration */
1982 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1983 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1984 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
1985 MVPP2_PRS_RI_L3_PROTO_MASK);
1986	/* Set L3 offset even if it's an unknown L3 */
1987 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1989 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1991 /* Update shadow table and hw entry */
1992 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1993 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1994 priv->prs_shadow[pe.index].finish = true;
1995 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
1996 MVPP2_PRS_RI_L3_PROTO_MASK);
1997 mvpp2_prs_hw_write(priv, &pe);
2002 /* Parser default initialization */
2003 static int mvpp2_prs_default_init(struct udevice *dev,
2008 /* Enable tcam table */
2009 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
2011 /* Clear all tcam and sram entries */
2012 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
2013 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
2014 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
2015 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
2017 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
2018 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
2019 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
2022 /* Invalidate all tcam entries */
2023 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
2024 mvpp2_prs_hw_inv(priv, index);
2026 priv->prs_shadow = devm_kcalloc(dev, MVPP2_PRS_TCAM_SRAM_SIZE,
2027 sizeof(struct mvpp2_prs_shadow),
2029 if (!priv->prs_shadow)
2032 /* Always start from lookup = 0 */
2033 for (index = 0; index < MVPP2_MAX_PORTS; index++)
2034 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
2035 MVPP2_PRS_PORT_LU_MAX, 0);
2037 mvpp2_prs_def_flow_init(priv);
2039 mvpp2_prs_mh_init(priv);
2041 mvpp2_prs_mac_init(priv);
2043 err = mvpp2_prs_etype_init(priv);
2050 /* Compare MAC DA with tcam entry data */
2051 static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
2052 const u8 *da, unsigned char *mask)
2054 unsigned char tcam_byte, tcam_mask;
2057 for (index = 0; index < ETH_ALEN; index++) {
2058 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
2059 if (tcam_mask != mask[index])
2062 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
2069 /* Find tcam entry with matched pair <MAC DA, port> */
2070 static struct mvpp2_prs_entry *
2071 mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
2072 unsigned char *mask, int udf_type)
2074 struct mvpp2_prs_entry *pe;
2077 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2080 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
2082	/* Go through all entries with MVPP2_PRS_LU_MAC */
2083 for (tid = MVPP2_PE_FIRST_FREE_TID;
2084 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2085 unsigned int entry_pmap;
2087 if (!priv->prs_shadow[tid].valid ||
2088 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
2089 (priv->prs_shadow[tid].udf != udf_type))
2093 mvpp2_prs_hw_read(priv, pe);
2094 entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
2096 if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
2105 /* Update parser's mac da entry */
2106 static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
2107 const u8 *da, bool add)
2109 struct mvpp2_prs_entry *pe;
2110 unsigned int pmap, len, ri;
2111 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
2114	/* Scan TCAM and see if an entry with this <MAC DA, port> already exists */
2115 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
2116 MVPP2_PRS_UDF_MAC_DEF);
2123 /* Create new TCAM entry */
2124	/* Find the first range MAC entry */
2125 for (tid = MVPP2_PE_FIRST_FREE_TID;
2126 tid <= MVPP2_PE_LAST_FREE_TID; tid++)
2127 if (priv->prs_shadow[tid].valid &&
2128 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
2129 (priv->prs_shadow[tid].udf ==
2130 MVPP2_PRS_UDF_MAC_RANGE))
2133	/* Go through all entries from first to last */
2134 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2139 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2142 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
2145 /* Mask all ports */
2146 mvpp2_prs_tcam_port_map_set(pe, 0);
2149 /* Update port mask */
2150 mvpp2_prs_tcam_port_set(pe, port, add);
2152 /* Invalidate the entry if no ports are left enabled */
2153 pmap = mvpp2_prs_tcam_port_map_get(pe);
2159 mvpp2_prs_hw_inv(priv, pe->index);
2160 priv->prs_shadow[pe->index].valid = false;
2165 /* Continue - set next lookup */
2166 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
2168 /* Set match on DA */
2171 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
2173 /* Set result info bits */
2174 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
2176 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2177 MVPP2_PRS_RI_MAC_ME_MASK);
2178 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2179 MVPP2_PRS_RI_MAC_ME_MASK);
2181 /* Shift to ethertype */
2182 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
2183 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2185 /* Update shadow table and hw entry */
2186 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
2187 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
2188 mvpp2_prs_hw_write(priv, pe);
2195 static int mvpp2_prs_update_mac_da(struct mvpp2_port *port, const u8 *da)
2199 /* Remove old parser entry */
2200 err = mvpp2_prs_mac_da_accept(port->priv, port->id, port->dev_addr,
2205 /* Add new parser entry */
2206 err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
2210 /* Set addr in the device */
2211 memcpy(port->dev_addr, da, ETH_ALEN);
2216 /* Set prs flow for the port */
2217 static int mvpp2_prs_def_flow(struct mvpp2_port *port)
2219 struct mvpp2_prs_entry *pe;
2222 pe = mvpp2_prs_flow_find(port->priv, port->id);
2224	/* Such an entry does not exist */
2226	/* Go through all entries from last to first */
2227 tid = mvpp2_prs_tcam_first_free(port->priv,
2228 MVPP2_PE_LAST_FREE_TID,
2229 MVPP2_PE_FIRST_FREE_TID);
2233 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2237 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
2241 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
2242 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2244 /* Update shadow table */
2245 mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
2248 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
2249 mvpp2_prs_hw_write(port->priv, pe);
2255 /* Classifier configuration routines */
2257 /* Update classification flow table registers */
2258 static void mvpp2_cls_flow_write(struct mvpp2 *priv,
2259 struct mvpp2_cls_flow_entry *fe)
2261 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
2262 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
2263 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
2264 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
2267 /* Update classification lookup table register */
2268 static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
2269 struct mvpp2_cls_lookup_entry *le)
2273 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
2274 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
2275 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
2278 /* Classifier default initialization */
2279 static void mvpp2_cls_init(struct mvpp2 *priv)
2281 struct mvpp2_cls_lookup_entry le;
2282 struct mvpp2_cls_flow_entry fe;
2285 /* Enable classifier */
2286 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
2288 /* Clear classifier flow table */
2289 memset(&fe.data, 0, MVPP2_CLS_FLOWS_TBL_DATA_WORDS);
2290 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
2292 mvpp2_cls_flow_write(priv, &fe);
2295 /* Clear classifier lookup table */
2297 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
2300 mvpp2_cls_lookup_write(priv, &le);
2303 mvpp2_cls_lookup_write(priv, &le);
2307 static void mvpp2_cls_port_config(struct mvpp2_port *port)
2309 struct mvpp2_cls_lookup_entry le;
2312 /* Set way for the port */
2313 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
2314 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
2315 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
2317	/* Pick the entry to be accessed in the lookup ID decoding table
2318 * according to the way and lkpid.
2320 le.lkpid = port->id;
2324 /* Set initial CPU queue for receiving packets */
2325 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
2326 le.data |= port->first_rxq;
2328 /* Disable classification engines */
2329 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
2331 /* Update lookup ID table entry */
2332 mvpp2_cls_lookup_write(port->priv, &le);
2335 /* Set CPU queue number for oversize packets */
2336 static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
2340 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
2341 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
2343 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
2344 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
2346 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
2347 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
2348 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
2351 /* Buffer Manager configuration routines */
2354 static int mvpp2_bm_pool_create(struct udevice *dev,
2356 struct mvpp2_bm_pool *bm_pool, int size)
2360 /* Number of buffer pointers must be a multiple of 16, as per
2361 * hardware constraints
2363 if (!IS_ALIGNED(size, 16))
2366 bm_pool->virt_addr = buffer_loc.bm_pool[bm_pool->id];
2367 bm_pool->dma_addr = (dma_addr_t)buffer_loc.bm_pool[bm_pool->id];
2368 if (!bm_pool->virt_addr)
2371 if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
2372 MVPP2_BM_POOL_PTR_ALIGN)) {
2373	dev_err(dev, "BM pool %d is not %d bytes aligned\n",
2374 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
2378 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
2379 lower_32_bits(bm_pool->dma_addr));
2380 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
2382 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
2383 val |= MVPP2_BM_START_MASK;
2384 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
2386 bm_pool->type = MVPP2_BM_FREE;
2387 bm_pool->size = size;
2388 bm_pool->pkt_size = 0;
2389 bm_pool->buf_num = 0;
2394 /* Set pool buffer size */
2395 static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
2396 struct mvpp2_bm_pool *bm_pool,
2401 bm_pool->buf_size = buf_size;
2403 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
2404 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
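	/*
	 * Note (inferred from the ALIGN() above): the pool buffer size
	 * register works in units of 1 << MVPP2_POOL_BUF_SIZE_OFFSET bytes,
	 * so the size is rounded up to that granularity before being written.
	 */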
2407 /* Free all buffers from the pool */
2408 static void mvpp2_bm_bufs_free(struct udevice *dev, struct mvpp2 *priv,
2409 struct mvpp2_bm_pool *bm_pool)
2411 bm_pool->buf_num = 0;
2415 static int mvpp2_bm_pool_destroy(struct udevice *dev,
2417 struct mvpp2_bm_pool *bm_pool)
2421 mvpp2_bm_bufs_free(dev, priv, bm_pool);
2422 if (bm_pool->buf_num) {
2423 dev_err(dev, "cannot free all buffers in pool %d\n", bm_pool->id);
2427 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
2428 val |= MVPP2_BM_STOP_MASK;
2429 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
2434 static int mvpp2_bm_pools_init(struct udevice *dev,
2438 struct mvpp2_bm_pool *bm_pool;
2440 /* Create all pools with maximum size */
2441 size = MVPP2_BM_POOL_SIZE_MAX;
2442 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
2443 bm_pool = &priv->bm_pools[i];
2445 err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
2447 goto err_unroll_pools;
2448 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
2453	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
2454 for (i = i - 1; i >= 0; i--)
2455 mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
2459 static int mvpp2_bm_init(struct udevice *dev, struct mvpp2 *priv)
2463 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
2464	/* Mask all BM interrupts */
2465 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
2466 /* Clear BM cause register */
2467 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
2470 /* Allocate and initialize BM pools */
2471 priv->bm_pools = devm_kcalloc(dev, MVPP2_BM_POOLS_NUM,
2472 sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
2473 if (!priv->bm_pools)
2476 err = mvpp2_bm_pools_init(dev, priv);
2482 /* Attach long pool to rxq */
2483 static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
2484 int lrxq, int long_pool)
2489 /* Get queue physical ID */
2490 prxq = port->rxqs[lrxq]->id;
2492 if (port->priv->hw_version == MVPP21)
2493 mask = MVPP21_RXQ_POOL_LONG_MASK;
2495 mask = MVPP22_RXQ_POOL_LONG_MASK;
2497 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
2499 val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
2500 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
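	/*
	 * The long-pool field in MVPP2_RXQ_CONFIG_REG is wider on PPv2.2
	 * than on PPv2.1, which is why a per-version mask is selected above
	 * before the pool number is shifted into place.
	 */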
2503 /* Set pool number in a BM cookie */
2504 static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
2508 bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
2509 bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
2514 /* Get pool number from a BM cookie */
2515 static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
2517 return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
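	/*
	 * Illustration of the cookie layout handled by the two helpers above,
	 * assuming (for the example only) MVPP2_BM_COOKIE_POOL_OFFS = 24 and
	 * MVPP2_BM_COOKIE_CPU_OFFS = 8: pool 3 on cpu 0 gives a cookie of
	 * (3 << 24) | (0 << 8) = 0x03000000, and pool_get() recovers 3 again.
	 */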
2520 /* Release buffer to BM */
2521 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
2522 dma_addr_t buf_dma_addr,
2523 unsigned long buf_phys_addr)
2525 if (port->priv->hw_version == MVPP22) {
2528 if (sizeof(dma_addr_t) == 8)
2529 val |= upper_32_bits(buf_dma_addr) &
2530 MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
2532 if (sizeof(phys_addr_t) == 8)
2533 val |= (upper_32_bits(buf_phys_addr)
2534 << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
2535 MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
2537 mvpp2_write(port->priv, MVPP22_BM_ADDR_HIGH_RLS_REG, val);
2540 /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
2541 * returned in the "cookie" field of the RX
2542 * descriptor. Instead of storing the virtual address, we
2543 * store the physical address
2545 mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
2546 mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
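	/*
	 * The write to MVPP2_BM_PHY_RLS_REG is what actually releases the
	 * buffer to the pool; the cookie and high-bits registers written
	 * above are presumably latched by the HW before this final access.
	 */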
2549 /* Refill BM pool */
2550 static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
2551 dma_addr_t dma_addr,
2552 phys_addr_t phys_addr)
2554 int pool = mvpp2_bm_cookie_pool_get(bm);
2556 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
2559 /* Allocate buffers for the pool */
2560 static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
2561 struct mvpp2_bm_pool *bm_pool, int buf_num)
2566 (buf_num + bm_pool->buf_num > bm_pool->size)) {
2567 netdev_err(port->dev,
2568 "cannot allocate %d buffers for pool %d\n",
2569 buf_num, bm_pool->id);
2573 for (i = 0; i < buf_num; i++) {
2574 mvpp2_bm_pool_put(port, bm_pool->id,
2575 (dma_addr_t)buffer_loc.rx_buffer[i],
2576 (unsigned long)buffer_loc.rx_buffer[i]);
2580 /* Update BM driver with number of buffers added to pool */
2581 bm_pool->buf_num += i;
2582 bm_pool->in_use_thresh = bm_pool->buf_num / 4;
2587	/* Notify the driver that the BM pool is being used as a specific type and return the
2588 * pool pointer on success
2590 static struct mvpp2_bm_pool *
2591 mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
2594 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
2597 if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
2598 netdev_err(port->dev, "mixing pool types is forbidden\n");
2602 if (new_pool->type == MVPP2_BM_FREE)
2603 new_pool->type = type;
2605 /* Allocate buffers in case BM pool is used as long pool, but packet
2606	 * size doesn't match the MTU or the BM pool hasn't been used yet
2608 if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
2609 (new_pool->pkt_size == 0)) {
2612 /* Set default buffer number or free all the buffers in case
2613 * the pool is not empty
2615 pkts_num = new_pool->buf_num;
2617 pkts_num = type == MVPP2_BM_SWF_LONG ?
2618 MVPP2_BM_LONG_BUF_NUM :
2619 MVPP2_BM_SHORT_BUF_NUM;
2621 mvpp2_bm_bufs_free(NULL,
2622 port->priv, new_pool);
2624 new_pool->pkt_size = pkt_size;
2626 /* Allocate buffers for this pool */
2627 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
2628 if (num != pkts_num) {
2629 dev_err(dev, "pool %d: %d of %d allocated\n",
2630 new_pool->id, num, pkts_num);
2635 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
2636 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
2641 /* Initialize pools for swf */
2642 static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
2646 if (!port->pool_long) {
2648 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
2651 if (!port->pool_long)
2654 port->pool_long->port_map |= (1 << port->id);
2656 for (rxq = 0; rxq < rxq_number; rxq++)
2657 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
2663 /* Port configuration routines */
2665 static void mvpp2_port_mii_set(struct mvpp2_port *port)
2669 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
2671 switch (port->phy_interface) {
2672 case PHY_INTERFACE_MODE_SGMII:
2673 val |= MVPP2_GMAC_INBAND_AN_MASK;
2675 case PHY_INTERFACE_MODE_RGMII:
2676 val |= MVPP2_GMAC_PORT_RGMII_MASK;
2678 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
2681 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2684 static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
2688 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
2689 val |= MVPP2_GMAC_FC_ADV_EN;
2690 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
2693 static void mvpp2_port_enable(struct mvpp2_port *port)
2697 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2698 val |= MVPP2_GMAC_PORT_EN_MASK;
2699 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
2700 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2703 static void mvpp2_port_disable(struct mvpp2_port *port)
2707 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2708 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
2709 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2712 /* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
2713 static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
2717 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
2718 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
2719 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
2722 /* Configure loopback port */
2723 static void mvpp2_port_loopback_set(struct mvpp2_port *port)
2727 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
2729 if (port->speed == 1000)
2730 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
2732 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
2734 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
2735 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
2737 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
2739 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
2742 static void mvpp2_port_reset(struct mvpp2_port *port)
2746 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
2747 ~MVPP2_GMAC_PORT_RESET_MASK;
2748 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2750 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
2751 MVPP2_GMAC_PORT_RESET_MASK)
2755 /* Change maximum receive size of the port */
2756 static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
2760 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2761 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
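	/*
	 * The max RX size field appears to be programmed in 2-byte units
	 * (hence the division by 2 below), after stripping the Marvell
	 * header from the packet size.
	 */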
2762 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
2763 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
2764 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2767 /* Set defaults to the MVPP2 port */
2768 static void mvpp2_defaults_set(struct mvpp2_port *port)
2770 int tx_port_num, val, queue, ptxq, lrxq;
2772 if (port->priv->hw_version == MVPP21) {
2773 /* Configure port to loopback if needed */
2774 if (port->flags & MVPP2_F_LOOPBACK)
2775 mvpp2_port_loopback_set(port);
2777 /* Update TX FIFO MIN Threshold */
2778 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
2779 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
2780 /* Min. TX threshold must be less than minimal packet length */
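	/* i.e. 64 (minimum Ethernet frame) - 4 (CRC) - 2 (Marvell header) below */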
2781 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
2782 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
2785 /* Disable Legacy WRR, Disable EJP, Release from reset */
2786 tx_port_num = mvpp2_egress_port(port);
2787 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
2789 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
2791 /* Close bandwidth for all queues */
2792 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
2793 ptxq = mvpp2_txq_phys(port->id, queue);
2794 mvpp2_write(port->priv,
2795 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
2798 /* Set refill period to 1 usec, refill tokens
2799 * and bucket size to maximum
2801 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, 0xc8);
2802 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
2803 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
2804 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
2805 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
2806 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
2807 val = MVPP2_TXP_TOKEN_SIZE_MAX;
2808 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
2810 /* Set MaximumLowLatencyPacketSize value to 256 */
2811 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
2812 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
2813 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
2815 /* Enable Rx cache snoop */
2816 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
2817 queue = port->rxqs[lrxq]->id;
2818 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2819 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
2820 MVPP2_SNOOP_BUF_HDR_MASK;
2821 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2825 /* Enable/disable receiving packets */
2826 static void mvpp2_ingress_enable(struct mvpp2_port *port)
2831 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
2832 queue = port->rxqs[lrxq]->id;
2833 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2834 val &= ~MVPP2_RXQ_DISABLE_MASK;
2835 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2839 static void mvpp2_ingress_disable(struct mvpp2_port *port)
2844 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
2845 queue = port->rxqs[lrxq]->id;
2846 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2847 val |= MVPP2_RXQ_DISABLE_MASK;
2848 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2852 /* Enable transmit via physical egress queue
2853	 * - HW starts to take descriptors from DRAM
2855 static void mvpp2_egress_enable(struct mvpp2_port *port)
2859 int tx_port_num = mvpp2_egress_port(port);
2861 /* Enable all initialized TXs. */
2863 for (queue = 0; queue < txq_number; queue++) {
2864 struct mvpp2_tx_queue *txq = port->txqs[queue];
2866 if (txq->descs != NULL)
2867 qmap |= (1 << queue);
2870 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2871 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
2874 /* Disable transmit via physical egress queue
2875 * - HW doesn't take descriptors from DRAM
2877 static void mvpp2_egress_disable(struct mvpp2_port *port)
2881 int tx_port_num = mvpp2_egress_port(port);
2883 /* Issue stop command for active channels only */
2884 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2885 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
2886 MVPP2_TXP_SCHED_ENQ_MASK;
2888 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
2889 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
2891 /* Wait for all Tx activity to terminate. */
2894 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
2895 netdev_warn(port->dev,
2896 "Tx stop timed out, status=0x%08x\n",
2903	/* Check the port TX Command register to verify that all
2904 * Tx queues are stopped
2906 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
2907 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
2910 /* Rx descriptors helper methods */
2912 /* Get number of Rx descriptors occupied by received packets */
2914 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
2916 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
2918 return val & MVPP2_RXQ_OCCUPIED_MASK;
2921 /* Update Rx queue status with the number of occupied and available
2922 * Rx descriptor slots.
2925 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
2926 int used_count, int free_count)
2928	/* Decrement the number of used descriptors and increment the
2929	 * number of free descriptors.
2931 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
2933 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
2936 /* Get pointer to next RX descriptor to be processed by SW */
2937 static inline struct mvpp2_rx_desc *
2938 mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
2940 int rx_desc = rxq->next_desc_to_proc;
2942 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
2943 prefetch(rxq->descs + rxq->next_desc_to_proc);
2944 return rxq->descs + rx_desc;
2947 /* Set rx queue offset */
2948 static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
2949 int prxq, int offset)
2953 /* Convert offset from bytes to units of 32 bytes */
2954 offset = offset >> 5;
2956 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
2957 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
2960 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
2961 MVPP2_RXQ_PACKET_OFFSET_MASK);
2963 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
2966 /* Obtain BM cookie information from descriptor */
2967 static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
2968 struct mvpp2_rx_desc *rx_desc)
2970 int cpu = smp_processor_id();
2973 pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
2974 MVPP2_RXD_BM_POOL_ID_MASK) >>
2975 MVPP2_RXD_BM_POOL_ID_OFFS;
2977 return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
2978 ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
2981 /* Tx descriptors helper methods */
2983 /* Get number of Tx descriptors waiting to be transmitted by HW */
2984 static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
2985 struct mvpp2_tx_queue *txq)
2989 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
2990 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
2992 return val & MVPP2_TXQ_PENDING_MASK;
2995 /* Get pointer to next Tx descriptor to be processed (send) by HW */
2996 static struct mvpp2_tx_desc *
2997 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
2999 int tx_desc = txq->next_desc_to_proc;
3001 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
3002 return txq->descs + tx_desc;
3005 /* Update HW with number of aggregated Tx descriptors to be sent */
3006 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
3008 /* aggregated access - relevant TXQ number is written in TX desc */
3009 mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
3012 /* Get number of sent descriptors and decrement counter.
3013 * The number of sent descriptors is returned.
3016 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
3017 struct mvpp2_tx_queue *txq)
3021 /* Reading status reg resets transmitted descriptor counter */
3022 val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));
3024 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
3025 MVPP2_TRANSMITTED_COUNT_OFFSET;
3028 static void mvpp2_txq_sent_counter_clear(void *arg)
3030 struct mvpp2_port *port = arg;
3033 for (queue = 0; queue < txq_number; queue++) {
3034 int id = port->txqs[queue]->id;
3036 mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
3040 /* Set max sizes for Tx queues */
3041 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
3044 int txq, tx_port_num;
3046 mtu = port->pkt_size * 8;
3047 if (mtu > MVPP2_TXP_MTU_MAX)
3048 mtu = MVPP2_TXP_MTU_MAX;
3050	/* Workaround for wrong token bucket update: set MTU value = 3 * real MTU value */
3053 /* Indirect access to registers */
3054 tx_port_num = mvpp2_egress_port(port);
3055 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
3058 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
3059 val &= ~MVPP2_TXP_MTU_MAX;
3061 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
3063	/* TXP token size and all TXQs token size must be larger than the MTU */
3064 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
3065 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
3068 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
3070 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
3073 for (txq = 0; txq < txq_number; txq++) {
3074 val = mvpp2_read(port->priv,
3075 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
3076 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
3080 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
3082 mvpp2_write(port->priv,
3083 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
3089 /* Free Tx queue skbuffs */
3090 static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
3091 struct mvpp2_tx_queue *txq,
3092 struct mvpp2_txq_pcpu *txq_pcpu, int num)
3096 for (i = 0; i < num; i++)
3097 mvpp2_txq_inc_get(txq_pcpu);
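	/*
	 * In U-Boot there are no skbs to release, so advancing the per-CPU
	 * get index is all that is needed here.
	 */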
3100 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
3103 int queue = fls(cause) - 1;
3105 return port->rxqs[queue];
3108 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
3111 int queue = fls(cause) - 1;
3113 return port->txqs[queue];
3116 /* Rx/Tx queue initialization/cleanup methods */
3118 /* Allocate and initialize descriptors for aggr TXQ */
3119 static int mvpp2_aggr_txq_init(struct udevice *dev,
3120 struct mvpp2_tx_queue *aggr_txq,
3121 int desc_num, int cpu,
3126 /* Allocate memory for TX descriptors */
3127 aggr_txq->descs = buffer_loc.aggr_tx_descs;
3128 aggr_txq->descs_dma = (dma_addr_t)buffer_loc.aggr_tx_descs;
3129 if (!aggr_txq->descs)
3132 /* Make sure descriptor address is cache line size aligned */
3133 BUG_ON(aggr_txq->descs !=
3134 PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
3136 aggr_txq->last_desc = aggr_txq->size - 1;
3138	/* Aggr TXQ is not reset (workaround): resync the SW index from HW */
3139 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
3140 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
3142	/* Set Tx descriptors queue starting address - indirect access */
3145 if (priv->hw_version == MVPP21)
3146 txq_dma = aggr_txq->descs_dma;
3148 txq_dma = aggr_txq->descs_dma >>
3149 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
3151 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
3152 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
3157 /* Create a specified Rx queue */
3158 static int mvpp2_rxq_init(struct mvpp2_port *port,
3159 struct mvpp2_rx_queue *rxq)
3164 rxq->size = port->rx_ring_size;
3166 /* Allocate memory for RX descriptors */
3167 rxq->descs = buffer_loc.rx_descs;
3168 rxq->descs_dma = (dma_addr_t)buffer_loc.rx_descs;
3172 BUG_ON(rxq->descs !=
3173 PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
3175 rxq->last_desc = rxq->size - 1;
3177 /* Zero occupied and non-occupied counters - direct access */
3178 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
3180 /* Set Rx descriptors queue starting address - indirect access */
3181 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
3182 if (port->priv->hw_version == MVPP21)
3183 rxq_dma = rxq->descs_dma;
3185 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
3186 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
3187 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
3188 mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);
3191 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
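	/*
	 * NET_SKB_PAD (at least 32 bytes) of headroom is left in front of
	 * each received frame; mvpp2_recv() skips the same offset again when
	 * handing the packet up.
	 */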
3193 /* Add number of descriptors ready for receiving packets */
3194 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
3199 /* Push packets received by the RXQ to BM pool */
3200 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
3201 struct mvpp2_rx_queue *rxq)
3205 rx_received = mvpp2_rxq_received(port, rxq->id);
3209 for (i = 0; i < rx_received; i++) {
3210 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
3211 u32 bm = mvpp2_bm_cookie_build(port, rx_desc);
3213 mvpp2_pool_refill(port, bm,
3214 mvpp2_rxdesc_dma_addr_get(port, rx_desc),
3215 mvpp2_rxdesc_cookie_get(port, rx_desc));
3217 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
3220 /* Cleanup Rx queue */
3221 static void mvpp2_rxq_deinit(struct mvpp2_port *port,
3222 struct mvpp2_rx_queue *rxq)
3224 mvpp2_rxq_drop_pkts(port, rxq);
3228 rxq->next_desc_to_proc = 0;
3231 /* Clear Rx descriptors queue starting address and size;
3232 * free descriptor number
3234 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
3235 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
3236 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
3237 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
3240 /* Create and initialize a Tx queue */
3241 static int mvpp2_txq_init(struct mvpp2_port *port,
3242 struct mvpp2_tx_queue *txq)
3245 int cpu, desc, desc_per_txq, tx_port_num;
3246 struct mvpp2_txq_pcpu *txq_pcpu;
3248 txq->size = port->tx_ring_size;
3250 /* Allocate memory for Tx descriptors */
3251 txq->descs = buffer_loc.tx_descs;
3252 txq->descs_dma = (dma_addr_t)buffer_loc.tx_descs;
3256 /* Make sure descriptor address is cache line size aligned */
3257 BUG_ON(txq->descs !=
3258 PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
3260 txq->last_desc = txq->size - 1;
3262 /* Set Tx descriptors queue starting address - indirect access */
3263 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
3264 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma);
3265 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
3266 MVPP2_TXQ_DESC_SIZE_MASK);
3267 mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
3268 mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
3269 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
3270 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
3271 val &= ~MVPP2_TXQ_PENDING_MASK;
3272 mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);
3274 /* Calculate base address in prefetch buffer. We reserve 16 descriptors
3275 * for each existing TXQ.
3276 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
3278	 * GBE ports are assumed to be continuous from 0 to MVPP2_MAX_PORTS
3280 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
3281 (txq->log_id * desc_per_txq);
3283 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
3284 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
3285 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
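	/*
	 * Worked example for the base address computed above, assuming
	 * desc_per_txq = 16 (per the comment above) and MVPP2_MAX_TXQ = 8
	 * (an assumption for illustration): port 1, logical queue 2 starts
	 * at 1 * 8 * 16 + 2 * 16 = 160.
	 */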
3287 /* WRR / EJP configuration - indirect access */
3288 tx_port_num = mvpp2_egress_port(port);
3289 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
3291 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
3292 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
3293 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
3294 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
3295 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
3297 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
3298 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
3301 for_each_present_cpu(cpu) {
3302 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
3303 txq_pcpu->size = txq->size;
3309 /* Free allocated TXQ resources */
3310 static void mvpp2_txq_deinit(struct mvpp2_port *port,
3311 struct mvpp2_tx_queue *txq)
3315 txq->next_desc_to_proc = 0;
3318 /* Set minimum bandwidth for disabled TXQs */
3319 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
3321 /* Set Tx descriptors queue starting address and size */
3322 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
3323 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
3324 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
3327 /* Cleanup Tx ports */
3328 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
3330 struct mvpp2_txq_pcpu *txq_pcpu;
3331 int delay, pending, cpu;
3334 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
3335 val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
3336 val |= MVPP2_TXQ_DRAIN_EN_MASK;
3337 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
3339	/* The queue has been stopped, so wait for all packets
3340 * to be transmitted.
3344 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
3345 netdev_warn(port->dev,
3346 "port %d: cleaning queue %d timed out\n",
3347 port->id, txq->log_id);
3353 pending = mvpp2_txq_pend_desc_num_get(port, txq);
3356 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
3357 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
3359 for_each_present_cpu(cpu) {
3360 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
3362 /* Release all packets */
3363 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
3366 txq_pcpu->count = 0;
3367 txq_pcpu->txq_put_index = 0;
3368 txq_pcpu->txq_get_index = 0;
3372 /* Cleanup all Tx queues */
3373 static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
3375 struct mvpp2_tx_queue *txq;
3379 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
3381 /* Reset Tx ports and delete Tx queues */
3382 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
3383 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
3385 for (queue = 0; queue < txq_number; queue++) {
3386 txq = port->txqs[queue];
3387 mvpp2_txq_clean(port, txq);
3388 mvpp2_txq_deinit(port, txq);
3391 mvpp2_txq_sent_counter_clear(port);
3393 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
3394 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
3397 /* Cleanup all Rx queues */
3398 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
3402 for (queue = 0; queue < rxq_number; queue++)
3403 mvpp2_rxq_deinit(port, port->rxqs[queue]);
3406 /* Init all Rx queues for port */
3407 static int mvpp2_setup_rxqs(struct mvpp2_port *port)
3411 for (queue = 0; queue < rxq_number; queue++) {
3412 err = mvpp2_rxq_init(port, port->rxqs[queue]);
3419 mvpp2_cleanup_rxqs(port);
3423 /* Init all tx queues for port */
3424 static int mvpp2_setup_txqs(struct mvpp2_port *port)
3426 struct mvpp2_tx_queue *txq;
3429 for (queue = 0; queue < txq_number; queue++) {
3430 txq = port->txqs[queue];
3431 err = mvpp2_txq_init(port, txq);
3436 mvpp2_txq_sent_counter_clear(port);
3440 mvpp2_cleanup_txqs(port);
3445 static void mvpp2_link_event(struct mvpp2_port *port)
3447 struct phy_device *phydev = port->phy_dev;
3448 int status_change = 0;
3452 if ((port->speed != phydev->speed) ||
3453 (port->duplex != phydev->duplex)) {
3456 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3457 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
3458 MVPP2_GMAC_CONFIG_GMII_SPEED |
3459 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
3460 MVPP2_GMAC_AN_SPEED_EN |
3461 MVPP2_GMAC_AN_DUPLEX_EN);
3464 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
3466 if (phydev->speed == SPEED_1000)
3467 val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
3468 else if (phydev->speed == SPEED_100)
3469 val |= MVPP2_GMAC_CONFIG_MII_SPEED;
3471 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3473 port->duplex = phydev->duplex;
3474 port->speed = phydev->speed;
3478 if (phydev->link != port->link) {
3479 if (!phydev->link) {
3484 port->link = phydev->link;
3488 if (status_change) {
3490 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3491 val |= (MVPP2_GMAC_FORCE_LINK_PASS |
3492 MVPP2_GMAC_FORCE_LINK_DOWN);
3493 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3494 mvpp2_egress_enable(port);
3495 mvpp2_ingress_enable(port);
3497 mvpp2_ingress_disable(port);
3498 mvpp2_egress_disable(port);
3503 /* Main RX/TX processing routines */
3505 /* Display more error info */
3506 static void mvpp2_rx_error(struct mvpp2_port *port,
3507 struct mvpp2_rx_desc *rx_desc)
3509 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
3510 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
3512 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
3513 case MVPP2_RXD_ERR_CRC:
3514 netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
3517 case MVPP2_RXD_ERR_OVERRUN:
3518 netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
3521 case MVPP2_RXD_ERR_RESOURCE:
3522 netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
3528	/* Return the buffer to the BM pool so it can be reused (no skb allocation in U-Boot) */
3529 static int mvpp2_rx_refill(struct mvpp2_port *port,
3530 struct mvpp2_bm_pool *bm_pool,
3531 u32 bm, dma_addr_t dma_addr)
3533 mvpp2_pool_refill(port, bm, dma_addr, (unsigned long)dma_addr);
3537 /* Set hw internals when starting port */
3538 static void mvpp2_start_dev(struct mvpp2_port *port)
3540 mvpp2_gmac_max_rx_size_set(port);
3541 mvpp2_txp_max_tx_size_set(port);
3543 mvpp2_port_enable(port);
3546 /* Set hw internals when stopping port */
3547 static void mvpp2_stop_dev(struct mvpp2_port *port)
3549	/* Stop new packets from arriving at the RXQs */
3550 mvpp2_ingress_disable(port);
3552 mvpp2_egress_disable(port);
3553 mvpp2_port_disable(port);
3556 static int mvpp2_phy_connect(struct udevice *dev, struct mvpp2_port *port)
3558 struct phy_device *phy_dev;
3560 if (!port->init || port->link == 0) {
3561 phy_dev = phy_connect(port->priv->bus, port->phyaddr, dev,
3562 port->phy_interface);
3563 port->phy_dev = phy_dev;
3565 netdev_err(port->dev, "cannot connect to phy\n");
3568 phy_dev->supported &= PHY_GBIT_FEATURES;
3569 phy_dev->advertising = phy_dev->supported;
3571 port->phy_dev = phy_dev;
3576 phy_config(phy_dev);
3577 phy_startup(phy_dev);
3578 if (!phy_dev->link) {
3579 printf("%s: No link\n", phy_dev->dev->name);
3585 mvpp2_egress_enable(port);
3586 mvpp2_ingress_enable(port);
3592 static int mvpp2_open(struct udevice *dev, struct mvpp2_port *port)
3594 unsigned char mac_bcast[ETH_ALEN] = {
3595 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3598 err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
3600 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
3603 err = mvpp2_prs_mac_da_accept(port->priv, port->id,
3604 port->dev_addr, true);
3606	netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
3609 err = mvpp2_prs_def_flow(port);
3611 netdev_err(dev, "mvpp2_prs_def_flow failed\n");
3615 /* Allocate the Rx/Tx queues */
3616 err = mvpp2_setup_rxqs(port);
3618 netdev_err(port->dev, "cannot allocate Rx queues\n");
3622 err = mvpp2_setup_txqs(port);
3624 netdev_err(port->dev, "cannot allocate Tx queues\n");
3628 err = mvpp2_phy_connect(dev, port);
3632 mvpp2_link_event(port);
3634 mvpp2_start_dev(port);
3639 /* No Device ops here in U-Boot */
3641 /* Driver initialization */
3643 static void mvpp2_port_power_up(struct mvpp2_port *port)
3645 struct mvpp2 *priv = port->priv;
3647 mvpp2_port_mii_set(port);
3648 mvpp2_port_periodic_xon_disable(port);
3649 if (priv->hw_version == MVPP21)
3650 mvpp2_port_fc_adv_enable(port);
3651 mvpp2_port_reset(port);
3654 /* Initialize port HW */
3655 static int mvpp2_port_init(struct udevice *dev, struct mvpp2_port *port)
3657 struct mvpp2 *priv = port->priv;
3658 struct mvpp2_txq_pcpu *txq_pcpu;
3659 int queue, cpu, err;
3661 if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
3665 mvpp2_egress_disable(port);
3666 mvpp2_port_disable(port);
3668 port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
3673 /* Associate physical Tx queues to this port and initialize.
3674 * The mapping is predefined.
3676 for (queue = 0; queue < txq_number; queue++) {
3677 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
3678 struct mvpp2_tx_queue *txq;
3680 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
3684 txq->pcpu = devm_kzalloc(dev, sizeof(struct mvpp2_txq_pcpu),
3689 txq->id = queue_phy_id;
3690 txq->log_id = queue;
3691 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
3692 for_each_present_cpu(cpu) {
3693 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
3694 txq_pcpu->cpu = cpu;
3697 port->txqs[queue] = txq;
3700 port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
3705 /* Allocate and initialize Rx queue for this port */
3706 for (queue = 0; queue < rxq_number; queue++) {
3707 struct mvpp2_rx_queue *rxq;
3709 /* Map physical Rx queue to port's logical Rx queue */
3710 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
3713 /* Map this Rx queue to a physical queue */
3714 rxq->id = port->first_rxq + queue;
3715 rxq->port = port->id;
3716 rxq->logic_rxq = queue;
3718 port->rxqs[queue] = rxq;
3721 /* Configure Rx queue group interrupt for this port */
3722 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), CONFIG_MV_ETH_RXQ);
3724 /* Create Rx descriptor rings */
3725 for (queue = 0; queue < rxq_number; queue++) {
3726 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
3728 rxq->size = port->rx_ring_size;
3729 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
3730 rxq->time_coal = MVPP2_RX_COAL_USEC;
3733 mvpp2_ingress_disable(port);
3735 /* Port default configuration */
3736 mvpp2_defaults_set(port);
3738 /* Port's classifier configuration */
3739 mvpp2_cls_oversize_rxq_set(port);
3740 mvpp2_cls_port_config(port);
3742 /* Provide an initial Rx packet size */
3743 port->pkt_size = MVPP2_RX_PKT_SIZE(PKTSIZE_ALIGN);
3745 /* Initialize pools for swf */
3746 err = mvpp2_swf_bm_pool_init(port);
3753 /* Ports initialization */
3754 static int mvpp2_port_probe(struct udevice *dev,
3755 struct mvpp2_port *port,
3758 int *next_first_rxq)
3763 const char *phy_mode_str;
3765 int priv_common_regs_num = 2;
3768 phy_node = fdtdec_lookup_phandle(gd->fdt_blob, port_node, "phy");
3770	dev_err(dev, "missing phy\n");
3774 phy_mode_str = fdt_getprop(gd->fdt_blob, port_node, "phy-mode", NULL);
3776 phy_mode = phy_get_interface_by_name(phy_mode_str);
3777 if (phy_mode == -1) {
3778	dev_err(dev, "incorrect phy mode\n");
3782 id = fdtdec_get_int(gd->fdt_blob, port_node, "port-id", -1);
3784	dev_err(dev, "missing port-id value\n");
3788 phyaddr = fdtdec_get_int(gd->fdt_blob, phy_node, "reg", 0);
3792 port->first_rxq = *next_first_rxq;
3793 port->phy_node = phy_node;
3794 port->phy_interface = phy_mode;
3795 port->phyaddr = phyaddr;
3797 if (priv->hw_version == MVPP21) {
3798 port->base = (void __iomem *)dev_get_addr_index(
3799 dev->parent, priv_common_regs_num + id);
3800 if (IS_ERR(port->base))
3801 return PTR_ERR(port->base);
3805 gop_id = fdtdec_get_int(gd->fdt_blob, port_node,
3808	dev_err(dev, "missing gop-port-id value\n");
3812 port->base = priv->iface_base + MVPP22_PORT_BASE +
3813 gop_id * MVPP22_PORT_OFFSET;
3816 port->tx_ring_size = MVPP2_MAX_TXD;
3817 port->rx_ring_size = MVPP2_MAX_RXD;
3819 err = mvpp2_port_init(dev, port);
3821	dev_err(dev, "failed to init port %d\n", id);
3824 mvpp2_port_power_up(port);
3826 /* Increment the first Rx queue number to be used by the next port */
3827 *next_first_rxq += CONFIG_MV_ETH_RXQ;
3828 priv->port_list[id] = port;
3832 /* Initialize decoding windows */
3833 static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
3839 for (i = 0; i < 6; i++) {
3840 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
3841 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
3844 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
3849 for (i = 0; i < dram->num_cs; i++) {
3850 const struct mbus_dram_window *cs = dram->cs + i;
3852 mvpp2_write(priv, MVPP2_WIN_BASE(i),
3853 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
3854 dram->mbus_dram_target_id);
3856 mvpp2_write(priv, MVPP2_WIN_SIZE(i),
3857 (cs->size - 1) & 0xffff0000);
3859 win_enable |= (1 << i);
3862 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
3865	/* Initialize Rx FIFOs */
3866 static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
3870 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
3871 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
3872 MVPP2_RX_FIFO_PORT_DATA_SIZE);
3873 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
3874 MVPP2_RX_FIFO_PORT_ATTR_SIZE);
3877 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
3878 MVPP2_RX_FIFO_PORT_MIN_PKT);
3879 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
3882 /* Initialize network controller common part HW */
3883 static int mvpp2_init(struct udevice *dev, struct mvpp2 *priv)
3885 const struct mbus_dram_target_info *dram_target_info;
3889 /* Checks for hardware constraints (U-Boot uses only one rxq) */
3890 if ((rxq_number > MVPP2_MAX_RXQ) || (txq_number > MVPP2_MAX_TXQ)) {
3891	dev_err(dev, "invalid queue size parameter\n");
3895 /* MBUS windows configuration */
3896 dram_target_info = mvebu_mbus_dram_info();
3897 if (dram_target_info)
3898 mvpp2_conf_mbus_windows(dram_target_info, priv);
3900 /* Disable HW PHY polling */
3901 if (priv->hw_version == MVPP21) {
3902 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
3903 val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
3904 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
3906 val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
3907 val &= ~MVPP22_SMI_POLLING_EN;
3908 writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
3911 /* Allocate and initialize aggregated TXQs */
3912 priv->aggr_txqs = devm_kcalloc(dev, num_present_cpus(),
3913 sizeof(struct mvpp2_tx_queue),
3915 if (!priv->aggr_txqs)
3918 for_each_present_cpu(i) {
3919 priv->aggr_txqs[i].id = i;
3920 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
3921 err = mvpp2_aggr_txq_init(dev, &priv->aggr_txqs[i],
3922 MVPP2_AGGR_TXQ_SIZE, i, priv);
3928 mvpp2_rx_fifo_init(priv);
3930 /* Reset Rx queue group interrupt configuration */
3931 for (i = 0; i < MVPP2_MAX_PORTS; i++)
3932 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i),
3935 if (priv->hw_version == MVPP21)
3936 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
3937 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
3939	/* Allow cache snoop when transmitting packets */
3940 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
3942 /* Buffer Manager initialization */
3943 err = mvpp2_bm_init(dev, priv);
3947 /* Parser default initialization */
3948 err = mvpp2_prs_default_init(dev, priv);
3952 /* Classifier default initialization */
3953 mvpp2_cls_init(priv);
3958 /* SMI / MDIO functions */
3960 static int smi_wait_ready(struct mvpp2 *priv)
3962 u32 timeout = MVPP2_SMI_TIMEOUT;
3965 /* wait till the SMI is not busy */
3967 /* read smi register */
3968 smi_reg = readl(priv->lms_base + MVPP2_SMI);
3969 if (timeout-- == 0) {
3970 printf("Error: SMI busy timeout\n");
3973 } while (smi_reg & MVPP2_SMI_BUSY);
3979 * mpp2_mdio_read - miiphy_read callback function.
3981 * Returns 16bit phy register value, or 0xffff on error
3983 static int mpp2_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
3985 struct mvpp2 *priv = bus->priv;
3989 /* check parameters */
3990 if (addr > MVPP2_PHY_ADDR_MASK) {
3991 printf("Error: Invalid PHY address %d\n", addr);
3995 if (reg > MVPP2_PHY_REG_MASK) {
3996	printf("Error: Invalid register offset %d\n", reg);
4000 /* wait till the SMI is not busy */
4001 if (smi_wait_ready(priv) < 0)
4004	/* fill the phy address and register offset and read opcode */
4005 smi_reg = (addr << MVPP2_SMI_DEV_ADDR_OFFS)
4006 | (reg << MVPP2_SMI_REG_ADDR_OFFS)
4007 | MVPP2_SMI_OPCODE_READ;
4009 /* write the smi register */
4010 writel(smi_reg, priv->lms_base + MVPP2_SMI);
4012 /* wait till read value is ready */
4013 timeout = MVPP2_SMI_TIMEOUT;
4016 /* read smi register */
4017 smi_reg = readl(priv->lms_base + MVPP2_SMI);
4018 if (timeout-- == 0) {
4019	printf("Error: SMI read ready timeout\n");
4022 } while (!(smi_reg & MVPP2_SMI_READ_VALID));
4024 /* Wait for the data to update in the SMI register */
4025 for (timeout = 0; timeout < MVPP2_SMI_TIMEOUT; timeout++)
4028 return readl(priv->lms_base + MVPP2_SMI) & MVPP2_SMI_DATA_MASK;
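/*
 * SMI register usage in the two MDIO helpers here: MVPP2_SMI_BUSY flags an
 * ongoing transaction, MVPP2_SMI_READ_VALID flags completed read data, the
 * PHY and register addresses are placed at MVPP2_SMI_DEV_ADDR_OFFS and
 * MVPP2_SMI_REG_ADDR_OFFS, and the read result sits in the low bits covered
 * by MVPP2_SMI_DATA_MASK.
 */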
4032 * mpp2_mdio_write - miiphy_write callback function.
4034	 * Returns 0 if the write succeeds, -EINVAL on bad parameters
4037 static int mpp2_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
4040 struct mvpp2 *priv = bus->priv;
4043 /* check parameters */
4044 if (addr > MVPP2_PHY_ADDR_MASK) {
4045 printf("Error: Invalid PHY address %d\n", addr);
4049 if (reg > MVPP2_PHY_REG_MASK) {
4050	printf("Error: Invalid register offset %d\n", reg);
4054 /* wait till the SMI is not busy */
4055 if (smi_wait_ready(priv) < 0)
4058 /* fill the phy addr and reg offset and write opcode and data */
4059 smi_reg = value << MVPP2_SMI_DATA_OFFS;
4060 smi_reg |= (addr << MVPP2_SMI_DEV_ADDR_OFFS)
4061 | (reg << MVPP2_SMI_REG_ADDR_OFFS);
4062 smi_reg &= ~MVPP2_SMI_OPCODE_READ;
4064 /* write the smi register */
4065 writel(smi_reg, priv->lms_base + MVPP2_SMI);
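/*
 * Rough sketch of how these callbacks would be wired up to a U-Boot MDIO
 * bus (the actual registration happens outside this excerpt):
 *
 *	bus->read = mpp2_mdio_read;
 *	bus->write = mpp2_mdio_write;
 *	mdio_register(bus);
 */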
4070 static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp)
4072 struct mvpp2_port *port = dev_get_priv(dev);
4073 struct mvpp2_rx_desc *rx_desc;
4074 struct mvpp2_bm_pool *bm_pool;
4075 dma_addr_t dma_addr;
4077 int pool, rx_bytes, err;
4079 struct mvpp2_rx_queue *rxq;
4080 u32 cause_rx_tx, cause_rx, cause_misc;
4083 cause_rx_tx = mvpp2_read(port->priv,
4084 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
4085 cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
4086 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
4087 if (!cause_rx_tx && !cause_misc)
4090 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
4092 /* Process RX packets */
4093 cause_rx |= port->pending_cause_rx;
4094 rxq = mvpp2_get_rx_queue(port, cause_rx);
4096	/* Get the number of received packets */
4097 rx_received = mvpp2_rxq_received(port, rxq->id);
4099 /* Return if no packets are received */
4103 rx_desc = mvpp2_rxq_next_desc_get(rxq);
4104 rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
4105 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
4106 rx_bytes -= MVPP2_MH_SIZE;
4107 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
4109 bm = mvpp2_bm_cookie_build(port, rx_desc);
4110 pool = mvpp2_bm_cookie_pool_get(bm);
4111 bm_pool = &port->priv->bm_pools[pool];
4113 /* In case of an error, release the requested buffer pointer
4114 * to the Buffer Manager. This request process is controlled
4115 * by the hardware, and the information about the buffer is
4116	 * contained in the RX descriptor.
4118 if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
4119 mvpp2_rx_error(port, rx_desc);
4120 /* Return the buffer to the pool */
4121 mvpp2_pool_refill(port, bm, dma_addr, dma_addr);
4125 err = mvpp2_rx_refill(port, bm_pool, bm, dma_addr);
4127 netdev_err(port->dev, "failed to refill BM pools\n");
4131 /* Update Rx queue management counters */
4133 mvpp2_rxq_status_update(port, rxq->id, 1, 1);
4135 /* give packet to stack - skip on first n bytes */
4136 data = (u8 *)dma_addr + 2 + 32;
4142 * No cache invalidation needed here, since the rx_buffer's are
4143 * located in a uncached memory region
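
/*
 * Set or clear the drain-enable bit in the prefetch buffer register of
 * the given TXQ (selected via MVPP2_TXQ_NUM_REG).
 */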
static void mvpp2_txq_drain(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
			    int enable)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
	if (enable)
		val |= MVPP2_TXQ_DRAIN_EN_MASK;
	else
		val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
}
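
/*
 * Polled transmit path: queue a single first-and-last descriptor on the
 * aggregated (per-CPU) TXQ, then busy-wait first for the descriptor to
 * move to the physical TXQ and then for its transmission to complete.
 * TXQ draining is enabled around the second wait so the lone descriptor
 * is actually pushed out.
 */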
static int mvpp2_send(struct udevice *dev, void *packet, int length)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_tx_desc *tx_desc;
	int tx_done;
	int timeout;

	txq = port->txqs[0];
	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, length);
	mvpp2_txdesc_offset_set(port, tx_desc,
				(dma_addr_t)packet & MVPP2_TX_DESC_ALIGN);
	mvpp2_txdesc_dma_addr_set(port, tx_desc,
				  (dma_addr_t)packet & ~MVPP2_TX_DESC_ALIGN);
	/* First and Last descriptor */
	mvpp2_txdesc_cmd_set(port, tx_desc,
			     MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE
			     | MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC);

	/* Flush tx data */
	flush_dcache_range((unsigned long)packet,
			   (unsigned long)packet + ALIGN(length, PKTALIGN));

	/* Enable transmit */
	mb();
	mvpp2_aggr_txq_pend_desc_add(port, 1);

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);

	timeout = 0;
	do {
		if (timeout++ > 10000) {
			printf("timeout: packet not sent from aggregated to phys TXQ\n");
			return -ETIMEDOUT;
		}
		tx_done = mvpp2_txq_pend_desc_num_get(port, txq);
	} while (tx_done);

	/* Enable TXQ drain */
	mvpp2_txq_drain(port, txq, 1);

	timeout = 0;
	do {
		if (timeout++ > 10000) {
			printf("timeout: packet not sent\n");
			return -ETIMEDOUT;
		}
		tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	} while (!tx_done);

	/* Disable TXQ drain */
	mvpp2_txq_drain(port, txq, 0);

	return 0;
}
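
/*
 * eth uclass start callback: program the current MAC address into the
 * parser, power the port up and open it.
 */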
static int mvpp2_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mvpp2_port *port = dev_get_priv(dev);

	/* Load current MAC address */
	memcpy(port->dev_addr, pdata->enetaddr, ETH_ALEN);

	/* Reconfigure parser to accept the original MAC address */
	mvpp2_prs_update_mac_da(port, port->dev_addr);

	mvpp2_port_power_up(port);

	mvpp2_open(dev, port);

	return 0;
}
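
/* eth uclass stop callback: halt the port and tear down its RX/TX queues */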
static void mvpp2_stop(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);

	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);
}
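
/*
 * Per-port probe: initialize the controller state shared in the parent's
 * priv and then probe this port, using buffer_loc.first_rxq to track the
 * first physical RXQ id handed to each port.
 */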
static int mvpp2_probe(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2 *priv = dev_get_priv(dev->parent);
	int err;

	/* Initialize network controller */
	err = mvpp2_init(dev, priv);
	if (err < 0) {
		dev_err(dev, "failed to initialize controller\n");
		return err;
	}

	return mvpp2_port_probe(dev, port, dev_of_offset(dev), priv,
				&buffer_loc.first_rxq);
}

static const struct eth_ops mvpp2_ops = {
	.start		= mvpp2_start,
	.send		= mvpp2_send,
	.recv		= mvpp2_recv,
	.stop		= mvpp2_stop,
};

static struct driver mvpp2_driver = {
	.name	= "mvpp2",
	.id	= UCLASS_ETH,
	.probe	= mvpp2_probe,
	.ops	= &mvpp2_ops,
	.priv_auto_alloc_size = sizeof(struct mvpp2_port),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};

/*
 * Use a MISC device to bind the n instances (child nodes) of the
 * network base controller in UCLASS_ETH.
 */
static int mvpp2_base_probe(struct udevice *dev)
{
	struct mvpp2 *priv = dev_get_priv(dev);
	struct mii_dev *bus;
	void *bd_space;
	u32 size = 0;
	int i;

	/* Save hw-version */
	priv->hw_version = dev_get_driver_data(dev);

	/*
	 * U-Boot special buffer handling:
	 *
	 * Allocate the buffer area for descriptors and rx_buffers once
	 * for all interfaces, since only one interface can be active at
	 * a time. Make this area DMA-safe by disabling the D-cache.
	 */

	/* Align buffer area for descs and rx_buffers to 1MiB */
	bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
	mmu_set_region_dcache_behaviour((unsigned long)bd_space,
					BD_SPACE, DCACHE_OFF);
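
	/*
	 * Carve bd_space up sequentially; 'size' accumulates the running
	 * offset. Resulting layout (sketch):
	 *
	 *	[aggr TXQ descs][TXQ descs][RXQ descs]
	 *	[BM pools 0..MVPP2_BM_POOLS_NUM-1][RX buffers]
	 */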
	buffer_loc.aggr_tx_descs = (struct mvpp2_tx_desc *)bd_space;
	size += MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE;

	buffer_loc.tx_descs =
		(struct mvpp2_tx_desc *)((unsigned long)bd_space + size);
	size += MVPP2_MAX_TXD * MVPP2_DESC_ALIGNED_SIZE;

	buffer_loc.rx_descs =
		(struct mvpp2_rx_desc *)((unsigned long)bd_space + size);
	size += MVPP2_MAX_RXD * MVPP2_DESC_ALIGNED_SIZE;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		buffer_loc.bm_pool[i] =
			(unsigned long *)((unsigned long)bd_space + size);
		if (priv->hw_version == MVPP21)
			size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u32);
		else
			size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u64);
	}

	for (i = 0; i < MVPP2_BM_LONG_BUF_NUM; i++) {
		buffer_loc.rx_buffer[i] =
			(unsigned long *)((unsigned long)bd_space + size);
		size += RX_BUFFER_SIZE;
	}

	/* Save base addresses for later use */
	priv->base = (void *)dev_get_addr_index(dev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	if (priv->hw_version == MVPP21) {
		priv->lms_base = (void *)dev_get_addr_index(dev, 1);
		if (IS_ERR(priv->lms_base))
			return PTR_ERR(priv->lms_base);
	} else {
		priv->iface_base = (void *)dev_get_addr_index(dev, 1);
		if (IS_ERR(priv->iface_base))
			return PTR_ERR(priv->iface_base);
	}

	/* Finally create and register the MDIO bus driver */
	bus = mdio_alloc();
	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -ENOMEM;
	}

	bus->read = mpp2_mdio_read;
	bus->write = mpp2_mdio_write;
	snprintf(bus->name, sizeof(bus->name), "%s", dev->name);
	bus->priv = (void *)priv;
	priv->bus = bus;

	return mdio_register(bus);
}
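
/*
 * Bind one UCLASS_ETH child device per enabled port subnode. Example DT
 * fragment that this walks (node names and values are illustrative):
 *
 *	ethernet@f0000 {
 *		compatible = "marvell,armada-375-pp2";
 *		eth0 {
 *			port-id = <0>;
 *			status = "okay";
 *		};
 *	};
 */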
static int mvpp2_base_bind(struct udevice *parent)
{
	const void *blob = gd->fdt_blob;
	int node = dev_of_offset(parent);
	struct uclass_driver *drv;
	struct udevice *dev;
	struct eth_pdata *plat;
	char *name;
	int subnode;
	u32 id;

	/* Lookup eth driver */
	drv = lists_uclass_lookup(UCLASS_ETH);
	if (!drv) {
		puts("Cannot find eth driver\n");
		return -ENOENT;
	}

	fdt_for_each_subnode(subnode, blob, node) {
		/* Skip disabled ports */
		if (!fdtdec_get_is_enabled(blob, subnode))
			continue;

		plat = calloc(1, sizeof(*plat));
		if (!plat)
			return -ENOMEM;

		id = fdtdec_get_int(blob, subnode, "port-id", -1);

		name = calloc(1, 16);
		sprintf(name, "mvpp2-%d", id);

		/* Create child device UCLASS_ETH and bind it */
		device_bind(parent, &mvpp2_driver, name, plat, subnode, &dev);
		dev_set_of_offset(dev, subnode);
	}

	return 0;
}

static const struct udevice_id mvpp2_ids[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = MVPP21,
	},
	{ }
};

U_BOOT_DRIVER(mvpp2_base) = {
	.name	= "mvpp2_base",
	.id	= UCLASS_MISC,
	.of_match = mvpp2_ids,
	.bind	= mvpp2_base_bind,
	.probe	= mvpp2_base_probe,
	.priv_auto_alloc_size = sizeof(struct mvpp2),
};