4 * Copyright (c) 2000 MontaVista Software, Inc. Dan Malek (dmalek@jlc.net)
6 * (C) Copyright 2000 Sysgo Real-Time Solutions, GmbH <www.elinos.com>
7 * Marius Groeger <mgroeger@sysgo.de>
9 * (C) Copyright (c) 2001
10 * Advent Networks, Inc. <http://www.adventnetworks.com>
11 * Jay Monkman <jtm@smoothsmoothie.com>
13 * Modified so that it plays nicely when more than one ETHERNET interface
14 * is in use a la ether_fcc.c.
16 * DENX Software Engineering GmbH
17 * Gary Jennejohn <garyj@denx.de>
19 * SPDX-License-Identifier: GPL-2.0+
23 #include <asm/cpm_8260.h>
30 #if (CONFIG_ETHER_INDEX == 1)
31 # define PROFF_ENET PROFF_SCC1
32 # define CPM_CR_ENET_PAGE CPM_CR_SCC1_PAGE
33 # define CPM_CR_ENET_SBLOCK CPM_CR_SCC1_SBLOCK
34 # define CMXSCR_MASK (CMXSCR_SC1 |\
38 #elif (CONFIG_ETHER_INDEX == 2)
39 # define PROFF_ENET PROFF_SCC2
40 # define CPM_CR_ENET_PAGE CPM_CR_SCC2_PAGE
41 # define CPM_CR_ENET_SBLOCK CPM_CR_SCC2_SBLOCK
42 # define CMXSCR_MASK (CMXSCR_SC2 |\
46 #elif (CONFIG_ETHER_INDEX == 3)
47 # define PROFF_ENET PROFF_SCC3
48 # define CPM_CR_ENET_PAGE CPM_CR_SCC3_PAGE
49 # define CPM_CR_ENET_SBLOCK CPM_CR_SCC3_SBLOCK
50 # define CMXSCR_MASK (CMXSCR_SC3 |\
53 #elif (CONFIG_ETHER_INDEX == 4)
54 # define PROFF_ENET PROFF_SCC4
55 # define CPM_CR_ENET_PAGE CPM_CR_SCC4_PAGE
56 # define CPM_CR_ENET_SBLOCK CPM_CR_SCC4_SBLOCK
57 # define CMXSCR_MASK (CMXSCR_SC4 |\
64 /* Ethernet Transmit and Receive Buffers */
65 #define DBUF_LENGTH 1520
69 #if !defined(CONFIG_SYS_SCC_TOUT_LOOP)
70 #define CONFIG_SYS_SCC_TOUT_LOOP 1000000
73 static char txbuf[TX_BUF_CNT][ DBUF_LENGTH ];
75 static uint rxIdx; /* index of the current RX buffer */
76 static uint txIdx; /* index of the current TX buffer */
79 * SCC Ethernet Tx and Rx buffer descriptors allocated at the
80 * immr->udata_bd address on Dual-Port RAM
81 * Provide for Double Buffering
84 typedef volatile struct CommonBufferDescriptor {
85 cbd_t rxbd[PKTBUFSRX]; /* Rx BD */
86 cbd_t txbd[TX_BUF_CNT]; /* Tx BD */
/*
 * sec_send() - transmit one frame on the SCC Ethernet interface.
 *
 * Spins (bounded by CONFIG_SYS_SCC_TOUT_LOOP iterations) until the current
 * Tx buffer descriptor is no longer owned by the CPM, hands the frame over
 * by writing its address and length into the BD and setting the
 * READY|LAST bits, then spins again until the controller clears READY
 * (i.e. the transmission completed or timed out).
 *
 * Returns the BD status bits (masked with BD_ENET_TX_STATS) of the
 * completed transmission.
 *
 * NOTE(review): parts of this routine (argument validation path, loop
 * bodies, txIdx wrap handling) are not visible in this chunk.
 */
92 static int sec_send(struct eth_device *dev, void *packet, int length)
/* Reject frames with an invalid size before touching the BD ring. */
98 		printf("scc: bad packet size: %d\n", length);
/* Wait for the CPM to release the current Tx BD; bail out on timeout. */
102 	for(i=0; rtx->txbd[txIdx].cbd_sc & BD_ENET_TX_READY; i++) {
103 		if (i >= CONFIG_SYS_SCC_TOUT_LOOP) {
104 			puts ("scc: tx buffer not ready\n");
/* Hand the frame to the SCC: buffer address, length, then ownership bits. */
109 	rtx->txbd[txIdx].cbd_bufaddr = (uint)packet;
110 	rtx->txbd[txIdx].cbd_datlen = length;
111 	rtx->txbd[txIdx].cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_LAST |
/* Busy-wait for completion: the CPM clears READY once the frame is sent. */
114 	for(i=0; rtx->txbd[txIdx].cbd_sc & BD_ENET_TX_READY; i++) {
115 		if (i >= CONFIG_SYS_SCC_TOUT_LOOP) {
116 			puts ("scc: tx error\n");
121 	/* return only status bits */
122 	result = rtx->txbd[txIdx].cbd_sc & BD_ENET_TX_STATS;
/*
 * sec_rx() - poll the Rx BD ring and pass received frames up the stack.
 *
 * Walks the receive buffer descriptors starting at rxIdx.  A BD still
 * flagged EMPTY means the CPM has not filled it yet, so polling stops.
 * Frames whose status word shows error bits (low 6 bits, mask 0x003f)
 * are reported; good frames go to NetReceive() with the trailing 4-byte
 * frame checksum stripped.  Each consumed BD is handed back to the SCC
 * (EMPTY set again, WRAP preserved on the last ring entry) and rxIdx
 * advances with wrap-around.
 *
 * NOTE(review): the enclosing loop header and parts of the recycle path
 * are not visible in this chunk.
 */
129 static int sec_rx(struct eth_device *dev)
/* BD still owned by the CPM: no frame available, stop polling. */
135 		if (rtx->rxbd[rxIdx].cbd_sc & BD_ENET_RX_EMPTY) {
137 			break;	/* nothing received - leave for() loop */
140 		length = rtx->rxbd[rxIdx].cbd_datlen;
/* Any of the low 6 status bits set means a receive error on this BD. */
142 		if (rtx->rxbd[rxIdx].cbd_sc & 0x003f)
144 			printf("err: %x\n", rtx->rxbd[rxIdx].cbd_sc);
148 			/* Pass the packet up to the protocol layers;
			 * drop the trailing 4-byte CRC from the length. */
149 			NetReceive(NetRxPackets[rxIdx], length - 4);
153 		/* Give the buffer back to the SCC. */
154 		rtx->rxbd[rxIdx].cbd_datlen = 0;
156 		/* wrap around buffer index when necessary */
157 		if ((rxIdx + 1) >= PKTBUFSRX) {
/* Last ring entry: re-arm it with WRAP so the CPM loops back to BD 0. */
158 			rtx->rxbd[PKTBUFSRX - 1].cbd_sc = (BD_ENET_RX_WRAP |
/* Interior ring entry: just mark it EMPTY again for the CPM. */
163 			rtx->rxbd[rxIdx].cbd_sc = BD_ENET_RX_EMPTY;
170 /**************************************************************
172 * SCC Ethernet Initialization Routine
174 *************************************************************/
/*
 * sec_init() - bring up the SCC Ethernet controller.
 *
 * Follows the SCC Ethernet initialization sequence of the MPC8260
 * reference manual (the "24.21 - (n)" comments below track the numbered
 * steps): allocate BD space in dual-port RAM, route clocks via the CPM
 * mux, program the SCC parameter RAM (FCRs, CRC preset/mask, frame
 * length limits, MAC address, address filters), issue INIT_TRX through
 * the CP command register, build the Rx/Tx BD rings, then configure
 * SCCE/SCCM/GSMR/DSR/PSMR and finally enable the SCC.
 *
 * @dev: U-Boot ethernet device being initialized
 * @bis: board information (not referenced in the visible code)
 *
 * NOTE(review): several lines (declarations of dpaddr/ea/i, some
 * multi-line expression continuations, #if branches of the PSMR setup)
 * are elided from this chunk.
 */
176 static int sec_init(struct eth_device *dev, bd_t *bis)
179 	volatile immap_t *immr = (immap_t *)CONFIG_SYS_IMMR;
180 	scc_enet_t *pram_ptr;
188 	 * Assign static pointer to BD area.
189 	 * Avoid exhausting DPRAM, which would cause a panic.
/* Carve out space for both BD rings from dual-port RAM, 16-byte aligned. */
192 	dpaddr = m8260_cpm_dpalloc(sizeof(RTXBD) + 2, 16);
193 	rtx = (RTXBD *)&immr->im_dprambase[dpaddr];
196 	/* 24.21 - (1-3): ioports have been set up already */
198 	/* 24.21 - (4,5): connect SCC's tx and rx clocks, use NMSI for SCC */
199 	immr->im_cpmux.cmx_uar = 0;
200 	immr->im_cpmux.cmx_scr = ( (immr->im_cpmux.cmx_scr & ~CMXSCR_MASK) |
201 				   CONFIG_SYS_CMXSCR_VALUE);
204 	/* 24.21 (6) write RBASE and TBASE to parameter RAM */
205 	pram_ptr = (scc_enet_t *)&(immr->im_dprambase[PROFF_ENET]);
206 	pram_ptr->sen_genscc.scc_rbase = (unsigned int)(&rtx->rxbd[0]);
207 	pram_ptr->sen_genscc.scc_tbase = (unsigned int)(&rtx->txbd[0]);
209 	pram_ptr->sen_genscc.scc_rfcr = 0x18;  /* Nrml Ops and Mot byte ordering */
210 	pram_ptr->sen_genscc.scc_tfcr = 0x18;  /* Mot byte ordering, Nrml access */
212 	pram_ptr->sen_genscc.scc_mrblr = DBUF_LENGTH; /* max. packet len 1520 */
214 	pram_ptr->sen_cpres = ~(0x0);	/* Preset CRC */
215 	pram_ptr->sen_cmask = 0xdebb20e3; /* Constant Mask for CRC */
218 	/* 24.21 - (7): Write INIT RX AND TX PARAMETERS to CPCR */
/* Wait until the CP command register is free before issuing the command. */
219 	while(immr->im_cpm.cp_cpcr & CPM_CR_FLG);
220 	immr->im_cpm.cp_cpcr = mk_cr_cmd(CPM_CR_ENET_PAGE,
223 					CPM_CR_INIT_TRX) | CPM_CR_FLG;
225 	/* 24.21 - (8-18): Set up parameter RAM */
226 	pram_ptr->sen_crcec = 0x0;	/* Error Counter CRC (unused) */
227 	pram_ptr->sen_alec = 0x0;	/* Align Error Counter (unused) */
228 	pram_ptr->sen_disfc = 0x0;	/* Discard Frame Counter (unused) */
230 	pram_ptr->sen_pads = 0x8888;	/* Short Frame PAD Characters */
232 	pram_ptr->sen_retlim = 15;	/* Retry Limit Threshold */
234 	pram_ptr->sen_maxflr = 1518;	/* MAX Frame Length Register */
235 	pram_ptr->sen_minflr = 64;	/* MIN Frame Length Register */
237 	pram_ptr->sen_maxd1 = DBUF_LENGTH;	/* MAX DMA1 Length Register */
238 	pram_ptr->sen_maxd2 = DBUF_LENGTH;	/* MAX DMA2 Length Register */
240 	pram_ptr->sen_gaddr1 = 0x0;	/* Group Address Filter 1 (unused) */
241 	pram_ptr->sen_gaddr2 = 0x0;	/* Group Address Filter 2 (unused) */
242 	pram_ptr->sen_gaddr3 = 0x0;	/* Group Address Filter 3 (unused) */
243 	pram_ptr->sen_gaddr4 = 0x0;	/* Group Address Filter 4 (unused) */
/* Load the station MAC address from the "ethaddr" environment variable.
 * The PADDR registers take the six address bytes in reverse order. */
245 	eth_getenv_enetaddr("ethaddr", ea);
246 	pram_ptr->sen_paddrh = (ea[5] << 8) + ea[4];
247 	pram_ptr->sen_paddrm = (ea[3] << 8) + ea[2];
248 	pram_ptr->sen_paddrl = (ea[1] << 8) + ea[0];
250 	pram_ptr->sen_pper = 0x0;	/* Persistence (unused) */
252 	pram_ptr->sen_iaddr1 = 0x0;	/* Individual Address Filter 1 (unused) */
253 	pram_ptr->sen_iaddr2 = 0x0;	/* Individual Address Filter 2 (unused) */
254 	pram_ptr->sen_iaddr3 = 0x0;	/* Individual Address Filter 3 (unused) */
255 	pram_ptr->sen_iaddr4 = 0x0;	/* Individual Address Filter 4 (unused) */
257 	pram_ptr->sen_taddrh = 0x0;	/* Tmp Address (MSB) (unused) */
258 	pram_ptr->sen_taddrm = 0x0;	/* Tmp Address (unused) */
259 	pram_ptr->sen_taddrl = 0x0;	/* Tmp Address (LSB) (unused) */
261 	/* 24.21 - (19): Initialize RxBD */
262 	for (i = 0; i < PKTBUFSRX; i++)
264 		rtx->rxbd[i].cbd_sc = BD_ENET_RX_EMPTY;
265 		rtx->rxbd[i].cbd_datlen = 0;	/* Reset */
266 		rtx->rxbd[i].cbd_bufaddr = (uint)NetRxPackets[i];
/* Terminate the Rx ring: last BD wraps back to the first. */
269 	rtx->rxbd[PKTBUFSRX - 1].cbd_sc |= BD_ENET_RX_WRAP;
271 	/* 24.21 - (20): Initialize TxBD */
272 	for (i = 0; i < TX_BUF_CNT; i++)
274 		rtx->txbd[i].cbd_sc = (BD_ENET_TX_PAD |
277 		rtx->txbd[i].cbd_datlen = 0;	/* Reset */
278 		rtx->txbd[i].cbd_bufaddr = (uint)&txbuf[i][0];
/* Terminate the Tx ring: last BD wraps back to the first. */
281 	rtx->txbd[TX_BUF_CNT - 1].cbd_sc |= BD_ENET_TX_WRAP;
283 	/* 24.21 - (21): Write 0xffff to SCCE */
/* Writing all ones clears every pending SCC event. */
284 	immr->im_scc[CONFIG_ETHER_INDEX-1].scc_scce = ~(0x0);
286 	/* 24.21 - (22): Write to SCCM to enable TXE, RXF, TXB events */
287 	immr->im_scc[CONFIG_ETHER_INDEX-1].scc_sccm = (SCCE_ENET_TXE |
291 	/* 24.21 - (23): we don't use ethernet interrupts */
293 	/* 24.21 - (24): Clear GSMR_H to enable normal operations */
294 	immr->im_scc[CONFIG_ETHER_INDEX-1].scc_gsmrh = 0;
296 	/* 24.21 - (25): Clear GSMR_L to enable normal operations */
297 	immr->im_scc[CONFIG_ETHER_INDEX-1].scc_gsmrl = (SCC_GSMRL_TCI |
300 					SCC_GSMRL_MODE_ENET);
302 	/* 24.21 - (26): Initialize DSR */
303 	immr->im_scc[CONFIG_ETHER_INDEX-1].scc_dsr = 0xd555;
305 	/* 24.21 - (27): Initialize PSMR2
309 	 * NIB = Begin searching for SFD 22 bits after RENA
310 	 * FDE = Full Duplex Enable
311 	 * BRO = Reject broadcast packets
312 	 * PROMISCUOUS = Catch all packets regardless of dest. MAC address
314 	immr->im_scc[CONFIG_ETHER_INDEX-1].scc_psmr = SCC_PSMR_ENCRC |
/* Optional PSMR bits selected at build time by board configuration. */
316 #if defined(CONFIG_SCC_ENET_FULL_DUPLEX)
319 #if defined(CONFIG_SCC_ENET_NO_BROADCAST)
322 #if defined(CONFIG_SCC_ENET_PROMISCOUS)
327 	/* 24.21 - (28): Write to GSMR_L to enable SCC */
328 	immr->im_scc[CONFIG_ETHER_INDEX-1].scc_gsmrl |= (SCC_GSMRL_ENR |
/*
 * sec_halt() - stop the SCC Ethernet controller.
 *
 * Clears the enable bits in GSMR_L (the visible mask starts with
 * SCC_GSMRL_ENR; the continuation of the expression is elided from this
 * chunk, presumably including SCC_GSMRL_ENT) so the SCC stops receiving
 * and transmitting.
 */
335 static void sec_halt(struct eth_device *dev)
337 	volatile immap_t *immr = (immap_t *)CONFIG_SYS_IMMR;
338 	immr->im_scc[CONFIG_ETHER_INDEX-1].scc_gsmrl &= ~(SCC_GSMRL_ENR |
/*
 * sec_restart() - re-enable the SCC after a halt.
 *
 * Sets the enable bits in GSMR_L again (visible mask starts with
 * SCC_GSMRL_ENR; continuation elided from this chunk).
 *
 * NOTE(review): this routine reaches the SCC via im_cpm.cp_scc[...]
 * while sec_init()/sec_halt() use im_scc[...] — verify both paths name
 * the same registers in the immap layout; looks like an inconsistency.
 */
343 static void sec_restart(void)
345 	volatile immap_t *immr = (immap_t *)CONFIG_SYS_IMMR;
346 	immr->im_cpm.cp_scc[CONFIG_ETHER_INDEX-1].scc_gsmrl |= (SCC_GSMRL_ENR |
351 int mpc82xx_scc_enet_initialize(bd_t *bis)
353 struct eth_device *dev;
355 dev = (struct eth_device *) malloc(sizeof *dev);
356 memset(dev, 0, sizeof *dev);
358 sprintf(dev->name, "SCC");
359 dev->init = sec_init;
360 dev->halt = sec_halt;
361 dev->send = sec_send;