4 * \brief GMAC (Ethernet MAC) driver for SAM.
\r
6 * Copyright (c) 2013 Atmel Corporation. All rights reserved.
\r
12 * Redistribution and use in source and binary forms, with or without
\r
13 * modification, are permitted provided that the following conditions are met:
\r
15 * 1. Redistributions of source code must retain the above copyright notice,
\r
16 * this list of conditions and the following disclaimer.
\r
18 * 2. Redistributions in binary form must reproduce the above copyright notice,
\r
19 * this list of conditions and the following disclaimer in the documentation
\r
20 * and/or other materials provided with the distribution.
\r
22 * 3. The name of Atmel may not be used to endorse or promote products derived
\r
23 * from this software without specific prior written permission.
\r
25 * 4. This software may only be redistributed and used in connection with an
\r
26 * Atmel microcontroller product.
\r
28 * THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR IMPLIED
\r
29 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
\r
30 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
\r
31 * EXPRESSLY AND SPECIFICALLY DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR
\r
32 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
\r
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
\r
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
\r
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
\r
36 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
\r
37 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
\r
38 * POSSIBILITY OF SUCH DAMAGE.
\r
44 /* Standard includes. */
\r
50 /* FreeRTOS includes. */
\r
51 #include "FreeRTOS.h"
\r
54 #include "FreeRTOSIPConfig.h"
\r
56 #include "compiler.h"
\r
57 #include "instance/gmac.h"
\r
58 #include "ethernet_phy.h"
\r
/* Number of elements in a true array.  Do NOT use on a pointer or on an
 * array function parameter: both decay to pointers and the result is
 * meaningless.  The inner parentheses around (x)[0] make the operator
 * precedence explicit (the original relied on 'sizeof (x)[0]' parsing as
 * sizeof((x)[0])).  Cast to int because the file compares the result
 * against signed loop indices. */
#define ARRAY_SIZE(x) (int)( sizeof(x) / sizeof( (x)[0] ) )
\r
72 * \defgroup gmac_group Ethernet Media Access Controller
\r
74 * See \ref gmac_quickstart.
\r
76 * Driver for the GMAC (Ethernet Media Access Controller).
\r
77 * This file contains basic functions for the GMAC, with support for all modes, settings
\r
80 * \section dependencies Dependencies
\r
81 * This driver does not depend on other modules.
\r
/* Statically allocated DMA descriptor rings and frame buffers shared by the
 * whole driver.  COMPILER_ALIGNED(8) satisfies the 8-byte alignment that
 * gmac_init_mem() checks for.
 * NOTE(review): this extract is missing lines (e.g. the #endif that should
 * close the GMAC_USES_TX_CALLBACK conditional) -- confirm against the full
 * source before editing. */
86 /** TX descriptor lists */

88 static gmac_tx_descriptor_t gs_tx_desc[ GMAC_TX_BUFFERS ];

89 #if( GMAC_USES_TX_CALLBACK != 0 )

90 /** TX callback lists */

91 static gmac_dev_tx_cb_t gs_tx_callback[ GMAC_TX_BUFFERS ];

93 /** RX descriptors lists */

95 static gmac_rx_descriptor_t gs_rx_desc[ GMAC_RX_BUFFERS ];

/* With a zero-copy TX driver the network stack's own buffers are handed to
 * the DMA directly, so no static TX buffer is reserved. */
97 #if( ipconfigZERO_COPY_TX_DRIVER == 0 )

98 /** Send Buffer. Section 3.6 of AMBA 2.0 spec states that burst should not cross the

99 * 1K Boundaries. Receive buffer manager write operations are burst of 2 words => 3 lsb bits

100 * of the address shall be set to 0.

102 COMPILER_ALIGNED(8)

103 static uint8_t gs_uc_tx_buffer[ GMAC_TX_BUFFERS * GMAC_TX_UNITSIZE ];

104 #endif /* ipconfigZERO_COPY_TX_DRIVER */

106 /** Receive Buffer */

107 COMPILER_ALIGNED(8)

108 static uint8_t gs_uc_rx_buffer[ GMAC_RX_BUFFERS * GMAC_RX_UNITSIZE ];
111 * GMAC device memory management struct.
\r
/* Parameter block passed to gmac_init_mem(): describes the caller-allocated
 * RX/TX buffers and descriptor rings.
 * NOTE(review): the closing '} gmac_dev_mem_t;' is missing from this
 * extract; the typedef name gmac_dev_mem_t is used later in the file. */
113 typedef struct gmac_dev_mem {

114 /* Pointer to allocated buffer for RX. The address should be 8-byte aligned

115 and the size should be GMAC_RX_UNITSIZE * wRxSize. */

116 uint8_t *p_rx_buffer;

117 /* Pointer to allocated RX descriptor list. */

118 gmac_rx_descriptor_t *p_rx_dscr;

119 /* RX size, in number of registered units (RX descriptors). */

120 /* Increased size from 16- to 32-bits, because it's more efficient */

121 uint32_t us_rx_size;

122 /* Pointer to allocated buffer for TX. The address should be 8-byte aligned

123 and the size should be GMAC_TX_UNITSIZE * wTxSize. */

124 uint8_t *p_tx_buffer;

125 /* Pointer to allocated TX descriptor list. */

126 gmac_tx_descriptor_t *p_tx_dscr;

127 /* TX size, in number of registered units (TX descriptors). */

128 uint32_t us_tx_size;
\r
/** Return count in buffer.
 * head and tail are both indices in [ 0, size ).  The '+ ( size )' bias is
 * needed because in C the '%' operator truncates toward zero: with the
 * original '( ( head ) - ( tail ) ) % ( size )', a head that had wrapped
 * around while tail had not (head < tail) produced a NEGATIVE count, which
 * callers such as gmac_dev_get_tx_load() then converted to a huge unsigned
 * value.  Adding one full 'size' keeps the dividend non-negative without
 * changing the result for head >= tail. */
#define CIRC_CNT( head, tail, size ) ( ( ( head ) - ( tail ) + ( size ) ) % ( size ) )

/*
 * Return space available, from 0 to size-1.
 * Always leave one free char as a completely full buffer that has (head == tail),
 * which is the same as empty.
 */
#define CIRC_SPACE( head, tail, size ) CIRC_CNT( ( tail ), ( ( head ) + 1 ), ( size ) )

/** Circular buffer is empty ?  (arguments parenthesized so that expression
 * arguments bind correctly) */
#define CIRC_EMPTY( head, tail ) ( ( head ) == ( tail ) )

/** Clear circular buffer */
#define CIRC_CLEAR( head, tail ) do { ( head ) = 0; ( tail ) = 0; } while( 0 )
\r
146 /** Increment head or tail */
\r
147 static __inline void circ_inc32( int32_t *lHeadOrTail, uint32_t ulSize )
\r
149 ( *lHeadOrTail ) ++;
\r
150 if( ( *lHeadOrTail ) >= ( int32_t )ulSize )
\r
152 ( *lHeadOrTail ) = 0;
\r
157 * \brief Wait PHY operation to be completed.
\r
159 * \param p_gmac HW controller address.
\r
160 * \param ul_retry The retry times, 0 to wait forever until completeness.
\r
162 * Return GMAC_OK if the operation is completed successfully.
\r
164 static uint8_t gmac_wait_phy(Gmac* p_gmac, const uint32_t ul_retry)

/* Poll until the GMAC's PHY maintenance logic reports idle, yielding to the
 * scheduler for 1 ms between polls.  ul_retry == 0 appears to mean "wait
 * forever"; otherwise GMAC_TIMEOUT is returned after ul_retry polls.
 * NOTE(review): this extract is missing lines (the retry-counter increment
 * and the success return) -- confirm against the full source. */
166 volatile uint32_t ul_retry_count = 0;

167 const uint32_t xPHYPollDelay = pdMS_TO_TICKS( 1ul );

169 while (!gmac_is_phy_idle(p_gmac)) {

170 if (ul_retry == 0) {

176 if (ul_retry_count >= ul_retry) {

177 return GMAC_TIMEOUT;

180 /* Block the task to allow other tasks to execute while the PHY

181 is not connected. */

182 vTaskDelay( xPHYPollDelay );
\r
188 * \brief Disable transfer, reset registers and descriptor lists.
\r
190 * \param p_dev Pointer to GMAC driver instance.
\r
193 static void gmac_reset_tx_mem(gmac_device_t* p_dev)

/* Disable the transmitter, rebuild the TX descriptor ring (every descriptor
 * marked USED, last one also marked WRAP), reset the head/tail indices and
 * hand the ring's base address to the hardware. */
195 Gmac *p_hw = p_dev->p_hw;

196 uint8_t *p_tx_buff = p_dev->p_tx_buffer;

197 gmac_tx_descriptor_t *p_td = p_dev->p_tx_dscr;

200 uint32_t ul_address;

203 gmac_enable_transmit(p_hw, 0);

205 /* Set up the TX descriptors */

206 CIRC_CLEAR(p_dev->l_tx_head, p_dev->l_tx_tail);

207 for( ul_index = 0; ul_index < p_dev->ul_tx_list_size; ul_index++ )

/* Zero-copy: buffer addresses are filled in later by gmac_dev_write()
 * with the stack's own buffers, so start them out as 0. */
209 #if( ipconfigZERO_COPY_TX_DRIVER != 0 )

211 ul_address = (uint32_t) 0u;

215 ul_address = (uint32_t) (&(p_tx_buff[ul_index * GMAC_TX_UNITSIZE]));

217 #endif /* ipconfigZERO_COPY_TX_DRIVER */

218 p_td[ul_index].addr = ul_address;

219 p_td[ul_index].status.val = GMAC_TXD_USED;

/* Last descriptor wraps the ring back to entry 0. */
221 p_td[p_dev->ul_tx_list_size - 1].status.val =

222 GMAC_TXD_USED | GMAC_TXD_WRAP;

224 /* Set transmit buffer queue */

225 gmac_set_tx_queue(p_hw, (uint32_t) p_td);
\r
229 * \brief Disable receiver, reset registers and descriptor list.
\r
231 * \param p_drv Pointer to GMAC Driver instance.
\r
233 static void gmac_reset_rx_mem(gmac_device_t* p_dev)

/* Disable the receiver, rebuild the RX descriptor ring (each descriptor
 * pointing into the RX buffer area, ownership given to the GMAC, last one
 * marked WRAP) and hand the ring's base address to the hardware. */
235 Gmac *p_hw = p_dev->p_hw;

236 uint8_t *p_rx_buff = p_dev->p_rx_buffer;

237 gmac_rx_descriptor_t *pRd = p_dev->p_rx_dscr;

240 uint32_t ul_address;

243 gmac_enable_receive(p_hw, 0);

245 /* Set up the RX descriptors */

246 p_dev->ul_rx_idx = 0;

247 for( ul_index = 0; ul_index < p_dev->ul_rx_list_size; ul_index++ )

249 ul_address = (uint32_t) (&(p_rx_buff[ul_index * GMAC_RX_UNITSIZE]));

/* Masking with GMAC_RXD_ADDR_MASK clears the low control bits
 * (ownership/wrap) that live in the same word as the address. */
250 pRd[ul_index].addr.val = ul_address & GMAC_RXD_ADDR_MASK;

251 pRd[ul_index].status.val = 0;

253 pRd[p_dev->ul_rx_list_size - 1].addr.val |= GMAC_RXD_WRAP;

255 /* Set receive buffer queue */

256 gmac_set_rx_queue(p_hw, (uint32_t) pRd);
\r
261 * \brief Initialize the allocated buffer lists for GMAC driver to transfer data.
\r
262 * Must be invoked after gmac_dev_init() but before RX/TX starts.
\r
264 * \note If input address is not 8-byte aligned, the address is automatically
\r
265 * adjusted and the list size is reduced by one.
\r
267 * \param p_gmac Pointer to GMAC instance.
\r
268 * \param p_gmac_dev Pointer to GMAC device instance.
\r
269 * \param p_dev_mm Pointer to the GMAC memory management control block.
\r
270 * \param p_tx_cb Pointer to allocated TX callback list.
\r
272 * \return GMAC_OK or GMAC_PARAM.
\r
274 static uint8_t gmac_init_mem(Gmac* p_gmac, gmac_device_t* p_gmac_dev,

275 gmac_dev_mem_t* p_dev_mm

276 #if( GMAC_USES_TX_CALLBACK != 0 )

277 , gmac_dev_tx_cb_t* p_tx_cb

/* Validate and install the caller-supplied buffer/descriptor memory into
 * the device instance, force 8-byte alignment (shrinking the list by one
 * entry when the caller's pointers were misaligned), reset both rings and
 * enable TX/RX plus the interrupt sources this driver handles.
 * NOTE(review): several lines (parameter-check body, #endif lines, the
 * final return) are missing from this extract -- confirm against the full
 * source before editing. */
281 if (p_dev_mm->us_rx_size <= 1 || p_dev_mm->us_tx_size <= 1

282 #if( GMAC_USES_TX_CALLBACK != 0 )

289 /* Assign RX buffers */

290 if (((uint32_t) p_dev_mm->p_rx_buffer & 0x7)

291 || ((uint32_t) p_dev_mm->p_rx_dscr & 0x7)) {

292 p_dev_mm->us_rx_size--;

/* Round the pointers DOWN to an 8-byte boundary; the list was shortened
 * above so the last entry still fits inside the caller's allocation. */
294 p_gmac_dev->p_rx_buffer =

295 (uint8_t *) ((uint32_t) p_dev_mm->p_rx_buffer & 0xFFFFFFF8);

296 p_gmac_dev->p_rx_dscr =

297 (gmac_rx_descriptor_t *) ((uint32_t) p_dev_mm->p_rx_dscr

299 p_gmac_dev->ul_rx_list_size = p_dev_mm->us_rx_size;

301 /* Assign TX buffers */

302 if (((uint32_t) p_dev_mm->p_tx_buffer & 0x7)

303 || ((uint32_t) p_dev_mm->p_tx_dscr & 0x7)) {

304 p_dev_mm->us_tx_size--;

306 p_gmac_dev->p_tx_buffer =

307 (uint8_t *) ((uint32_t) p_dev_mm->p_tx_buffer & 0xFFFFFFF8);

308 p_gmac_dev->p_tx_dscr =

309 (gmac_tx_descriptor_t *) ((uint32_t) p_dev_mm->p_tx_dscr

311 p_gmac_dev->ul_tx_list_size = p_dev_mm->us_tx_size;

312 #if( GMAC_USES_TX_CALLBACK != 0 )

313 p_gmac_dev->func_tx_cb_list = p_tx_cb;

315 /* Reset TX & RX */

316 gmac_reset_rx_mem(p_gmac_dev);

317 gmac_reset_tx_mem(p_gmac_dev);

319 /* Enable Rx and Tx, plus the statistics register */

320 gmac_enable_transmit(p_gmac, true);

321 gmac_enable_receive(p_gmac, true);

322 gmac_enable_statistics_write(p_gmac, true);

324 /* Set up the interrupts for transmission and errors */

325 gmac_enable_interrupt(p_gmac,

326 GMAC_IER_RXUBR | /* Enable receive used bit read interrupt. */

327 GMAC_IER_TUR | /* Enable transmit underrun interrupt. */

328 GMAC_IER_RLEX | /* Enable retry limit exceeded interrupt. */

329 GMAC_IER_TFC | /* Enable transmit buffers exhausted in mid-frame interrupt. */

330 GMAC_IER_TCOMP | /* Enable transmit complete interrupt. */

331 GMAC_IER_ROVR | /* Enable receive overrun interrupt. */

332 GMAC_IER_HRESP | /* Enable Hresp not OK interrupt. */

333 GMAC_IER_PFNZ | /* Enable pause frame received interrupt. */

334 GMAC_IER_PTZ); /* Enable pause time zero interrupt. */
\r
340 * \brief Read the PHY register.
\r
342 * \param p_gmac Pointer to the GMAC instance.
\r
343 * \param uc_phy_address PHY address.
\r
344 * \param uc_address Register address.
\r
345 * \param p_value Pointer to a 32-bit location to store read data.
\r
347 * \Return GMAC_OK if successfully, GMAC_TIMEOUT if timeout.
\r
349 uint8_t gmac_phy_read(Gmac* p_gmac, uint8_t uc_phy_address, uint8_t uc_address,

/* Issue a PHY-maintenance READ (rw flag = 1), wait for completion with a
 * bounded retry, then fetch the data word.
 * NOTE(review): the final parameter (p_value pointer) and the success
 * return are missing from this extract -- confirm against the full source. */
352 gmac_maintain_phy(p_gmac, uc_phy_address, uc_address, 1, 0);

354 if (gmac_wait_phy(p_gmac, MAC_PHY_RETRY_MAX) == GMAC_TIMEOUT) {

355 return GMAC_TIMEOUT;

357 *p_value = gmac_get_phy_data(p_gmac);
\r
362 * \brief Write the PHY register.
\r
364 * \param p_gmac Pointer to the GMAC instance.
\r
365 * \param uc_phy_address PHY Address.
\r
366 * \param uc_address Register Address.
\r
367 * \param ul_value Data to write, actually 16-bit data.
\r
369 * \Return GMAC_OK if successfully, GMAC_TIMEOUT if timeout.
\r
371 uint8_t gmac_phy_write(Gmac* p_gmac, uint8_t uc_phy_address,

372 uint8_t uc_address, uint32_t ul_value)

/* Issue a PHY-maintenance WRITE (rw flag = 0) of ul_value and wait for
 * completion with a bounded retry.
 * NOTE(review): the success return is missing from this extract. */
374 gmac_maintain_phy(p_gmac, uc_phy_address, uc_address, 0, ul_value);

376 if (gmac_wait_phy(p_gmac, MAC_PHY_RETRY_MAX) == GMAC_TIMEOUT) {

377 return GMAC_TIMEOUT;
\r
383 * \brief Initialize the GMAC driver.
\r
385 * \param p_gmac Pointer to the GMAC instance.
\r
386 * \param p_gmac_dev Pointer to the GMAC device instance.
\r
387 * \param p_opt GMAC configure options.
\r
389 void gmac_dev_init(Gmac* p_gmac, gmac_device_t* p_gmac_dev,

390 gmac_options_t* p_opt)

/* One-time controller bring-up: quiesce the MAC, clear statistics and all
 * latched status/interrupt bits, program the network/DMA configuration
 * (RX buffer offset, FCS stripping, pause, TX checksum offload), then hand
 * the static buffers to gmac_init_mem() and program the MAC address.
 * NOTE(review): several lines are missing from this extract (e.g. the
 * #else/#endif around ETHERNET_CONF_DATA_OFFSET and parts of the
 * gmac_set_configure()/gmac_init_mem() argument lists) -- confirm against
 * the full source before editing. */
392 gmac_dev_mem_t gmac_dev_mm;

394 /* Disable TX & RX and more */

395 gmac_network_control(p_gmac, 0);

396 gmac_disable_interrupt(p_gmac, ~0u);

399 gmac_clear_statistics(p_gmac);

401 /* Clear all status bits in the receive status register. */

402 gmac_clear_rx_status(p_gmac, GMAC_RSR_RXOVR | GMAC_RSR_REC | GMAC_RSR_BNA);

404 /* Clear all status bits in the transmit status register */

405 gmac_clear_tx_status(p_gmac, GMAC_TSR_UBR | GMAC_TSR_COL | GMAC_TSR_RLE

406 | GMAC_TSR_TFC | GMAC_TSR_TXCOMP | GMAC_TSR_UND);

/* Reading the ISR clears the latched interrupt bits. */
408 /* Clear interrupts */

409 gmac_get_interrupt_status(p_gmac);

410 #if !defined(ETHERNET_CONF_DATA_OFFSET)

411 /* Receive Buffer Offset

412 * Indicates the number of bytes by which the received data

413 * is offset from the start of the receive buffer

414 * which can be handy for alignment reasons */

415 /* Note: FreeRTOS+TCP wants to have this offset set to 2 bytes */

416 #error ETHERNET_CONF_DATA_OFFSET not defined, assuming 0

418 /* Enable the copy of data into the buffers

419 ignore broadcasts, and not copy FCS. */

421 gmac_set_configure(p_gmac,

422 ( gmac_get_configure(p_gmac) & ~GMAC_NCFGR_RXBUFO_Msk ) |

423 GMAC_NCFGR_RFCS | /* Remove FCS, frame check sequence (last 4 bytes) */

424 GMAC_NCFGR_PEN | /* Pause Enable */

425 GMAC_NCFGR_RXBUFO( ETHERNET_CONF_DATA_OFFSET ) |

429 * GMAC_DCFGR_TXCOEN: (GMAC_DCFGR) Transmitter Checksum Generation Offload Enable.

430 * Note: tha SAM4E does have RX checksum offloading

431 * but TX checksum offloading has NOT been implemented.

434 gmac_set_dma(p_gmac,

435 gmac_get_dma(p_gmac) | GMAC_DCFGR_TXCOEN );

437 gmac_enable_copy_all(p_gmac, p_opt->uc_copy_all_frame);

438 gmac_disable_broadcast(p_gmac, p_opt->uc_no_boardcast);

440 /* Fill in GMAC device memory management */

441 gmac_dev_mm.p_rx_buffer = gs_uc_rx_buffer;

442 gmac_dev_mm.p_rx_dscr = gs_rx_desc;

443 gmac_dev_mm.us_rx_size = GMAC_RX_BUFFERS;

445 #if( ipconfigZERO_COPY_TX_DRIVER != 0 )

447 gmac_dev_mm.p_tx_buffer = NULL;

451 gmac_dev_mm.p_tx_buffer = gs_uc_tx_buffer;

454 gmac_dev_mm.p_tx_dscr = gs_tx_desc;

455 gmac_dev_mm.us_tx_size = GMAC_TX_BUFFERS;

457 gmac_init_mem(p_gmac, p_gmac_dev, &gmac_dev_mm

458 #if( GMAC_USES_TX_CALLBACK != 0 )

/* Program the primary (slot 0) station MAC address. */
463 gmac_set_address(p_gmac, 0, p_opt->uc_mac_addr);
\r
467 * \brief Frames can be read from the GMAC in multiple sections.
\r
469 * Returns > 0 if a complete frame is available
\r
470 * It also it cleans up incomplete older frames
\r
473 static uint32_t gmac_dev_poll(gmac_device_t* p_gmac_dev)

/* Scan the RX descriptor ring starting at ul_rx_idx: descriptors owned by
 * the CPU but without a Start-Of-Frame bit are stale fragments and are
 * returned to the GMAC; the scan then looks for a descriptor carrying
 * End-Of-Frame and returns that frame's length (0 if no complete frame is
 * available yet).  A second SOF before an EOF means the previous segments
 * were truncated, and they are discarded.
 * NOTE(review): braces/#endif lines and the final return are missing from
 * this extract -- confirm against the full source before editing. */
475 uint32_t ulReturn = 0;

476 int32_t ulIndex = p_gmac_dev->ul_rx_idx;

477 gmac_rx_descriptor_t *pxHead = &p_gmac_dev->p_rx_dscr[ulIndex];

479 /* Discard any incomplete frames */

480 while ((pxHead->addr.val & GMAC_RXD_OWNERSHIP) &&

481 (pxHead->status.val & GMAC_RXD_SOF) == 0) {

/* Clearing OWNERSHIP gives the descriptor back to the GMAC DMA. */
482 pxHead->addr.val &= ~(GMAC_RXD_OWNERSHIP);

483 circ_inc32 (&ulIndex, p_gmac_dev->ul_rx_list_size);

484 pxHead = &p_gmac_dev->p_rx_dscr[ulIndex];

485 p_gmac_dev->ul_rx_idx = ulIndex;

486 #if( GMAC_STATS != 0 )

488 gmacStats.incompCount++;

493 while ((pxHead->addr.val & GMAC_RXD_OWNERSHIP) != 0) {

494 if ((pxHead->status.val & GMAC_RXD_EOF) != 0) {

495 /* Here a complete frame has been seen with SOF and EOF */

496 ulReturn = pxHead->status.bm.len;

499 circ_inc32 (&ulIndex, p_gmac_dev->ul_rx_list_size);

500 pxHead = &p_gmac_dev->p_rx_dscr[ulIndex];

501 if ((pxHead->addr.val & GMAC_RXD_OWNERSHIP) == 0) {

502 /* CPU is not the owner (yet) */

505 if ((pxHead->status.val & GMAC_RXD_SOF) != 0) {

506 /* Strange, we found a new Start Of Frame

507 * discard previous segments */

508 int32_t ulPrev = p_gmac_dev->ul_rx_idx;

509 pxHead = &p_gmac_dev->p_rx_dscr[ulPrev];

511 pxHead->addr.val &= ~(GMAC_RXD_OWNERSHIP);

512 circ_inc32 (&ulPrev, p_gmac_dev->ul_rx_list_size);

513 pxHead = &p_gmac_dev->p_rx_dscr[ulPrev];

514 #if( GMAC_STATS != 0 )

516 gmacStats.truncCount++;

519 } while (ulPrev != ulIndex);

520 p_gmac_dev->ul_rx_idx = ulIndex;
\r
527 * \brief Frames can be read from the GMAC in multiple sections.
\r
528 * Read ul_frame_size bytes from the GMAC receive buffers to pcTo.
\r
529 * p_rcv_size is the size of the entire frame. Generally gmac_read
\r
530 * will be repeatedly called until the sum of all the ul_frame_size equals
\r
531 * the value of p_rcv_size.
\r
533 * \param p_gmac_dev Pointer to the GMAC device instance.
\r
534 * \param p_frame Address of the frame buffer.
\r
535 * \param ul_frame_size Length of the frame.
\r
536 * \param p_rcv_size Received frame size.
\r
538 * \return GMAC_OK if receiving frame successfully, otherwise failed.
\r
540 uint32_t gmac_dev_read(gmac_device_t* p_gmac_dev, uint8_t* p_frame,

541 uint32_t ul_frame_size, uint32_t* p_rcv_size)

/* Copy the next complete received frame (as confirmed by gmac_dev_poll())
 * out of the contiguous RX buffer area into p_frame, wrapping with a second
 * memcpy if the frame straddles the end of the ring.  All descriptors of the
 * frame are then handed back to the GMAC and ul_rx_idx is advanced past it.
 * Returns GMAC_RX_NULL when no complete frame is available.
 * NOTE(review): several lines are missing from this extract (declarations
 * of 'toCopy'/'left', the wrap-around size adjustment, the do-loop opening
 * and the final return) -- confirm against the full source before editing. */
543 int32_t nextIdx; /* A copy of the Rx-index 'ul_rx_idx' */

544 int32_t bytesLeft = gmac_dev_poll (p_gmac_dev);

545 gmac_rx_descriptor_t *pxHead;

547 if (bytesLeft == 0 )

549 return GMAC_RX_NULL;

552 /* gmac_dev_poll has confirmed that there is a complete frame at

553 * the current position 'ul_rx_idx'

555 nextIdx = p_gmac_dev->ul_rx_idx;

/* NOTE(review): the "+2" relates to ETHERNET_CONF_DATA_OFFSET padding --
 * presumably the payload starts 2 bytes into the buffer; verify. */
557 /* Read +2 bytes because buffers are aligned at -2 bytes */

558 bytesLeft = min( bytesLeft + 2, ( int32_t )ul_frame_size );

560 /* The frame will be copied in 1 or 2 memcpy's */

561 if( ( p_frame != NULL ) && ( bytesLeft != 0 ) )

563 const uint8_t *source;

567 source = p_gmac_dev->p_rx_buffer + nextIdx * GMAC_RX_UNITSIZE;

569 toCopy = ( p_gmac_dev->ul_rx_list_size - nextIdx ) * GMAC_RX_UNITSIZE;

574 memcpy (p_frame, source, toCopy);

579 memcpy (p_frame + toCopy, (void*)p_gmac_dev->p_rx_buffer, left);

/* Return every descriptor of the consumed frame to the GMAC, up to and
 * including the one carrying EOF. */
585 pxHead = &p_gmac_dev->p_rx_dscr[nextIdx];

586 pxHead->addr.val &= ~(GMAC_RXD_OWNERSHIP);

587 circ_inc32 (&nextIdx, p_gmac_dev->ul_rx_list_size);

588 } while ((pxHead->status.val & GMAC_RXD_EOF) == 0);

590 p_gmac_dev->ul_rx_idx = nextIdx;

592 *p_rcv_size = bytesLeft;
\r
598 extern void vGMACGenerateChecksum( uint8_t *apBuffer );
\r
601 * \brief Send ulLength bytes from pcFrom. This copies the buffer to one of the
\r
602 * GMAC Tx buffers, and then indicates to the GMAC that the buffer is ready.
\r
603 * If lEndOfFrame is true then the data being copied is the end of the frame
\r
604 * and the frame can be transmitted.
\r
606 * \param p_gmac_dev Pointer to the GMAC device instance.
\r
607 * \param p_buffer Pointer to the data buffer.
\r
608 * \param ul_size Length of the frame.
\r
609 * \param func_tx_cb Transmit callback function.
\r
611 * \return Length sent.
\r
613 uint32_t gmac_dev_write(gmac_device_t* p_gmac_dev, void *p_buffer,

614 uint32_t ul_size, gmac_dev_tx_cb_t func_tx_cb)

/* Queue one complete Ethernet frame for transmission on the descriptor at
 * l_tx_head: either point the descriptor at the caller's buffer (zero-copy
 * build) or memcpy the frame into the driver's TX buffer, generate the
 * protocol checksum in software, mark the descriptor LAST (+WRAP on the
 * final ring entry) and kick the transmitter.  Returns GMAC_TX_BUSY when
 * the head descriptor is still owned by the hardware.
 * NOTE(review): several lines (braces, the GMAC_PARAM return for oversized
 * frames, the 'used' flag handling and the final GMAC_OK return) are
 * missing from this extract -- confirm against the full source. */
617 volatile gmac_tx_descriptor_t *p_tx_td;

618 #if( GMAC_USES_TX_CALLBACK != 0 )

619 volatile gmac_dev_tx_cb_t *p_func_tx_cb;

622 Gmac *p_hw = p_gmac_dev->p_hw;

624 #if( GMAC_USES_TX_CALLBACK == 0 )

625 ( void )func_tx_cb;

628 /* Check parameter */

629 if (ul_size > GMAC_TX_UNITSIZE) {

633 /* Pointers to the current transmit descriptor */

634 p_tx_td = &p_gmac_dev->p_tx_dscr[p_gmac_dev->l_tx_head];

636 /* If no free TxTd, buffer can't be sent, schedule the wakeup callback */

637 // if (CIRC_SPACE(p_gmac_dev->l_tx_head, p_gmac_dev->l_tx_tail,

638 // p_gmac_dev->ul_tx_list_size) == 0)

/* USED cleared means the GMAC still owns this descriptor. */
640 if ((p_tx_td->status.val & GMAC_TXD_USED) == 0)

641 return GMAC_TX_BUSY;

643 #if( GMAC_USES_TX_CALLBACK != 0 )

644 /* Pointers to the current Tx callback */

645 p_func_tx_cb = &p_gmac_dev->func_tx_cb_list[p_gmac_dev->l_tx_head];

648 /* Set up/copy data to transmission buffer */

649 if (p_buffer && ul_size) {

650 /* Driver manages the ring buffer */

651 /* Calculating the checksum here is faster than calculating it from the GMAC buffer

652 * because withing p_buffer, it is well aligned */

653 #if( ipconfigZERO_COPY_TX_DRIVER != 0 )

/* Zero-copy: the descriptor takes ownership of the stack's buffer. */
656 p_tx_td->addr = ( uint32_t ) p_buffer;

660 /* Or Memcopy... */

661 memcpy((void *)p_tx_td->addr, p_buffer, ul_size);

663 #endif /* ipconfigZERO_COPY_TX_DRIVER */

/* TX checksum offload is not used (see gmac_dev_init notes), so the
 * checksum is generated in software here. */
664 vGMACGenerateChecksum( ( uint8_t * ) p_tx_td->addr );

667 #if( GMAC_USES_TX_CALLBACK != 0 )

669 *p_func_tx_cb = func_tx_cb;

672 /* Update transmit descriptor status */

674 /* The buffer size defined is the length of ethernet frame,

675 so it's always the last buffer of the frame. */

676 if( p_gmac_dev->l_tx_head == ( int32_t )( p_gmac_dev->ul_tx_list_size - 1 ) )

678 /* No need to 'and' with GMAC_TXD_LEN_MASK because ul_size has been checked */

679 p_tx_td->status.val =

680 ul_size | GMAC_TXD_LAST | GMAC_TXD_WRAP;

682 p_tx_td->status.val =

683 ul_size | GMAC_TXD_LAST;

686 circ_inc32( &p_gmac_dev->l_tx_head, p_gmac_dev->ul_tx_list_size );

688 /* Now start to transmit if it is still not done */

689 gmac_start_transmission(p_hw);
\r
695 * \brief Get current load of transmit.
\r
697 * \param p_gmac_dev Pointer to the GMAC device instance.
\r
699 * \return Current load of transmit.
\r
701 #if( GMAC_USES_TX_CALLBACK != 0 )

702 /* Without defining GMAC_USES_TX_CALLBACK, l_tx_tail won't be updated */

/* Number of TX descriptors currently queued (head - tail, modulo ring
 * size).  NOTE(review): l_tx_head/l_tx_tail look like int32_t elsewhere in
 * this file; narrowing them to uint16_t here is safe only while the ring
 * size fits in 16 bits -- verify against the struct definition. */
703 uint32_t gmac_dev_get_tx_load(gmac_device_t* p_gmac_dev)

705 uint16_t us_head = p_gmac_dev->l_tx_head;

706 uint16_t us_tail = p_gmac_dev->l_tx_tail;

707 return CIRC_CNT(us_head, us_tail, p_gmac_dev->ul_tx_list_size);
\r
712 * \brief Register/Clear RX callback. Callback will be invoked after the next received
\r
715 * When gmac_dev_read() returns GMAC_RX_NULL, the application task calls
\r
716 * gmac_dev_set_rx_callback() to register func_rx_cb() callback and enters suspend state.
\r
717 * The callback is in charge to resume the task once a new frame has been
\r
718 * received. The next time gmac_dev_read() is called, it will be successful.
\r
720 * This function is usually invoked from the RX callback itself with NULL
\r
721 * callback, to unregister. Once the callback has resumed the application task,
\r
722 * there is no need to invoke the callback again.
\r
724 * \param p_gmac_dev Pointer to the GMAC device instance.
\r
725 * \param func_tx_cb Receive callback function.
\r
727 void gmac_dev_set_rx_callback(gmac_device_t* p_gmac_dev,

728 gmac_dev_rx_cb_t func_rx_cb)

/* Register (non-NULL) or unregister (NULL) the receive-complete callback,
 * enabling/disabling the RCOMP interrupt to match.  Note the ordering on
 * each side: the interrupt is disabled BEFORE clearing the pointer, and the
 * pointer is set BEFORE enabling the interrupt, so the ISR never sees an
 * enabled interrupt with a NULL callback. */
730 Gmac *p_hw = p_gmac_dev->p_hw;

732 if (func_rx_cb == NULL) {

733 gmac_disable_interrupt(p_hw, GMAC_IDR_RCOMP);

734 p_gmac_dev->func_rx_cb = NULL;

736 p_gmac_dev->func_rx_cb = func_rx_cb;

737 gmac_enable_interrupt(p_hw, GMAC_IER_RCOMP);
\r
742 * \brief Register/Clear TX wakeup callback.
\r
744 * When gmac_dev_write() returns GMAC_TX_BUSY (all transmit descriptor busy), the application
\r
745 * task calls gmac_dev_set_tx_wakeup_callback() to register func_wakeup() callback and
\r
746 * enters suspend state. The callback is in charge to resume the task once
\r
747 * several transmit descriptors have been released. The next time gmac_dev_write() will be called,
\r
748 * it shall be successful.
\r
750 * This function is usually invoked with NULL callback from the TX wakeup
\r
751 * callback itself, to unregister. Once the callback has resumed the
\r
752 * application task, there is no need to invoke the callback again.
\r
754 * \param p_gmac_dev Pointer to GMAC device instance.
\r
755 * \param func_wakeup Pointer to wakeup callback function.
\r
756 * \param uc_threshold Number of free transmit descriptor before wakeup callback invoked.
\r
758 * \return GMAC_OK, GMAC_PARAM on parameter error.
\r
760 #if( GMAC_USES_WAKEUP_CALLBACK )

761 uint8_t gmac_dev_set_tx_wakeup_callback(gmac_device_t* p_gmac_dev,

762 gmac_dev_wakeup_cb_t func_wakeup_cb, uint8_t uc_threshold)

/* Register (non-NULL) or unregister (NULL) the TX wakeup callback; the
 * callback fires from the handler once at least uc_threshold descriptors
 * are free.  A threshold larger than the ring is rejected.
 * NOTE(review): the GMAC_OK/GMAC_PARAM return statements are missing from
 * this extract -- confirm against the full source. */
764 if (func_wakeup_cb == NULL) {

765 p_gmac_dev->func_wakeup_cb = NULL;

767 if (uc_threshold <= p_gmac_dev->ul_tx_list_size) {

768 p_gmac_dev->func_wakeup_cb = func_wakeup_cb;

769 p_gmac_dev->uc_wakeup_threshold = uc_threshold;

777 #endif /* GMAC_USES_WAKEUP_CALLBACK */
\r
780 * \brief Reset TX & RX queue & statistics.
\r
782 * \param p_gmac_dev Pointer to GMAC device instance.
\r
784 void gmac_dev_reset(gmac_device_t* p_gmac_dev)

/* Rebuild both descriptor rings, then re-enable TX/RX, statistics writes
 * and clear the statistics registers in a single network-control write. */
786 Gmac *p_hw = p_gmac_dev->p_hw;

788 gmac_reset_rx_mem(p_gmac_dev);

789 gmac_reset_tx_mem(p_gmac_dev);

790 gmac_network_control(p_hw, GMAC_NCR_TXEN | GMAC_NCR_RXEN

791 | GMAC_NCR_WESTAT | GMAC_NCR_CLRSTAT);
\r
794 void gmac_dev_halt(Gmac* p_gmac);

/* Stop the controller: write the network-control register WITHOUT the
 * TXEN/RXEN bits (disabling both directions) and mask every interrupt. */
796 void gmac_dev_halt(Gmac* p_gmac)

798 gmac_network_control(p_gmac, GMAC_NCR_WESTAT | GMAC_NCR_CLRSTAT);

799 gmac_disable_interrupt(p_gmac, ~0u);
\r
804 * \brief GMAC Interrupt handler.
\r
806 * \param p_gmac_dev Pointer to GMAC device instance.
\r
809 #if( GMAC_STATS != 0 )

810 extern int logPrintf( const char *pcFormat, ... );

/* Debug helper: print one line per interrupt source that has fired at
 * least once, using the name/index pairs in 'intPairs'.
 * NOTE(review): the declaration of 'index' and closing braces are missing
 * from this extract -- confirm against the full source. */
812 void gmac_show_irq_counts ()

815 for (index = 0; index < ARRAY_SIZE(intPairs); index++) {

816 if (gmacStats.intStatus[intPairs[index].index]) {

817 logPrintf("%s : %6u\n", intPairs[index].name, gmacStats.intStatus[intPairs[index].index]);
\r
823 void gmac_handler(gmac_device_t* p_gmac_dev)

/* GMAC interrupt service routine: snapshots and acknowledges the ISR/RSR/
 * TSR registers, dispatches the RX-complete callback, recovers from a
 * retry-limit-exceeded TX error by rebuilding the TX ring, then (when TX
 * callbacks are enabled) walks the sent descriptors from l_tx_tail and
 * notifies the upper layer per transmitted frame.
 * NOTE(review): this extract is missing many lines (declarations such as
 * 'index', braces, and the function's tail after the wakeup-callback check)
 * -- confirm against the full source before editing. */
825 Gmac *p_hw = p_gmac_dev->p_hw;

827 #if( GMAC_USES_TX_CALLBACK != 0 )

828 gmac_tx_descriptor_t *p_tx_td;

829 gmac_dev_tx_cb_t *p_tx_cb = NULL;

830 uint32_t ul_tx_status_flag;

832 #if( GMAC_STATS != 0 )

836 /* volatile */ uint32_t ul_isr;

837 /* volatile */ uint32_t ul_rsr;

838 /* volatile */ uint32_t ul_tsr;

/* Reading ISR clears its latched bits, so all three are captured first. */
840 ul_isr = gmac_get_interrupt_status(p_hw);

841 ul_rsr = gmac_get_rx_status(p_hw);

842 ul_tsr = gmac_get_tx_status(p_hw);

844 /* Why clear bits that are ignored anyway ? */

845 /* ul_isr &= ~(gmac_get_interrupt_mask(p_hw) | 0xF8030300); */

846 #if( GMAC_STATS != 0 )

848 for (index = 0; index < ARRAY_SIZE(intPairs); index++) {

849 if (ul_isr & intPairs[index].mask)

850 gmacStats.intStatus[intPairs[index].index]++;

853 #endif /* GMAC_STATS != 0 */

/* RX path: acknowledge RX status and wake the reader task via callback. */
856 if ((ul_isr & GMAC_ISR_RCOMP) || (ul_rsr & (GMAC_RSR_REC|GMAC_RSR_RXOVR|GMAC_RSR_BNA))) {

858 gmac_clear_rx_status(p_hw, ul_rsr);

860 if (ul_isr & GMAC_ISR_RCOMP)

861 ul_rsr |= GMAC_RSR_REC;

862 /* Invoke callbacks which can be useful to wake op a task */

863 if (p_gmac_dev->func_rx_cb) {

864 p_gmac_dev->func_rx_cb(ul_rsr);

/* TX path: completion or error (collision, retry limit, underrun). */
869 if ((ul_isr & GMAC_ISR_TCOMP) || (ul_tsr & (GMAC_TSR_TXCOMP|GMAC_TSR_COL|GMAC_TSR_RLE|GMAC_TSR_UND))) {

871 #if( GMAC_USES_TX_CALLBACK != 0 )

872 ul_tx_status_flag = GMAC_TSR_TXCOMP;

874 /* A frame transmitted */

877 if (ul_tsr & GMAC_TSR_RLE) {

878 /* Status RLE & Number of discarded buffers */

879 #if( GMAC_USES_TX_CALLBACK != 0 )

880 ul_tx_status_flag = GMAC_TSR_RLE | CIRC_CNT(p_gmac_dev->l_tx_head,

881 p_gmac_dev->l_tx_tail, p_gmac_dev->ul_tx_list_size);

882 p_tx_cb = &p_gmac_dev->func_tx_cb_list[p_gmac_dev->l_tx_tail];

/* Retry-limit exceeded: the ring state is unreliable, rebuild it. */
884 gmac_reset_tx_mem(p_gmac_dev);

885 gmac_enable_transmit(p_hw, 1);

888 gmac_clear_tx_status(p_hw, ul_tsr);

890 #if( GMAC_USES_TX_CALLBACK != 0 )

891 if (!CIRC_EMPTY(p_gmac_dev->l_tx_head, p_gmac_dev->l_tx_tail)) {

892 /* Check the buffers */

894 p_tx_td = &p_gmac_dev->p_tx_dscr[p_gmac_dev->l_tx_tail];

895 p_tx_cb = &p_gmac_dev->func_tx_cb_list[p_gmac_dev->l_tx_tail];

896 /* Any error? Exit if buffer has not been sent yet */

897 if ((p_tx_td->status.val & GMAC_TXD_USED) == 0) {

901 /* Notify upper layer that a packet has been sent */

903 (*p_tx_cb) (ul_tx_status_flag, (void*)p_tx_td->addr);

904 #if( ipconfigZERO_COPY_TX_DRIVER != 0 )

/* Zero-copy: drop our reference to the stack-owned buffer. */
906 p_tx_td->addr = 0ul;

908 #endif /* ipconfigZERO_COPY_TX_DRIVER */

911 circ_inc32(&p_gmac_dev->l_tx_tail, p_gmac_dev->ul_tx_list_size);

912 } while (CIRC_CNT(p_gmac_dev->l_tx_head, p_gmac_dev->l_tx_tail,

913 p_gmac_dev->ul_tx_list_size));

916 if (ul_tsr & GMAC_TSR_RLE) {

917 /* Notify upper layer RLE */

919 (*p_tx_cb) (ul_tx_status_flag, NULL);

922 #endif /* GMAC_USES_TX_CALLBACK */

924 #if( GMAC_USES_WAKEUP_CALLBACK )

925 /* If a wakeup has been scheduled, notify upper layer that it can

926 send other packets, and the sending will be successful. */

927 if ((CIRC_SPACE(p_gmac_dev->l_tx_head, p_gmac_dev->l_tx_tail,

928 p_gmac_dev->ul_tx_list_size) >= p_gmac_dev->uc_wakeup_threshold)

929 && p_gmac_dev->func_wakeup_cb) {

930 p_gmac_dev->func_wakeup_cb();
\r