4 * \brief GMAC (Ethernet MAC) driver for SAM.
6 * Copyright (c) 2013 Atmel Corporation. All rights reserved.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions are met:
15 * 1. Redistributions of source code must retain the above copyright notice,
16 * this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright notice,
19 * this list of conditions and the following disclaimer in the documentation
20 * and/or other materials provided with the distribution.
22 * 3. The name of Atmel may not be used to endorse or promote products derived
23 * from this software without specific prior written permission.
25 * 4. This software may only be redistributed and used in connection with an
26 * Atmel microcontroller product.
28 * THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR IMPLIED
29 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
30 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
31 * EXPRESSLY AND SPECIFICALLY DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR
32 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
36 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
37 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38 * POSSIBILITY OF SUCH DAMAGE.
44 /* Standard includes. */
50 /* FreeRTOS includes. */
54 #include "FreeRTOSIPConfig.h"
57 #include "instance/gmac.h"
58 #include "ethernet_phy.h"
/* Element count of a true (statically sized) array, as a signed int.
 * Must not be applied to a pointer: sizeof would then give the pointer size. */
#define ARRAY_SIZE( x )    ( int ) ( sizeof( x ) / sizeof( ( x )[ 0 ] ) )
72 * \defgroup gmac_group Ethernet Media Access Controller
74 * See \ref gmac_quickstart.
76 * Driver for the GMAC (Ethernet Media Access Controller).
77 * This file contains basic functions for the GMAC, with support for all modes, settings
80 * \section dependencies Dependencies
81 * This driver does not depend on other modules.
/** TX descriptor lists */
/* DMA descriptor ring handed to the GMAC via gmac_set_tx_queue(). */
static gmac_tx_descriptor_t gs_tx_desc[ GMAC_TX_BUFFERS ];
#if( GMAC_USES_TX_CALLBACK != 0 )
/** TX callback lists */
/* One optional per-descriptor completion callback, parallel to gs_tx_desc. */
static gmac_dev_tx_cb_t gs_tx_callback[ GMAC_TX_BUFFERS ];
/** RX descriptors lists */
/* DMA descriptor ring handed to the GMAC via gmac_set_rx_queue(). */
static gmac_rx_descriptor_t gs_rx_desc[ GMAC_RX_BUFFERS ];
#if( ipconfigZERO_COPY_TX_DRIVER == 0 )
/** Send Buffer. Section 3.6 of AMBA 2.0 spec states that burst should not cross the
* 1K Boundaries. Receive buffer manager write operations are burst of 2 words => 3 lsb bits
* of the address shall be set to 0.
/* Only allocated in copy mode; with a zero-copy TX driver the network stack's
 * own buffers are DMA'd directly (see gmac_dev_write). */
static uint8_t gs_uc_tx_buffer[ GMAC_TX_BUFFERS * GMAC_TX_UNITSIZE ];
#endif /* ipconfigZERO_COPY_TX_DRIVER */
/** Receive Buffer */
static uint8_t gs_uc_rx_buffer[ GMAC_RX_BUFFERS * GMAC_RX_UNITSIZE ];
 * GMAC device memory management struct.
/* Describes the caller-allocated RX/TX buffers and descriptor lists that
 * gmac_init_mem() installs into a gmac_device_t. */
typedef struct gmac_dev_mem {
/* Pointer to allocated buffer for RX. The address should be 8-byte aligned
and the size should be GMAC_RX_UNITSIZE * wRxSize. */
uint8_t *p_rx_buffer;
/* Pointer to allocated RX descriptor list. */
gmac_rx_descriptor_t *p_rx_dscr;
/* RX size, in number of registered units (RX descriptors). */
/* Increased size from 16- to 32-bits, because it's more efficient */
/* Pointer to allocated buffer for TX. The address should be 8-byte aligned
and the size should be GMAC_TX_UNITSIZE * wTxSize. */
uint8_t *p_tx_buffer;
/* Pointer to allocated TX descriptor list. */
gmac_tx_descriptor_t *p_tx_dscr;
/* TX size, in number of registered units (TX descriptors). */
/** Return count in buffer.
 * head and tail are ring indices in [ 0, size ) (see circ_inc32).  Adding
 * 'size' before the modulo keeps the result non-negative once head has
 * wrapped back below tail; the plain ( head - tail ) % size form returned
 * a negative count in that case (and made CIRC_SPACE yield -1 for an
 * empty ring, breaking the wakeup-threshold comparison). */
#define CIRC_CNT( head, tail, size ) ( ( ( head ) - ( tail ) + ( size ) ) % ( size ) )
 * Return space available, from 0 to size-1.
 * Always leave one free char as a completely full buffer that has (head == tail),
 * which is the same as empty.
/* Space is the count seen from the "other side" of the ring: the number of
 * slots between head+1 and tail. */
#define CIRC_SPACE( head, tail, size ) CIRC_CNT( ( tail ), ( ( head ) + 1 ), ( size ) )
/** Circular buffer is empty ?
 * Arguments are parenthesized so expression arguments (e.g. 'a & 1')
 * cannot be re-associated by operator precedence. */
#define CIRC_EMPTY( head, tail ) ( ( head ) == ( tail ) )
/** Clear circular buffer */
/* Reset both indices to zero (head == tail means empty).  do/while(0)
 * makes the macro statement-safe inside if/else without braces. */
#define CIRC_CLEAR( head, tail ) do { ( head ) = 0; ( tail ) = 0; } while( 0 )
/** Increment head or tail */
/* Advance a ring index by one, wrapping to 0 at ulSize.  Used for both the
 * RX and TX descriptor rings; keeps indices in [ 0, ulSize ). */
static __inline void circ_inc32( int32_t *lHeadOrTail, uint32_t ulSize )
if( ( *lHeadOrTail ) >= ( int32_t )ulSize )
/* Ran past the last descriptor: wrap to the start of the ring. */
( *lHeadOrTail ) = 0;
 * \brief Wait PHY operation to be completed.
 * \param p_gmac HW controller address.
 * \param ul_retry The retry times, 0 to wait forever until completeness.
 * Return GMAC_OK if the operation is completed successfully.
/* Polls gmac_is_phy_idle() once per tick, sleeping with vTaskDelay() between
 * polls so other tasks can run.
 * NOTE(review): the visible 'ul_retry_count >= ul_retry' test would time out
 * immediately for ul_retry == 0, which contradicts the "0 = wait forever"
 * contract above — confirm against the elided lines of this loop. */
static uint8_t gmac_wait_phy(Gmac* p_gmac, const uint32_t ul_retry)
volatile uint32_t ul_retry_count = 0;
const uint32_t xPHYPollDelay = pdMS_TO_TICKS( 1ul );
while (!gmac_is_phy_idle(p_gmac)) {
if (ul_retry_count >= ul_retry) {
/* Block the task to allow other tasks to execute while the PHY
vTaskDelay( xPHYPollDelay );
 * \brief Disable transfer, reset registers and descriptor lists.
 * \param p_dev Pointer to GMAC driver instance.
/* Rebuilds the whole TX descriptor ring: every descriptor is marked USED
 * (owned by software), the last one gets the WRAP bit, and the ring base is
 * programmed into the controller.  TX is disabled for the duration. */
static void gmac_reset_tx_mem(gmac_device_t* p_dev)
Gmac *p_hw = p_dev->p_hw;
uint8_t *p_tx_buff = p_dev->p_tx_buffer;
gmac_tx_descriptor_t *p_td = p_dev->p_tx_dscr;
gmac_enable_transmit(p_hw, 0); /* Stop TX while the ring is rebuilt. */
/* Set up the TX descriptors */
CIRC_CLEAR(p_dev->l_tx_head, p_dev->l_tx_tail);
for( ul_index = 0; ul_index < p_dev->ul_tx_list_size; ul_index++ )
#if( ipconfigZERO_COPY_TX_DRIVER != 0 )
/* Zero-copy: the buffer address is filled in later by gmac_dev_write(). */
ul_address = (uint32_t) 0u;
/* Copy mode: each descriptor points at its fixed slot in the TX buffer. */
ul_address = (uint32_t) (&(p_tx_buff[ul_index * GMAC_TX_UNITSIZE]));
#endif /* ipconfigZERO_COPY_TX_DRIVER */
p_td[ul_index].addr = ul_address;
p_td[ul_index].status.val = GMAC_TXD_USED;
/* Last descriptor also carries WRAP so the DMA loops back to index 0. */
p_td[p_dev->ul_tx_list_size - 1].status.val =
GMAC_TXD_USED | GMAC_TXD_WRAP;
/* Set transmit buffer queue */
gmac_set_tx_queue(p_hw, (uint32_t) p_td);
 * \brief Disable receiver, reset registers and descriptor list.
 * \param p_drv Pointer to GMAC Driver instance.
/* Rebuilds the RX descriptor ring: each descriptor points at its slot in the
 * RX buffer with ownership given to the GMAC (OWNERSHIP bit clear), the last
 * descriptor gets the WRAP bit, and the ring base is programmed into the
 * controller.  RX is disabled for the duration. */
static void gmac_reset_rx_mem(gmac_device_t* p_dev)
Gmac *p_hw = p_dev->p_hw;
uint8_t *p_rx_buff = p_dev->p_rx_buffer;
gmac_rx_descriptor_t *pRd = p_dev->p_rx_dscr;
gmac_enable_receive(p_hw, 0); /* Stop RX while the ring is rebuilt. */
/* Set up the RX descriptors */
p_dev->ul_rx_idx = 0;
for( ul_index = 0; ul_index < p_dev->ul_rx_list_size; ul_index++ )
ul_address = (uint32_t) (&(p_rx_buff[ul_index * GMAC_RX_UNITSIZE]));
/* Low address bits hold control flags, hence the address mask. */
pRd[ul_index].addr.val = ul_address & GMAC_RXD_ADDR_MASK;
pRd[ul_index].status.val = 0;
pRd[p_dev->ul_rx_list_size - 1].addr.val |= GMAC_RXD_WRAP;
/* Set receive buffer queue */
gmac_set_rx_queue(p_hw, (uint32_t) pRd);
 * \brief Initialize the allocated buffer lists for GMAC driver to transfer data.
 * Must be invoked after gmac_dev_init() but before RX/TX starts.
 * \note If input address is not 8-byte aligned, the address is automatically
 * adjusted and the list size is reduced by one.
 * \param p_gmac Pointer to GMAC instance.
 * \param p_gmac_dev Pointer to GMAC device instance.
 * \param p_dev_mm Pointer to the GMAC memory management control block.
 * \param p_tx_cb Pointer to allocated TX callback list.
 * \return GMAC_OK or GMAC_PARAM.
static uint8_t gmac_init_mem(Gmac* p_gmac, gmac_device_t* p_gmac_dev,
gmac_dev_mem_t* p_dev_mm
#if( GMAC_USES_TX_CALLBACK != 0 )
, gmac_dev_tx_cb_t* p_tx_cb
/* At least 2 descriptors per direction are required (one is sacrificed to
 * tell "full" from "empty" in the circular ring). */
if (p_dev_mm->us_rx_size <= 1 || p_dev_mm->us_tx_size <= 1
#if( GMAC_USES_TX_CALLBACK != 0 )
/* Assign RX buffers */
/* If either the buffer or the descriptor list is not 8-byte aligned, drop
 * one unit so the rounded-down addresses still cover a full list. */
if (((uint32_t) p_dev_mm->p_rx_buffer & 0x7)
|| ((uint32_t) p_dev_mm->p_rx_dscr & 0x7)) {
p_dev_mm->us_rx_size--;
p_gmac_dev->p_rx_buffer =
(uint8_t *) ((uint32_t) p_dev_mm->p_rx_buffer & 0xFFFFFFF8);
p_gmac_dev->p_rx_dscr =
(gmac_rx_descriptor_t *) ((uint32_t) p_dev_mm->p_rx_dscr
p_gmac_dev->ul_rx_list_size = p_dev_mm->us_rx_size;
/* Assign TX buffers */
/* Same alignment treatment for the TX side. */
if (((uint32_t) p_dev_mm->p_tx_buffer & 0x7)
|| ((uint32_t) p_dev_mm->p_tx_dscr & 0x7)) {
p_dev_mm->us_tx_size--;
p_gmac_dev->p_tx_buffer =
(uint8_t *) ((uint32_t) p_dev_mm->p_tx_buffer & 0xFFFFFFF8);
p_gmac_dev->p_tx_dscr =
(gmac_tx_descriptor_t *) ((uint32_t) p_dev_mm->p_tx_dscr
p_gmac_dev->ul_tx_list_size = p_dev_mm->us_tx_size;
#if( GMAC_USES_TX_CALLBACK != 0 )
p_gmac_dev->func_tx_cb_list = p_tx_cb;
/* Program both descriptor rings into the controller. */
gmac_reset_rx_mem(p_gmac_dev);
gmac_reset_tx_mem(p_gmac_dev);
/* Enable Rx and Tx, plus the statistics register */
gmac_enable_transmit(p_gmac, true);
gmac_enable_receive(p_gmac, true);
gmac_enable_statistics_write(p_gmac, true);
/* Set up the interrupts for transmission and errors */
gmac_enable_interrupt(p_gmac,
GMAC_IER_RXUBR | /* Enable receive used bit read interrupt. */
GMAC_IER_TUR | /* Enable transmit underrun interrupt. */
GMAC_IER_RLEX | /* Enable retry limit exceeded interrupt. */
GMAC_IER_TFC | /* Enable transmit buffers exhausted in mid-frame interrupt. */
GMAC_IER_TCOMP | /* Enable transmit complete interrupt. */
GMAC_IER_ROVR | /* Enable receive overrun interrupt. */
GMAC_IER_HRESP | /* Enable Hresp not OK interrupt. */
GMAC_IER_PFNZ | /* Enable pause frame received interrupt. */
GMAC_IER_PTZ); /* Enable pause time zero interrupt. */
 * \brief Read the PHY register.
 * \param p_gmac Pointer to the GMAC instance.
 * \param uc_phy_address PHY address.
 * \param uc_address Register address.
 * \param p_value Pointer to a 32-bit location to store read data.
 * \return GMAC_OK if successfully, GMAC_TIMEOUT if timeout.
uint8_t gmac_phy_read(Gmac* p_gmac, uint8_t uc_phy_address, uint8_t uc_address,
/* Issue an MDIO read (rw flag = 1), then wait for the PHY shift register. */
gmac_maintain_phy(p_gmac, uc_phy_address, uc_address, 1, 0);
if (gmac_wait_phy(p_gmac, MAC_PHY_RETRY_MAX) == GMAC_TIMEOUT) {
*p_value = gmac_get_phy_data(p_gmac);
 * \brief Write the PHY register.
 * \param p_gmac Pointer to the GMAC instance.
 * \param uc_phy_address PHY Address.
 * \param uc_address Register Address.
 * \param ul_value Data to write, actually 16-bit data.
 * \return GMAC_OK if successfully, GMAC_TIMEOUT if timeout.
uint8_t gmac_phy_write(Gmac* p_gmac, uint8_t uc_phy_address,
uint8_t uc_address, uint32_t ul_value)
/* Issue an MDIO write (rw flag = 0) and wait for completion. */
gmac_maintain_phy(p_gmac, uc_phy_address, uc_address, 0, ul_value);
if (gmac_wait_phy(p_gmac, MAC_PHY_RETRY_MAX) == GMAC_TIMEOUT) {
 * \brief Initialize the GMAC driver.
 * \param p_gmac Pointer to the GMAC instance.
 * \param p_gmac_dev Pointer to the GMAC device instance.
 * \param p_opt GMAC configure options.
/* Brings the controller from reset into a configured state: quiesce, clear
 * stale status/statistics, set NCFGR/DCFGR options, install the statically
 * allocated buffer rings via gmac_init_mem(), and program the MAC address. */
void gmac_dev_init(Gmac* p_gmac, gmac_device_t* p_gmac_dev,
gmac_options_t* p_opt)
gmac_dev_mem_t gmac_dev_mm;
/* Disable TX & RX and more */
gmac_network_control(p_gmac, 0);
gmac_disable_interrupt(p_gmac, ~0u);
gmac_clear_statistics(p_gmac);
/* Clear all status bits in the receive status register. */
gmac_clear_rx_status(p_gmac, GMAC_RSR_RXOVR | GMAC_RSR_REC | GMAC_RSR_BNA);
/* Clear all status bits in the transmit status register */
gmac_clear_tx_status(p_gmac, GMAC_TSR_UBR | GMAC_TSR_COL | GMAC_TSR_RLE
| GMAC_TSR_TFC | GMAC_TSR_TXCOMP | GMAC_TSR_UND);
/* Clear interrupts */
gmac_get_interrupt_status(p_gmac);
#if !defined(ETHERNET_CONF_DATA_OFFSET)
/* Receive Buffer Offset
 * Indicates the number of bytes by which the received data
 * is offset from the start of the receive buffer
 * which can be handy for alignment reasons */
/* Note: FreeRTOS+TCP wants to have this offset set to 2 bytes */
#error ETHERNET_CONF_DATA_OFFSET not defined, assuming 0
/* Enable the copy of data into the buffers
ignore broadcasts, and not copy FCS. */
gmac_set_configure(p_gmac,
( gmac_get_configure(p_gmac) & ~GMAC_NCFGR_RXBUFO_Msk ) |
GMAC_NCFGR_RFCS | /* Remove FCS, frame check sequence (last 4 bytes) */
GMAC_NCFGR_PEN | /* Pause Enable */
GMAC_NCFGR_RXBUFO( ETHERNET_CONF_DATA_OFFSET ) |
 * GMAC_DCFGR_TXCOEN: (GMAC_DCFGR) Transmitter Checksum Generation Offload Enable.
 * Note: the SAM4E does have RX checksum offloading
 * but TX checksum offloading has NOT been implemented.
gmac_get_dma(p_gmac) | GMAC_DCFGR_TXCOEN );
gmac_enable_copy_all(p_gmac, p_opt->uc_copy_all_frame);
gmac_disable_broadcast(p_gmac, p_opt->uc_no_boardcast);
/* Fill in GMAC device memory management */
gmac_dev_mm.p_rx_buffer = gs_uc_rx_buffer;
gmac_dev_mm.p_rx_dscr = gs_rx_desc;
gmac_dev_mm.us_rx_size = GMAC_RX_BUFFERS;
#if( ipconfigZERO_COPY_TX_DRIVER != 0 )
/* Zero-copy TX: no driver-owned send buffer; stack buffers are DMA'd. */
gmac_dev_mm.p_tx_buffer = NULL;
gmac_dev_mm.p_tx_buffer = gs_uc_tx_buffer;
gmac_dev_mm.p_tx_dscr = gs_tx_desc;
gmac_dev_mm.us_tx_size = GMAC_TX_BUFFERS;
gmac_init_mem(p_gmac, p_gmac_dev, &gmac_dev_mm
#if( GMAC_USES_TX_CALLBACK != 0 )
gmac_set_address(p_gmac, 0, p_opt->uc_mac_addr);
 * \brief Frames can be read from the GMAC in multiple sections.
 * Returns > 0 if a complete frame is available
 * It also cleans up incomplete older frames
/* Scans the RX descriptor ring starting at ul_rx_idx.  Descriptors that are
 * CPU-owned but do not start with SOF are leftovers of a truncated frame and
 * are returned to the GMAC.  Returns the length of the first complete
 * SOF..EOF frame found, or 0 if none is ready yet. */
static uint32_t gmac_dev_poll(gmac_device_t* p_gmac_dev)
uint32_t ulReturn = 0;
int32_t ulIndex = p_gmac_dev->ul_rx_idx;
gmac_rx_descriptor_t *pxHead = &p_gmac_dev->p_rx_dscr[ulIndex];
/* Discard any incomplete frames */
while ((pxHead->addr.val & GMAC_RXD_OWNERSHIP) &&
(pxHead->status.val & GMAC_RXD_SOF) == 0) {
/* Give the descriptor back to the GMAC and advance past the fragment. */
pxHead->addr.val &= ~(GMAC_RXD_OWNERSHIP);
circ_inc32 (&ulIndex, p_gmac_dev->ul_rx_list_size);
pxHead = &p_gmac_dev->p_rx_dscr[ulIndex];
p_gmac_dev->ul_rx_idx = ulIndex;
#if( GMAC_STATS != 0 )
gmacStats.incompCount++;
/* Walk CPU-owned descriptors looking for the EOF of the current frame. */
while ((pxHead->addr.val & GMAC_RXD_OWNERSHIP) != 0) {
if ((pxHead->status.val & GMAC_RXD_EOF) != 0) {
/* Here a complete frame has been seen with SOF and EOF */
ulReturn = pxHead->status.bm.len;
circ_inc32 (&ulIndex, p_gmac_dev->ul_rx_list_size);
pxHead = &p_gmac_dev->p_rx_dscr[ulIndex];
if ((pxHead->addr.val & GMAC_RXD_OWNERSHIP) == 0) {
/* CPU is not the owner (yet) */
if ((pxHead->status.val & GMAC_RXD_SOF) != 0) {
/* Strange, we found a new Start Of Frame
 * discard previous segments */
int32_t ulPrev = p_gmac_dev->ul_rx_idx;
pxHead = &p_gmac_dev->p_rx_dscr[ulPrev];
/* Hand every segment of the abandoned frame back to the GMAC. */
pxHead->addr.val &= ~(GMAC_RXD_OWNERSHIP);
circ_inc32 (&ulPrev, p_gmac_dev->ul_rx_list_size);
pxHead = &p_gmac_dev->p_rx_dscr[ulPrev];
#if( GMAC_STATS != 0 )
gmacStats.truncCount++;
} while (ulPrev != ulIndex);
p_gmac_dev->ul_rx_idx = ulIndex;
 * \brief Frames can be read from the GMAC in multiple sections.
 * Read ul_frame_size bytes from the GMAC receive buffers to pcTo.
 * p_rcv_size is the size of the entire frame. Generally gmac_read
 * will be repeatedly called until the sum of all the ul_frame_size equals
 * the value of p_rcv_size.
 * \param p_gmac_dev Pointer to the GMAC device instance.
 * \param p_frame Address of the frame buffer.
 * \param ul_frame_size Length of the frame.
 * \param p_rcv_size Received frame size.
 * \return GMAC_OK if receiving frame successfully, otherwise failed.
uint32_t gmac_dev_read(gmac_device_t* p_gmac_dev, uint8_t* p_frame,
uint32_t ul_frame_size, uint32_t* p_rcv_size)
int32_t nextIdx; /* A copy of the Rx-index 'ul_rx_idx' */
int32_t bytesLeft = gmac_dev_poll (p_gmac_dev);
gmac_rx_descriptor_t *pxHead;
/* gmac_dev_poll has confirmed that there is a complete frame at
 * the current position 'ul_rx_idx'
nextIdx = p_gmac_dev->ul_rx_idx;
/* Read +2 bytes because buffers are aligned at -2 bytes */
bytesLeft = min( bytesLeft + 2, ( int32_t )ul_frame_size );
/* The frame will be copied in 1 or 2 memcpy's */
if( ( p_frame != NULL ) && ( bytesLeft != 0 ) )
const uint8_t *source;
/* The RX ring is one contiguous buffer, so a frame that wraps past the
 * last unit continues at the start of p_rx_buffer. */
source = p_gmac_dev->p_rx_buffer + nextIdx * GMAC_RX_UNITSIZE;
toCopy = ( p_gmac_dev->ul_rx_list_size - nextIdx ) * GMAC_RX_UNITSIZE;
memcpy (p_frame, source, toCopy);
/* Second part: the bytes that wrapped to the start of the ring. */
memcpy (p_frame + toCopy, (void*)p_gmac_dev->p_rx_buffer, left);
/* Return all consumed descriptors to the GMAC, up to and including EOF. */
pxHead = &p_gmac_dev->p_rx_dscr[nextIdx];
pxHead->addr.val &= ~(GMAC_RXD_OWNERSHIP);
circ_inc32 (&nextIdx, p_gmac_dev->ul_rx_list_size);
} while ((pxHead->status.val & GMAC_RXD_EOF) == 0);
p_gmac_dev->ul_rx_idx = nextIdx;
*p_rcv_size = bytesLeft;
598 extern void vGMACGenerateChecksum( uint8_t *apBuffer );
 * \brief Send ulLength bytes from pcFrom. This copies the buffer to one of the
 * GMAC Tx buffers, and then indicates to the GMAC that the buffer is ready.
 * If lEndOfFrame is true then the data being copied is the end of the frame
 * and the frame can be transmitted.
 * \param p_gmac_dev Pointer to the GMAC device instance.
 * \param p_buffer Pointer to the data buffer.
 * \param ul_size Length of the frame.
 * \param func_tx_cb Transmit callback function.
 * \return Length sent.
uint32_t gmac_dev_write(gmac_device_t* p_gmac_dev, void *p_buffer,
uint32_t ul_size, gmac_dev_tx_cb_t func_tx_cb)
volatile gmac_tx_descriptor_t *p_tx_td;
#if( GMAC_USES_TX_CALLBACK != 0 )
volatile gmac_dev_tx_cb_t *p_func_tx_cb;
Gmac *p_hw = p_gmac_dev->p_hw;
#if( GMAC_USES_TX_CALLBACK == 0 )
/* Check parameter */
/* A frame must fit into a single TX unit; no multi-descriptor frames. */
if (ul_size > GMAC_TX_UNITSIZE) {
/* Pointers to the current transmit descriptor */
p_tx_td = &p_gmac_dev->p_tx_dscr[p_gmac_dev->l_tx_head];
/* If no free TxTd, buffer can't be sent, schedule the wakeup callback */
// if (CIRC_SPACE(p_gmac_dev->l_tx_head, p_gmac_dev->l_tx_tail,
// p_gmac_dev->ul_tx_list_size) == 0)
/* USED bit clear means the GMAC still owns this descriptor: ring full. */
if ((p_tx_td->status.val & GMAC_TXD_USED) == 0)
#if( GMAC_USES_TX_CALLBACK != 0 )
/* Pointers to the current Tx callback */
p_func_tx_cb = &p_gmac_dev->func_tx_cb_list[p_gmac_dev->l_tx_head];
/* Set up/copy data to transmission buffer */
if (p_buffer && ul_size) {
/* Driver manages the ring buffer */
/* Calculating the checksum here is faster than calculating it from the GMAC buffer
 * because within p_buffer, it is well aligned */
#if( ipconfigZERO_COPY_TX_DRIVER != 0 )
/* Zero-copy: point the descriptor straight at the caller's buffer. */
p_tx_td->addr = ( uint32_t ) p_buffer;
/* Copy mode: descriptor address was fixed by gmac_reset_tx_mem(). */
memcpy((void *)p_tx_td->addr, p_buffer, ul_size);
#endif /* ipconfigZERO_COPY_TX_DRIVER */
vGMACGenerateChecksum( ( uint8_t * ) p_tx_td->addr );
#if( GMAC_USES_TX_CALLBACK != 0 )
*p_func_tx_cb = func_tx_cb;
/* Update transmit descriptor status */
/* The buffer size defined is the length of ethernet frame,
so it's always the last buffer of the frame. */
if( p_gmac_dev->l_tx_head == ( int32_t )( p_gmac_dev->ul_tx_list_size - 1 ) )
/* No need to 'and' with GMAC_TXD_LEN_MASK because ul_size has been checked */
p_tx_td->status.val =
ul_size | GMAC_TXD_LAST | GMAC_TXD_WRAP;
p_tx_td->status.val =
ul_size | GMAC_TXD_LAST;
circ_inc32( &p_gmac_dev->l_tx_head, p_gmac_dev->ul_tx_list_size );
/* Now start to transmit if it is still not done */
gmac_start_transmission(p_hw);
 * \brief Get current load of transmit.
 * \param p_gmac_dev Pointer to the GMAC device instance.
 * \return Current load of transmit.
#if( GMAC_USES_TX_CALLBACK != 0 )
/* Without defining GMAC_USES_TX_CALLBACK, l_tx_tail won't be updated */
uint32_t gmac_dev_get_tx_load(gmac_device_t* p_gmac_dev)
/* NOTE(review): l_tx_head/l_tx_tail are int32_t narrowed to uint16_t here;
 * harmless as long as ul_tx_list_size stays below 65536 — confirm. */
uint16_t us_head = p_gmac_dev->l_tx_head;
uint16_t us_tail = p_gmac_dev->l_tx_tail;
return CIRC_CNT(us_head, us_tail, p_gmac_dev->ul_tx_list_size);
 * \brief Register/Clear RX callback. Callback will be invoked after the next received
 * When gmac_dev_read() returns GMAC_RX_NULL, the application task calls
 * gmac_dev_set_rx_callback() to register func_rx_cb() callback and enters suspend state.
 * The callback is in charge to resume the task once a new frame has been
 * received. The next time gmac_dev_read() is called, it will be successful.
 * This function is usually invoked from the RX callback itself with NULL
 * callback, to unregister. Once the callback has resumed the application task,
 * there is no need to invoke the callback again.
 * \param p_gmac_dev Pointer to the GMAC device instance.
 * \param func_tx_cb Receive callback function.
void gmac_dev_set_rx_callback(gmac_device_t* p_gmac_dev,
gmac_dev_rx_cb_t func_rx_cb)
Gmac *p_hw = p_gmac_dev->p_hw;
if (func_rx_cb == NULL) {
/* Unregister: mask RCOMP so the ISR no longer fires for this purpose. */
gmac_disable_interrupt(p_hw, GMAC_IDR_RCOMP);
p_gmac_dev->func_rx_cb = NULL;
/* Register: store the callback before unmasking the RCOMP interrupt. */
p_gmac_dev->func_rx_cb = func_rx_cb;
gmac_enable_interrupt(p_hw, GMAC_IER_RCOMP);
 * \brief Register/Clear TX wakeup callback.
 * When gmac_dev_write() returns GMAC_TX_BUSY (all transmit descriptor busy), the application
 * task calls gmac_dev_set_tx_wakeup_callback() to register func_wakeup() callback and
 * enters suspend state. The callback is in charge to resume the task once
 * several transmit descriptors have been released. The next time gmac_dev_write() will be called,
 * it shall be successful.
 * This function is usually invoked with NULL callback from the TX wakeup
 * callback itself, to unregister. Once the callback has resumed the
 * application task, there is no need to invoke the callback again.
 * \param p_gmac_dev Pointer to GMAC device instance.
 * \param func_wakeup Pointer to wakeup callback function.
 * \param uc_threshold Number of free transmit descriptor before wakeup callback invoked.
 * \return GMAC_OK, GMAC_PARAM on parameter error.
#if( GMAC_USES_WAKEUP_CALLBACK )
uint8_t gmac_dev_set_tx_wakeup_callback(gmac_device_t* p_gmac_dev,
gmac_dev_wakeup_cb_t func_wakeup_cb, uint8_t uc_threshold)
if (func_wakeup_cb == NULL) {
/* NULL unregisters the wakeup callback. */
p_gmac_dev->func_wakeup_cb = NULL;
/* Threshold may not exceed the ring size, otherwise it could never fire. */
if (uc_threshold <= p_gmac_dev->ul_tx_list_size) {
p_gmac_dev->func_wakeup_cb = func_wakeup_cb;
p_gmac_dev->uc_wakeup_threshold = uc_threshold;
#endif /* GMAC_USES_WAKEUP_CALLBACK */
 * \brief Reset TX & RX queue & statistics.
 * \param p_gmac_dev Pointer to GMAC device instance.
void gmac_dev_reset(gmac_device_t* p_gmac_dev)
Gmac *p_hw = p_gmac_dev->p_hw;
/* Rebuild both descriptor rings, then re-enable TX/RX and clear stats. */
gmac_reset_rx_mem(p_gmac_dev);
gmac_reset_tx_mem(p_gmac_dev);
gmac_network_control(p_hw, GMAC_NCR_TXEN | GMAC_NCR_RXEN
| GMAC_NCR_WESTAT | GMAC_NCR_CLRSTAT);
void gmac_dev_halt(Gmac* p_gmac);
/* Stop the controller: leave only statistics control bits set (TXEN/RXEN
 * cleared) and mask every GMAC interrupt. */
void gmac_dev_halt(Gmac* p_gmac)
gmac_network_control(p_gmac, GMAC_NCR_WESTAT | GMAC_NCR_CLRSTAT);
gmac_disable_interrupt(p_gmac, ~0u);
804 * \brief GMAC Interrupt handler.
806 * \param p_gmac_dev Pointer to GMAC device instance.
#if( GMAC_STATS != 0 )
extern int logPrintf( const char *pcFormat, ... );
/* Debug aid: print one line per interrupt source that has fired at least
 * once, using the name/index pairs in intPairs and counters accumulated by
 * the interrupt handler in gmacStats.intStatus[]. */
void gmac_show_irq_counts ()
for (index = 0; index < ARRAY_SIZE(intPairs); index++) {
if (gmacStats.intStatus[intPairs[index].index]) {
logPrintf("%s : %6u\n", intPairs[index].name, gmacStats.intStatus[intPairs[index].index]);
823 void gmac_handler(gmac_device_t* p_gmac_dev)
825 Gmac *p_hw = p_gmac_dev->p_hw;
827 #if( GMAC_USES_TX_CALLBACK != 0 )
828 gmac_tx_descriptor_t *p_tx_td;
829 gmac_dev_tx_cb_t *p_tx_cb = NULL;
830 uint32_t ul_tx_status_flag;
832 #if( GMAC_STATS != 0 )
836 /* volatile */ uint32_t ul_isr;
837 /* volatile */ uint32_t ul_rsr;
838 /* volatile */ uint32_t ul_tsr;
840 ul_isr = gmac_get_interrupt_status(p_hw);
841 ul_rsr = gmac_get_rx_status(p_hw);
842 ul_tsr = gmac_get_tx_status(p_hw);
844 /* Why clear bits that are ignored anyway ? */
845 /* ul_isr &= ~(gmac_get_interrupt_mask(p_hw) | 0xF8030300); */
846 #if( GMAC_STATS != 0 )
848 for (index = 0; index < ARRAY_SIZE(intPairs); index++) {
849 if (ul_isr & intPairs[index].mask)
850 gmacStats.intStatus[intPairs[index].index]++;
853 #endif /* GMAC_STATS != 0 */
856 if ((ul_isr & GMAC_ISR_RCOMP) || (ul_rsr & (GMAC_RSR_REC|GMAC_RSR_RXOVR|GMAC_RSR_BNA))) {
858 gmac_clear_rx_status(p_hw, ul_rsr);
860 if (ul_isr & GMAC_ISR_RCOMP)
861 ul_rsr |= GMAC_RSR_REC;
862 /* Invoke callbacks which can be useful to wake op a task */
863 if (p_gmac_dev->func_rx_cb) {
864 p_gmac_dev->func_rx_cb(ul_rsr);
869 if ((ul_isr & GMAC_ISR_TCOMP) || (ul_tsr & (GMAC_TSR_TXCOMP|GMAC_TSR_COL|GMAC_TSR_RLE|GMAC_TSR_UND))) {
871 #if( GMAC_USES_TX_CALLBACK != 0 )
872 ul_tx_status_flag = GMAC_TSR_TXCOMP;
874 /* A frame transmitted */
877 if (ul_tsr & GMAC_TSR_RLE) {
878 /* Status RLE & Number of discarded buffers */
879 #if( GMAC_USES_TX_CALLBACK != 0 )
880 ul_tx_status_flag = GMAC_TSR_RLE | CIRC_CNT(p_gmac_dev->l_tx_head,
881 p_gmac_dev->l_tx_tail, p_gmac_dev->ul_tx_list_size);
882 p_tx_cb = &p_gmac_dev->func_tx_cb_list[p_gmac_dev->l_tx_tail];
884 gmac_reset_tx_mem(p_gmac_dev);
885 gmac_enable_transmit(p_hw, 1);
888 gmac_clear_tx_status(p_hw, ul_tsr);
890 #if( GMAC_USES_TX_CALLBACK != 0 )
891 if (!CIRC_EMPTY(p_gmac_dev->l_tx_head, p_gmac_dev->l_tx_tail)) {
892 /* Check the buffers */
894 p_tx_td = &p_gmac_dev->p_tx_dscr[p_gmac_dev->l_tx_tail];
895 p_tx_cb = &p_gmac_dev->func_tx_cb_list[p_gmac_dev->l_tx_tail];
896 /* Any error? Exit if buffer has not been sent yet */
897 if ((p_tx_td->status.val & GMAC_TXD_USED) == 0) {
901 /* Notify upper layer that a packet has been sent */
903 (*p_tx_cb) (ul_tx_status_flag, (void*)p_tx_td->addr);
904 #if( ipconfigZERO_COPY_TX_DRIVER != 0 )
908 #endif /* ipconfigZERO_COPY_TX_DRIVER */
911 circ_inc32(&p_gmac_dev->l_tx_tail, p_gmac_dev->ul_tx_list_size);
912 } while (CIRC_CNT(p_gmac_dev->l_tx_head, p_gmac_dev->l_tx_tail,
913 p_gmac_dev->ul_tx_list_size));
916 if (ul_tsr & GMAC_TSR_RLE) {
917 /* Notify upper layer RLE */
919 (*p_tx_cb) (ul_tx_status_flag, NULL);
922 #endif /* GMAC_USES_TX_CALLBACK */
924 #if( GMAC_USES_WAKEUP_CALLBACK )
925 /* If a wakeup has been scheduled, notify upper layer that it can
926 send other packets, and the sending will be successful. */
927 if ((CIRC_SPACE(p_gmac_dev->l_tx_head, p_gmac_dev->l_tx_tail,
928 p_gmac_dev->ul_tx_list_size) >= p_gmac_dev->uc_wakeup_threshold)
929 && p_gmac_dev->func_wakeup_cb) {
930 p_gmac_dev->func_wakeup_cb();