4 * \brief GMAC (Ethernet MAC) driver for SAM.
\r
6 * Copyright (c) 2013 Atmel Corporation. All rights reserved.
\r
12 * Redistribution and use in source and binary forms, with or without
\r
13 * modification, are permitted provided that the following conditions are met:
\r
15 * 1. Redistributions of source code must retain the above copyright notice,
\r
16 * this list of conditions and the following disclaimer.
\r
18 * 2. Redistributions in binary form must reproduce the above copyright notice,
\r
19 * this list of conditions and the following disclaimer in the documentation
\r
20 * and/or other materials provided with the distribution.
\r
22 * 3. The name of Atmel may not be used to endorse or promote products derived
\r
23 * from this software without specific prior written permission.
\r
25 * 4. This software may only be redistributed and used in connection with an
\r
26 * Atmel microcontroller product.
\r
28 * THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR IMPLIED
\r
29 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
\r
30 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
\r
31 * EXPRESSLY AND SPECIFICALLY DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR
\r
32 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
\r
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
\r
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
\r
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
\r
36 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
\r
37 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
\r
38 * POSSIBILITY OF SUCH DAMAGE.
\r
44 /* Standard includes. */
\r
50 /* FreeRTOS includes. */
\r
51 #include "FreeRTOS.h"
\r
54 #include "FreeRTOSIPConfig.h"
\r
56 #include "compiler.h"
\r
57 #include "instance/gmac.h"
\r
58 #include "ethernet_phy.h"
\r
69 #define ARRAY_SIZE(x) (int)( sizeof(x) / sizeof(x)[0] )
\r
72 * \defgroup gmac_group Ethernet Media Access Controller
\r
74 * See \ref gmac_quickstart.
\r
76 * Driver for the GMAC (Ethernet Media Access Controller).
\r
77 * This file contains basic functions for the GMAC, with support for all modes, settings
\r
80 * \section dependencies Dependencies
\r
81 * This driver does not depend on other modules.
\r
86 /** TX descriptor lists */
\r
88 static gmac_tx_descriptor_t gs_tx_desc[ GMAC_TX_BUFFERS ];
\r
89 #if( GMAC_USES_TX_CALLBACK != 0 )
\r
90 /** TX callback lists */
\r
91 static gmac_dev_tx_cb_t gs_tx_callback[ GMAC_TX_BUFFERS ];
\r
93 /** RX descriptors lists */
\r
95 static gmac_rx_descriptor_t gs_rx_desc[ GMAC_RX_BUFFERS ];
\r
97 #if( ipconfigZERO_COPY_TX_DRIVER == 0 )
\r
98 /** Send Buffer. Section 3.6 of AMBA 2.0 spec states that burst should not cross the
\r
99 * 1K Boundaries. Receive buffer manager write operations are burst of 2 words => 3 lsb bits
\r
100 * of the address shall be set to 0.
\r
102 COMPILER_ALIGNED(8)
\r
103 static uint8_t gs_uc_tx_buffer[ GMAC_TX_BUFFERS * GMAC_TX_UNITSIZE ];
\r
104 #endif /* ipconfigZERO_COPY_TX_DRIVER */
\r
106 /** Receive Buffer */
\r
107 COMPILER_ALIGNED(8)
\r
108 static uint8_t gs_uc_rx_buffer[ GMAC_RX_BUFFERS * GMAC_RX_UNITSIZE ];
\r
111 * GMAC device memory management struct.
\r
113 typedef struct gmac_dev_mem {
\r
114 /* Pointer to allocated buffer for RX. The address should be 8-byte aligned
\r
115 and the size should be GMAC_RX_UNITSIZE * wRxSize. */
\r
116 uint8_t *p_rx_buffer;
\r
117 /* Pointer to allocated RX descriptor list. */
\r
118 gmac_rx_descriptor_t *p_rx_dscr;
\r
119 /* RX size, in number of registered units (RX descriptors). */
\r
120 /* Increased size from 16- to 32-bits, because it's more efficient */
\r
121 uint32_t us_rx_size;
\r
122 /* Pointer to allocated buffer for TX. The address should be 8-byte aligned
\r
123 and the size should be GMAC_TX_UNITSIZE * wTxSize. */
\r
124 uint8_t *p_tx_buffer;
\r
125 /* Pointer to allocated TX descriptor list. */
\r
126 gmac_tx_descriptor_t *p_tx_dscr;
\r
127 /* TX size, in number of registered units (TX descriptors). */
\r
128 uint32_t us_tx_size;
\r
131 /** Return count in buffer */
\r
132 #define CIRC_CNT( head, tail, size ) ( ( ( head ) - ( tail ) ) % ( size ) )
\r
135 * Return space available, from 0 to size-1.
\r
136 * Always leave one free char as a completely full buffer that has (head == tail),
\r
137 * which is the same as empty.
\r
139 #define CIRC_SPACE( head, tail, size ) CIRC_CNT( ( tail ), ( ( head ) + 1 ), ( size ) )
\r
141 /** Circular buffer is empty ? */
\r
142 #define CIRC_EMPTY( head, tail ) ( head == tail )
\r
143 /** Clear circular buffer */
\r
144 #define CIRC_CLEAR( head, tail ) do { ( head ) = 0; ( tail ) = 0; } while( 0 )
\r
146 /** Increment head or tail */
\r
/**
 * \brief Increment a circular-buffer head or tail index by one,
 * wrapping back to zero when it reaches ulSize.
 *
 * \param lHeadOrTail Pointer to the index to advance.
 * \param ulSize Number of elements in the circular buffer.
 */
static __inline void circ_inc32( int32_t *lHeadOrTail, uint32_t ulSize )
{
	( *lHeadOrTail )++;
	if( ( *lHeadOrTail ) >= ( int32_t )ulSize )
	{
		( *lHeadOrTail ) = 0;
	}
}
157 * \brief Wait PHY operation to be completed.
\r
159 * \param p_gmac HW controller address.
\r
160 * \param ul_retry The retry times, 0 to wait forever until completeness.
\r
162 * Return GMAC_OK if the operation is completed successfully.
\r
164 static uint8_t gmac_wait_phy(Gmac* p_gmac, const uint32_t ul_retry)
\r
166 volatile uint32_t ul_retry_count = 0;
\r
167 const uint32_t xPHYPollDelay = pdMS_TO_TICKS( 1ul );
\r
169 while (!gmac_is_phy_idle(p_gmac)) {
\r
170 if (ul_retry == 0) {
\r
176 if (ul_retry_count >= ul_retry) {
\r
177 return GMAC_TIMEOUT;
\r
180 /* Block the task to allow other tasks to execute while the PHY
\r
181 is not connected. */
\r
182 vTaskDelay( xPHYPollDelay );
\r
188 * \brief Disable transfer, reset registers and descriptor lists.
\r
190 * \param p_dev Pointer to GMAC driver instance.
\r
193 static void gmac_reset_tx_mem(gmac_device_t* p_dev)
\r
195 Gmac *p_hw = p_dev->p_hw;
\r
196 uint8_t *p_tx_buff = p_dev->p_tx_buffer;
\r
197 gmac_tx_descriptor_t *p_td = p_dev->p_tx_dscr;
\r
200 uint32_t ul_address;
\r
203 gmac_enable_transmit(p_hw, 0);
\r
205 /* Set up the TX descriptors */
\r
206 CIRC_CLEAR(p_dev->l_tx_head, p_dev->l_tx_tail);
\r
207 for( ul_index = 0; ul_index < p_dev->ul_tx_list_size; ul_index++ )
\r
209 #if( ipconfigZERO_COPY_TX_DRIVER != 0 )
\r
211 ul_address = (uint32_t) 0u;
\r
215 ul_address = (uint32_t) (&(p_tx_buff[ul_index * GMAC_TX_UNITSIZE]));
\r
217 #endif /* ipconfigZERO_COPY_TX_DRIVER */
\r
218 p_td[ul_index].addr = ul_address;
\r
219 p_td[ul_index].status.val = GMAC_TXD_USED;
\r
221 p_td[p_dev->ul_tx_list_size - 1].status.val =
\r
222 GMAC_TXD_USED | GMAC_TXD_WRAP;
\r
224 /* Set transmit buffer queue */
\r
225 gmac_set_tx_queue(p_hw, (uint32_t) p_td);
\r
229 * \brief Disable receiver, reset registers and descriptor list.
\r
231 * \param p_drv Pointer to GMAC Driver instance.
\r
233 static void gmac_reset_rx_mem(gmac_device_t* p_dev)
\r
235 Gmac *p_hw = p_dev->p_hw;
\r
236 uint8_t *p_rx_buff = p_dev->p_rx_buffer;
\r
237 gmac_rx_descriptor_t *pRd = p_dev->p_rx_dscr;
\r
240 uint32_t ul_address;
\r
243 gmac_enable_receive(p_hw, 0);
\r
245 /* Set up the RX descriptors */
\r
246 p_dev->ul_rx_idx = 0;
\r
247 for( ul_index = 0; ul_index < p_dev->ul_rx_list_size; ul_index++ )
\r
249 ul_address = (uint32_t) (&(p_rx_buff[ul_index * GMAC_RX_UNITSIZE]));
\r
250 pRd[ul_index].addr.val = ul_address & GMAC_RXD_ADDR_MASK;
\r
251 pRd[ul_index].status.val = 0;
\r
253 pRd[p_dev->ul_rx_list_size - 1].addr.val |= GMAC_RXD_WRAP;
\r
255 /* Set receive buffer queue */
\r
256 gmac_set_rx_queue(p_hw, (uint32_t) pRd);
\r
261 * \brief Initialize the allocated buffer lists for GMAC driver to transfer data.
\r
262 * Must be invoked after gmac_dev_init() but before RX/TX starts.
\r
264 * \note If input address is not 8-byte aligned, the address is automatically
\r
265 * adjusted and the list size is reduced by one.
\r
267 * \param p_gmac Pointer to GMAC instance.
\r
268 * \param p_gmac_dev Pointer to GMAC device instance.
\r
269 * \param p_dev_mm Pointer to the GMAC memory management control block.
\r
270 * \param p_tx_cb Pointer to allocated TX callback list.
\r
272 * \return GMAC_OK or GMAC_PARAM.
\r
274 static uint8_t gmac_init_mem(Gmac* p_gmac, gmac_device_t* p_gmac_dev,
\r
275 gmac_dev_mem_t* p_dev_mm
\r
276 #if( GMAC_USES_TX_CALLBACK != 0 )
\r
277 , gmac_dev_tx_cb_t* p_tx_cb
\r
281 if (p_dev_mm->us_rx_size <= 1 || p_dev_mm->us_tx_size <= 1
\r
282 #if( GMAC_USES_TX_CALLBACK != 0 )
\r
289 /* Assign RX buffers */
\r
290 if (((uint32_t) p_dev_mm->p_rx_buffer & 0x7)
\r
291 || ((uint32_t) p_dev_mm->p_rx_dscr & 0x7)) {
\r
292 p_dev_mm->us_rx_size--;
\r
294 p_gmac_dev->p_rx_buffer =
\r
295 (uint8_t *) ((uint32_t) p_dev_mm->p_rx_buffer & 0xFFFFFFF8);
\r
296 p_gmac_dev->p_rx_dscr =
\r
297 (gmac_rx_descriptor_t *) ((uint32_t) p_dev_mm->p_rx_dscr
\r
299 p_gmac_dev->ul_rx_list_size = p_dev_mm->us_rx_size;
\r
301 /* Assign TX buffers */
\r
302 if (((uint32_t) p_dev_mm->p_tx_buffer & 0x7)
\r
303 || ((uint32_t) p_dev_mm->p_tx_dscr & 0x7)) {
\r
304 p_dev_mm->us_tx_size--;
\r
306 p_gmac_dev->p_tx_buffer =
\r
307 (uint8_t *) ((uint32_t) p_dev_mm->p_tx_buffer & 0xFFFFFFF8);
\r
308 p_gmac_dev->p_tx_dscr =
\r
309 (gmac_tx_descriptor_t *) ((uint32_t) p_dev_mm->p_tx_dscr
\r
311 p_gmac_dev->ul_tx_list_size = p_dev_mm->us_tx_size;
\r
312 #if( GMAC_USES_TX_CALLBACK != 0 )
\r
313 p_gmac_dev->func_tx_cb_list = p_tx_cb;
\r
315 /* Reset TX & RX */
\r
316 gmac_reset_rx_mem(p_gmac_dev);
\r
317 gmac_reset_tx_mem(p_gmac_dev);
\r
319 /* Enable Rx and Tx, plus the statistics register */
\r
320 gmac_enable_transmit(p_gmac, true);
\r
321 gmac_enable_receive(p_gmac, true);
\r
322 gmac_enable_statistics_write(p_gmac, true);
\r
324 /* Set up the interrupts for transmission and errors */
\r
325 gmac_enable_interrupt(p_gmac,
\r
326 GMAC_IER_RXUBR | /* Enable receive used bit read interrupt. */
\r
327 GMAC_IER_TUR | /* Enable transmit underrun interrupt. */
\r
328 GMAC_IER_RLEX | /* Enable retry limit exceeded interrupt. */
\r
329 GMAC_IER_TFC | /* Enable transmit buffers exhausted in mid-frame interrupt. */
\r
330 GMAC_IER_TCOMP | /* Enable transmit complete interrupt. */
\r
331 GMAC_IER_ROVR | /* Enable receive overrun interrupt. */
\r
332 GMAC_IER_HRESP | /* Enable Hresp not OK interrupt. */
\r
333 GMAC_IER_PFNZ | /* Enable pause frame received interrupt. */
\r
334 GMAC_IER_PTZ); /* Enable pause time zero interrupt. */
\r
340 * \brief Read the PHY register.
\r
342 * \param p_gmac Pointer to the GMAC instance.
\r
343 * \param uc_phy_address PHY address.
\r
344 * \param uc_address Register address.
\r
345 * \param p_value Pointer to a 32-bit location to store read data.
\r
347 * \Return GMAC_OK if successfully, GMAC_TIMEOUT if timeout.
\r
349 uint8_t gmac_phy_read(Gmac* p_gmac, uint8_t uc_phy_address, uint8_t uc_address,
\r
352 gmac_maintain_phy(p_gmac, uc_phy_address, uc_address, 1, 0);
\r
354 if (gmac_wait_phy(p_gmac, MAC_PHY_RETRY_MAX) == GMAC_TIMEOUT) {
\r
355 return GMAC_TIMEOUT;
\r
357 *p_value = gmac_get_phy_data(p_gmac);
\r
362 * \brief Write the PHY register.
\r
364 * \param p_gmac Pointer to the GMAC instance.
\r
365 * \param uc_phy_address PHY Address.
\r
366 * \param uc_address Register Address.
\r
367 * \param ul_value Data to write, actually 16-bit data.
\r
369 * \Return GMAC_OK if successfully, GMAC_TIMEOUT if timeout.
\r
371 uint8_t gmac_phy_write(Gmac* p_gmac, uint8_t uc_phy_address,
\r
372 uint8_t uc_address, uint32_t ul_value)
\r
374 gmac_maintain_phy(p_gmac, uc_phy_address, uc_address, 0, ul_value);
\r
376 if (gmac_wait_phy(p_gmac, MAC_PHY_RETRY_MAX) == GMAC_TIMEOUT) {
\r
377 return GMAC_TIMEOUT;
\r
383 * \brief Initialize the GMAC driver.
\r
385 * \param p_gmac Pointer to the GMAC instance.
\r
386 * \param p_gmac_dev Pointer to the GMAC device instance.
\r
387 * \param p_opt GMAC configure options.
\r
389 void gmac_dev_init(Gmac* p_gmac, gmac_device_t* p_gmac_dev,
\r
390 gmac_options_t* p_opt)
\r
392 gmac_dev_mem_t gmac_dev_mm;
\r
394 /* Disable TX & RX and more */
\r
395 gmac_network_control(p_gmac, 0);
\r
396 gmac_disable_interrupt(p_gmac, ~0u);
\r
399 gmac_clear_statistics(p_gmac);
\r
401 /* Clear all status bits in the receive status register. */
\r
402 gmac_clear_rx_status(p_gmac, GMAC_RSR_RXOVR | GMAC_RSR_REC | GMAC_RSR_BNA);
\r
404 /* Clear all status bits in the transmit status register */
\r
405 gmac_clear_tx_status(p_gmac, GMAC_TSR_UBR | GMAC_TSR_COL | GMAC_TSR_RLE
\r
406 | GMAC_TSR_TFC | GMAC_TSR_TXCOMP | GMAC_TSR_UND);
\r
408 /* Clear interrupts */
\r
409 gmac_get_interrupt_status(p_gmac);
\r
410 #if !defined(ETHERNET_CONF_DATA_OFFSET)
\r
411 /* Receive Buffer Offset
\r
412 * Indicates the number of bytes by which the received data
\r
413 * is offset from the start of the receive buffer
\r
414 * which can be handy for alignment reasons */
\r
415 /* Note: FreeRTOS+TCP wants to have this offset set to 2 bytes */
\r
416 #error ETHERNET_CONF_DATA_OFFSET not defined, assuming 0
\r
418 /* Enable the copy of data into the buffers
\r
419 ignore broadcasts, and not copy FCS. */
\r
421 gmac_set_configure(p_gmac,
\r
422 ( gmac_get_configure(p_gmac) & ~GMAC_NCFGR_RXBUFO_Msk ) |
\r
423 GMAC_NCFGR_RFCS | /* Remove FCS, frame check sequence (last 4 bytes) */
\r
424 GMAC_NCFGR_PEN | /* Pause Enable */
\r
425 GMAC_NCFGR_RXBUFO( ETHERNET_CONF_DATA_OFFSET ) |
\r
429 * GMAC_DCFGR_TXCOEN: (GMAC_DCFGR) Transmitter Checksum Generation Offload Enable.
\r
430 * Note: tha SAM4E does have RX checksum offloading
\r
431 * but TX checksum offloading has NOT been implemented.
\r
432 * http://community.atmel.com/forum/sam4e-gmac-transmit-checksum-offload-enablesolved
\r
435 gmac_set_dma(p_gmac,
\r
436 gmac_get_dma(p_gmac) | GMAC_DCFGR_TXCOEN );
\r
438 gmac_enable_copy_all(p_gmac, p_opt->uc_copy_all_frame);
\r
439 gmac_disable_broadcast(p_gmac, p_opt->uc_no_boardcast);
\r
441 /* Fill in GMAC device memory management */
\r
442 gmac_dev_mm.p_rx_buffer = gs_uc_rx_buffer;
\r
443 gmac_dev_mm.p_rx_dscr = gs_rx_desc;
\r
444 gmac_dev_mm.us_rx_size = GMAC_RX_BUFFERS;
\r
446 #if( ipconfigZERO_COPY_TX_DRIVER != 0 )
\r
448 gmac_dev_mm.p_tx_buffer = NULL;
\r
452 gmac_dev_mm.p_tx_buffer = gs_uc_tx_buffer;
\r
455 gmac_dev_mm.p_tx_dscr = gs_tx_desc;
\r
456 gmac_dev_mm.us_tx_size = GMAC_TX_BUFFERS;
\r
458 gmac_init_mem(p_gmac, p_gmac_dev, &gmac_dev_mm
\r
459 #if( GMAC_USES_TX_CALLBACK != 0 )
\r
464 gmac_set_address(p_gmac, 0, p_opt->uc_mac_addr);
\r
468 * \brief Frames can be read from the GMAC in multiple sections.
\r
470 * Returns > 0 if a complete frame is available
\r
471 * It also it cleans up incomplete older frames
\r
474 static uint32_t gmac_dev_poll(gmac_device_t* p_gmac_dev)
\r
476 uint32_t ulReturn = 0;
\r
477 int32_t ulIndex = p_gmac_dev->ul_rx_idx;
\r
478 gmac_rx_descriptor_t *pxHead = &p_gmac_dev->p_rx_dscr[ulIndex];
\r
480 /* Discard any incomplete frames */
\r
481 while ((pxHead->addr.val & GMAC_RXD_OWNERSHIP) &&
\r
482 (pxHead->status.val & GMAC_RXD_SOF) == 0) {
\r
483 pxHead->addr.val &= ~(GMAC_RXD_OWNERSHIP);
\r
484 circ_inc32 (&ulIndex, p_gmac_dev->ul_rx_list_size);
\r
485 pxHead = &p_gmac_dev->p_rx_dscr[ulIndex];
\r
486 p_gmac_dev->ul_rx_idx = ulIndex;
\r
487 #if( GMAC_STATS != 0 )
\r
489 gmacStats.incompCount++;
\r
494 while ((pxHead->addr.val & GMAC_RXD_OWNERSHIP) != 0) {
\r
495 if ((pxHead->status.val & GMAC_RXD_EOF) != 0) {
\r
496 /* Here a complete frame has been seen with SOF and EOF */
\r
497 ulReturn = pxHead->status.bm.len;
\r
500 circ_inc32 (&ulIndex, p_gmac_dev->ul_rx_list_size);
\r
501 pxHead = &p_gmac_dev->p_rx_dscr[ulIndex];
\r
502 if ((pxHead->addr.val & GMAC_RXD_OWNERSHIP) == 0) {
\r
503 /* CPU is not the owner (yet) */
\r
506 if ((pxHead->status.val & GMAC_RXD_SOF) != 0) {
\r
507 /* Strange, we found a new Start Of Frame
\r
508 * discard previous segments */
\r
509 int32_t ulPrev = p_gmac_dev->ul_rx_idx;
\r
510 pxHead = &p_gmac_dev->p_rx_dscr[ulPrev];
\r
512 pxHead->addr.val &= ~(GMAC_RXD_OWNERSHIP);
\r
513 circ_inc32 (&ulPrev, p_gmac_dev->ul_rx_list_size);
\r
514 pxHead = &p_gmac_dev->p_rx_dscr[ulPrev];
\r
515 #if( GMAC_STATS != 0 )
\r
517 gmacStats.truncCount++;
\r
520 } while (ulPrev != ulIndex);
\r
521 p_gmac_dev->ul_rx_idx = ulIndex;
\r
528 * \brief Frames can be read from the GMAC in multiple sections.
\r
529 * Read ul_frame_size bytes from the GMAC receive buffers to pcTo.
\r
530 * p_rcv_size is the size of the entire frame. Generally gmac_read
\r
531 * will be repeatedly called until the sum of all the ul_frame_size equals
\r
532 * the value of p_rcv_size.
\r
534 * \param p_gmac_dev Pointer to the GMAC device instance.
\r
535 * \param p_frame Address of the frame buffer.
\r
536 * \param ul_frame_size Length of the frame.
\r
537 * \param p_rcv_size Received frame size.
\r
539 * \return GMAC_OK if receiving frame successfully, otherwise failed.
\r
541 uint32_t gmac_dev_read(gmac_device_t* p_gmac_dev, uint8_t* p_frame,
\r
542 uint32_t ul_frame_size, uint32_t* p_rcv_size)
\r
544 int32_t nextIdx; /* A copy of the Rx-index 'ul_rx_idx' */
\r
545 int32_t bytesLeft = gmac_dev_poll (p_gmac_dev);
\r
546 gmac_rx_descriptor_t *pxHead;
\r
548 if (bytesLeft == 0 )
\r
550 return GMAC_RX_NULL;
\r
553 /* gmac_dev_poll has confirmed that there is a complete frame at
\r
554 * the current position 'ul_rx_idx'
\r
556 nextIdx = p_gmac_dev->ul_rx_idx;
\r
558 /* Read +2 bytes because buffers are aligned at -2 bytes */
\r
559 bytesLeft = min( bytesLeft + 2, ( int32_t )ul_frame_size );
\r
561 /* The frame will be copied in 1 or 2 memcpy's */
\r
562 if( ( p_frame != NULL ) && ( bytesLeft != 0 ) )
\r
564 const uint8_t *source;
\r
568 source = p_gmac_dev->p_rx_buffer + nextIdx * GMAC_RX_UNITSIZE;
\r
570 toCopy = ( p_gmac_dev->ul_rx_list_size - nextIdx ) * GMAC_RX_UNITSIZE;
\r
575 memcpy (p_frame, source, toCopy);
\r
580 memcpy (p_frame + toCopy, (void*)p_gmac_dev->p_rx_buffer, left);
\r
586 pxHead = &p_gmac_dev->p_rx_dscr[nextIdx];
\r
587 pxHead->addr.val &= ~(GMAC_RXD_OWNERSHIP);
\r
588 circ_inc32 (&nextIdx, p_gmac_dev->ul_rx_list_size);
\r
589 } while ((pxHead->status.val & GMAC_RXD_EOF) == 0);
\r
591 p_gmac_dev->ul_rx_idx = nextIdx;
\r
593 *p_rcv_size = bytesLeft;
\r
599 extern void vGMACGenerateChecksum( uint8_t *apBuffer );
\r
602 * \brief Send ulLength bytes from pcFrom. This copies the buffer to one of the
\r
603 * GMAC Tx buffers, and then indicates to the GMAC that the buffer is ready.
\r
604 * If lEndOfFrame is true then the data being copied is the end of the frame
\r
605 * and the frame can be transmitted.
\r
607 * \param p_gmac_dev Pointer to the GMAC device instance.
\r
608 * \param p_buffer Pointer to the data buffer.
\r
609 * \param ul_size Length of the frame.
\r
610 * \param func_tx_cb Transmit callback function.
\r
612 * \return Length sent.
\r
614 uint32_t gmac_dev_write(gmac_device_t* p_gmac_dev, void *p_buffer,
\r
615 uint32_t ul_size, gmac_dev_tx_cb_t func_tx_cb)
\r
618 volatile gmac_tx_descriptor_t *p_tx_td;
\r
619 #if( GMAC_USES_TX_CALLBACK != 0 )
\r
620 volatile gmac_dev_tx_cb_t *p_func_tx_cb;
\r
623 Gmac *p_hw = p_gmac_dev->p_hw;
\r
625 #if( GMAC_USES_TX_CALLBACK == 0 )
\r
626 ( void )func_tx_cb;
\r
629 /* Check parameter */
\r
630 if (ul_size > GMAC_TX_UNITSIZE) {
\r
634 /* Pointers to the current transmit descriptor */
\r
635 p_tx_td = &p_gmac_dev->p_tx_dscr[p_gmac_dev->l_tx_head];
\r
637 /* If no free TxTd, buffer can't be sent, schedule the wakeup callback */
\r
638 // if (CIRC_SPACE(p_gmac_dev->l_tx_head, p_gmac_dev->l_tx_tail,
\r
639 // p_gmac_dev->ul_tx_list_size) == 0)
\r
641 if ((p_tx_td->status.val & GMAC_TXD_USED) == 0)
\r
642 return GMAC_TX_BUSY;
\r
644 #if( GMAC_USES_TX_CALLBACK != 0 )
\r
645 /* Pointers to the current Tx callback */
\r
646 p_func_tx_cb = &p_gmac_dev->func_tx_cb_list[p_gmac_dev->l_tx_head];
\r
649 /* Set up/copy data to transmission buffer */
\r
650 if (p_buffer && ul_size) {
\r
651 /* Driver manages the ring buffer */
\r
652 /* Calculating the checksum here is faster than calculating it from the GMAC buffer
\r
653 * because withing p_buffer, it is well aligned */
\r
654 #if( ipconfigZERO_COPY_TX_DRIVER != 0 )
\r
657 p_tx_td->addr = ( uint32_t ) p_buffer;
\r
661 /* Or memcopy... */
\r
662 memcpy((void *)p_tx_td->addr, p_buffer, ul_size);
\r
664 #endif /* ipconfigZERO_COPY_TX_DRIVER */
\r
665 vGMACGenerateChecksum( ( uint8_t * ) p_tx_td->addr );
\r
668 #if( GMAC_USES_TX_CALLBACK != 0 )
\r
670 *p_func_tx_cb = func_tx_cb;
\r
673 /* Update transmit descriptor status */
\r
675 /* The buffer size defined is the length of ethernet frame,
\r
676 so it's always the last buffer of the frame. */
\r
677 if( p_gmac_dev->l_tx_head == ( int32_t )( p_gmac_dev->ul_tx_list_size - 1 ) )
\r
679 /* No need to 'and' with GMAC_TXD_LEN_MASK because ul_size has been checked */
\r
680 p_tx_td->status.val =
\r
681 ul_size | GMAC_TXD_LAST | GMAC_TXD_WRAP;
\r
683 p_tx_td->status.val =
\r
684 ul_size | GMAC_TXD_LAST;
\r
687 circ_inc32( &p_gmac_dev->l_tx_head, p_gmac_dev->ul_tx_list_size );
\r
689 /* Now start to transmit if it is still not done */
\r
690 gmac_start_transmission(p_hw);
\r
696 * \brief Get current load of transmit.
\r
698 * \param p_gmac_dev Pointer to the GMAC device instance.
\r
700 * \return Current load of transmit.
\r
702 #if( GMAC_USES_TX_CALLBACK != 0 )
\r
703 /* Without defining GMAC_USES_TX_CALLBACK, l_tx_tail won't be updated */
\r
704 uint32_t gmac_dev_get_tx_load(gmac_device_t* p_gmac_dev)
\r
706 uint16_t us_head = p_gmac_dev->l_tx_head;
\r
707 uint16_t us_tail = p_gmac_dev->l_tx_tail;
\r
708 return CIRC_CNT(us_head, us_tail, p_gmac_dev->ul_tx_list_size);
\r
713 * \brief Register/Clear RX callback. Callback will be invoked after the next received
\r
716 * When gmac_dev_read() returns GMAC_RX_NULL, the application task calls
\r
717 * gmac_dev_set_rx_callback() to register func_rx_cb() callback and enters suspend state.
\r
718 * The callback is in charge to resume the task once a new frame has been
\r
719 * received. The next time gmac_dev_read() is called, it will be successful.
\r
721 * This function is usually invoked from the RX callback itself with NULL
\r
722 * callback, to unregister. Once the callback has resumed the application task,
\r
723 * there is no need to invoke the callback again.
\r
725 * \param p_gmac_dev Pointer to the GMAC device instance.
\r
726 * \param func_tx_cb Receive callback function.
\r
728 void gmac_dev_set_rx_callback(gmac_device_t* p_gmac_dev,
\r
729 gmac_dev_rx_cb_t func_rx_cb)
\r
731 Gmac *p_hw = p_gmac_dev->p_hw;
\r
733 if (func_rx_cb == NULL) {
\r
734 gmac_disable_interrupt(p_hw, GMAC_IDR_RCOMP);
\r
735 p_gmac_dev->func_rx_cb = NULL;
\r
737 p_gmac_dev->func_rx_cb = func_rx_cb;
\r
738 gmac_enable_interrupt(p_hw, GMAC_IER_RCOMP);
\r
743 * \brief Register/Clear TX wakeup callback.
\r
745 * When gmac_dev_write() returns GMAC_TX_BUSY (all transmit descriptor busy), the application
\r
746 * task calls gmac_dev_set_tx_wakeup_callback() to register func_wakeup() callback and
\r
747 * enters suspend state. The callback is in charge to resume the task once
\r
748 * several transmit descriptors have been released. The next time gmac_dev_write() will be called,
\r
749 * it shall be successful.
\r
751 * This function is usually invoked with NULL callback from the TX wakeup
\r
752 * callback itself, to unregister. Once the callback has resumed the
\r
753 * application task, there is no need to invoke the callback again.
\r
755 * \param p_gmac_dev Pointer to GMAC device instance.
\r
756 * \param func_wakeup Pointer to wakeup callback function.
\r
757 * \param uc_threshold Number of free transmit descriptor before wakeup callback invoked.
\r
759 * \return GMAC_OK, GMAC_PARAM on parameter error.
\r
761 #if( GMAC_USES_WAKEUP_CALLBACK )
\r
762 uint8_t gmac_dev_set_tx_wakeup_callback(gmac_device_t* p_gmac_dev,
\r
763 gmac_dev_wakeup_cb_t func_wakeup_cb, uint8_t uc_threshold)
\r
765 if (func_wakeup_cb == NULL) {
\r
766 p_gmac_dev->func_wakeup_cb = NULL;
\r
768 if (uc_threshold <= p_gmac_dev->ul_tx_list_size) {
\r
769 p_gmac_dev->func_wakeup_cb = func_wakeup_cb;
\r
770 p_gmac_dev->uc_wakeup_threshold = uc_threshold;
\r
778 #endif /* GMAC_USES_WAKEUP_CALLBACK */
\r
781 * \brief Reset TX & RX queue & statistics.
\r
783 * \param p_gmac_dev Pointer to GMAC device instance.
\r
785 void gmac_dev_reset(gmac_device_t* p_gmac_dev)
\r
787 Gmac *p_hw = p_gmac_dev->p_hw;
\r
789 gmac_reset_rx_mem(p_gmac_dev);
\r
790 gmac_reset_tx_mem(p_gmac_dev);
\r
791 gmac_network_control(p_hw, GMAC_NCR_TXEN | GMAC_NCR_RXEN
\r
792 | GMAC_NCR_WESTAT | GMAC_NCR_CLRSTAT);
\r
795 void gmac_dev_halt(Gmac* p_gmac);
\r
797 void gmac_dev_halt(Gmac* p_gmac)
\r
799 gmac_network_control(p_gmac, GMAC_NCR_WESTAT | GMAC_NCR_CLRSTAT);
\r
800 gmac_disable_interrupt(p_gmac, ~0u);
\r
805 * \brief GMAC Interrupt handler.
\r
807 * \param p_gmac_dev Pointer to GMAC device instance.
\r
810 #if( GMAC_STATS != 0 )
\r
811 extern int logPrintf( const char *pcFormat, ... );
\r
813 void gmac_show_irq_counts ()
\r
816 for (index = 0; index < ARRAY_SIZE(intPairs); index++) {
\r
817 if (gmacStats.intStatus[intPairs[index].index]) {
\r
818 logPrintf("%s : %6u\n", intPairs[index].name, gmacStats.intStatus[intPairs[index].index]);
\r
824 void gmac_handler(gmac_device_t* p_gmac_dev)
\r
826 Gmac *p_hw = p_gmac_dev->p_hw;
\r
828 #if( GMAC_USES_TX_CALLBACK != 0 )
\r
829 gmac_tx_descriptor_t *p_tx_td;
\r
830 gmac_dev_tx_cb_t *p_tx_cb = NULL;
\r
831 uint32_t ul_tx_status_flag;
\r
833 #if( GMAC_STATS != 0 )
\r
837 /* volatile */ uint32_t ul_isr;
\r
838 /* volatile */ uint32_t ul_rsr;
\r
839 /* volatile */ uint32_t ul_tsr;
\r
841 ul_isr = gmac_get_interrupt_status(p_hw);
\r
842 ul_rsr = gmac_get_rx_status(p_hw);
\r
843 ul_tsr = gmac_get_tx_status(p_hw);
\r
845 /* Why clear bits that are ignored anyway ? */
\r
846 /* ul_isr &= ~(gmac_get_interrupt_mask(p_hw) | 0xF8030300); */
\r
847 #if( GMAC_STATS != 0 )
\r
849 for (index = 0; index < ARRAY_SIZE(intPairs); index++) {
\r
850 if (ul_isr & intPairs[index].mask)
\r
851 gmacStats.intStatus[intPairs[index].index]++;
\r
854 #endif /* GMAC_STATS != 0 */
\r
857 if ((ul_isr & GMAC_ISR_RCOMP) || (ul_rsr & (GMAC_RSR_REC|GMAC_RSR_RXOVR|GMAC_RSR_BNA))) {
\r
859 gmac_clear_rx_status(p_hw, ul_rsr);
\r
861 if (ul_isr & GMAC_ISR_RCOMP)
\r
862 ul_rsr |= GMAC_RSR_REC;
\r
863 /* Invoke callbacks which can be useful to wake op a task */
\r
864 if (p_gmac_dev->func_rx_cb) {
\r
865 p_gmac_dev->func_rx_cb(ul_rsr);
\r
870 if ((ul_isr & GMAC_ISR_TCOMP) || (ul_tsr & (GMAC_TSR_TXCOMP|GMAC_TSR_COL|GMAC_TSR_RLE|GMAC_TSR_UND))) {
\r
872 #if( GMAC_USES_TX_CALLBACK != 0 )
\r
873 ul_tx_status_flag = GMAC_TSR_TXCOMP;
\r
875 /* A frame transmitted */
\r
878 if (ul_tsr & GMAC_TSR_RLE) {
\r
879 /* Status RLE & Number of discarded buffers */
\r
880 #if( GMAC_USES_TX_CALLBACK != 0 )
\r
881 ul_tx_status_flag = GMAC_TSR_RLE | CIRC_CNT(p_gmac_dev->l_tx_head,
\r
882 p_gmac_dev->l_tx_tail, p_gmac_dev->ul_tx_list_size);
\r
883 p_tx_cb = &p_gmac_dev->func_tx_cb_list[p_gmac_dev->l_tx_tail];
\r
885 gmac_reset_tx_mem(p_gmac_dev);
\r
886 gmac_enable_transmit(p_hw, 1);
\r
889 gmac_clear_tx_status(p_hw, ul_tsr);
\r
891 #if( GMAC_USES_TX_CALLBACK != 0 )
\r
892 if (!CIRC_EMPTY(p_gmac_dev->l_tx_head, p_gmac_dev->l_tx_tail)) {
\r
893 /* Check the buffers */
\r
895 p_tx_td = &p_gmac_dev->p_tx_dscr[p_gmac_dev->l_tx_tail];
\r
896 p_tx_cb = &p_gmac_dev->func_tx_cb_list[p_gmac_dev->l_tx_tail];
\r
897 /* Any error? Exit if buffer has not been sent yet */
\r
898 if ((p_tx_td->status.val & GMAC_TXD_USED) == 0) {
\r
902 /* Notify upper layer that a packet has been sent */
\r
904 (*p_tx_cb) (ul_tx_status_flag, (void*)p_tx_td->addr);
\r
905 #if( ipconfigZERO_COPY_TX_DRIVER != 0 )
\r
907 p_tx_td->addr = 0ul;
\r
909 #endif /* ipconfigZERO_COPY_TX_DRIVER */
\r
912 circ_inc32(&p_gmac_dev->l_tx_tail, p_gmac_dev->ul_tx_list_size);
\r
913 } while (CIRC_CNT(p_gmac_dev->l_tx_head, p_gmac_dev->l_tx_tail,
\r
914 p_gmac_dev->ul_tx_list_size));
\r
917 if (ul_tsr & GMAC_TSR_RLE) {
\r
918 /* Notify upper layer RLE */
\r
920 (*p_tx_cb) (ul_tx_status_flag, NULL);
\r
923 #endif /* GMAC_USES_TX_CALLBACK */
\r
925 #if( GMAC_USES_WAKEUP_CALLBACK )
\r
926 /* If a wakeup has been scheduled, notify upper layer that it can
\r
927 send other packets, and the sending will be successful. */
\r
928 if ((CIRC_SPACE(p_gmac_dev->l_tx_head, p_gmac_dev->l_tx_tail,
\r
929 p_gmac_dev->ul_tx_list_size) >= p_gmac_dev->uc_wakeup_threshold)
\r
930 && p_gmac_dev->func_wakeup_cb) {
\r
931 p_gmac_dev->func_wakeup_cb();
\r