/* ----------------------------------------------------------------------------
 *         SAM Software Package License
 * ----------------------------------------------------------------------------
 * Copyright (c) 2015, Atmel Corporation
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the disclaimer below.
 *
 * Atmel's name may not be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
 * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
 * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * ----------------------------------------------------------------------------
 */
#ifdef CONFIG_HAVE_FLEXCOM
#include "peripherals/flexcom.h"
#endif

#include "peripherals/pmc.h"
#include "peripherals/twid.h"
#include "peripherals/twi.h"
#include "peripherals/xdmad.h"
#include "peripherals/l2cc.h"

#include "cortex-a/cp15.h"

#include <assert.h>
#include <stdint.h>
#include <string.h>
49 #define TWID_DMA_THRESHOLD 16
\r
50 #define TWID_TIMEOUT 100
\r
52 static uint32_t _twid_wait_twi_transfer(struct _twi_desc* desc)
\r
54 struct _timeout timeout;
\r
55 timer_start_timeout(&timeout, TWID_TIMEOUT);
\r
56 while(!twi_is_transfer_complete(desc->addr)){
\r
57 if (timer_timeout_reached(&timeout)) {
\r
58 trace_error("twid: Unable to complete transfert!\r\n");
\r
59 twid_configure(desc);
\r
60 return TWID_ERROR_TRANSFER;
\r
63 return TWID_SUCCESS;
\r
66 static void _twid_xdmad_callback_wrapper(struct _xdmad_channel* channel,
\r
69 trace_debug("TWID DMA Transfert Finished\r\n");
\r
70 struct _twi_desc* twid = (struct _twi_desc*) args;
\r
72 xdmad_free_channel(channel);
\r
74 if (twid->region_start && twid->region_end) {
\r
75 l2cc_invalidate_region(twid->region_start, twid->region_end);
\r
78 if (twid && twid->callback)
\r
79 twid->callback(twid, twid->cb_args);
\r
83 static void _twid_init_dma_read_channel(const struct _twi_desc* desc,
\r
84 struct _xdmad_channel** channel,
\r
85 struct _xdmad_cfg* cfg)
\r
90 uint32_t id = get_twi_id_from_addr(desc->addr);
\r
91 assert(id < ID_PERIPH_COUNT);
\r
93 memset(cfg, 0x0, sizeof(*cfg));
\r
96 xdmad_allocate_channel(id, XDMAD_PERIPH_MEMORY);
\r
99 xdmad_prepare_channel(*channel);
\r
100 cfg->cfg.uint32_value = XDMAC_CC_TYPE_PER_TRAN
\r
101 | XDMAC_CC_DSYNC_PER2MEM
\r
102 | XDMAC_CC_MEMSET_NORMAL_MODE
\r
103 | XDMAC_CC_CSIZE_CHK_1
\r
104 | XDMAC_CC_DWIDTH_BYTE
\r
105 | XDMAC_CC_DIF_AHB_IF0
\r
106 | XDMAC_CC_SIF_AHB_IF1
\r
107 | XDMAC_CC_SAM_FIXED_AM;
\r
109 cfg->src_addr = (void*)&desc->addr->TWI_RHR;
\r
112 static void _twid_dma_read(const struct _twi_desc* desc,
\r
113 struct _buffer* buffer)
\r
115 struct _xdmad_channel* channel = NULL;
\r
116 struct _xdmad_cfg cfg;
\r
118 _twid_init_dma_read_channel(desc, &channel, &cfg);
\r
120 cfg.cfg.bitfield.dam = XDMAC_CC_DAM_INCREMENTED_AM
\r
121 >> XDMAC_CC_DAM_Pos;
\r
122 cfg.dest_addr = buffer->data;
\r
123 cfg.ublock_size = buffer->size;
\r
124 cfg.block_size = 0;
\r
125 xdmad_configure_transfer(channel, &cfg, 0, 0);
\r
126 xdmad_set_callback(channel, _twid_xdmad_callback_wrapper,
\r
129 l2cc_clean_region(desc->region_start, desc->region_end);
\r
131 xdmad_start_transfer(channel);
\r
134 static void _twid_init_dma_write_channel(struct _twi_desc* desc,
\r
135 struct _xdmad_channel** channel,
\r
136 struct _xdmad_cfg* cfg)
\r
141 uint32_t id = get_twi_id_from_addr(desc->addr);
\r
142 assert(id < ID_PERIPH_COUNT);
\r
143 memset(cfg, 0x0, sizeof(*cfg));
\r
146 xdmad_allocate_channel(XDMAD_PERIPH_MEMORY, id);
\r
149 xdmad_prepare_channel(*channel);
\r
150 cfg->cfg.uint32_value = XDMAC_CC_TYPE_PER_TRAN
\r
151 | XDMAC_CC_DSYNC_MEM2PER
\r
152 | XDMAC_CC_MEMSET_NORMAL_MODE
\r
153 | XDMAC_CC_CSIZE_CHK_1
\r
154 | XDMAC_CC_DWIDTH_BYTE
\r
155 | XDMAC_CC_DIF_AHB_IF1
\r
156 | XDMAC_CC_SIF_AHB_IF0
\r
157 | XDMAC_CC_DAM_FIXED_AM;
\r
159 cfg->dest_addr = (void*)&desc->addr->TWI_THR;
\r
162 static void _twid_dma_write(struct _twi_desc* desc,
\r
163 struct _buffer* buffer)
\r
165 struct _xdmad_channel* channel = NULL;
\r
166 struct _xdmad_cfg cfg;
\r
168 _twid_init_dma_write_channel(desc, &channel, &cfg);
\r
170 cfg.cfg.bitfield.sam = XDMAC_CC_SAM_INCREMENTED_AM
\r
171 >> XDMAC_CC_SAM_Pos;
\r
172 cfg.src_addr = buffer->data;
\r
173 cfg.ublock_size = buffer->size;
\r
174 cfg.block_size = 0;
\r
175 xdmad_configure_transfer(channel, &cfg, 0, 0);
\r
176 xdmad_set_callback(channel, _twid_xdmad_callback_wrapper,
\r
179 l2cc_clean_region(desc->region_start, desc->region_end);
\r
181 xdmad_start_transfer(channel);
\r
184 void twid_configure(struct _twi_desc* desc)
\r
186 uint32_t id = get_twi_id_from_addr(desc->addr);
\r
187 assert(id < ID_PERIPH_COUNT);
\r
189 #ifdef CONFIG_HAVE_FLEXCOM
\r
190 Flexcom* flexcom = get_flexcom_addr_from_id(get_twi_id_from_addr(desc->addr));
\r
192 flexcom_select(flexcom, FLEX_MR_OPMODE_TWI);
\r
196 pmc_enable_peripheral(id);
\r
197 twi_configure_master(desc->addr, desc->freq);
\r
199 #ifdef CONFIG_HAVE_TWI_FIFO
\r
200 if (desc->transfert_mode == TWID_MODE_FIFO) {
\r
201 uint32_t fifo_depth = get_peripheral_fifo_depth(desc->addr);
\r
202 twi_fifo_configure(desc->addr, fifo_depth/2, fifo_depth/2,
\r
203 TWI_FMR_RXRDYM_ONE_DATA | TWI_FMR_TXRDYM_ONE_DATA);
\r
208 static uint32_t _twid_poll_write(struct _twi_desc* desc, struct _buffer* buffer)
\r
211 struct _timeout timeout;
\r
212 twi_init_write_transfert(desc->addr,
\r
217 if (twi_get_status(desc->addr) & TWI_SR_NACK) {
\r
218 trace_error("twid: command NACK!\r\n");
\r
219 return TWID_ERROR_ACK;
\r
221 for (i = 0; i < buffer->size; ++i) {
\r
222 timer_start_timeout(&timeout, TWID_TIMEOUT);
\r
223 while(!twi_byte_sent(desc->addr)) {
\r
224 if (timer_timeout_reached(&timeout)) {
\r
225 trace_error("twid: Device doesn't answer, "
\r
226 "(TX TIMEOUT)\r\n");
\r
230 twi_write_byte(desc->addr, buffer->data[i]);
\r
231 if(twi_get_status(desc->addr) & TWI_SR_NACK) {
\r
232 trace_error("twid: command NACK!\r\n");
\r
233 return TWID_ERROR_ACK;
\r
236 /* wait transfert to be finished */
\r
237 return _twid_wait_twi_transfer(desc);
\r
240 static uint32_t _twid_poll_read(struct _twi_desc* desc, struct _buffer* buffer)
\r
243 struct _timeout timeout;
\r
244 twi_init_read_transfert(desc->addr,
\r
249 if (twi_get_status(desc->addr) & TWI_SR_NACK) {
\r
250 trace_error("twid: command NACK!\r\n");
\r
251 return TWID_ERROR_ACK;
\r
253 for (i = 0; i < buffer->size; ++i) {
\r
254 timer_start_timeout(&timeout, TWID_TIMEOUT);
\r
255 while(!twi_is_byte_received(desc->addr)) {
\r
256 if (timer_timeout_reached(&timeout)) {
\r
257 trace_error("twid: Device doesn't answer, "
\r
258 "(RX TIMEOUT)\r\n");
\r
262 buffer->data[i] = twi_read_byte(desc->addr);
\r
263 if(twi_get_status(desc->addr) & TWI_SR_NACK) {
\r
264 trace_error("twid: command NACK\r\n");
\r
265 return TWID_ERROR_ACK;
\r
268 /* wait transfert to be finished */
\r
269 return _twid_wait_twi_transfer(desc);
\r
272 uint32_t twid_transfert(struct _twi_desc* desc, struct _buffer* rx,
\r
273 struct _buffer* tx, twid_callback_t cb,
\r
276 uint32_t status = TWID_SUCCESS;
\r
278 desc->callback = cb;
\r
279 desc->cb_args = user_args;
\r
281 if (mutex_try_lock(&desc->mutex)) {
\r
282 return TWID_ERROR_LOCK;
\r
285 switch (desc->transfert_mode) {
\r
286 case TWID_MODE_POLLING:
\r
288 status = _twid_poll_write(desc, tx);
\r
292 status = _twid_poll_read(desc, rx);
\r
296 cb(desc, user_args);
\r
297 mutex_free(&desc->mutex);
\r
300 case TWID_MODE_DMA:
\r
302 status = TWID_ERROR_DUPLEX;
\r
306 if (tx->size < TWID_DMA_THRESHOLD) {
\r
307 status = _twid_poll_write(desc, tx);
\r
310 cb(desc, user_args);
\r
311 mutex_free(&desc->mutex);
\r
313 twi_init_write_transfert(desc->addr,
\r
318 desc->region_start = (uint32_t)tx->data;
\r
319 desc->region_end = desc->region_start
\r
321 _twid_dma_write(desc, tx);
\r
325 if (rx->size < TWID_DMA_THRESHOLD) {
\r
326 status = _twid_poll_read(desc, rx);
\r
329 cb(desc, user_args);
\r
330 mutex_free(&desc->mutex);
\r
332 twi_init_read_transfert(desc->addr,
\r
337 desc->region_start = (uint32_t)rx->data;
\r
338 desc->region_end = desc->region_start
\r
340 if(twi_get_status(desc->addr) & TWI_SR_NACK) {
\r
341 trace_error("twid: Acknolegment "
\r
343 status = TWID_ERROR_ACK;
\r
346 _twid_dma_read(desc, rx);
\r
351 #ifdef CONFIG_HAVE_TWI_FIFO
\r
352 case TWID_MODE_FIFO:
\r
354 status = twi_write_stream(desc->addr, desc->slave_addr,
\r
355 desc->iaddr, desc->isize,
\r
356 tx->data, tx->size);
\r
357 status = status ? TWID_SUCCESS : TWID_ERROR_ACK;
\r
360 status = _twid_wait_twi_transfer(desc);
\r
365 status = twi_read_stream(desc->addr, desc->slave_addr,
\r
366 desc->iaddr, desc->isize,
\r
367 rx->data, rx->size);
\r
368 status = status ? TWID_SUCCESS : TWID_ERROR_ACK;
\r
371 status = _twid_wait_twi_transfer(desc);
\r
376 cb(desc, user_args);
\r
377 mutex_free(&desc->mutex);
\r
381 trace_debug("Unkown mode");
\r
385 mutex_free(&desc->mutex);
\r
/**
 * \brief Convenience twid_callback_t that just finishes the transfer
 * (releases the driver mutex). user_args is unused.
 */
void twid_finish_transfert_callback(struct _twi_desc* desc, void* user_args)
{
	(void)user_args;
	twid_finish_transfert(desc);
}
396 void twid_finish_transfert(struct _twi_desc* desc)
\r
398 mutex_free(&desc->mutex);
\r
401 uint32_t twid_is_busy(const struct _twi_desc* desc)
\r
403 return mutex_is_locked(&desc->mutex);
\r
406 void twid_wait_transfert(const struct _twi_desc* desc)
\r
408 while (mutex_is_locked(&desc->mutex));
\r