/* ----------------------------------------------------------------------------
 *         SAM Software Package License
 * ----------------------------------------------------------------------------
 * Copyright (c) 2015, Atmel Corporation
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the disclaimer below.
 *
 * Atmel's name may not be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
 * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
 * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * ----------------------------------------------------------------------------
 */
#include "peripherals/aic.h"
#ifdef CONFIG_HAVE_FLEXCOM
#include "peripherals/flexcom.h"
#endif
#include "peripherals/pmc.h"
#include "peripherals/spid.h"
#include "peripherals/spi.h"
#include "peripherals/xdmac.h"
#include "peripherals/xdmad.h"
#include "peripherals/l2cc.h"

#include "cortex-a/cp15.h"

#include "trace.h"
#include "mutex.h"

#include <stdint.h>
#include <string.h>
/* SPI mode-register bits that callers may select through desc->attributes;
 * anything outside this mask is forced by the driver itself. */
#define SPID_ATTRIBUTE_MASK (SPI_MR_PS | SPI_MR_MODFDIS | SPI_MR_MSTR | SPI_MR_WDRBT)

/* Transfers shorter than this many bytes bypass DMA and use polled I/O
 * (DMA setup overhead is not worth it for tiny transfers). */
#define SPID_DMA_THRESHOLD 16

/* Sink for received data during write-only DMA transfers: SPI always clocks
 * a byte in for every byte out, so the RX channel needs somewhere to write. */
static uint32_t _garbage = 0;
56 static void _spid_xdmad_callback_wrapper(struct _xdmad_channel *channel,
\r
59 trace_debug("SPID DMA Transfert Finished\r\n");
\r
60 struct _spi_desc* spid = (struct _spi_desc*) arg;
\r
62 xdmad_free_channel(channel);
\r
64 if (spid->region_start && spid->region_end) {
\r
65 l2cc_invalidate_region(spid->region_start, spid->region_end);
\r
68 if (spid && spid->callback)
\r
69 spid->callback(spid, spid->cb_args);
\r
/**
 * DMA completion callback for the TX channel: only releases the channel.
 * The RX-channel callback (_spid_xdmad_callback_wrapper) handles cache
 * invalidation and user notification.
 */
static void _spid_xdmad_cleanup_callback(struct _xdmad_channel *channel,
					 void *arg)
{
	(void)arg;
	xdmad_free_channel(channel);
}
#ifdef CONFIG_HAVE_SPI_FIFO
/* Interrupt handler for SPI FIFO pointer errors (TXFPTEF/RXFPTEF). */
static void spid_fifo_error(void)
{
	trace_error("Fifo pointer error encountered !!\r\n");
}
#endif
85 void spid_configure(struct _spi_desc* desc)
\r
87 uint32_t id = get_spi_id_from_addr(desc->addr);
\r
89 #ifdef CONFIG_HAVE_FLEXCOM
\r
90 Flexcom* flexcom = get_flexcom_addr_from_id(id);
\r
92 flexcom_select(flexcom, FLEX_MR_OPMODE_SPI);
\r
95 /* Enable SPI early otherwise FIFO configuration won't be applied */
\r
96 pmc_enable_peripheral(id);
\r
97 if (desc->transfert_mode == SPID_MODE_FIFO) {
\r
98 desc->attributes &= ~SPI_MR_WDRBT;
\r
100 spi_configure(desc->addr, (desc->attributes & SPID_ATTRIBUTE_MASK) | SPI_MR_MSTR);
\r
101 spi_chip_select(desc->addr, desc->chip_select);
\r
102 spi_configure_cs(desc->addr, desc->chip_select, desc->bitrate,
\r
103 desc->dlybs, desc->dlybct, desc->spi_mode, 0);
\r
104 #ifdef CONFIG_HAVE_SPI_FIFO
\r
105 if (desc->transfert_mode == SPID_MODE_FIFO) {
\r
106 spi_fifo_configure(desc->addr, SPI_FIFO_DEPTH, SPI_FIFO_DEPTH,
\r
107 SPI_FMR_TXRDYM_ONE_DATA | SPI_FMR_RXRDYM_ONE_DATA);
\r
108 spi_enable_it(desc->addr, SPI_IER_TXFPTEF | SPI_IER_RXFPTEF);
\r
109 aic_set_source_vector(id, spid_fifo_error);
\r
113 (void)spi_get_status(desc->addr);
\r
115 spi_enable(desc->addr);
\r
118 void spid_begin_transfert(struct _spi_desc* desc)
\r
120 spi_chip_select(desc->addr, desc->chip_select);
\r
121 spi_configure_cs_mode(desc->addr, desc->chip_select, SPI_KEEP_CS_OW);
\r
124 static void _spid_init_dma_write_channel(const struct _spi_desc* desc,
\r
125 struct _xdmad_channel** channel,
\r
126 struct _xdmad_cfg* cfg)
\r
131 uint32_t id = get_spi_id_from_addr(desc->addr);
\r
133 memset(cfg, 0x0, sizeof(*cfg));
\r
136 xdmad_allocate_channel(XDMAD_PERIPH_MEMORY, id);
\r
139 xdmad_prepare_channel(*channel);
\r
140 cfg->cfg.uint32_value = XDMAC_CC_TYPE_PER_TRAN
\r
141 | XDMAC_CC_DSYNC_MEM2PER
\r
142 | XDMAC_CC_MEMSET_NORMAL_MODE
\r
143 | XDMAC_CC_CSIZE_CHK_1
\r
144 | XDMAC_CC_DWIDTH_BYTE
\r
145 | XDMAC_CC_DIF_AHB_IF1
\r
146 | XDMAC_CC_SIF_AHB_IF0
\r
147 | XDMAC_CC_DAM_FIXED_AM;
\r
149 cfg->dest_addr = (void*)&desc->addr->SPI_TDR;
\r
152 static void _spid_init_dma_read_channel(const struct _spi_desc* desc,
\r
153 struct _xdmad_channel** channel,
\r
154 struct _xdmad_cfg* cfg)
\r
159 uint32_t id = get_spi_id_from_addr(desc->addr);
\r
161 memset(cfg, 0x0, sizeof(*cfg));
\r
164 xdmad_allocate_channel(id, XDMAD_PERIPH_MEMORY);
\r
167 xdmad_prepare_channel(*channel);
\r
168 cfg->cfg.uint32_value = XDMAC_CC_TYPE_PER_TRAN
\r
169 | XDMAC_CC_DSYNC_PER2MEM
\r
170 | XDMAC_CC_MEMSET_NORMAL_MODE
\r
171 | XDMAC_CC_CSIZE_CHK_1
\r
172 | XDMAC_CC_DWIDTH_BYTE
\r
173 | XDMAC_CC_DIF_AHB_IF0
\r
174 | XDMAC_CC_SIF_AHB_IF1
\r
175 | XDMAC_CC_SAM_FIXED_AM;
\r
177 cfg->src_addr = (void*)&desc->addr->SPI_RDR;
\r
180 static void _spid_dma_write(const struct _spi_desc* desc,
\r
181 const struct _buffer* buffer)
\r
183 struct _xdmad_channel* w_channel = NULL;
\r
184 struct _xdmad_channel* r_channel = NULL;
\r
185 struct _xdmad_cfg w_cfg;
\r
186 struct _xdmad_cfg r_cfg;
\r
188 _spid_init_dma_write_channel(desc, &w_channel, &w_cfg);
\r
189 _spid_init_dma_read_channel(desc, &r_channel, &r_cfg);
\r
191 w_cfg.cfg.bitfield.sam = XDMAC_CC_SAM_INCREMENTED_AM
\r
192 >> XDMAC_CC_SAM_Pos;
\r
193 w_cfg.src_addr = buffer->data;
\r
194 w_cfg.ublock_size = buffer->size;
\r
195 xdmad_configure_transfer(w_channel, &w_cfg, 0, 0);
\r
196 xdmad_set_callback(w_channel, _spid_xdmad_cleanup_callback,
\r
199 r_cfg.cfg.bitfield.dam = XDMAC_CC_DAM_FIXED_AM
\r
200 >> XDMAC_CC_DAM_Pos;
\r
201 r_cfg.dest_addr = &_garbage;
\r
202 r_cfg.ublock_size = buffer->size;
\r
203 xdmad_configure_transfer(r_channel, &r_cfg, 0, 0);
\r
204 xdmad_set_callback(r_channel, _spid_xdmad_callback_wrapper,
\r
207 l2cc_clean_region(desc->region_start, desc->region_end);
\r
209 xdmad_start_transfer(w_channel);
\r
210 xdmad_start_transfer(r_channel);
\r
213 static void _spid_dma_read(const struct _spi_desc* desc,
\r
214 struct _buffer* buffer)
\r
216 struct _xdmad_channel* w_channel = NULL;
\r
217 struct _xdmad_channel* r_channel = NULL;
\r
218 struct _xdmad_cfg w_cfg;
\r
219 struct _xdmad_cfg r_cfg;
\r
221 _spid_init_dma_write_channel(desc, &w_channel, &w_cfg);
\r
222 _spid_init_dma_read_channel(desc, &r_channel, &r_cfg);
\r
224 w_cfg.cfg.bitfield.sam = XDMAC_CC_SAM_FIXED_AM
\r
225 >> XDMAC_CC_SAM_Pos;
\r
226 w_cfg.src_addr = buffer->data;
\r
227 w_cfg.ublock_size = buffer->size;
\r
228 w_cfg.block_size = 0;
\r
229 xdmad_configure_transfer(w_channel, &w_cfg, 0, 0);
\r
230 xdmad_set_callback(w_channel, _spid_xdmad_cleanup_callback, NULL);
\r
232 r_cfg.cfg.bitfield.dam = XDMAC_CC_DAM_INCREMENTED_AM
\r
233 >> XDMAC_CC_DAM_Pos;
\r
234 r_cfg.dest_addr = buffer->data;
\r
235 r_cfg.ublock_size = buffer->size;
\r
236 xdmad_configure_transfer(r_channel, &r_cfg, 0, 0);
\r
237 xdmad_set_callback(r_channel, _spid_xdmad_callback_wrapper,
\r
240 l2cc_clean_region(desc->region_start, desc->region_end);
\r
242 xdmad_start_transfer(w_channel);
\r
243 xdmad_start_transfer(r_channel);
\r
246 uint32_t spid_transfert(struct _spi_desc* desc, struct _buffer* rx,
\r
247 struct _buffer* tx, spid_callback_t cb,
\r
250 Spi* spi = desc->addr;
\r
253 desc->callback = cb;
\r
254 desc->cb_args = user_args;
\r
256 if (mutex_try_lock(&desc->mutex)) {
\r
257 return SPID_ERROR_LOCK;
\r
260 switch (desc->transfert_mode) {
\r
261 case SPID_MODE_POLLING:
\r
263 for (i = 0; i < tx->size; ++i) {
\r
264 spi_write(spi, desc->chip_select, tx->data[i]);
\r
268 for (i = 0; i < rx->size; ++i) {
\r
269 rx->data[i] = spi_read(spi, desc->chip_select);
\r
272 mutex_free(&desc->mutex);
\r
274 cb(desc, user_args);
\r
276 case SPID_MODE_DMA:
\r
278 if (tx->size < SPID_DMA_THRESHOLD) {
\r
279 for (i = 0; i < tx->size; ++i) {
\r
280 spi_write(spi, desc->chip_select, tx->data[i]);
\r
284 cb(desc, user_args);
\r
285 mutex_free(&desc->mutex);
\r
288 desc->region_start = (uint32_t)tx->data;
\r
289 desc->region_end = desc->region_start
\r
291 _spid_dma_write(desc, tx);
\r
293 spid_wait_transfert(desc);
\r
294 mutex_lock(&desc->mutex);
\r
299 if (rx->size < SPID_DMA_THRESHOLD) {
\r
300 for (i = 0; i < rx->size; ++i) {
\r
301 rx->data[i] = spi_read(spi, desc->chip_select);
\r
304 cb(desc, user_args);
\r
305 mutex_free(&desc->mutex);
\r
307 desc->region_start = (uint32_t)rx->data;
\r
308 desc->region_end = desc->region_start
\r
310 _spid_dma_read(desc, rx);
\r
314 #ifdef CONFIG_HAVE_SPI_FIFO
\r
315 case SPID_MODE_FIFO:
\r
317 spi_write_stream(spi, desc->chip_select,
\r
318 tx->data, tx->size);
\r
321 spi_read_stream(spi, desc->chip_select,
\r
322 rx->data, rx->size);
\r
324 mutex_free(&desc->mutex);
\r
326 cb(desc, user_args);
\r
330 trace_debug("Unkown mode");
\r
333 return SPID_SUCCESS;
\r
/**
 * Adapter with the spid_callback_t signature that simply finishes the
 * transfer; suitable as the \a cb argument of spid_transfert().
 */
void spid_finish_transfert_callback(struct _spi_desc* desc, void* user_args)
{
	(void)user_args;
	spid_finish_transfert(desc);
}
342 void spid_finish_transfert(struct _spi_desc* desc)
\r
344 spi_release_cs(desc->addr);
\r
345 mutex_free(&desc->mutex);
\r
348 void spid_close(const struct _spi_desc* desc)
\r
350 uint32_t id = get_spi_id_from_addr(desc->addr);
\r
351 #ifdef CONFIG_HAVE_SPI_FIFO
\r
352 spi_fifo_disable(desc->addr);
\r
353 spi_disable_it(desc->addr, SPI_IER_TXFPTEF | SPI_IER_RXFPTEF);
\r
356 spi_disable(desc->addr);
\r
357 pmc_disable_peripheral(id);
\r
360 uint32_t spid_is_busy(const struct _spi_desc* desc)
\r
362 return mutex_is_locked(&desc->mutex);
\r
365 void spid_wait_transfert(const struct _spi_desc* desc)
\r
367 while (mutex_is_locked(&desc->mutex));
\r