]> git.sur5r.net Git - freertos/blob - FreeRTOS/Demo/CORTEX_A5_SAMA5D2x_Xplained_IAR/AtmelFiles/drivers/peripherals/spid.c
Add SAMA5D2 Xplained IAR demo.
[freertos] / FreeRTOS / Demo / CORTEX_A5_SAMA5D2x_Xplained_IAR / AtmelFiles / drivers / peripherals / spid.c
1 /* ----------------------------------------------------------------------------\r
2  *         SAM Software Package License\r
3  * ----------------------------------------------------------------------------\r
4  * Copyright (c) 2015, Atmel Corporation\r
5  *\r
6  * All rights reserved.\r
7  *\r
8  * Redistribution and use in source and binary forms, with or without\r
9  * modification, are permitted provided that the following conditions are met:\r
10  *\r
11  * - Redistributions of source code must retain the above copyright notice,\r
12  * this list of conditions and the disclaimer below.\r
13  *\r
14  * Atmel's name may not be used to endorse or promote products derived from\r
15  * this software without specific prior written permission.\r
16  *\r
17  * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR\r
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\r
19  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE\r
20  * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,\r
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\r
22  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,\r
23  * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\r
24  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\r
25  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\r
26  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
27  * ----------------------------------------------------------------------------\r
28  */\r
29 \r
30 \r
31 #include "peripherals/aic.h"\r
32 #ifdef CONFIG_HAVE_FLEXCOM\r
33 #include "peripherals/flexcom.h"\r
34 #endif\r
35 #include "peripherals/pmc.h"\r
36 #include "peripherals/spid.h"\r
37 #include "peripherals/spi.h"\r
38 #include "peripherals/xdmac.h"\r
39 #include "peripherals/xdmad.h"\r
40 #include "peripherals/l2cc.h"\r
41 \r
42 #include "cortex-a/cp15.h"\r
43 \r
44 #include "trace.h"\r
45 \r
46 #include <stddef.h>\r
47 #include <stdint.h>\r
48 #include <assert.h>\r
49 #include <string.h>\r
50 \r
/* SPI_MR bits that callers are allowed to set through desc->attributes;
 * spid_configure() masks everything else out before writing SPI_MR. */
#define SPID_ATTRIBUTE_MASK     (SPI_MR_PS | SPI_MR_MODFDIS | SPI_MR_MSTR | SPI_MR_WDRBT)
/* Transfers shorter than this many bytes use polled I/O instead of DMA
 * (DMA channel setup cost outweighs the gain for tiny transfers). */
#define SPID_DMA_THRESHOLD      16

/* Fixed-address dummy sink for the RX DMA channel during write-only
 * transfers; received bytes are discarded here. */
static uint32_t _garbage = 0;
55 \r
56 static void _spid_xdmad_callback_wrapper(struct _xdmad_channel *channel,\r
57                                          void *arg)\r
58 {\r
59         trace_debug("SPID DMA Transfert Finished\r\n");\r
60         struct _spi_desc* spid = (struct _spi_desc*) arg;\r
61 \r
62         xdmad_free_channel(channel);\r
63 \r
64         if (spid->region_start && spid->region_end) {\r
65                 l2cc_invalidate_region(spid->region_start, spid->region_end);\r
66         }\r
67 \r
68         if (spid && spid->callback)\r
69                 spid->callback(spid, spid->cb_args);\r
70 }\r
71 \r
/**
 * Minimal XDMAD completion callback: just releases the channel.
 * Used for the TX side of a transfer, where no user notification or
 * cache maintenance is needed.
 *
 * \param channel  the finished xdmad channel (freed here)
 * \param arg      unused
 */
static void _spid_xdmad_cleanup_callback(struct _xdmad_channel *channel,
                                         void *arg)
{
	/* Silence the unused-parameter warning (same idiom as
	 * spid_finish_transfert_callback below). */
	(void)arg;
	xdmad_free_channel(channel);
}
77 \r
78 #ifdef CONFIG_HAVE_SPI_FIFO\r
/* Interrupt handler installed by spid_configure() for the SPI FIFO
 * pointer-error interrupts (TXFPTEF/RXFPTEF); only logs the fault. */
static void spid_fifo_error(void)
{
	trace_error("Fifo pointer error encountered !!\r\n");
}
83 #endif\r
84 \r
/**
 * Initialize the SPI peripheral described by \a desc as a master.
 *
 * Enables the peripheral clock, applies the caller's mode attributes
 * (restricted to SPID_ATTRIBUTE_MASK), programs the chip-select timing
 * (bitrate, DLYBS, DLYBCT, SPI mode) and finally enables the SPI.
 * In FIFO mode the FIFOs are configured and the FIFO pointer-error
 * interrupt is routed to spid_fifo_error().
 *
 * \param desc  SPI driver descriptor (addr, chip_select, bitrate,
 *              dlybs, dlybct, spi_mode, attributes, transfert_mode)
 */
void spid_configure(struct _spi_desc* desc)
{
	uint32_t id = get_spi_id_from_addr(desc->addr);

#ifdef CONFIG_HAVE_FLEXCOM
	/* If this SPI instance lives inside a FLEXCOM, switch the
	 * FLEXCOM over to SPI operating mode first. */
	Flexcom* flexcom = get_flexcom_addr_from_id(id);
	if (flexcom) {
		flexcom_select(flexcom, FLEX_MR_OPMODE_SPI);
	}
#endif
	/* Enable SPI early otherwise FIFO configuration won't be applied */
	pmc_enable_peripheral(id);
	if (desc->transfert_mode == SPID_MODE_FIFO) {
		/* NOTE(review): WDRBT is cleared in FIFO mode — presumably
		 * incompatible with FIFO operation; confirm vs. datasheet. */
		desc->attributes &= ~SPI_MR_WDRBT;
	}
	spi_configure(desc->addr, (desc->attributes & SPID_ATTRIBUTE_MASK) | SPI_MR_MSTR);
	spi_chip_select(desc->addr, desc->chip_select);
	spi_configure_cs(desc->addr, desc->chip_select, desc->bitrate,
			 desc->dlybs, desc->dlybct, desc->spi_mode, 0);
#ifdef CONFIG_HAVE_SPI_FIFO
	if (desc->transfert_mode == SPID_MODE_FIFO) {
		spi_fifo_configure(desc->addr, SPI_FIFO_DEPTH, SPI_FIFO_DEPTH,
				   SPI_FMR_TXRDYM_ONE_DATA | SPI_FMR_RXRDYM_ONE_DATA);
		spi_enable_it(desc->addr, SPI_IER_TXFPTEF | SPI_IER_RXFPTEF);
		aic_set_source_vector(id, spid_fifo_error);
		aic_enable(id);
	}
#endif
	/* Read-and-discard the status register to clear stale flags. */
	(void)spi_get_status(desc->addr);

	spi_enable(desc->addr);
}
117 \r
/**
 * Start a transfer session: select the configured chip-select and keep
 * it asserted (SPI_KEEP_CS_OW) until spid_finish_transfert() releases it.
 *
 * \param desc  SPI driver descriptor
 */
void spid_begin_transfert(struct _spi_desc* desc)
{
	spi_chip_select(desc->addr, desc->chip_select);
	spi_configure_cs_mode(desc->addr, desc->chip_select, SPI_KEEP_CS_OW);
}
123 \r
124 static void _spid_init_dma_write_channel(const struct _spi_desc* desc,\r
125                                          struct _xdmad_channel** channel,\r
126                                          struct _xdmad_cfg* cfg)\r
127 {\r
128         assert(cfg);\r
129         assert(channel);\r
130 \r
131         uint32_t id = get_spi_id_from_addr(desc->addr);\r
132 \r
133         memset(cfg, 0x0, sizeof(*cfg));\r
134 \r
135         *channel =\r
136                 xdmad_allocate_channel(XDMAD_PERIPH_MEMORY, id);\r
137         assert(*channel);\r
138 \r
139         xdmad_prepare_channel(*channel);\r
140         cfg->cfg.uint32_value = XDMAC_CC_TYPE_PER_TRAN\r
141                 | XDMAC_CC_DSYNC_MEM2PER\r
142                 | XDMAC_CC_MEMSET_NORMAL_MODE\r
143                 | XDMAC_CC_CSIZE_CHK_1\r
144                 | XDMAC_CC_DWIDTH_BYTE\r
145                 | XDMAC_CC_DIF_AHB_IF1\r
146                 | XDMAC_CC_SIF_AHB_IF0\r
147                 | XDMAC_CC_DAM_FIXED_AM;\r
148 \r
149         cfg->dest_addr = (void*)&desc->addr->SPI_TDR;\r
150 }\r
151 \r
152 static void _spid_init_dma_read_channel(const struct _spi_desc* desc,\r
153                                          struct _xdmad_channel** channel,\r
154                                          struct _xdmad_cfg* cfg)\r
155 {\r
156         assert(cfg);\r
157         assert(channel);\r
158 \r
159         uint32_t id = get_spi_id_from_addr(desc->addr);\r
160 \r
161         memset(cfg, 0x0, sizeof(*cfg));\r
162 \r
163         *channel =\r
164                 xdmad_allocate_channel(id, XDMAD_PERIPH_MEMORY);\r
165         assert(*channel);\r
166 \r
167         xdmad_prepare_channel(*channel);\r
168         cfg->cfg.uint32_value = XDMAC_CC_TYPE_PER_TRAN\r
169                 | XDMAC_CC_DSYNC_PER2MEM\r
170                 | XDMAC_CC_MEMSET_NORMAL_MODE\r
171                 | XDMAC_CC_CSIZE_CHK_1\r
172                 | XDMAC_CC_DWIDTH_BYTE\r
173                 | XDMAC_CC_DIF_AHB_IF0\r
174                 | XDMAC_CC_SIF_AHB_IF1\r
175                 | XDMAC_CC_SAM_FIXED_AM;\r
176 \r
177         cfg->src_addr = (void*)&desc->addr->SPI_RDR;\r
178 }\r
179 \r
180 static void _spid_dma_write(const struct _spi_desc* desc,\r
181                            const struct _buffer* buffer)\r
182 {\r
183         struct _xdmad_channel* w_channel = NULL;\r
184         struct _xdmad_channel* r_channel = NULL;\r
185         struct _xdmad_cfg w_cfg;\r
186         struct _xdmad_cfg r_cfg;\r
187 \r
188         _spid_init_dma_write_channel(desc, &w_channel, &w_cfg);\r
189         _spid_init_dma_read_channel(desc, &r_channel, &r_cfg);\r
190 \r
191         w_cfg.cfg.bitfield.sam = XDMAC_CC_SAM_INCREMENTED_AM\r
192                 >> XDMAC_CC_SAM_Pos;\r
193         w_cfg.src_addr = buffer->data;\r
194         w_cfg.ublock_size = buffer->size;\r
195         xdmad_configure_transfer(w_channel, &w_cfg, 0, 0);\r
196         xdmad_set_callback(w_channel, _spid_xdmad_cleanup_callback,\r
197                            NULL);\r
198 \r
199         r_cfg.cfg.bitfield.dam = XDMAC_CC_DAM_FIXED_AM\r
200                 >> XDMAC_CC_DAM_Pos;\r
201         r_cfg.dest_addr = &_garbage;\r
202         r_cfg.ublock_size = buffer->size;\r
203         xdmad_configure_transfer(r_channel, &r_cfg, 0, 0);\r
204         xdmad_set_callback(r_channel, _spid_xdmad_callback_wrapper,\r
205                            (void*)desc);\r
206 \r
207         l2cc_clean_region(desc->region_start, desc->region_end);\r
208 \r
209         xdmad_start_transfer(w_channel);\r
210         xdmad_start_transfer(r_channel);\r
211 }\r
212 \r
213 static void _spid_dma_read(const struct _spi_desc* desc,\r
214                            struct _buffer* buffer)\r
215 {\r
216         struct _xdmad_channel* w_channel = NULL;\r
217         struct _xdmad_channel* r_channel = NULL;\r
218         struct _xdmad_cfg w_cfg;\r
219         struct _xdmad_cfg r_cfg;\r
220 \r
221         _spid_init_dma_write_channel(desc, &w_channel, &w_cfg);\r
222         _spid_init_dma_read_channel(desc, &r_channel, &r_cfg);\r
223 \r
224         w_cfg.cfg.bitfield.sam = XDMAC_CC_SAM_FIXED_AM\r
225                 >> XDMAC_CC_SAM_Pos;\r
226         w_cfg.src_addr = buffer->data;\r
227         w_cfg.ublock_size = buffer->size;\r
228         w_cfg.block_size = 0;\r
229         xdmad_configure_transfer(w_channel, &w_cfg, 0, 0);\r
230         xdmad_set_callback(w_channel, _spid_xdmad_cleanup_callback, NULL);\r
231 \r
232         r_cfg.cfg.bitfield.dam = XDMAC_CC_DAM_INCREMENTED_AM\r
233                 >> XDMAC_CC_DAM_Pos;\r
234         r_cfg.dest_addr = buffer->data;\r
235         r_cfg.ublock_size = buffer->size;\r
236         xdmad_configure_transfer(r_channel, &r_cfg, 0, 0);\r
237         xdmad_set_callback(r_channel, _spid_xdmad_callback_wrapper,\r
238                            (void*)desc);\r
239 \r
240         l2cc_clean_region(desc->region_start, desc->region_end);\r
241 \r
242         xdmad_start_transfer(w_channel);\r
243         xdmad_start_transfer(r_channel);\r
244 }\r
245 \r
246 uint32_t spid_transfert(struct _spi_desc* desc, struct _buffer* rx,\r
247                         struct _buffer* tx, spid_callback_t cb,\r
248                         void* user_args)\r
249 {\r
250         Spi* spi = desc->addr;\r
251         uint32_t i = 0;\r
252 \r
253         desc->callback = cb;\r
254         desc->cb_args = user_args;\r
255 \r
256         if (mutex_try_lock(&desc->mutex)) {\r
257                 return SPID_ERROR_LOCK;\r
258         }\r
259 \r
260         switch (desc->transfert_mode) {\r
261         case SPID_MODE_POLLING:\r
262                 if (tx) {\r
263                         for (i = 0; i < tx->size; ++i) {\r
264                                 spi_write(spi, desc->chip_select, tx->data[i]);\r
265                         }\r
266                 }\r
267                 if (rx) {\r
268                         for (i = 0; i < rx->size; ++i) {\r
269                                 rx->data[i] = spi_read(spi, desc->chip_select);\r
270                         }\r
271                 }\r
272                 mutex_free(&desc->mutex);\r
273                 if (cb)\r
274                         cb(desc, user_args);\r
275                 break;\r
276         case SPID_MODE_DMA:\r
277                 if (tx) {\r
278                         if (tx->size < SPID_DMA_THRESHOLD) {\r
279                                 for (i = 0; i < tx->size; ++i) {\r
280                                         spi_write(spi, desc->chip_select, tx->data[i]);\r
281                                 }\r
282                                 if (!rx) {\r
283                                         if (cb)\r
284                                                 cb(desc, user_args);\r
285                                         mutex_free(&desc->mutex);\r
286                                 }\r
287                         } else {\r
288                                 desc->region_start = (uint32_t)tx->data;\r
289                                 desc->region_end = desc->region_start\r
290                                         + tx->size;\r
291                                 _spid_dma_write(desc, tx);\r
292                                 if (rx) {\r
293                                         spid_wait_transfert(desc);\r
294                                         mutex_lock(&desc->mutex);\r
295                                 }\r
296                         }\r
297                 }\r
298                 if (rx) {\r
299                         if (rx->size < SPID_DMA_THRESHOLD) {\r
300                                 for (i = 0; i < rx->size; ++i) {\r
301                                         rx->data[i] = spi_read(spi, desc->chip_select);\r
302                                 }\r
303                                 if (cb)\r
304                                         cb(desc, user_args);\r
305                                 mutex_free(&desc->mutex);\r
306                         } else {\r
307                                 desc->region_start = (uint32_t)rx->data;\r
308                                 desc->region_end = desc->region_start\r
309                                         + rx->size;\r
310                                 _spid_dma_read(desc, rx);\r
311                         }\r
312                 }\r
313                 break;\r
314 #ifdef CONFIG_HAVE_SPI_FIFO\r
315         case SPID_MODE_FIFO:\r
316                 if (tx) {\r
317                         spi_write_stream(spi, desc->chip_select,\r
318                                          tx->data, tx->size);\r
319                 }\r
320                 if (rx) {\r
321                         spi_read_stream(spi, desc->chip_select,\r
322                                         rx->data, rx->size);\r
323                 }\r
324                 mutex_free(&desc->mutex);\r
325                 if (cb)\r
326                         cb(desc, user_args);\r
327                 break;\r
328 #endif\r
329         default:\r
330                 trace_debug("Unkown mode");\r
331         }\r
332 \r
333         return SPID_SUCCESS;\r
334 }\r
335 \r
/**
 * spid_callback_t adapter: lets spid_finish_transfert() be registered
 * directly as a transfer-completion callback.
 *
 * \param desc       SPI driver descriptor
 * \param user_args  unused
 */
void spid_finish_transfert_callback(struct _spi_desc* desc, void* user_args)
{
	(void)user_args;
	spid_finish_transfert(desc);
}
341 \r
/**
 * End a transfer session started by spid_begin_transfert(): de-assert
 * the chip-select and release the descriptor's mutex.
 *
 * \param desc  SPI driver descriptor
 */
void spid_finish_transfert(struct _spi_desc* desc)
{
	spi_release_cs(desc->addr);
	mutex_free(&desc->mutex);
}
347 \r
/**
 * Shut the SPI peripheral down: disable FIFOs and their interrupts
 * (when built with FIFO support), disable the SPI itself, and gate its
 * peripheral clock.
 *
 * \param desc  SPI driver descriptor
 */
void spid_close(const struct _spi_desc* desc)
{
	uint32_t id = get_spi_id_from_addr(desc->addr);
#ifdef CONFIG_HAVE_SPI_FIFO
	spi_fifo_disable(desc->addr);
	/* SPI_IER_* bit positions are reused for disabling here —
	 * NOTE(review): assumes IER/IDR share the same bit layout. */
	spi_disable_it(desc->addr, SPI_IER_TXFPTEF | SPI_IER_RXFPTEF);
	aic_disable(id);
#endif
	spi_disable(desc->addr);
	pmc_disable_peripheral(id);
}
359 \r
/**
 * \return non-zero while a transfer is in progress (mutex held),
 *         zero when the descriptor is idle.
 */
uint32_t spid_is_busy(const struct _spi_desc* desc)
{
	return mutex_is_locked(&desc->mutex);
}
364 \r
/**
 * Busy-wait until the current transfer completes, i.e. until the
 * descriptor's mutex is released (by the DMA completion callback or by
 * spid_finish_transfert()). Spins with no sleep — intended for short
 * transfers only.
 */
void spid_wait_transfert(const struct _spi_desc* desc)
{
	while (mutex_is_locked(&desc->mutex));
}