/***************************************************************************
 *   Copyright (C) 2016 by Matthias Welwarsky                              *
 *   matthias.welwarsky@sysgo.com                                          *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program.  If not, see <http://www.gnu.org/licenses/>. *
 ***************************************************************************/
23 #include "armv8_cache.h"
24 #include "armv8_dpm.h"
25 #include "armv8_opcodes.h"
/* CLIDR cache types: 3-bit Ctype field decoded per cache level */
#define CACHE_LEVEL_HAS_UNIFIED_CACHE 0x4
#define CACHE_LEVEL_HAS_D_CACHE 0x2
#define CACHE_LEVEL_HAS_I_CACHE 0x1
32 static int armv8_d_cache_sanity_check(struct armv8_common *armv8)
34 struct armv8_cache_common *armv8_cache = &armv8->armv8_mmu.armv8_cache;
36 if (armv8_cache->d_u_cache_enabled)
39 return ERROR_TARGET_INVALID;
42 static int armv8_i_cache_sanity_check(struct armv8_common *armv8)
44 struct armv8_cache_common *armv8_cache = &armv8->armv8_mmu.armv8_cache;
46 if (armv8_cache->i_cache_enabled)
49 return ERROR_TARGET_INVALID;
52 static int armv8_cache_d_inner_flush_level(struct arm_dpm *dpm, struct armv8_cachesize *size, int cl)
54 int retval = ERROR_OK;
55 int32_t c_way, c_index = size->index;
57 LOG_DEBUG("cl %" PRId32, cl);
61 uint32_t value = (c_index << size->index_shift)
62 | (c_way << size->way_shift) | (cl << 1);
64 * DC CISW - Clean and invalidate data cache
67 retval = dpm->instr_write_data_r0(dpm,
68 ARMV8_SYS(SYSTEM_DCCISW, 0), value);
69 if (retval != ERROR_OK)
74 } while (c_index >= 0);
80 static int armv8_cache_d_inner_clean_inval_all(struct armv8_common *armv8)
82 struct armv8_cache_common *cache = &(armv8->armv8_mmu.armv8_cache);
83 struct arm_dpm *dpm = armv8->arm.dpm;
87 retval = armv8_d_cache_sanity_check(armv8);
88 if (retval != ERROR_OK)
91 retval = dpm->prepare(dpm);
92 if (retval != ERROR_OK)
95 for (cl = 0; cl < cache->loc; cl++) {
96 /* skip i-only caches */
97 if (cache->arch[cl].ctype < CACHE_LEVEL_HAS_D_CACHE)
100 armv8_cache_d_inner_flush_level(dpm, &cache->arch[cl].d_u_size, cl);
103 retval = dpm->finish(dpm);
107 LOG_ERROR("clean invalidate failed");
113 int armv8_cache_d_inner_flush_virt(struct armv8_common *armv8, target_addr_t va, size_t size)
115 struct arm_dpm *dpm = armv8->arm.dpm;
116 struct armv8_cache_common *armv8_cache = &armv8->armv8_mmu.armv8_cache;
117 uint64_t linelen = armv8_cache->dminline;
118 target_addr_t va_line, va_end;
121 retval = armv8_d_cache_sanity_check(armv8);
122 if (retval != ERROR_OK)
125 retval = dpm->prepare(dpm);
126 if (retval != ERROR_OK)
129 va_line = va & (-linelen);
132 while (va_line < va_end) {
134 /* Aarch32: DCCIMVAC: ARMV4_5_MCR(15, 0, 0, 7, 14, 1) */
135 retval = dpm->instr_write_data_r0_64(dpm,
136 ARMV8_SYS(SYSTEM_DCCIVAC, 0), va_line);
137 if (retval != ERROR_OK)
146 LOG_ERROR("d-cache invalidate failed");
152 int armv8_cache_i_inner_inval_virt(struct armv8_common *armv8, target_addr_t va, size_t size)
154 struct arm_dpm *dpm = armv8->arm.dpm;
155 struct armv8_cache_common *armv8_cache = &armv8->armv8_mmu.armv8_cache;
156 uint64_t linelen = armv8_cache->iminline;
157 target_addr_t va_line, va_end;
160 retval = armv8_i_cache_sanity_check(armv8);
161 if (retval != ERROR_OK)
164 retval = dpm->prepare(dpm);
165 if (retval != ERROR_OK)
168 va_line = va & (-linelen);
171 while (va_line < va_end) {
172 /* IC IVAU - Invalidate instruction cache by VA to PoU. */
173 retval = dpm->instr_write_data_r0_64(dpm,
174 ARMV8_SYS(SYSTEM_ICIVAU, 0), va_line);
175 if (retval != ERROR_OK)
184 LOG_ERROR("d-cache invalidate failed");
190 static int armv8_handle_inner_cache_info_command(struct command_context *cmd_ctx,
191 struct armv8_cache_common *armv8_cache)
195 if (armv8_cache->info == -1) {
196 command_print(cmd_ctx, "cache not yet identified");
200 for (cl = 0; cl < armv8_cache->loc; cl++) {
201 struct armv8_arch_cache *arch = &(armv8_cache->arch[cl]);
203 if (arch->ctype & 1) {
204 command_print(cmd_ctx,
205 "L%d I-Cache: linelen %" PRIi32
206 ", associativity %" PRIi32
208 ", cachesize %" PRId32 " KBytes",
210 arch->i_size.linelen,
211 arch->i_size.associativity,
213 arch->i_size.cachesize);
216 if (arch->ctype >= 2) {
217 command_print(cmd_ctx,
218 "L%d D-Cache: linelen %" PRIi32
219 ", associativity %" PRIi32
221 ", cachesize %" PRId32 " KBytes",
223 arch->d_u_size.linelen,
224 arch->d_u_size.associativity,
225 arch->d_u_size.nsets,
226 arch->d_u_size.cachesize);
/* Clean and invalidate all inner d/u cache levels of a single target. */
static int _armv8_flush_all_data(struct target *target)
{
	return armv8_cache_d_inner_clean_inval_all(target_to_armv8(target));
}
238 static int armv8_flush_all_data(struct target *target)
240 int retval = ERROR_FAIL;
241 /* check that armv8_cache is correctly identify */
242 struct armv8_common *armv8 = target_to_armv8(target);
243 if (armv8->armv8_mmu.armv8_cache.info == -1) {
244 LOG_ERROR("trying to flush un-identified cache");
249 /* look if all the other target have been flushed in order to flush level
251 struct target_list *head;
254 while (head != (struct target_list *)NULL) {
256 if (curr->state == TARGET_HALTED) {
257 LOG_INFO("Wait flushing data l1 on core %" PRId32, curr->coreid);
258 retval = _armv8_flush_all_data(curr);
263 retval = _armv8_flush_all_data(target);
267 static int get_cache_info(struct arm_dpm *dpm, int cl, int ct, uint32_t *cache_reg)
269 struct armv8_common *armv8 = dpm->arm->arch_info;
270 int retval = ERROR_OK;
272 /* select cache level */
273 retval = dpm->instr_write_data_r0(dpm,
274 armv8_opcode(armv8, WRITE_REG_CSSELR),
275 (cl << 1) | (ct == 1 ? 1 : 0));
276 if (retval != ERROR_OK)
279 retval = dpm->instr_read_data_r0(dpm,
280 armv8_opcode(armv8, READ_REG_CCSIDR),
286 static struct armv8_cachesize decode_cache_reg(uint32_t cache_reg)
288 struct armv8_cachesize size;
291 size.linelen = 16 << (cache_reg & 0x7);
292 size.associativity = ((cache_reg >> 3) & 0x3ff) + 1;
293 size.nsets = ((cache_reg >> 13) & 0x7fff) + 1;
294 size.cachesize = size.linelen * size.associativity * size.nsets / 1024;
296 /* compute info for set way operation on cache */
297 size.index_shift = (cache_reg & 0x7) + 4;
298 size.index = (cache_reg >> 13) & 0x7fff;
299 size.way = ((cache_reg >> 3) & 0x3ff);
301 while (((size.way << i) & 0x80000000) == 0)
308 int armv8_identify_cache(struct armv8_common *armv8)
310 /* read cache descriptor */
311 int retval = ERROR_FAIL;
312 struct arm_dpm *dpm = armv8->arm.dpm;
313 uint32_t csselr, clidr, ctr;
316 struct armv8_cache_common *cache = &(armv8->armv8_mmu.armv8_cache);
318 retval = dpm->prepare(dpm);
319 if (retval != ERROR_OK)
323 retval = dpm->instr_read_data_r0(dpm,
324 armv8_opcode(armv8, READ_REG_CTR), &ctr);
325 if (retval != ERROR_OK)
328 cache->iminline = 4UL << (ctr & 0xf);
329 cache->dminline = 4UL << ((ctr & 0xf0000) >> 16);
330 LOG_DEBUG("ctr %" PRIx32 " ctr.iminline %" PRId32 " ctr.dminline %" PRId32,
331 ctr, cache->iminline, cache->dminline);
334 retval = dpm->instr_read_data_r0(dpm,
335 armv8_opcode(armv8, READ_REG_CLIDR), &clidr);
336 if (retval != ERROR_OK)
339 cache->loc = (clidr & 0x7000000) >> 24;
340 LOG_DEBUG("Number of cache levels to PoC %" PRId32, cache->loc);
342 /* retrieve selected cache for later restore
343 * MRC p15, 2,<Rd>, c0, c0, 0; Read CSSELR */
344 retval = dpm->instr_read_data_r0(dpm,
345 armv8_opcode(armv8, READ_REG_CSSELR), &csselr);
346 if (retval != ERROR_OK)
349 /* retrieve all available inner caches */
350 for (cl = 0; cl < cache->loc; clidr >>= 3, cl++) {
352 /* isolate cache type at current level */
355 /* skip reserved values */
356 if (ctype > CACHE_LEVEL_HAS_UNIFIED_CACHE)
359 /* separate d or unified d/i cache at this level ? */
360 if (ctype & (CACHE_LEVEL_HAS_UNIFIED_CACHE | CACHE_LEVEL_HAS_D_CACHE)) {
361 /* retrieve d-cache info */
362 retval = get_cache_info(dpm, cl, 0, &cache_reg);
363 if (retval != ERROR_OK)
365 cache->arch[cl].d_u_size = decode_cache_reg(cache_reg);
367 LOG_DEBUG("data/unified cache index %d << %d, way %d << %d",
368 cache->arch[cl].d_u_size.index,
369 cache->arch[cl].d_u_size.index_shift,
370 cache->arch[cl].d_u_size.way,
371 cache->arch[cl].d_u_size.way_shift);
373 LOG_DEBUG("cacheline %d bytes %d KBytes asso %d ways",
374 cache->arch[cl].d_u_size.linelen,
375 cache->arch[cl].d_u_size.cachesize,
376 cache->arch[cl].d_u_size.associativity);
379 /* separate i-cache at this level ? */
380 if (ctype & CACHE_LEVEL_HAS_I_CACHE) {
381 /* retrieve i-cache info */
382 retval = get_cache_info(dpm, cl, 1, &cache_reg);
383 if (retval != ERROR_OK)
385 cache->arch[cl].i_size = decode_cache_reg(cache_reg);
387 LOG_DEBUG("instruction cache index %d << %d, way %d << %d",
388 cache->arch[cl].i_size.index,
389 cache->arch[cl].i_size.index_shift,
390 cache->arch[cl].i_size.way,
391 cache->arch[cl].i_size.way_shift);
393 LOG_DEBUG("cacheline %d bytes %d KBytes asso %d ways",
394 cache->arch[cl].i_size.linelen,
395 cache->arch[cl].i_size.cachesize,
396 cache->arch[cl].i_size.associativity);
399 cache->arch[cl].ctype = ctype;
402 /* restore selected cache */
403 dpm->instr_write_data_r0(dpm,
404 armv8_opcode(armv8, WRITE_REG_CSSELR), csselr);
405 if (retval != ERROR_OK)
408 armv8->armv8_mmu.armv8_cache.info = 1;
410 /* if no l2 cache initialize l1 data cache flush function function */
411 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache == NULL) {
412 armv8->armv8_mmu.armv8_cache.display_cache_info =
413 armv8_handle_inner_cache_info_command;
414 armv8->armv8_mmu.armv8_cache.flush_all_data_cache =
415 armv8_flush_all_data;