diff --git a/common/hash.c b/common/hash.c
index dcf016d8a78119413cb8c0ef1bbf556b84b88d1e..ef146513a076f4b8d9da4adcf231361404350450 100644
--- a/common/hash.c
+++ b/common/hash.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Copyright (c) 2012 The Chromium OS Authors.
  *
@@ -6,8 +7,6 @@
  *
  * (C) Copyright 2000
  * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
- *
- * SPDX-License-Identifier:    GPL-2.0+
  */
 
 #ifndef USE_HOSTCC
@@ -30,7 +29,7 @@
 #include <u-boot/sha256.h>
 #include <u-boot/md5.h>
 
-#ifdef CONFIG_SHA1
+#if defined(CONFIG_SHA1) && !defined(CONFIG_SHA_PROG_HW_ACCEL)
 static int hash_init_sha1(struct hash_algo *algo, void **ctxp)
 {
        sha1_context *ctx = malloc(sizeof(sha1_context));
@@ -58,7 +57,7 @@ static int hash_finish_sha1(struct hash_algo *algo, void *ctx, void *dest_buf,
 }
 #endif
 
-#ifdef CONFIG_SHA256
+#if defined(CONFIG_SHA256) && !defined(CONFIG_SHA_PROG_HW_ACCEL)
 static int hash_init_sha256(struct hash_algo *algo, void **ctxp)
 {
        sha256_context *ctx = malloc(sizeof(sha256_context));
@@ -113,68 +112,61 @@ static int hash_finish_crc32(struct hash_algo *algo, void *ctx, void *dest_buf,
 }
 
 /*
- * These are the hash algorithms we support. Chips which support accelerated
- * crypto could perhaps add named version of these algorithms here. Note that
- * algorithm names must be in lower case.
+ * These are the hash algorithms we support.  If hardware acceleration is
+ * enabled, we will use that; otherwise a software version of the algorithm
+ * is used.  Note that algorithm names must be in lower case.
  */
 static struct hash_algo hash_algo[] = {
-       /*
-        * CONFIG_SHA_HW_ACCEL is defined if hardware acceleration is
-        * available.
-        */
-#ifdef CONFIG_SHA_HW_ACCEL
+#ifdef CONFIG_SHA1
        {
-               "sha1",
-               SHA1_SUM_LEN,
-               hw_sha1,
-               CHUNKSZ_SHA1,
-#ifdef CONFIG_SHA_PROG_HW_ACCEL
-               hw_sha_init,
-               hw_sha_update,
-               hw_sha_finish,
+               .name           = "sha1",
+               .digest_size    = SHA1_SUM_LEN,
+               .chunk_size     = CHUNKSZ_SHA1,
+#ifdef CONFIG_SHA_HW_ACCEL
+               .hash_func_ws   = hw_sha1,
+#else
+               .hash_func_ws   = sha1_csum_wd,
 #endif
-       }, {
-               "sha256",
-               SHA256_SUM_LEN,
-               hw_sha256,
-               CHUNKSZ_SHA256,
 #ifdef CONFIG_SHA_PROG_HW_ACCEL
-               hw_sha_init,
-               hw_sha_update,
-               hw_sha_finish,
-#endif
-       },
+               .hash_init      = hw_sha_init,
+               .hash_update    = hw_sha_update,
+               .hash_finish    = hw_sha_finish,
+#else
+               .hash_init      = hash_init_sha1,
+               .hash_update    = hash_update_sha1,
+               .hash_finish    = hash_finish_sha1,
 #endif
-#ifdef CONFIG_SHA1
-       {
-               "sha1",
-               SHA1_SUM_LEN,
-               sha1_csum_wd,
-               CHUNKSZ_SHA1,
-               hash_init_sha1,
-               hash_update_sha1,
-               hash_finish_sha1,
        },
 #endif
 #ifdef CONFIG_SHA256
        {
-               "sha256",
-               SHA256_SUM_LEN,
-               sha256_csum_wd,
-               CHUNKSZ_SHA256,
-               hash_init_sha256,
-               hash_update_sha256,
-               hash_finish_sha256,
+               .name           = "sha256",
+               .digest_size    = SHA256_SUM_LEN,
+               .chunk_size     = CHUNKSZ_SHA256,
+#ifdef CONFIG_SHA_HW_ACCEL
+               .hash_func_ws   = hw_sha256,
+#else
+               .hash_func_ws   = sha256_csum_wd,
+#endif
+#ifdef CONFIG_SHA_PROG_HW_ACCEL
+               .hash_init      = hw_sha_init,
+               .hash_update    = hw_sha_update,
+               .hash_finish    = hw_sha_finish,
+#else
+               .hash_init      = hash_init_sha256,
+               .hash_update    = hash_update_sha256,
+               .hash_finish    = hash_finish_sha256,
+#endif
        },
 #endif
        {
-               "crc32",
-               4,
-               crc32_wd_buf,
-               CHUNKSZ_CRC32,
-               hash_init_crc32,
-               hash_update_crc32,
-               hash_finish_crc32,
+               .name           = "crc32",
+               .digest_size    = 4,
+               .chunk_size     = CHUNKSZ_CRC32,
+               .hash_func_ws   = crc32_wd_buf,
+               .hash_init      = hash_init_crc32,
+               .hash_update    = hash_update_crc32,
+               .hash_finish    = hash_finish_crc32,
        },
 };
 
@@ -397,7 +389,7 @@ int hash_command(const char *algo_name, int flags, cmd_tbl_t *cmdtp, int flag,
 
        if (multi_hash()) {
                struct hash_algo *algo;
-               uint8_t output[HASH_MAX_DIGEST_SIZE];
+               u8 *output;
                uint8_t vsum[HASH_MAX_DIGEST_SIZE];
                void *buf;
 
@@ -412,6 +404,9 @@ int hash_command(const char *algo_name, int flags, cmd_tbl_t *cmdtp, int flag,
                        return 1;
                }
 
+               output = memalign(ARCH_DMA_MINALIGN,
+                                 sizeof(uint32_t) * HASH_MAX_DIGEST_SIZE);
+
                buf = map_sysmem(addr, len);
                algo->hash_func_ws(buf, len, output, algo->chunk_size);
                unmap_sysmem(buf);
@@ -447,6 +442,8 @@ int hash_command(const char *algo_name, int flags, cmd_tbl_t *cmdtp, int flag,
                                store_result(algo, output, *argv,
                                        flags & HASH_FLAG_ENV);
                        }
+               unmap_sysmem(output);
+
                }
 
        /* Horrible code size hack for boards that just want crc32 */
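
A minimal usage sketch (not part of the patch above): how a caller might drive
the progressive hash_init/hash_update/hash_finish ops that this change wires up
for both the hardware-accelerated and software paths. It assumes the
hash_lookup_algo() helper and struct hash_algo layout from include/hash.h;
error handling is trimmed for brevity and example_sha256() is a hypothetical
name.

#include <common.h>
#include <hash.h>

static int example_sha256(const void *data, unsigned int len, u8 *digest)
{
	struct hash_algo *algo;
	void *ctx;
	int ret;

	/* Picks the hw_sha_* or hash_*_sha256 ops selected at build time */
	ret = hash_lookup_algo("sha256", &algo);
	if (ret)
		return ret;

	ret = algo->hash_init(algo, &ctx);
	if (ret)
		return ret;

	/* Single chunk here; pass is_last = 1 on the final update */
	ret = algo->hash_update(algo, ctx, data, len, 1);
	if (ret)
		return ret;

	/* Writes algo->digest_size bytes into digest */
	return algo->hash_finish(algo, ctx, digest, algo->digest_size);
}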