From ba1e344b74257a65d21b7b82f99df31dcf3f5a7c Mon Sep 17 00:00:00 2001
From: Lionel VITTE <lionel.vitte@st.com>
Date: Fri, 8 Nov 2019 16:52:38 +0100
Subject: [PATCH 03/31] ARM stm32mp1 r3 CRYPTO

---
 crypto/testmgr.c                   |   7 +
 drivers/crypto/stm32/Kconfig       |   1 +
 drivers/crypto/stm32/Makefile      |   2 +-
 drivers/crypto/stm32/stm32-crc32.c | 441 +++++++++++++++++++++++++++++++++++++
 drivers/crypto/stm32/stm32-cryp.c  |  72 ++++--
 drivers/crypto/stm32/stm32-hash.c  |   6 +-
 drivers/crypto/stm32/stm32_crc32.c | 387 --------------------------------
 7 files changed, 502 insertions(+), 414 deletions(-)
 create mode 100644 drivers/crypto/stm32/stm32-crc32.c
 delete mode 100644 drivers/crypto/stm32/stm32_crc32.c

diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 13cb2ea..35a4ac5 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -1918,6 +1918,13 @@ static int alg_test_crc32c(const struct alg_test_desc *desc,
 	shash->tfm = tfm;
 	shash->flags = 0;
 
+	err = crypto_shash_init(shash);
+	if (err) {
+		printk(KERN_ERR "alg: crc32c: init failed for "
+		       "%s: %d\n", driver, err);
+		break;
+	}
+
 	*ctx = le32_to_cpu(420553207);
 	err = crypto_shash_final(shash, (u8 *)&val);
 	if (err) {
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index 63aa78c..4491e21 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -24,6 +24,7 @@ config CRYPTO_DEV_STM32_CRYP
 	depends on ARCH_STM32
 	select CRYPTO_HASH
 	select CRYPTO_ENGINE
+	select CRYPTO_DES
 	help
 	  This enables support for the CRYP (AES/DES/TDES) hw accelerator which
 	  can be found on STMicroelectronics STM32 SOC.
diff --git a/drivers/crypto/stm32/Makefile b/drivers/crypto/stm32/Makefile
index 53d1bb9..23ce3bc 100644
--- a/drivers/crypto/stm32/Makefile
+++ b/drivers/crypto/stm32/Makefile
@@ -1,3 +1,3 @@
-obj-$(CONFIG_CRYPTO_DEV_STM32_CRC) += stm32_crc32.o
+obj-$(CONFIG_CRYPTO_DEV_STM32_CRC) += stm32-crc32.o
 obj-$(CONFIG_CRYPTO_DEV_STM32_HASH) += stm32-hash.o
 obj-$(CONFIG_CRYPTO_DEV_STM32_CRYP) += stm32-cryp.o
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
new file mode 100644
index 0000000..2597710
--- /dev/null
+++ b/drivers/crypto/stm32/stm32-crc32.c
@@ -0,0 +1,441 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2017
+ * Author: Fabien Dessenne <fabien.dessenne@st.com>
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/bitrev.h>
+#include <linux/clk.h>
+#include <linux/crc32poly.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <crypto/internal/hash.h>
+
+#include <asm/unaligned.h>
+
+#define DRIVER_NAME             "stm32-crc32"
+#define CHKSUM_DIGEST_SIZE      4
+#define CHKSUM_BLOCK_SIZE       1
+
+/* Registers */
+#define CRC_DR                  0x00000000
+#define CRC_CR                  0x00000008
+#define CRC_INIT                0x00000010
+#define CRC_POL                 0x00000014
+
+/* Registers values */
+#define CRC_CR_RESET            BIT(0)
+#define CRC_CR_REV_IN_W         (BIT(6) | BIT(5))
+#define CRC_CR_REV_IN_B         BIT(5)
+#define CRC_CR_REV_OUT          BIT(7)
+#define CRC32C_INIT_DEFAULT     0xFFFFFFFF
+
+#define CRC_AUTOSUSPEND_DELAY   50
+
+struct stm32_crc {
+	struct list_head list;
+	struct device *dev;
+	void __iomem *regs;
+	struct clk *clk;
+	u8 pending_data[sizeof(u32)];
+	size_t nb_pending_bytes;
+};
+
+struct stm32_crc_list {
+	struct list_head dev_list;
+	spinlock_t lock; /* protect dev_list */
+};
+
+static struct stm32_crc_list crc_list = {
+	.dev_list = LIST_HEAD_INIT(crc_list.dev_list),
+	.lock = __SPIN_LOCK_UNLOCKED(crc_list.lock),
+};
+
+struct stm32_crc_ctx {
+	u32 key;
+	u32 poly;
+};
+
+struct stm32_crc_desc_ctx {
+	u32 partial; /* crc32c: partial in first 4 bytes of that struct */
+	struct stm32_crc *crc;
+};
+
+struct stm32_crc_algs_info {
+	struct shash_alg algs[2];
+	unsigned int registered;
+};
+
+static int stm32_crc32_cra_init(struct crypto_tfm *tfm)
+{
+	struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
+
+	mctx->key = 0;
+	mctx->poly = CRC32_POLY_LE;
+	return 0;
+}
+
+static int stm32_crc32c_cra_init(struct crypto_tfm *tfm)
+{
+	struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
+
+	mctx->key = CRC32C_INIT_DEFAULT;
+	mctx->poly = CRC32C_POLY_LE;
+	return 0;
+}
+
+static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key,
+			    unsigned int keylen)
+{
+	struct stm32_crc_ctx *mctx = crypto_shash_ctx(tfm);
+
+	if (keylen != sizeof(u32)) {
+		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	mctx->key = get_unaligned_le32(key);
+	return 0;
+}
+
+static struct stm32_crc *stm32_crc_find_dev(struct stm32_crc_desc_ctx *ctx)
+{
+	struct stm32_crc *crc;
+
+	spin_lock_bh(&crc_list.lock);
+	crc = list_first_entry(&crc_list.dev_list, struct stm32_crc, list);
+	list_move_tail(&crc->list, &crc_list.dev_list);
+	ctx->crc = crc;
+	spin_unlock_bh(&crc_list.lock);
+
+	return crc;
+}
+
+static int stm32_crc_init(struct shash_desc *desc)
+{
+	struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
+	struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
+
+	if (!stm32_crc_find_dev(ctx))
+		return -ENODEV;
+
+	pm_runtime_get_sync(ctx->crc->dev);
+
+	/* Reset, set key, poly and configure in bit reverse mode */
+	writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT);
+	writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL);
+	writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_W | CRC_CR_REV_OUT,
+		       ctx->crc->regs + CRC_CR);
+
+	/* Store partial result */
+	ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR);
+	ctx->crc->nb_pending_bytes = 0;
+
+	pm_runtime_mark_last_busy(ctx->crc->dev);
+	pm_runtime_put_autosuspend(ctx->crc->dev);
+
+	return 0;
+}
+
+static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
+			    unsigned int length)
+{
+	struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
+	struct stm32_crc *crc = ctx->crc;
+	u32 *d32;
+	unsigned int i;
+
+	pm_runtime_get_sync(crc->dev);
+
+	if (unlikely(crc->nb_pending_bytes)) {
+		while (crc->nb_pending_bytes != sizeof(u32) && length) {
+			/* Fill in pending data */
+			crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
+			length--;
+		}
+
+		if (crc->nb_pending_bytes == sizeof(u32)) {
+			/* Process completed pending data */
+			writel_relaxed(*(u32 *)crc->pending_data,
+				       crc->regs + CRC_DR);
+			crc->nb_pending_bytes = 0;
+		}
+	}
+
+	d32 = (u32 *)d8;
+	for (i = 0; i < length >> 2; i++)
+		/* Process 32 bits data */
+		writel_relaxed(*(d32++), crc->regs + CRC_DR);
+
+	/* Store partial result */
+	ctx->partial = readl_relaxed(crc->regs + CRC_DR);
+
+	pm_runtime_mark_last_busy(crc->dev);
+	pm_runtime_put_autosuspend(crc->dev);
+
+	/* Check for pending data (non 32 bits) */
+	length &= 3;
+	if (likely(!length))
+		return 0;
+
+	if ((crc->nb_pending_bytes + length) >= sizeof(u32)) {
+		/* Shall not happen */
+		dev_err(crc->dev, "Pending data overflow\n");
+		return -EINVAL;
+	}
+
+	d8 = (const u8 *)d32;
+	for (i = 0; i < length; i++)
+		/* Store pending data */
+		crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
+
+	return 0;
+}
+
+static int stm32_crc_final(struct shash_desc *desc, u8 *out)
+{
+	struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
+	struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
+	struct stm32_crc *crc = ctx->crc;
+	unsigned int i = 0;
+
+	if (unlikely(crc->nb_pending_bytes)) {
+		pm_runtime_get_sync(crc->dev);
+		/* Process pending data */
+		writel_relaxed(CRC_CR_REV_IN_B | CRC_CR_REV_OUT,
+			       ctx->crc->regs + CRC_CR);
+		while (i != crc->nb_pending_bytes) {
+			writeb_relaxed(crc->pending_data[i++],
+				       crc->regs + CRC_DR);
+		}
+
+		crc->nb_pending_bytes = 0;
+		ctx->partial = readl_relaxed(crc->regs + CRC_DR);
+
+		pm_runtime_mark_last_busy(crc->dev);
+		pm_runtime_put_autosuspend(crc->dev);
+	}
+
+	/* Send computed CRC */
+	put_unaligned_le32(mctx->poly == CRC32C_POLY_LE ?
+			   ~ctx->partial : ctx->partial, out);
+
+	return 0;
+}
+
+static int stm32_crc_finup(struct shash_desc *desc, const u8 *data,
+			   unsigned int length, u8 *out)
+{
+	return stm32_crc_update(desc, data, length) ?:
+	       stm32_crc_final(desc, out);
+}
+
+static int stm32_crc_digest(struct shash_desc *desc, const u8 *data,
+			    unsigned int length, u8 *out)
+{
+	return stm32_crc_init(desc) ?: stm32_crc_finup(desc, data, length, out);
+}
+
+static struct stm32_crc_algs_info crc_algs[] = {
+	{
+		.algs = {
+			/* CRC-32 */
+			{
+				.setkey = stm32_crc_setkey,
+				.init = stm32_crc_init,
+				.update = stm32_crc_update,
+				.final = stm32_crc_final,
+				.finup = stm32_crc_finup,
+				.digest = stm32_crc_digest,
+				.descsize =
+					sizeof(struct stm32_crc_desc_ctx),
+				.digestsize = CHKSUM_DIGEST_SIZE,
+				.base = {
+					.cra_name = "crc32",
+					.cra_driver_name = DRIVER_NAME,
+					.cra_priority = 200,
+					.cra_flags =
+						CRYPTO_ALG_OPTIONAL_KEY,
+					.cra_blocksize =
+						CHKSUM_BLOCK_SIZE,
+					.cra_alignmask = 3,
+					.cra_ctxsize =
+						sizeof(struct stm32_crc_ctx),
+					.cra_module = THIS_MODULE,
+					.cra_init =
+						stm32_crc32_cra_init,
+				}
+			},
+			/* CRC-32Castagnoli */
+			{
+				.setkey = stm32_crc_setkey,
+				.init = stm32_crc_init,
+				.update = stm32_crc_update,
+				.final = stm32_crc_final,
+				.finup = stm32_crc_finup,
+				.digest = stm32_crc_digest,
+				.descsize =
+					sizeof(struct stm32_crc_desc_ctx),
+				.digestsize = CHKSUM_DIGEST_SIZE,
+				.base = {
+					.cra_name = "crc32c",
+					.cra_driver_name = DRIVER_NAME,
+					.cra_priority = 200,
+					.cra_flags =
+						CRYPTO_ALG_OPTIONAL_KEY,
+					.cra_blocksize =
+						CHKSUM_BLOCK_SIZE,
+					.cra_alignmask = 3,
+					.cra_ctxsize =
+						sizeof(struct stm32_crc_ctx),
+					.cra_module = THIS_MODULE,
+					.cra_init =
+						stm32_crc32c_cra_init,
+				}
+			}
+		}
+	}
+};
+
+static int stm32_crc_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct stm32_crc *crc;
+	struct resource *res;
+	int ret;
+
+	crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL);
+	if (!crc)
+		return -ENOMEM;
+
+	crc->dev = dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	crc->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(crc->regs)) {
+		dev_err(dev, "Cannot map CRC IO\n");
+		return PTR_ERR(crc->regs);
+	}
+
+	crc->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(crc->clk)) {
+		dev_err(dev, "Could not get clock\n");
+		return PTR_ERR(crc->clk);
+	}
+
+	ret = clk_prepare_enable(crc->clk);
+	if (ret) {
+		dev_err(crc->dev, "Failed to enable clock\n");
+		return ret;
+	}
+
+	pm_runtime_set_autosuspend_delay(dev, CRC_AUTOSUSPEND_DELAY);
+	pm_runtime_use_autosuspend(dev);
+
+	pm_runtime_get_noresume(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
+	platform_set_drvdata(pdev, crc);
+
+	spin_lock(&crc_list.lock);
+	list_add(&crc->list, &crc_list.dev_list);
+	spin_unlock(&crc_list.lock);
+
+	if (!crc_algs->registered) {
+		ret = crypto_register_shashes(crc_algs->algs,
+					      ARRAY_SIZE(crc_algs->algs));
+
+		if (ret) {
+			dev_err(dev, "Failed to register\n");
+			clk_disable_unprepare(crc->clk);
+			return ret;
+		}
+	}
+
+	crc_algs->registered++;
+	dev_info(dev, "Initialized\n");
+	pm_runtime_put_sync(dev);
+
+	return 0;
+}
+
+static int stm32_crc_remove(struct platform_device *pdev)
+{
+	struct stm32_crc *crc = platform_get_drvdata(pdev);
+	int ret = pm_runtime_get_sync(crc->dev);
+
+	if (ret < 0)
+		return ret;
+
+	spin_lock(&crc_list.lock);
+	list_del(&crc->list);
+	spin_unlock(&crc_list.lock);
+
+	if (!--crc_algs->registered)
+		crypto_unregister_shashes(crc_algs->algs,
+					  ARRAY_SIZE(crc_algs->algs));
+
+	pm_runtime_disable(crc->dev);
+	pm_runtime_put_noidle(crc->dev);
+	clk_disable_unprepare(crc->clk);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int stm32_crc_runtime_suspend(struct device *dev)
+{
+	struct stm32_crc *crc = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(crc->clk);
+
+	return 0;
+}
+
+static int stm32_crc_runtime_resume(struct device *dev)
+{
+	struct stm32_crc *crc = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_prepare_enable(crc->clk);
+	if (ret) {
+		dev_err(crc->dev, "Failed to prepare_enable clock\n");
+		return ret;
+	}
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm32_crc_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				pm_runtime_force_resume)
+	SET_RUNTIME_PM_OPS(stm32_crc_runtime_suspend,
+			   stm32_crc_runtime_resume, NULL)
+};
+
+static const struct of_device_id stm32_dt_ids[] = {
+	{ .compatible = "st,stm32f7-crc", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, stm32_dt_ids);
+
+static struct platform_driver stm32_crc_driver = {
+	.probe  = stm32_crc_probe,
+	.remove = stm32_crc_remove,
+	.driver = {
+		.name           = DRIVER_NAME,
+		.pm             = &stm32_crc_pm_ops,
+		.of_match_table = stm32_dt_ids,
+	},
+};
+
+module_platform_driver(stm32_crc_driver);
+
+MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
+MODULE_DESCRIPTION("STMicrolectronics STM32 CRC32 hardware driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 23b0b7b..cd8c439 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -137,7 +137,6 @@ struct stm32_cryp {
 
 	struct crypto_engine *engine;
 
-	struct mutex lock; /* protects req / areq */
 	struct ablkcipher_request *req;
 	struct aead_request *areq;
 
@@ -394,6 +393,23 @@ static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, u32 *iv)
 	}
 }
 
+static void stm32_cryp_get_iv(struct stm32_cryp *cryp)
+{
+	struct ablkcipher_request *req = cryp->req;
+	u32 *tmp = req->info;
+
+	if (!tmp)
+		return;
+
+	*tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0LR));
+	*tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0RR));
+
+	if (is_aes(cryp)) {
+		*tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1LR));
+		*tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1RR));
+	}
+}
+
 static void stm32_cryp_hw_write_key(struct stm32_cryp *c)
 {
 	unsigned int i;
@@ -623,6 +639,9 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
 		/* Phase 4 : output tag */
 		err = stm32_cryp_read_auth_tag(cryp);
 
+	if (!err && (!(is_gcm(cryp) || is_ccm(cryp))))
+		stm32_cryp_get_iv(cryp);
+
 	if (cryp->sgs_copied) {
 		void *buf_in, *buf_out;
 		int pages, len;
@@ -645,18 +664,13 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
 	pm_runtime_mark_last_busy(cryp->dev);
 	pm_runtime_put_autosuspend(cryp->dev);
 
-	if (is_gcm(cryp) || is_ccm(cryp)) {
+	if (is_gcm(cryp) || is_ccm(cryp))
 		crypto_finalize_aead_request(cryp->engine, cryp->areq, err);
-		cryp->areq = NULL;
-	} else {
+	else
 		crypto_finalize_ablkcipher_request(cryp->engine, cryp->req,
 						   err);
-		cryp->req = NULL;
-	}
 
 	memset(cryp->ctx->key, 0, cryp->ctx->keylen);
-
-	mutex_unlock(&cryp->lock);
 }
 
 static int stm32_cryp_cpu_start(struct stm32_cryp *cryp)
@@ -753,19 +767,37 @@ static int stm32_cryp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 static int stm32_cryp_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 				 unsigned int keylen)
 {
+	u32 tmp[DES_EXPKEY_WORDS];
+
 	if (keylen != DES_KEY_SIZE)
 		return -EINVAL;
-	else
-		return stm32_cryp_setkey(tfm, key, keylen);
+
+	if ((crypto_ablkcipher_get_flags(tfm) &
+	     CRYPTO_TFM_REQ_WEAK_KEY) &&
+	    unlikely(!des_ekey(tmp, key))) {
+		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
+		return -EINVAL;
+	}
+
+	return stm32_cryp_setkey(tfm, key, keylen);
 }
 
 static int stm32_cryp_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 				  unsigned int keylen)
 {
+	u32 tmp[DES_EXPKEY_WORDS];
+
 	if (keylen != (3 * DES_KEY_SIZE))
 		return -EINVAL;
-	else
-		return stm32_cryp_setkey(tfm, key, keylen);
+
+	if ((crypto_ablkcipher_get_flags(tfm) &
+	     CRYPTO_TFM_REQ_WEAK_KEY) &&
+	    unlikely(!des_ekey(tmp, key))) {
+		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
+		return -EINVAL;
+	}
+
+	return stm32_cryp_setkey(tfm, key, keylen);
 }
 
 static int stm32_cryp_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key,
@@ -917,8 +949,6 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
 	if (!cryp)
 		return -ENODEV;
 
-	mutex_lock(&cryp->lock);
-
 	rctx = req ? ablkcipher_request_ctx(req) : aead_request_ctx(areq);
 	rctx->mode &= FLG_MODE_MASK;
 
@@ -930,6 +960,7 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
 
 	if (req) {
 		cryp->req = req;
+		cryp->areq = NULL;
 		cryp->total_in = req->nbytes;
 		cryp->total_out = cryp->total_in;
 	} else {
@@ -955,6 +986,7 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
 		 * <---------- total_out ----------------->
 		 */
 		cryp->areq = areq;
+		cryp->req = NULL;
 		cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(areq));
 		cryp->total_in = areq->assoclen + areq->cryptlen;
 		if (is_encrypt(cryp))
@@ -976,19 +1008,19 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
 	if (cryp->in_sg_len < 0) {
 		dev_err(cryp->dev, "Cannot get in_sg_len\n");
 		ret = cryp->in_sg_len;
-		goto out;
+		return ret;
 	}
 
 	cryp->out_sg_len = sg_nents_for_len(cryp->out_sg, cryp->total_out);
 	if (cryp->out_sg_len < 0) {
 		dev_err(cryp->dev, "Cannot get out_sg_len\n");
 		ret = cryp->out_sg_len;
-		goto out;
+		return ret;
 	}
 
 	ret = stm32_cryp_copy_sgs(cryp);
 	if (ret)
-		goto out;
+		return ret;
 
 	scatterwalk_start(&cryp->in_walk, cryp->in_sg);
 	scatterwalk_start(&cryp->out_walk, cryp->out_sg);
@@ -1000,10 +1032,6 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
 	}
 
 	ret = stm32_cryp_hw_init(cryp);
-out:
-	if (ret)
-		mutex_unlock(&cryp->lock);
-
 	return ret;
 }
 
@@ -1943,8 +1971,6 @@ static int stm32_cryp_probe(struct platform_device *pdev)
 
 	cryp->dev = dev;
 
-	mutex_init(&cryp->lock);
-
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	cryp->regs = devm_ioremap_resource(dev, res);
 	if (IS_ERR(cryp->regs))
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 641b110..990bbbf 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -463,8 +463,8 @@ static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
 
 	dma_async_issue_pending(hdev->dma_lch);
 
-	if (!wait_for_completion_interruptible_timeout(&hdev->dma_completion,
-						       msecs_to_jiffies(100)))
+	if (!wait_for_completion_timeout(&hdev->dma_completion,
+					 msecs_to_jiffies(100)))
 		err = -ETIMEDOUT;
 
 	if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
@@ -977,7 +977,7 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
 
 	pm_runtime_get_sync(hdev->dev);
 
-	while (!(stm32_hash_read(hdev, HASH_SR) & HASH_SR_DATA_INPUT_READY))
+	while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY))
 		cpu_relax();
 
 	rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,
diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32_crc32.c
deleted file mode 100644
index 29d2095..0000000
--- a/drivers/crypto/stm32/stm32_crc32.c
+++ /dev/null
@@ -1,387 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2017
- * Author: Fabien Dessenne <fabien.dessenne@st.com>
- * License terms: GNU General Public License (GPL), version 2
- */
-
-#include <linux/bitrev.h>
-#include <linux/clk.h>
-#include <linux/crc32poly.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-
-#include <crypto/internal/hash.h>
-
-#include <asm/unaligned.h>
-
-#define DRIVER_NAME             "stm32-crc32"
-#define CHKSUM_DIGEST_SIZE      4
-#define CHKSUM_BLOCK_SIZE       1
-
-/* Registers */
-#define CRC_DR                  0x00000000
-#define CRC_CR                  0x00000008
-#define CRC_INIT                0x00000010
-#define CRC_POL                 0x00000014
-
-/* Registers values */
-#define CRC_CR_RESET            BIT(0)
-#define CRC_CR_REVERSE          (BIT(7) | BIT(6) | BIT(5))
-#define CRC_INIT_DEFAULT        0xFFFFFFFF
-
-#define CRC_AUTOSUSPEND_DELAY   50
-
-struct stm32_crc {
-	struct list_head list;
-	struct device *dev;
-	void __iomem *regs;
-	struct clk *clk;
-	u8 pending_data[sizeof(u32)];
-	size_t nb_pending_bytes;
-};
-
-struct stm32_crc_list {
-	struct list_head dev_list;
-	spinlock_t lock; /* protect dev_list */
-};
-
-static struct stm32_crc_list crc_list = {
-	.dev_list = LIST_HEAD_INIT(crc_list.dev_list),
-	.lock = __SPIN_LOCK_UNLOCKED(crc_list.lock),
-};
-
-struct stm32_crc_ctx {
-	u32 key;
-	u32 poly;
-};
-
-struct stm32_crc_desc_ctx {
-	u32 partial; /* crc32c: partial in first 4 bytes of that struct */
-	struct stm32_crc *crc;
-};
-
-static int stm32_crc32_cra_init(struct crypto_tfm *tfm)
-{
-	struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
-
-	mctx->key = CRC_INIT_DEFAULT;
-	mctx->poly = CRC32_POLY_LE;
-	return 0;
-}
-
-static int stm32_crc32c_cra_init(struct crypto_tfm *tfm)
-{
-	struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
-
-	mctx->key = CRC_INIT_DEFAULT;
-	mctx->poly = CRC32C_POLY_LE;
-	return 0;
-}
-
-static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key,
-			    unsigned int keylen)
-{
-	struct stm32_crc_ctx *mctx = crypto_shash_ctx(tfm);
-
-	if (keylen != sizeof(u32)) {
-		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-		return -EINVAL;
-	}
-
-	mctx->key = get_unaligned_le32(key);
-	return 0;
-}
-
-static int stm32_crc_init(struct shash_desc *desc)
-{
-	struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
-	struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
-	struct stm32_crc *crc;
-
-	spin_lock_bh(&crc_list.lock);
-	list_for_each_entry(crc, &crc_list.dev_list, list) {
-		ctx->crc = crc;
-		break;
-	}
-	spin_unlock_bh(&crc_list.lock);
-
-	pm_runtime_get_sync(ctx->crc->dev);
-
-	/* Reset, set key, poly and configure in bit reverse mode */
-	writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT);
-	writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL);
-	writel_relaxed(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR);
-
-	/* Store partial result */
-	ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR);
-	ctx->crc->nb_pending_bytes = 0;
-
-	pm_runtime_mark_last_busy(ctx->crc->dev);
-	pm_runtime_put_autosuspend(ctx->crc->dev);
-
-	return 0;
-}
-
-static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
-			    unsigned int length)
-{
-	struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
-	struct stm32_crc *crc = ctx->crc;
-	u32 *d32;
-	unsigned int i;
-
-	pm_runtime_get_sync(crc->dev);
-
-	if (unlikely(crc->nb_pending_bytes)) {
-		while (crc->nb_pending_bytes != sizeof(u32) && length) {
-			/* Fill in pending data */
-			crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
-			length--;
-		}
-
-		if (crc->nb_pending_bytes == sizeof(u32)) {
-			/* Process completed pending data */
-			writel_relaxed(*(u32 *)crc->pending_data,
-				       crc->regs + CRC_DR);
-			crc->nb_pending_bytes = 0;
-		}
-	}
-
-	d32 = (u32 *)d8;
-	for (i = 0; i < length >> 2; i++)
-		/* Process 32 bits data */
-		writel_relaxed(*(d32++), crc->regs + CRC_DR);
-
-	/* Store partial result */
-	ctx->partial = readl_relaxed(crc->regs + CRC_DR);
-
-	pm_runtime_mark_last_busy(crc->dev);
-	pm_runtime_put_autosuspend(crc->dev);
-
-	/* Check for pending data (non 32 bits) */
-	length &= 3;
-	if (likely(!length))
-		return 0;
-
-	if ((crc->nb_pending_bytes + length) >= sizeof(u32)) {
-		/* Shall not happen */
-		dev_err(crc->dev, "Pending data overflow\n");
-		return -EINVAL;
-	}
-
-	d8 = (const u8 *)d32;
-	for (i = 0; i < length; i++)
-		/* Store pending data */
-		crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
-
-	return 0;
-}
-
-static int stm32_crc_final(struct shash_desc *desc, u8 *out)
-{
-	struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
-	struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
-
-	/* Send computed CRC */
-	put_unaligned_le32(mctx->poly == CRC32C_POLY_LE ?
-			   ~ctx->partial : ctx->partial, out);
-
-	return 0;
-}
-
-static int stm32_crc_finup(struct shash_desc *desc, const u8 *data,
-			   unsigned int length, u8 *out)
-{
-	return stm32_crc_update(desc, data, length) ?:
-	       stm32_crc_final(desc, out);
-}
-
-static int stm32_crc_digest(struct shash_desc *desc, const u8 *data,
-			    unsigned int length, u8 *out)
-{
-	return stm32_crc_init(desc) ?: stm32_crc_finup(desc, data, length, out);
-}
-
-static struct shash_alg algs[] = {
-	/* CRC-32 */
-	{
-		.setkey = stm32_crc_setkey,
-		.init = stm32_crc_init,
-		.update = stm32_crc_update,
-		.final = stm32_crc_final,
-		.finup = stm32_crc_finup,
-		.digest = stm32_crc_digest,
-		.descsize = sizeof(struct stm32_crc_desc_ctx),
-		.digestsize = CHKSUM_DIGEST_SIZE,
-		.base = {
-			.cra_name = "crc32",
-			.cra_driver_name = DRIVER_NAME,
-			.cra_priority = 200,
-			.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
-			.cra_blocksize = CHKSUM_BLOCK_SIZE,
-			.cra_alignmask = 3,
-			.cra_ctxsize = sizeof(struct stm32_crc_ctx),
-			.cra_module = THIS_MODULE,
-			.cra_init = stm32_crc32_cra_init,
-		}
-	},
-	/* CRC-32Castagnoli */
-	{
-		.setkey = stm32_crc_setkey,
-		.init = stm32_crc_init,
-		.update = stm32_crc_update,
-		.final = stm32_crc_final,
-		.finup = stm32_crc_finup,
-		.digest = stm32_crc_digest,
-		.descsize = sizeof(struct stm32_crc_desc_ctx),
-		.digestsize = CHKSUM_DIGEST_SIZE,
-		.base = {
-			.cra_name = "crc32c",
-			.cra_driver_name = DRIVER_NAME,
-			.cra_priority = 200,
-			.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
-			.cra_blocksize = CHKSUM_BLOCK_SIZE,
-			.cra_alignmask = 3,
-			.cra_ctxsize = sizeof(struct stm32_crc_ctx),
-			.cra_module = THIS_MODULE,
-			.cra_init = stm32_crc32c_cra_init,
-		}
-	}
-};
-
-static int stm32_crc_probe(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct stm32_crc *crc;
-	struct resource *res;
-	int ret;
-
-	crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL);
-	if (!crc)
-		return -ENOMEM;
-
-	crc->dev = dev;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	crc->regs = devm_ioremap_resource(dev, res);
-	if (IS_ERR(crc->regs)) {
-		dev_err(dev, "Cannot map CRC IO\n");
-		return PTR_ERR(crc->regs);
-	}
-
-	crc->clk = devm_clk_get(dev, NULL);
-	if (IS_ERR(crc->clk)) {
-		dev_err(dev, "Could not get clock\n");
-		return PTR_ERR(crc->clk);
-	}
-
-	ret = clk_prepare_enable(crc->clk);
-	if (ret) {
-		dev_err(crc->dev, "Failed to enable clock\n");
-		return ret;
-	}
-
-	pm_runtime_set_autosuspend_delay(dev, CRC_AUTOSUSPEND_DELAY);
-	pm_runtime_use_autosuspend(dev);
-
-	pm_runtime_get_noresume(dev);
-	pm_runtime_set_active(dev);
-	pm_runtime_enable(dev);
-
-	platform_set_drvdata(pdev, crc);
-
-	spin_lock(&crc_list.lock);
-	list_add(&crc->list, &crc_list.dev_list);
-	spin_unlock(&crc_list.lock);
-
-	ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
-	if (ret) {
-		dev_err(dev, "Failed to register\n");
-		clk_disable_unprepare(crc->clk);
-		return ret;
-	}
-
-	dev_info(dev, "Initialized\n");
-
-	pm_runtime_put_sync(dev);
-
-	return 0;
-}
-
-static int stm32_crc_remove(struct platform_device *pdev)
-{
-	struct stm32_crc *crc = platform_get_drvdata(pdev);
-	int ret = pm_runtime_get_sync(crc->dev);
-
-	if (ret < 0)
-		return ret;
-
-	spin_lock(&crc_list.lock);
-	list_del(&crc->list);
-	spin_unlock(&crc_list.lock);
-
-	crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
-
-	pm_runtime_disable(crc->dev);
-	pm_runtime_put_noidle(crc->dev);
-
-	clk_disable_unprepare(crc->clk);
-
-	return 0;
-}
-
-#ifdef CONFIG_PM
-static int stm32_crc_runtime_suspend(struct device *dev)
-{
-	struct stm32_crc *crc = dev_get_drvdata(dev);
-
-	clk_disable_unprepare(crc->clk);
-
-	return 0;
-}
-
-static int stm32_crc_runtime_resume(struct device *dev)
-{
-	struct stm32_crc *crc = dev_get_drvdata(dev);
-	int ret;
-
-	ret = clk_prepare_enable(crc->clk);
-	if (ret) {
-		dev_err(crc->dev, "Failed to prepare_enable clock\n");
-		return ret;
-	}
-
-	return 0;
-}
-#endif
-
-static const struct dev_pm_ops stm32_crc_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
-				pm_runtime_force_resume)
-	SET_RUNTIME_PM_OPS(stm32_crc_runtime_suspend,
-			   stm32_crc_runtime_resume, NULL)
-};
-
-static const struct of_device_id stm32_dt_ids[] = {
-	{ .compatible = "st,stm32f7-crc", },
-	{},
-};
-MODULE_DEVICE_TABLE(of, stm32_dt_ids);
-
-static struct platform_driver stm32_crc_driver = {
-	.probe  = stm32_crc_probe,
-	.remove = stm32_crc_remove,
-	.driver = {
-		.name           = DRIVER_NAME,
-		.pm             = &stm32_crc_pm_ops,
-		.of_match_table = stm32_dt_ids,
-	},
-};
-
-module_platform_driver(stm32_crc_driver);
-
-MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
-MODULE_DESCRIPTION("STMicrolectronics STM32 CRC32 hardware driver");
-MODULE_LICENSE("GPL");
-- 
2.7.4