From 56442e69cbaf31555a9bb4f3311a0593a0c172cc Mon Sep 17 00:00:00 2001
From: Romuald JEANNE <romuald.jeanne@st.com>
Date: Tue, 16 Mar 2021 08:56:48 +0100
Subject: [PATCH 04/22] ARM 5.10.10-stm32mp1-r1 CRYPTO

---
 drivers/crypto/stm32/stm32-cryp.c | 300 +++++++++++++++++++++---------
 drivers/crypto/stm32/stm32-hash.c |  19 +-
 2 files changed, 228 insertions(+), 91 deletions(-)

diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 2670c30332fa..503428bf15cb 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -144,10 +144,10 @@ struct stm32_cryp {
 	size_t authsize;
 	size_t hw_blocksize;
 
+	size_t remain_in;
 	size_t total_in;
-	size_t total_in_save;
+	size_t remain_out;
 	size_t total_out;
-	size_t total_out_save;
 
 	struct scatterlist *in_sg;
 	struct scatterlist *out_sg;
@@ -157,9 +157,6 @@ struct stm32_cryp {
 	struct scatterlist out_sgl;
 	bool sgs_copied;
 
-	int in_sg_len;
-	int out_sg_len;
-
 	struct scatter_walk in_walk;
 	struct scatter_walk out_walk;
 
@@ -322,28 +319,46 @@ static int stm32_cryp_check_io_aligned(struct stm32_cryp *cryp)
 
 	ret = stm32_cryp_check_aligned(cryp->out_sg, cryp->total_out,
 				       cryp->hw_blocksize);
+	if (ret)
+		return ret;
+
+	if (is_gcm(cryp) || is_ccm(cryp))
+		if (!IS_ALIGNED(cryp->areq->assoclen, sizeof(u32)))
+			ret = -EINVAL;
 
 	return ret;
 }
 
 static void sg_copy_buf(void *buf, struct scatterlist *sg,
-			unsigned int start, unsigned int nbytes, int out)
+			unsigned int start, unsigned int first_len,
+			unsigned int zero_len,
+			unsigned int second_len,
+			int out)
 {
 	struct scatter_walk walk;
+	unsigned int nbytes = first_len + zero_len + second_len;
+	u32 empty = 0;
 
 	if (!nbytes)
 		return;
 
 	scatterwalk_start(&walk, sg);
 	scatterwalk_advance(&walk, start);
-	scatterwalk_copychunks(buf, &walk, nbytes, out);
+	if (first_len)
+		scatterwalk_copychunks(buf, &walk, first_len, out);
+	if (zero_len)
+		memcpy(buf+first_len, &empty, zero_len);
+	if (second_len)
+		scatterwalk_copychunks(buf + first_len + zero_len, &walk,
+				       second_len, out);
+
 	scatterwalk_done(&walk, out, 0);
 }
 
 static int stm32_cryp_copy_sgs(struct stm32_cryp *cryp)
 {
 	void *buf_in, *buf_out;
-	int pages, total_in, total_out;
+	int pages_in, pages_out, total_in, total_out;
 
 	if (!stm32_cryp_check_io_aligned(cryp)) {
 		cryp->sgs_copied = 0;
@@ -351,29 +366,37 @@ static int stm32_cryp_copy_sgs(struct stm32_cryp *cryp)
 	}
 
 	total_in = ALIGN(cryp->total_in, cryp->hw_blocksize);
-	pages = total_in ? get_order(total_in) : 1;
-	buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
+	pages_in = total_in ? get_order(total_in) : 1;
+	buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages_in);
 
 	total_out = ALIGN(cryp->total_out, cryp->hw_blocksize);
-	pages = total_out ? get_order(total_out) : 1;
-	buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);
+	pages_out = total_out ? get_order(total_out) : 1;
+	buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages_out);
 
 	if (!buf_in || !buf_out) {
 		dev_err(cryp->dev, "Can't allocate pages when unaligned\n");
+		if (buf_in)
+			free_pages((unsigned long)buf_in, pages_in);
 		cryp->sgs_copied = 0;
 		return -EFAULT;
 	}
 
-	sg_copy_buf(buf_in, cryp->in_sg, 0, cryp->total_in, 0);
+
+	if ((is_gcm(cryp) || is_ccm(cryp)) && (!IS_ALIGNED(cryp->areq->assoclen,
+							   sizeof(u32)))) {
+		sg_copy_buf(buf_in, cryp->in_sg, 0, cryp->areq->assoclen,
+			    ALIGN(cryp->areq->assoclen, sizeof(u32))
+			    - cryp->areq->assoclen,
+			    cryp->areq->cryptlen, 0);
+	} else
+		sg_copy_buf(buf_in, cryp->in_sg, 0, cryp->total_in, 0, 0, 0);
 
 	sg_init_one(&cryp->in_sgl, buf_in, total_in);
 	cryp->in_sg = &cryp->in_sgl;
-	cryp->in_sg_len = 1;
 
 	sg_init_one(&cryp->out_sgl, buf_out, total_out);
 	cryp->out_sg_save = cryp->out_sg;
 	cryp->out_sg = &cryp->out_sgl;
-	cryp->out_sg_len = 1;
 
 	cryp->sgs_copied = 1;
 
@@ -654,14 +677,14 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
 		buf_in = sg_virt(&cryp->in_sgl);
 		buf_out = sg_virt(&cryp->out_sgl);
 
-		sg_copy_buf(buf_out, cryp->out_sg_save, 0,
-			    cryp->total_out_save, 1);
+		sg_copy_buf(buf_out, cryp->out_sg_save, 0, 0, 0,
+			    cryp->total_out, 1);
 
-		len = ALIGN(cryp->total_in_save, cryp->hw_blocksize);
+		len = ALIGN(cryp->total_in, cryp->hw_blocksize);
 		pages = len ? get_order(len) : 1;
 		free_pages((unsigned long)buf_in, pages);
 
-		len = ALIGN(cryp->total_out_save, cryp->hw_blocksize);
+		len = ALIGN(cryp->total_out, cryp->hw_blocksize);
 		pages = len ? get_order(len) : 1;
 		free_pages((unsigned long)buf_out, pages);
 	}
@@ -801,7 +824,20 @@ static int stm32_cryp_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 static int stm32_cryp_aes_gcm_setauthsize(struct crypto_aead *tfm,
 					   unsigned int authsize)
 {
-	return authsize == AES_BLOCK_SIZE ? 0 : -EINVAL;
+	switch (authsize) {
+	case 4:
+	case 8:
+	case 12:
+	case 13:
+	case 14:
+	case 15:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
 }
 
 static int stm32_cryp_aes_ccm_setauthsize(struct crypto_aead *tfm,
@@ -825,31 +861,61 @@ static int stm32_cryp_aes_ccm_setauthsize(struct crypto_aead *tfm,
 
 static int stm32_cryp_aes_ecb_encrypt(struct skcipher_request *req)
 {
+	if (req->cryptlen % AES_BLOCK_SIZE)
+		return -EINVAL;
+
+	if (req->cryptlen == 0)
+		return 0;
+
 	return stm32_cryp_crypt(req, FLG_AES | FLG_ECB | FLG_ENCRYPT);
 }
 
 static int stm32_cryp_aes_ecb_decrypt(struct skcipher_request *req)
 {
+	if (req->cryptlen % AES_BLOCK_SIZE)
+		return -EINVAL;
+
+	if (req->cryptlen == 0)
+		return 0;
+
 	return stm32_cryp_crypt(req, FLG_AES | FLG_ECB);
 }
 
 static int stm32_cryp_aes_cbc_encrypt(struct skcipher_request *req)
 {
+	if (req->cryptlen % AES_BLOCK_SIZE)
+		return -EINVAL;
+
+	if (req->cryptlen == 0)
+		return 0;
+
 	return stm32_cryp_crypt(req, FLG_AES | FLG_CBC | FLG_ENCRYPT);
 }
 
 static int stm32_cryp_aes_cbc_decrypt(struct skcipher_request *req)
 {
+	if (req->cryptlen % AES_BLOCK_SIZE)
+		return -EINVAL;
+
+	if (req->cryptlen == 0)
+		return 0;
+
 	return stm32_cryp_crypt(req, FLG_AES | FLG_CBC);
 }
 
 static int stm32_cryp_aes_ctr_encrypt(struct skcipher_request *req)
 {
+	if (req->cryptlen == 0)
+		return 0;
+
 	return stm32_cryp_crypt(req, FLG_AES | FLG_CTR | FLG_ENCRYPT);
 }
 
 static int stm32_cryp_aes_ctr_decrypt(struct skcipher_request *req)
 {
+	if (req->cryptlen == 0)
+		return 0;
+
 	return stm32_cryp_crypt(req, FLG_AES | FLG_CTR);
 }
 
@@ -863,53 +929,122 @@ static int stm32_cryp_aes_gcm_decrypt(struct aead_request *req)
 	return stm32_cryp_aead_crypt(req, FLG_AES | FLG_GCM);
 }
 
+static inline int crypto_ccm_check_iv(const u8 *iv)
+{
+	/* 2 <= L <= 8, so 1 <= L' <= 7. */
+	if (iv[0] < 1 || iv[0] > 7)
+		return -EINVAL;
+
+	return 0;
+}
+
 static int stm32_cryp_aes_ccm_encrypt(struct aead_request *req)
 {
+	int err;
+
+	err = crypto_ccm_check_iv(req->iv);
+	if (err)
+		return err;
+
 	return stm32_cryp_aead_crypt(req, FLG_AES | FLG_CCM | FLG_ENCRYPT);
 }
 
 static int stm32_cryp_aes_ccm_decrypt(struct aead_request *req)
 {
+	int err;
+
+	err = crypto_ccm_check_iv(req->iv);
+	if (err)
+		return err;
+
 	return stm32_cryp_aead_crypt(req, FLG_AES | FLG_CCM);
 }
 
 static int stm32_cryp_des_ecb_encrypt(struct skcipher_request *req)
 {
+	if (req->cryptlen % DES_BLOCK_SIZE)
+		return -EINVAL;
+
+	if (req->cryptlen == 0)
+		return 0;
+
 	return stm32_cryp_crypt(req, FLG_DES | FLG_ECB | FLG_ENCRYPT);
 }
 
 static int stm32_cryp_des_ecb_decrypt(struct skcipher_request *req)
 {
+	if (req->cryptlen % DES_BLOCK_SIZE)
+		return -EINVAL;
+
+	if (req->cryptlen == 0)
+		return 0;
+
 	return stm32_cryp_crypt(req, FLG_DES | FLG_ECB);
 }
 
 static int stm32_cryp_des_cbc_encrypt(struct skcipher_request *req)
 {
+	if (req->cryptlen % DES_BLOCK_SIZE)
+		return -EINVAL;
+
+	if (req->cryptlen == 0)
+		return 0;
+
 	return stm32_cryp_crypt(req, FLG_DES | FLG_CBC | FLG_ENCRYPT);
 }
 
 static int stm32_cryp_des_cbc_decrypt(struct skcipher_request *req)
 {
+	if (req->cryptlen % DES_BLOCK_SIZE)
+		return -EINVAL;
+
+	if (req->cryptlen == 0)
+		return 0;
+
 	return stm32_cryp_crypt(req, FLG_DES | FLG_CBC);
 }
 
 static int stm32_cryp_tdes_ecb_encrypt(struct skcipher_request *req)
 {
+	if (req->cryptlen % DES_BLOCK_SIZE)
+		return -EINVAL;
+
+	if (req->cryptlen == 0)
+		return 0;
+
 	return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB | FLG_ENCRYPT);
 }
 
 static int stm32_cryp_tdes_ecb_decrypt(struct skcipher_request *req)
 {
+	if (req->cryptlen % DES_BLOCK_SIZE)
+		return -EINVAL;
+
+	if (req->cryptlen == 0)
+		return 0;
+
 	return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB);
 }
 
 static int stm32_cryp_tdes_cbc_encrypt(struct skcipher_request *req)
 {
+	if (req->cryptlen % DES_BLOCK_SIZE)
+		return -EINVAL;
+
+	if (req->cryptlen == 0)
+		return 0;
+
 	return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC | FLG_ENCRYPT);
 }
 
 static int stm32_cryp_tdes_cbc_decrypt(struct skcipher_request *req)
 {
+	if (req->cryptlen % DES_BLOCK_SIZE)
+		return -EINVAL;
+
+	if (req->cryptlen == 0)
+		return 0;
+
 	return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC);
 }
 
@@ -971,36 +1106,25 @@ static int stm32_cryp_prepare_req(struct skcipher_request *req,
 		cryp->areq = areq;
 		cryp->req = NULL;
 		cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(areq));
-		cryp->total_in = areq->assoclen + areq->cryptlen;
+		cryp->total_in = ALIGN(areq->assoclen, sizeof(u32))
+				 + areq->cryptlen;
 		if (is_encrypt(cryp))
 			/* Append auth tag to output */
-			cryp->total_out = cryp->total_in + cryp->authsize;
+			cryp->total_out = areq->assoclen + areq->cryptlen
+					  + cryp->authsize;
 		else
 			/* No auth tag in output */
-			cryp->total_out = cryp->total_in - cryp->authsize;
+			cryp->total_out = areq->assoclen + areq->cryptlen
+					  - cryp->authsize;
 	}
 
-	cryp->total_in_save = cryp->total_in;
-	cryp->total_out_save = cryp->total_out;
+	cryp->remain_in = cryp->total_in;
+	cryp->remain_out = cryp->total_out;
 
 	cryp->in_sg = req ? req->src : areq->src;
 	cryp->out_sg = req ? req->dst : areq->dst;
 	cryp->out_sg_save = cryp->out_sg;
 
-	cryp->in_sg_len = sg_nents_for_len(cryp->in_sg, cryp->total_in);
-	if (cryp->in_sg_len < 0) {
-		dev_err(cryp->dev, "Cannot get in_sg_len\n");
-		ret = cryp->in_sg_len;
-		return ret;
-	}
-
-	cryp->out_sg_len = sg_nents_for_len(cryp->out_sg, cryp->total_out);
-	if (cryp->out_sg_len < 0) {
-		dev_err(cryp->dev, "Cannot get out_sg_len\n");
-		ret = cryp->out_sg_len;
-		return ret;
-	}
-
 	ret = stm32_cryp_copy_sgs(cryp);
 	if (ret)
 		return ret;
@@ -1011,7 +1135,7 @@ static int stm32_cryp_prepare_req(struct skcipher_request *req,
 	if (is_gcm(cryp) || is_ccm(cryp)) {
 		/* In output, jump after assoc data */
 		scatterwalk_advance(&cryp->out_walk, cryp->areq->assoclen);
-		cryp->total_out -= cryp->areq->assoclen;
+		cryp->remain_out -= cryp->areq->assoclen;
 	}
 
 	ret = stm32_cryp_hw_init(cryp);
@@ -1130,7 +1254,7 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
 		stm32_cryp_write(cryp, CRYP_DIN, size_bit);
 
 		size_bit = is_encrypt(cryp) ? cryp->areq->cryptlen :
-				cryp->areq->cryptlen - AES_BLOCK_SIZE;
+				cryp->areq->cryptlen - cryp->authsize;
 		size_bit *= 8;
 		if (cryp->caps->swap_final)
 			size_bit = (__force u32)cpu_to_be32(size_bit);
@@ -1169,14 +1293,14 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
 		dst = sg_virt(cryp->out_sg) + _walked_out;
 
 		for (i = 0; i < AES_BLOCK_32; i++) {
-			if (cryp->total_out >= sizeof(u32)) {
+			if (cryp->remain_out >= sizeof(u32)) {
 				/* Read a full u32 */
 				*dst = stm32_cryp_read(cryp, CRYP_DOUT);
 
 				dst = stm32_cryp_next_out(cryp, dst,
							  sizeof(u32));
-				cryp->total_out -= sizeof(u32);
-			} else if (!cryp->total_out) {
+				cryp->remain_out -= sizeof(u32);
+			} else if (!cryp->remain_out) {
 				/* Empty fifo out (data from input padding) */
 				stm32_cryp_read(cryp, CRYP_DOUT);
 			} else {
@@ -1184,11 +1308,11 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
 				d32 = stm32_cryp_read(cryp, CRYP_DOUT);
 				d8 = (u8 *)&d32;
 
-				for (j = 0; j < cryp->total_out; j++) {
+				for (j = 0; j < cryp->remain_out; j++) {
 					*((u8 *)dst) = *(d8++);
 					dst = stm32_cryp_next_out(cryp, dst, 1);
 				}
-				cryp->total_out = 0;
+				cryp->remain_out = 0;
 			}
 		}
 	} else {
@@ -1196,7 +1320,7 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
 		u32 in_tag[AES_BLOCK_32], out_tag[AES_BLOCK_32];
 
 		scatterwalk_map_and_copy(in_tag, cryp->in_sg,
-					 cryp->total_in_save - cryp->authsize,
+					 cryp->total_in - cryp->authsize,
 					 cryp->authsize, 0);
 
 		for (i = 0; i < AES_BLOCK_32; i++)
@@ -1256,13 +1380,13 @@ static bool stm32_cryp_irq_read_data(struct stm32_cryp *cryp)
 	dst = sg_virt(cryp->out_sg) + _walked_out;
 
 	for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) {
-		if (likely(cryp->total_out - tag_size >= sizeof(u32))) {
+		if (likely(cryp->remain_out - tag_size >= sizeof(u32))) {
 			/* Read a full u32 */
 			*dst = stm32_cryp_read(cryp, CRYP_DOUT);
 
 			dst = stm32_cryp_next_out(cryp, dst, sizeof(u32));
-			cryp->total_out -= sizeof(u32);
-		} else if (cryp->total_out == tag_size) {
+			cryp->remain_out -= sizeof(u32);
+		} else if (cryp->remain_out == tag_size) {
 			/* Empty fifo out (data from input padding) */
 			d32 = stm32_cryp_read(cryp, CRYP_DOUT);
 		} else {
@@ -1270,15 +1394,15 @@ static bool stm32_cryp_irq_read_data(struct stm32_cryp *cryp)
 			d32 = stm32_cryp_read(cryp, CRYP_DOUT);
 			d8 = (u8 *)&d32;
 
-			for (j = 0; j < cryp->total_out - tag_size; j++) {
+			for (j = 0; j < cryp->remain_out - tag_size; j++) {
 				*((u8 *)dst) = *(d8++);
 				dst = stm32_cryp_next_out(cryp, dst, 1);
 			}
-			cryp->total_out = tag_size;
+			cryp->remain_out = tag_size;
 		}
 	}
 
-	return !(cryp->total_out - tag_size) || !cryp->total_in;
+	return !(cryp->remain_out - tag_size) || !cryp->remain_in;
 }
 
 static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp)
@@ -1297,25 +1421,25 @@ static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp)
 	src = sg_virt(cryp->in_sg) + _walked_in;
 
 	for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) {
-		if (likely(cryp->total_in - tag_size >= sizeof(u32))) {
+		if (likely(cryp->remain_in - tag_size >= sizeof(u32))) {
 			/* Write a full u32 */
 			stm32_cryp_write(cryp, CRYP_DIN, *src);
 
 			src = stm32_cryp_next_in(cryp, src, sizeof(u32));
-			cryp->total_in -= sizeof(u32);
-		} else if (cryp->total_in == tag_size) {
+			cryp->remain_in -= sizeof(u32);
+		} else if (cryp->remain_in == tag_size) {
 			/* Write padding data */
 			stm32_cryp_write(cryp, CRYP_DIN, 0);
 		} else {
 			/* Write less than an u32 */
 			memset(d8, 0, sizeof(u32));
-			for (j = 0; j < cryp->total_in - tag_size; j++) {
+			for (j = 0; j < cryp->remain_in - tag_size; j++) {
 				d8[j] = *((u8 *)src);
 				src = stm32_cryp_next_in(cryp, src, 1);
 			}
 
 			stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
-			cryp->total_in = tag_size;
+			cryp->remain_in = tag_size;
 		}
 	}
 }
@@ -1324,7 +1448,7 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
 {
 	int err;
 	u32 cfg, tmp[AES_BLOCK_32];
-	size_t total_in_ori = cryp->total_in;
+	size_t remain_in_ori = cryp->remain_in;
 	struct scatterlist *out_sg_ori = cryp->out_sg;
 	unsigned int i;
 
@@ -1350,7 +1474,7 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
 
 	/* b) pad and write the last block */
 	stm32_cryp_irq_write_block(cryp);
-	cryp->total_in = total_in_ori;
+	cryp->remain_in = remain_in_ori;
 	err = stm32_cryp_wait_output(cryp);
 	if (err) {
 		dev_err(cryp->dev, "Timeout (write gcm header)\n");
@@ -1360,8 +1484,8 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
 	/* c) get and store encrypted data */
 	stm32_cryp_irq_read_data(cryp);
 	scatterwalk_map_and_copy(tmp, out_sg_ori,
-				 cryp->total_in_save - total_in_ori,
-				 total_in_ori, 0);
+				 cryp->total_in - remain_in_ori,
+				 remain_in_ori, 0);
 
 	/* d) change mode back to AES GCM */
 	cfg &= ~CR_ALGO_MASK;
@@ -1375,12 +1499,12 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
 
 	/* f) write padded data */
 	for (i = 0; i < AES_BLOCK_32; i++) {
-		if (cryp->total_in)
+		if (cryp->remain_in)
 			stm32_cryp_write(cryp, CRYP_DIN, tmp[i]);
 		else
 			stm32_cryp_write(cryp, CRYP_DIN, 0);
 
-		cryp->total_in -= min_t(size_t, sizeof(u32), cryp->total_in);
+		cryp->remain_in -= min_t(size_t, sizeof(u32), cryp->remain_in);
 	}
 
 	/* g) Empty fifo out */
@@ -1406,8 +1530,8 @@ static void stm32_cryp_irq_set_npblb(struct stm32_cryp *cryp)
 	cfg &= ~CR_CRYPEN;
 	stm32_cryp_write(cryp, CRYP_CR, cfg);
 
-	payload_bytes = is_decrypt(cryp) ? cryp->total_in - cryp->authsize :
-					   cryp->total_in;
+	payload_bytes = is_decrypt(cryp) ? cryp->remain_in - cryp->authsize :
+					   cryp->remain_in;
 	cfg |= (cryp->hw_blocksize - payload_bytes) << CR_NBPBL_SHIFT;
 	cfg |= CR_CRYPEN;
 	stm32_cryp_write(cryp, CRYP_CR, cfg);
@@ -1418,7 +1542,7 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
 	int err = 0;
 	u32 cfg, iv1tmp;
 	u32 cstmp1[AES_BLOCK_32], cstmp2[AES_BLOCK_32], tmp[AES_BLOCK_32];
-	size_t last_total_out, total_in_ori = cryp->total_in;
+	size_t last_remain_out, remain_in_ori = cryp->remain_in;
 	struct scatterlist *out_sg_ori = cryp->out_sg;
 	unsigned int i;
 
@@ -1453,7 +1577,7 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
 
 	/* b) pad and write the last block */
 	stm32_cryp_irq_write_block(cryp);
-	cryp->total_in = total_in_ori;
+	cryp->remain_in = remain_in_ori;
 	err = stm32_cryp_wait_output(cryp);
 	if (err) {
 		dev_err(cryp->dev, "Timeout (wite ccm padded data)\n");
@@ -1461,13 +1585,13 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
 	}
 
 	/* c) get and store decrypted data */
-	last_total_out = cryp->total_out;
+	last_remain_out = cryp->remain_out;
 	stm32_cryp_irq_read_data(cryp);
 
 	memset(tmp, 0, sizeof(tmp));
 	scatterwalk_map_and_copy(tmp, out_sg_ori,
-				 cryp->total_out_save - last_total_out,
-				 last_total_out, 0);
+				 cryp->total_out - last_remain_out,
+				 last_remain_out, 0);
 
 	/* d) Load again CRYP_CSGCMCCMxR */
 	for (i = 0; i < ARRAY_SIZE(cstmp2); i++)
@@ -1501,12 +1625,12 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
 
 static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp)
 {
-	if (unlikely(!cryp->total_in)) {
+	if (unlikely(!cryp->remain_in)) {
 		dev_warn(cryp->dev, "No more data to process\n");
 		return;
 	}
 
-	if (unlikely(cryp->total_in < AES_BLOCK_SIZE &&
+	if (unlikely(cryp->remain_in < AES_BLOCK_SIZE &&
 		     (stm32_cryp_get_hw_mode(cryp) == CR_AES_GCM) &&
 		     is_encrypt(cryp))) {
 		/* Padding for AES GCM encryption */
@@ -1518,7 +1642,7 @@ static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp)
 		stm32_cryp_irq_set_npblb(cryp);
 	}
 
-	if (unlikely((cryp->total_in - cryp->authsize < AES_BLOCK_SIZE) &&
+	if (unlikely((cryp->remain_in - cryp->authsize < AES_BLOCK_SIZE) &&
 		     (stm32_cryp_get_hw_mode(cryp) == CR_AES_CCM) &&
 		     is_decrypt(cryp))) {
 		/* Padding for AES CCM decryption */
@@ -1548,10 +1672,10 @@ static void stm32_cryp_irq_write_gcm_header(struct stm32_cryp *cryp)
 		stm32_cryp_write(cryp, CRYP_DIN, *src);
 
 		src = stm32_cryp_next_in(cryp, src, sizeof(u32));
-		cryp->total_in -= min_t(size_t, sizeof(u32), cryp->total_in);
+		cryp->remain_in -= min_t(size_t, sizeof(u32), cryp->remain_in);
 
 		/* Check if whole header written */
-		if ((cryp->total_in_save - cryp->total_in) ==
+		if ((cryp->total_in - cryp->remain_in) >=
 		    cryp->areq->assoclen) {
 			/* Write padding if needed */
 			for (j = i + 1; j < AES_BLOCK_32; j++)
@@ -1583,7 +1707,7 @@ static void stm32_cryp_irq_write_gcm_header(struct stm32_cryp *cryp)
 			break;
 		}
 
-		if (!cryp->total_in)
+		if (!cryp->remain_in)
 			break;
 	}
 }
@@ -1611,7 +1735,7 @@ static void stm32_cryp_irq_write_ccm_header(struct stm32_cryp *cryp)
 		stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
 		i++;
 
-		cryp->total_in -= min_t(size_t, 2, cryp->total_in);
+		cryp->remain_in -= min_t(size_t, 2, cryp->remain_in);
 	} else {
 		/* Build the two first u32 of B1 */
 		d8[0] = 0xFF;
@@ -1632,7 +1756,7 @@ static void stm32_cryp_irq_write_ccm_header(struct stm32_cryp *cryp)
 		stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
 		i++;
 
-		cryp->total_in -= min_t(size_t, 2, cryp->total_in);
+		cryp->remain_in -= min_t(size_t, 2, cryp->remain_in);
 	}
 }
 
@@ -1644,14 +1768,14 @@ static void stm32_cryp_irq_write_ccm_header(struct stm32_cryp *cryp)
 			d8[k] = *((u8 *)src);
 			src = stm32_cryp_next_in(cryp, src, 1);
 
-			cryp->total_in -= min_t(size_t, 1, cryp->total_in);
-			if ((cryp->total_in_save - cryp->total_in) == alen)
+			cryp->remain_in -= min_t(size_t, 1, cryp->remain_in);
+			if ((cryp->total_in - cryp->remain_in) == alen)
 				break;
 		}
 
 		stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
 
-		if ((cryp->total_in_save - cryp->total_in) == alen) {
+		if ((cryp->total_in - cryp->remain_in) == alen) {
 			/* Write padding if needed */
 			for (j = i + 1; j < AES_BLOCK_32; j++)
 				stm32_cryp_write(cryp, CRYP_DIN, 0);
@@ -1955,7 +2079,9 @@ static int stm32_cryp_probe(struct platform_device *pdev)
 
 	cryp->clk = devm_clk_get(dev, NULL);
 	if (IS_ERR(cryp->clk)) {
-		dev_err(dev, "Could not get clock\n");
+		if (PTR_ERR(cryp->clk) != -EPROBE_DEFER)
+			dev_err(dev, "Could not get clock\n");
+
 		return PTR_ERR(cryp->clk);
 	}
 
@@ -1973,7 +2099,11 @@ static int stm32_cryp_probe(struct platform_device *pdev)
 	pm_runtime_enable(dev);
 
 	rst = devm_reset_control_get(dev, NULL);
-	if (!IS_ERR(rst)) {
+	if (IS_ERR(rst)) {
+		ret = PTR_ERR(rst);
+		if (ret == -EPROBE_DEFER)
+			goto err_rst;
+	} else {
 		reset_control_assert(rst);
 		udelay(2);
 		reset_control_deassert(rst);
@@ -2024,7 +2154,7 @@ static int stm32_cryp_probe(struct platform_device *pdev)
 	spin_lock(&cryp_list.lock);
 	list_del(&cryp->list);
 	spin_unlock(&cryp_list.lock);
-
+err_rst:
 	pm_runtime_disable(dev);
 	pm_runtime_put_noidle(dev);
 	pm_runtime_disable(dev);
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index e3e25278a970..da9c3e913e55 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -925,15 +925,10 @@ static int stm32_hash_final(struct ahash_request *req)
 static int stm32_hash_finup(struct ahash_request *req)
 {
 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
-	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
-	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
 	int err1, err2;
 
 	rctx->flags |= HASH_FLAGS_FINUP;
 
-	if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
-		rctx->flags &= ~HASH_FLAGS_CPU;
-
 	err1 = stm32_hash_update(req);
 
 	if (err1 == -EINPROGRESS || err1 == -EBUSY)
@@ -950,7 +945,19 @@ static int stm32_hash_finup(struct ahash_request *req)
 
 static int stm32_hash_digest(struct ahash_request *req)
 {
-	return stm32_hash_init(req) ?: stm32_hash_finup(req);
+	int ret;
+	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+
+	ret = stm32_hash_init(req);
+	if (ret)
+		return ret;
+
+	if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
+		rctx->flags &= ~HASH_FLAGS_CPU;
+
+	return stm32_hash_finup(req);
 }
 
 static int stm32_hash_export(struct ahash_request *req, void *out)
-- 
2.17.1
