From: Iuliana Prodan <iuliana.prodan@nxp.com>
Subject: [PATCH 2/2] crypto: caam - fix pkcs1pad input without changing crypto request
Date: 14 May 2019
For pkcs1pad(rsa-caam, sha256), CAAM expects an input of modulus size.
For this, we strip the leading zeros when the input size exceeds the
modulus, or pad the input with zeros until the modulus size is reached.
This commit avoids modifying the crypto request while stripping zeros
from the input, to comply with the crypto API requirement. This is done
by adding a fixup input pointer and length.

Signed-off-by: Iuliana Prodan <iuliana.prodan@nxp.com>
---
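Not part of the patch itself, just an illustration: below is a minimal
userspace sketch of the fixup idea in plain C (rsa_fixup_input and the
other names are made up for the example; this is not the driver's
scatterlist code). The caller's buffer is never modified; only an
adjusted (pointer, length) pair is derived, which is what
fixup_src/fixup_src_len do for the request's scatterlist via
scatterwalk_ffwd().

#include <stdio.h>
#include <stddef.h>

/* Compute a fixed-up (pointer, length) view of the input: skip any
 * leading zero bytes when the input is longer than the modulus n_sz,
 * leave it as-is otherwise (the driver then zero-pads up to n_sz).
 * The original buffer stays untouched, mirroring how the patch stops
 * rewriting req->src and req->src_len.
 */
static int rsa_fixup_input(const unsigned char *src, size_t src_len,
			   size_t n_sz,
			   const unsigned char **fixup_src,
			   size_t *fixup_src_len)
{
	size_t lzeros = 0;

	if (src_len > n_sz) {
		while (lzeros < src_len - n_sz && src[lzeros] == 0)
			lzeros++;
		if (src_len - lzeros > n_sz)
			return -1; /* input too large for the modulus */
	}
	*fixup_src = src + lzeros;
	*fixup_src_len = src_len - lzeros;
	return 0;
}

int main(void)
{
	const unsigned char msg[] = { 0x00, 0x00, 0x12, 0x34 };
	const unsigned char *p;
	size_t len;

	if (rsa_fixup_input(msg, sizeof(msg), 2, &p, &len) == 0)
		printf("fixed-up length: %zu, first byte 0x%02x\n",
		       len, p[0]);
	return 0;
}
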
drivers/crypto/caam/caampkc.c | 39 ++++++++++++++++++++++++++-------------
drivers/crypto/caam/caampkc.h | 7 ++++++-
2 files changed, 32 insertions(+), 14 deletions(-)

diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 0c8cfc0..a6231f1 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -32,8 +32,10 @@ static u8 *zero_buffer;
static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
struct akcipher_request *req)
{
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+
dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
- dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);

if (edesc->sec4_sg_bytes)
dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
@@ -251,17 +253,21 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
if (lzeros < 0)
return ERR_PTR(lzeros);

- req->src_len -= lzeros;
- req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros);
+ req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
+ lzeros);
+ req_ctx->fixup_src_len = req->src_len - lzeros;
} else {
/*
* input src is less than n key modulus,
* so there will be zero padding
*/
diff_size = key->n_sz - req->src_len;
+ req_ctx->fixup_src = req->src;
+ req_ctx->fixup_src_len = req->src_len;
}

- src_nents = sg_nents_for_len(req->src, req->src_len);
+ src_nents = sg_nents_for_len(req_ctx->fixup_src,
+ req_ctx->fixup_src_len);
dst_nents = sg_nents_for_len(req->dst, req->dst_len);

if (!diff_size && src_nents == 1)
@@ -280,7 +286,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
if (!edesc)
return ERR_PTR(-ENOMEM);

- sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+ sgc = dma_map_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
if (unlikely(!sgc)) {
dev_err(dev, "unable to map source\n");
goto src_fail;
@@ -298,8 +304,8 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
0);

if (sec4_sg_index)
- sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg +
- !!diff_size, 0);
+ sg_to_sec4_sg_last(req_ctx->fixup_src, src_nents,
+ edesc->sec4_sg + !!diff_size, 0);

if (dst_nents > 1)
sg_to_sec4_sg_last(req->dst, dst_nents,
@@ -330,7 +336,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
sec4_sg_fail:
dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
dst_fail:
- dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
src_fail:
kfree(edesc);
return ERR_PTR(-ENOMEM);
@@ -340,6 +346,7 @@ static int set_rsa_pub_pdb(struct akcipher_request *req,
struct rsa_edesc *edesc)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct caam_rsa_key *key = &ctx->key;
struct device *dev = ctx->dev;
@@ -364,7 +371,7 @@ static int set_rsa_pub_pdb(struct akcipher_request *req,
pdb->f_dma = edesc->sec4_sg_dma;
sec4_sg_index += edesc->src_nents;
} else {
- pdb->f_dma = sg_dma_address(req->src);
+ pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
}

if (edesc->dst_nents > 1) {
@@ -376,7 +383,7 @@ static int set_rsa_pub_pdb(struct akcipher_request *req,
}

pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
- pdb->f_len = req->src_len;
+ pdb->f_len = req_ctx->fixup_src_len;

return 0;
}
@@ -409,7 +416,9 @@ static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
pdb->g_dma = edesc->sec4_sg_dma;
sec4_sg_index += edesc->src_nents;
} else {
- pdb->g_dma = sg_dma_address(req->src);
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+
+ pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
}

if (edesc->dst_nents > 1) {
@@ -472,7 +481,9 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
pdb->g_dma = edesc->sec4_sg_dma;
sec4_sg_index += edesc->src_nents;
} else {
- pdb->g_dma = sg_dma_address(req->src);
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+
+ pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
}

if (edesc->dst_nents > 1) {
@@ -559,7 +570,9 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
pdb->g_dma = edesc->sec4_sg_dma;
sec4_sg_index += edesc->src_nents;
} else {
- pdb->g_dma = sg_dma_address(req->src);
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+
+ pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
}

if (edesc->dst_nents > 1) {
diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
index 5ac7201..2c488c9 100644
--- a/drivers/crypto/caam/caampkc.h
+++ b/drivers/crypto/caam/caampkc.h
@@ -95,14 +95,19 @@ struct caam_rsa_ctx {
struct caam_rsa_key key;
struct device *dev;
dma_addr_t padding_dma;
+
};

/**
* caam_rsa_req_ctx - per request context.
- * @src: input scatterlist (stripped of leading zeros)
+ * @src : input scatterlist (stripped of leading zeros)
+ * @fixup_src : input scatterlist (that might be stripped of leading zeros)
+ * @fixup_src_len : length of the fixup_src input scatterlist
*/
struct caam_rsa_req_ctx {
struct scatterlist src[2];
+ struct scatterlist *fixup_src;
+ unsigned int fixup_src_len;
};

/**
--
2.1.0