author | Matthew Shyu <matthew.shyu@amlogic.com> | 2018-10-30 06:48:07 (GMT)
---|---|---
committer | Can Cao <can.cao@amlogic.com> | 2018-11-21 05:25:57 (GMT)
commit | 36ec26d2629f8f24e000ade2d4f78d71c543dcd0 |
tree | 19fb9716611ddf06c78b513daa3db54f323065a0 |
parent | 307d1fbded12b652d1cd43d16ef26d1a435bd99c |
crypto: Enable ARMCE and set aes dma to slow mode [1/1]
PD#SWPL-574
Problem:
xfrm tests in VtsKernelNetTest failed randomly
due to intermittent gcm(aes) failures
Solution:
Use ARM CE for small-sized crypto processing and use
copy (slow) mode when the HW DMA engine is used
Verify:
verified on ampere
Change-Id: I31cd75cfcd85da9fc9e9640135c7ce76623ef715
Signed-off-by: Matthew Shyu <matthew.shyu@amlogic.com>
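
The split the message describes (ARM CE for small requests, the hardware engine in copy mode for the rest) is easiest to picture as a size-based dispatch. In the kernel the routing actually happens through crypto API algorithm priorities rather than an explicit branch, so the sketch below is only an illustration of the intent; the backend names are hypothetical, not the driver's API:

```c
#include <stdio.h>
#include <stddef.h>

/* Stub backends; hypothetical names, not the driver's actual API */
static int armce_aes_crypt(const void *src, void *dst, size_t len)
{
	(void)src; (void)dst;
	printf("ARM CE path: %zu bytes\n", len);
	return 0;
}

static int hw_dma_aes_crypt_copy(const void *src, void *dst, size_t len)
{
	(void)src; (void)dst;
	printf("HW DMA (copy mode) path: %zu bytes\n", len);
	return 0;
}

/* Mirrors AML_AES_DMA_THRESHOLD (16 bytes) in aml-aes-dma.c */
#define AES_DMA_THRESHOLD 16

static int aes_dispatch(const void *src, void *dst, size_t len)
{
	if (len <= AES_DMA_THRESHOLD)
		return armce_aes_crypt(src, dst, len);	/* small: CPU */
	return hw_dma_aes_crypt_copy(src, dst, len);	/* large: engine */
}

int main(void)
{
	char buf[64] = { 0 };

	aes_dispatch(buf, buf, 16);	/* takes the ARM CE path */
	aes_dispatch(buf, buf, 64);	/* takes the HW DMA path */
	return 0;
}
```

The 16-byte constant mirrors AML_AES_DMA_THRESHOLD in the driver, presumably the point below which DMA setup overhead outweighs the transfer itself.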
-rwxr-xr-x | arch/arm64/configs/gxl_sei210_defconfig |  5
-rw-r--r-- | drivers/amlogic/crypto/aml-aes-dma.c    | 62
2 files changed, 48 insertions(+), 19 deletions(-)
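
One construct in the C diff below is easy to misread: the fast DMA path is not deleted, it is compiled out via `#define SUPPORT_FAST_DMA 0`, and the `} else` / `#endif` / `{` splice keeps the slow-path block compiling and running unconditionally when the macro is 0. A small self-contained illustration of the same trick:

```c
#include <stdio.h>

#define SUPPORT_FAST_DMA 0	/* as set by the patch */

static void crypt_dma_start(int fast)
{
#if SUPPORT_FAST_DMA
	if (fast) {
		printf("fast dma\n");
	} else
#endif
	{
		/* With SUPPORT_FAST_DMA == 0 the branch above is never
		 * compiled, so this block runs unconditionally. */
		(void)fast;
		printf("slow dma (copy mode)\n");
	}
}

int main(void)
{
	crypt_dma_start(1);	/* still prints "slow dma (copy mode)" */
	return 0;
}
```

Flipping SUPPORT_FAST_DMA back to 1 restores the original if/else with no further edits, presumably why the path was gated rather than removed.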
diff --git a/arch/arm64/configs/gxl_sei210_defconfig b/arch/arm64/configs/gxl_sei210_defconfig
index 35d32e0..be19100 100755
--- a/arch/arm64/configs/gxl_sei210_defconfig
+++ b/arch/arm64/configs/gxl_sei210_defconfig
@@ -601,6 +601,11 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=y
 CONFIG_ASYMMETRIC_KEY_TYPE=y
 CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
 CONFIG_X509_CERTIFICATE_PARSER=y
+CONFIG_ARM_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM_CE=y
+CONFIG_CRYPTO_SHA2_ARM_CE=y
+CONFIG_CRYPTO_AES_ARM_CE=y
+CONFIG_CRYPTO_GHASH_ARM_CE=y
 CONFIG_CRC_T10DIF=y
 CONFIG_CRC7=y
 CONFIG_SEI_RESET_KEY=y
diff --git a/drivers/amlogic/crypto/aml-aes-dma.c b/drivers/amlogic/crypto/aml-aes-dma.c
index c265790..b7ff805 100644
--- a/drivers/amlogic/crypto/aml-aes-dma.c
+++ b/drivers/amlogic/crypto/aml-aes-dma.c
@@ -58,6 +58,7 @@
 #define AML_AES_QUEUE_LENGTH 50
 #define AML_AES_DMA_THRESHOLD 16
+#define SUPPORT_FAST_DMA 0
 
 struct aml_aes_dev;
 
 struct aml_aes_ctx {
@@ -223,7 +224,7 @@ static size_t aml_aes_sg_copy(struct scatterlist **sg, size_t *offset,
 	return off;
 }
 
-
+#if SUPPORT_FAST_DMA
 static size_t aml_aes_sg_dma(struct aml_aes_dev *dd, struct dma_dsc *dsc,
 	uint32_t *nents, size_t total)
 {
@@ -243,23 +244,36 @@ static size_t aml_aes_sg_dma(struct aml_aes_dev *dd, struct dma_dsc *dsc,
 		process = min_t(unsigned int, total, in_sg->length);
 		count += process;
 		*nents += 1;
+		if (process != in_sg->length)
+			dd->out_offset = dd->in_offset = in_sg->length;
 		total -= process;
 		in_sg = sg_next(in_sg);
 		out_sg = sg_next(out_sg);
 	}
-	err = dma_map_sg(dd->dev, dd->in_sg, *nents, DMA_TO_DEVICE);
-	if (!err) {
-		dev_err(dd->dev, "dma_map_sg() error\n");
-		return 0;
-	}
+	if (dd->in_sg != dd->out_sg) {
+		err = dma_map_sg(dd->dev, dd->in_sg, *nents, DMA_TO_DEVICE);
+		if (!err) {
+			dev_err(dd->dev, "dma_map_sg() error\n");
+			return 0;
+		}
 
-	err = dma_map_sg(dd->dev, dd->out_sg, *nents,
-			DMA_FROM_DEVICE);
-	if (!err) {
-		dev_err(dd->dev, "dma_map_sg() error\n");
-		dma_unmap_sg(dd->dev, dd->in_sg, *nents,
-				DMA_TO_DEVICE);
-		return 0;
+		err = dma_map_sg(dd->dev, dd->out_sg, *nents,
+				DMA_FROM_DEVICE);
+		if (!err) {
+			dev_err(dd->dev, "dma_map_sg() error\n");
+			dma_unmap_sg(dd->dev, dd->in_sg, *nents,
+					DMA_TO_DEVICE);
+			return 0;
+		}
+	} else {
+		err = dma_map_sg(dd->dev, dd->in_sg, *nents,
+				DMA_BIDIRECTIONAL);
+		if (!err) {
+			dev_err(dd->dev, "dma_map_sg() error\n");
+			return 0;
+		}
+		dma_sync_sg_for_device(dd->dev, dd->in_sg,
+				*nents, DMA_TO_DEVICE);
 	}
 
 	in_sg = dd->in_sg;
@@ -280,7 +294,7 @@ static size_t aml_aes_sg_dma(struct aml_aes_dev *dd, struct dma_dsc *dsc,
 	}
 	return count;
 }
-
+#endif
 static struct aml_aes_dev *aml_aes_find_dev(struct aml_aes_ctx *ctx)
 {
 	struct aml_aes_dev *aes_dd = NULL;
@@ -385,7 +399,8 @@ static int aml_aes_crypt_dma_start(struct aml_aes_dev *dd)
 		dd->fast_nents = 0;
 	}
 
-	//fast = 0;
+#if SUPPORT_FAST_DMA
+	// fast = 0;
 	if (fast) {
 		count = aml_aes_sg_dma(dd, dsc, &dd->fast_nents, dd->total);
 		dd->flags |= AES_FLAGS_FAST;
@@ -393,7 +408,9 @@ static int aml_aes_crypt_dma_start(struct aml_aes_dev *dd)
 		dd->fast_total = count;
 		dbgp(1, "use fast dma: n:%u, t:%zd\n",
 		     dd->fast_nents, dd->fast_total);
-	} else {
+	} else
+#endif
+	{
 		/* slow dma */
 		/* use cache buffers */
 		count = aml_aes_sg_copy(&dd->in_sg, &dd->in_offset,
@@ -523,10 +540,17 @@ static int aml_aes_crypt_dma_stop(struct aml_aes_dev *dd)
 	dma_sync_single_for_cpu(dd->dev, dd->dma_descript_tab,
 			PAGE_SIZE, DMA_FROM_DEVICE);
 	if (dd->flags & AES_FLAGS_FAST) {
-		dma_unmap_sg(dd->dev, dd->out_sg,
-			dd->fast_nents, DMA_FROM_DEVICE);
-		dma_unmap_sg(dd->dev, dd->in_sg,
-			dd->fast_nents, DMA_TO_DEVICE);
+		if (dd->in_sg != dd->out_sg) {
+			dma_unmap_sg(dd->dev, dd->out_sg,
+				dd->fast_nents, DMA_FROM_DEVICE);
+			dma_unmap_sg(dd->dev, dd->in_sg,
+				dd->fast_nents, DMA_TO_DEVICE);
+		} else {
+			dma_sync_sg_for_cpu(dd->dev, dd->in_sg,
+				dd->fast_nents, DMA_FROM_DEVICE);
+			dma_unmap_sg(dd->dev, dd->in_sg,
+				dd->fast_nents, DMA_BIDIRECTIONAL);
+		}
 		if (dd->flags & AES_FLAGS_CBC)
 			scatterwalk_map_and_copy(dd->req->info, dd->out_sg,
 				dd->fast_total - 16,
@@ -980,7 +1004,7 @@ static void aml_aes_done_task(unsigned long data)
 	aml_dma_debug(dd->descriptor, dd->fast_nents ?
 		      dd->fast_nents : 1, __func__, dd->thread, dd->status);
 
-	err = dd->err ? : err;
+	err = dd->err ? dd->err : err;
 
 	if (dd->total && !err) {
 		if (dd->flags & AES_FLAGS_FAST) {
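
The core fix in aml_aes_sg_dma() and aml_aes_crypt_dma_stop() is the in-place case: when dd->in_sg == dd->out_sg, mapping the same scatterlist twice (once DMA_TO_DEVICE, once DMA_FROM_DEVICE) is not a valid use of the DMA API, so the patch maps it once as DMA_BIDIRECTIONAL and brackets the transfer with explicit syncs. A condensed sketch of that pattern outside the driver; start_aes_dma() is a placeholder for programming the engine and waiting for completion:

```c
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Placeholder: program the AES engine and wait for completion */
extern int start_aes_dma(struct device *dev);

static int aes_dma_in_place(struct device *dev, struct scatterlist *sg,
			    int nents)
{
	int err;

	/* One bidirectional mapping instead of TO_DEVICE + FROM_DEVICE */
	if (!dma_map_sg(dev, sg, nents, DMA_BIDIRECTIONAL))
		return -ENOMEM;

	/* Publish CPU-written plaintext to the device */
	dma_sync_sg_for_device(dev, sg, nents, DMA_TO_DEVICE);

	err = start_aes_dma(dev);

	/* Pick up device-written ciphertext before the CPU reads it */
	dma_sync_sg_for_cpu(dev, sg, nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);

	return err;
}
```

The sync directions mirror the patch itself: dma_sync_sg_for_device(..., DMA_TO_DEVICE) before the transfer, dma_sync_sg_for_cpu(..., DMA_FROM_DEVICE) after it, over the single bidirectional mapping. The final hunk is behavior-preserving: GNU C's `err = dd->err ? : err;` already means `err = dd->err ? dd->err : err;`; the patch just spells the middle operand out in standard C.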