crypto: x86/aria - add keystream array into request ctx
The AVX-accelerated ARIA module used a local (on-stack) keystream array, but that array is too large to keep on the kernel stack. Move the keystream array into the request context instead. Signed-off-by: Taehee Yoo <ap420073@gmail.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
parent
91dfd98216
commit
8e7d7ce2e3
|
@ -33,6 +33,10 @@ asmlinkage void aria_aesni_avx_gfni_ctr_crypt_16way(const void *ctx, u8 *dst,
|
|||
|
||||
static struct aria_avx_ops aria_ops;
|
||||
|
||||
/*
 * Per-request scratch state. The CTR keystream buffer lives here (in the
 * skcipher request context, sized via crypto_skcipher_set_reqsize in
 * aria_avx_init_tfm) rather than as a local array, because a buffer of
 * ARIA_AESNI_PARALLEL_BLOCK_SIZE bytes is too large for the kernel stack.
 */
struct aria_avx_request_ctx {
|
||||
u8 keystream[ARIA_AESNI_PARALLEL_BLOCK_SIZE]; /* CTR keystream scratch */
|
||||
};
|
||||
|
||||
static int ecb_do_encrypt(struct skcipher_request *req, const u32 *rkey)
|
||||
{
|
||||
ECB_WALK_START(req, ARIA_BLOCK_SIZE, ARIA_AESNI_PARALLEL_BLOCKS);
|
||||
|
@ -73,6 +77,7 @@ static int aria_avx_set_key(struct crypto_skcipher *tfm, const u8 *key,
|
|||
|
||||
static int aria_avx_ctr_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct aria_avx_request_ctx *req_ctx = skcipher_request_ctx(req);
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct aria_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
|
@ -86,10 +91,9 @@ static int aria_avx_ctr_encrypt(struct skcipher_request *req)
|
|||
u8 *dst = walk.dst.virt.addr;
|
||||
|
||||
while (nbytes >= ARIA_AESNI_PARALLEL_BLOCK_SIZE) {
|
||||
u8 keystream[ARIA_AESNI_PARALLEL_BLOCK_SIZE];
|
||||
|
||||
kernel_fpu_begin();
|
||||
aria_ops.aria_ctr_crypt_16way(ctx, dst, src, keystream,
|
||||
aria_ops.aria_ctr_crypt_16way(ctx, dst, src,
|
||||
&req_ctx->keystream[0],
|
||||
walk.iv);
|
||||
kernel_fpu_end();
|
||||
dst += ARIA_AESNI_PARALLEL_BLOCK_SIZE;
|
||||
|
@ -98,28 +102,29 @@ static int aria_avx_ctr_encrypt(struct skcipher_request *req)
|
|||
}
|
||||
|
||||
while (nbytes >= ARIA_BLOCK_SIZE) {
|
||||
u8 keystream[ARIA_BLOCK_SIZE];
|
||||
|
||||
memcpy(keystream, walk.iv, ARIA_BLOCK_SIZE);
|
||||
memcpy(&req_ctx->keystream[0], walk.iv, ARIA_BLOCK_SIZE);
|
||||
crypto_inc(walk.iv, ARIA_BLOCK_SIZE);
|
||||
|
||||
aria_encrypt(ctx, keystream, keystream);
|
||||
aria_encrypt(ctx, &req_ctx->keystream[0],
|
||||
&req_ctx->keystream[0]);
|
||||
|
||||
crypto_xor_cpy(dst, src, keystream, ARIA_BLOCK_SIZE);
|
||||
crypto_xor_cpy(dst, src, &req_ctx->keystream[0],
|
||||
ARIA_BLOCK_SIZE);
|
||||
dst += ARIA_BLOCK_SIZE;
|
||||
src += ARIA_BLOCK_SIZE;
|
||||
nbytes -= ARIA_BLOCK_SIZE;
|
||||
}
|
||||
|
||||
if (walk.nbytes == walk.total && nbytes > 0) {
|
||||
u8 keystream[ARIA_BLOCK_SIZE];
|
||||
|
||||
memcpy(keystream, walk.iv, ARIA_BLOCK_SIZE);
|
||||
memcpy(&req_ctx->keystream[0], walk.iv,
|
||||
ARIA_BLOCK_SIZE);
|
||||
crypto_inc(walk.iv, ARIA_BLOCK_SIZE);
|
||||
|
||||
aria_encrypt(ctx, keystream, keystream);
|
||||
aria_encrypt(ctx, &req_ctx->keystream[0],
|
||||
&req_ctx->keystream[0]);
|
||||
|
||||
crypto_xor_cpy(dst, src, keystream, nbytes);
|
||||
crypto_xor_cpy(dst, src, &req_ctx->keystream[0],
|
||||
nbytes);
|
||||
dst += nbytes;
|
||||
src += nbytes;
|
||||
nbytes = 0;
|
||||
|
@ -130,6 +135,13 @@ static int aria_avx_ctr_encrypt(struct skcipher_request *req)
|
|||
return err;
|
||||
}
|
||||
|
||||
/*
 * Transform-init hook: reserve space in every skcipher request for
 * struct aria_avx_request_ctx, so aria_avx_ctr_encrypt() can obtain its
 * keystream buffer via skcipher_request_ctx() instead of the stack.
 * Returns 0 (cannot fail).
 */
static int aria_avx_init_tfm(struct crypto_skcipher *tfm)
|
||||
{
|
||||
crypto_skcipher_set_reqsize(tfm, sizeof(struct aria_avx_request_ctx));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct skcipher_alg aria_algs[] = {
|
||||
{
|
||||
.base.cra_name = "__ecb(aria)",
|
||||
|
@ -160,6 +172,7 @@ static struct skcipher_alg aria_algs[] = {
|
|||
.setkey = aria_avx_set_key,
|
||||
.encrypt = aria_avx_ctr_encrypt,
|
||||
.decrypt = aria_avx_ctr_encrypt,
|
||||
.init = aria_avx_init_tfm,
|
||||
}
|
||||
};
|
||||
|
||||
|
|
Loading…
Reference in New Issue