[CRYPTO] Add plumbing for multi-block operations
The VIA Padlock device is able to perform much better when multiple blocks are fed to it at once. As this device offers an exceptional throughput rate it is worthwhile to optimise the infrastructure specifically for it.

We shift the existing page-sized fast path down to the CBC/ECB functions. We can then replace the CBC/ECB functions with functions provided by the underlying algorithm that perform the multi-block operations.

As a side-effect this improves the performance of large cipher operations for all existing algorithm implementations. I've measured the gain to be around 5% for 3DES and 15% for AES.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
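For orientation, here is a stand-alone user-space sketch of the plumbing this patch introduces. It is illustrative only, not the kernel code: a descriptor bundles the per-block cipher routine (crfn) with a processing routine (prfn) that walks whole blocks in one call and returns how many bytes it handled. The struct and function names mirror the patch, but the 8-byte XOR "cipher", the explicit bsize field and the test harness are invented for the example.

/* sketch.c - illustrative only; loosely mirrors the cipher_desc idea from the patch */
#include <stdio.h>

typedef unsigned char u8;

struct cipher_desc {
        void *ctx;                              /* cipher state (here: just the key) */
        void (*crfn)(void *ctx, u8 *dst, const u8 *src);          /* one block */
        unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst,
                             const u8 *src, unsigned int nbytes); /* many blocks */
        unsigned int bsize;                     /* block size; in the kernel this comes from the tfm */
};

/* Toy single-block "cipher": XOR the block with an 8-byte key (not real crypto). */
static void toy_encrypt_block(void *ctx, u8 *dst, const u8 *src)
{
        const u8 *key = ctx;
        for (int i = 0; i < 8; i++)
                dst[i] = src[i] ^ key[i];
}

/* ECB-style multi-block processor, shaped like ecb_process() in the patch:
 * consume as many whole blocks as fit in nbytes and report the bytes done. */
static unsigned int ecb_process(const struct cipher_desc *desc, u8 *dst,
                                const u8 *src, unsigned int nbytes)
{
        unsigned int bsize = desc->bsize;
        unsigned int done = 0;

        while (done + bsize <= nbytes) {
                desc->crfn(desc->ctx, dst + done, src + done);
                done += bsize;
        }

        return done;
}

int main(void)
{
        u8 key[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        u8 in[20], out[20];
        struct cipher_desc desc = {
                .ctx = key,
                .crfn = toy_encrypt_block,
                .prfn = ecb_process,
                .bsize = 8,
        };

        for (unsigned int i = 0; i < sizeof(in); i++)
                in[i] = (u8)i;

        /* One call covers every whole block; prfn reports how many bytes it
         * actually processed (here 16 of 20). */
        unsigned int done = desc.prfn(&desc, out, in, sizeof(in));
        printf("processed %u of %zu bytes in one call\n", done, sizeof(in));
        return 0;
}

The point of the prfn hook is that many blocks can be handed over in a single call instead of one block per call, which is where the throughput gain described above comes from for a device like the VIA Padlock.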
parent 8279dd748f
commit c774e93e21
3 changed files with 162 additions and 96 deletions
crypto/cipher.c | 248

@@ -4,6 +4,7 @@
  * Cipher operations.
  *
  * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -22,9 +23,13 @@
 #include "internal.h"
 #include "scatterwalk.h"
 
-typedef void (cryptfn_t)(void *, u8 *, const u8 *);
-typedef void (procfn_t)(struct crypto_tfm *, u8 *,
-                        u8*, cryptfn_t, void *);
+struct cipher_desc {
+        struct crypto_tfm *tfm;
+        void (*crfn)(void *ctx, u8 *dst, const u8 *src);
+        unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst,
+                             const u8 *src, unsigned int nbytes);
+        void *info;
+};
 
 static inline void xor_64(u8 *a, const u8 *b)
 {
@@ -39,63 +44,57 @@ static inline void xor_128(u8 *a, const u8 *b)
         ((u32 *)a)[2] ^= ((u32 *)b)[2];
         ((u32 *)a)[3] ^= ((u32 *)b)[3];
 }
 
-static inline void *prepare_src(struct scatter_walk *walk, int bsize,
-                                void *tmp, int in_place)
+static unsigned int crypt_slow(const struct cipher_desc *desc,
+                               struct scatter_walk *in,
+                               struct scatter_walk *out, unsigned int bsize)
 {
-        void *src = walk->data;
-        int n = bsize;
+        u8 src[bsize];
+        u8 dst[bsize];
+        unsigned int n;
 
-        if (unlikely(scatterwalk_across_pages(walk, bsize))) {
-                src = tmp;
-                n = scatterwalk_copychunks(src, walk, bsize, 0);
-        }
-        scatterwalk_advance(walk, n);
-        return src;
+        n = scatterwalk_copychunks(src, in, bsize, 0);
+        scatterwalk_advance(in, n);
+
+        desc->prfn(desc, dst, src, bsize);
+
+        n = scatterwalk_copychunks(dst, out, bsize, 1);
+        scatterwalk_advance(out, n);
+
+        return bsize;
 }
 
-static inline void *prepare_dst(struct scatter_walk *walk, int bsize,
-                                void *tmp, int in_place)
+static inline unsigned int crypt_fast(const struct cipher_desc *desc,
+                                      struct scatter_walk *in,
+                                      struct scatter_walk *out,
+                                      unsigned int nbytes)
 {
-        void *dst = walk->data;
+        u8 *src, *dst;
 
-        if (unlikely(scatterwalk_across_pages(walk, bsize)) || in_place)
-                dst = tmp;
-        return dst;
-}
+        src = in->data;
+        dst = scatterwalk_samebuf(in, out) ? src : out->data;
 
-static inline void complete_src(struct scatter_walk *walk, int bsize,
-                                void *src, int in_place)
-{
-}
+        nbytes = desc->prfn(desc, dst, src, nbytes);
 
-static inline void complete_dst(struct scatter_walk *walk, int bsize,
-                                void *dst, int in_place)
-{
-        int n = bsize;
+        scatterwalk_advance(in, nbytes);
+        scatterwalk_advance(out, nbytes);
 
-        if (unlikely(scatterwalk_across_pages(walk, bsize)))
-                n = scatterwalk_copychunks(dst, walk, bsize, 1);
-        else if (in_place)
-                memcpy(walk->data, dst, bsize);
-        scatterwalk_advance(walk, n);
+        return nbytes;
 }
 
 /*
  * Generic encrypt/decrypt wrapper for ciphers, handles operations across
  * multiple page boundaries by using temporary blocks.  In user context,
- * the kernel is given a chance to schedule us once per block.
+ * the kernel is given a chance to schedule us once per page.
  */
-static int crypt(struct crypto_tfm *tfm,
+static int crypt(const struct cipher_desc *desc,
                  struct scatterlist *dst,
                  struct scatterlist *src,
-                 unsigned int nbytes, cryptfn_t crfn,
-                 procfn_t prfn, void *info)
+                 unsigned int nbytes)
 {
         struct scatter_walk walk_in, walk_out;
+        struct crypto_tfm *tfm = desc->tfm;
         const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
-        u8 tmp_src[bsize];
-        u8 tmp_dst[bsize];
 
         if (!nbytes)
                 return 0;
@@ -109,29 +108,20 @@ static int crypt(struct crypto_tfm *tfm,
         scatterwalk_start(&walk_out, dst);
 
         for(;;) {
-                u8 *src_p, *dst_p;
-                int in_place;
+                unsigned int n;
 
                 scatterwalk_map(&walk_in, 0);
                 scatterwalk_map(&walk_out, 1);
 
-                in_place = scatterwalk_samebuf(&walk_in, &walk_out);
-
-                do {
-                        src_p = prepare_src(&walk_in, bsize, tmp_src,
-                                            in_place);
-                        dst_p = prepare_dst(&walk_out, bsize, tmp_dst,
-                                            in_place);
-
-                        prfn(tfm, dst_p, src_p, crfn, info);
-
-                        complete_src(&walk_in, bsize, src_p, in_place);
-                        complete_dst(&walk_out, bsize, dst_p, in_place);
-
-                        nbytes -= bsize;
-                } while (nbytes &&
-                         !scatterwalk_across_pages(&walk_in, bsize) &&
-                         !scatterwalk_across_pages(&walk_out, bsize));
+                n = scatterwalk_clamp(&walk_in, nbytes);
+                n = scatterwalk_clamp(&walk_out, n);
+
+                if (likely(n >= bsize))
+                        n = crypt_fast(desc, &walk_in, &walk_out, n);
+                else
+                        n = crypt_slow(desc, &walk_in, &walk_out, bsize);
+
+                nbytes -= n;
 
                 scatterwalk_done(&walk_in, 0, nbytes);
                 scatterwalk_done(&walk_out, 1, nbytes);
@@ -143,30 +133,78 @@ static int crypt(struct crypto_tfm *tfm,
         }
 }
 
-static void cbc_process_encrypt(struct crypto_tfm *tfm, u8 *dst, u8 *src,
-                                cryptfn_t fn, void *info)
+static unsigned int cbc_process_encrypt(const struct cipher_desc *desc,
+                                        u8 *dst, const u8 *src,
+                                        unsigned int nbytes)
 {
-        u8 *iv = info;
+        struct crypto_tfm *tfm = desc->tfm;
+        void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block;
+        int bsize = crypto_tfm_alg_blocksize(tfm);
 
-        tfm->crt_u.cipher.cit_xor_block(iv, src);
-        fn(crypto_tfm_ctx(tfm), dst, iv);
-        memcpy(iv, dst, crypto_tfm_alg_blocksize(tfm));
+        void (*fn)(void *, u8 *, const u8 *) = desc->crfn;
+        u8 *iv = desc->info;
+        unsigned int done = 0;
+
+        do {
+                xor(iv, src);
+                fn(crypto_tfm_ctx(tfm), dst, iv);
+                memcpy(iv, dst, bsize);
+
+                src += bsize;
+                dst += bsize;
+        } while ((done += bsize) < nbytes);
+
+        return done;
 }
 
-static void cbc_process_decrypt(struct crypto_tfm *tfm, u8 *dst, u8 *src,
-                                cryptfn_t fn, void *info)
+static unsigned int cbc_process_decrypt(const struct cipher_desc *desc,
+                                        u8 *dst, const u8 *src,
+                                        unsigned int nbytes)
 {
-        u8 *iv = info;
+        struct crypto_tfm *tfm = desc->tfm;
+        void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block;
+        int bsize = crypto_tfm_alg_blocksize(tfm);
 
-        fn(crypto_tfm_ctx(tfm), dst, src);
-        tfm->crt_u.cipher.cit_xor_block(dst, iv);
-        memcpy(iv, src, crypto_tfm_alg_blocksize(tfm));
+        u8 stack[src == dst ? bsize : 0];
+        u8 *buf = stack;
+        u8 **dst_p = src == dst ? &buf : &dst;
+
+        void (*fn)(void *, u8 *, const u8 *) = desc->crfn;
+        u8 *iv = desc->info;
+        unsigned int done = 0;
+
+        do {
+                u8 *tmp_dst = *dst_p;
+
+                fn(crypto_tfm_ctx(tfm), tmp_dst, src);
+                xor(tmp_dst, iv);
+                memcpy(iv, src, bsize);
+                if (tmp_dst != dst)
+                        memcpy(dst, tmp_dst, bsize);
+
+                src += bsize;
+                dst += bsize;
+        } while ((done += bsize) < nbytes);
+
+        return done;
 }
 
-static void ecb_process(struct crypto_tfm *tfm, u8 *dst, u8 *src,
-                        cryptfn_t fn, void *info)
+static unsigned int ecb_process(const struct cipher_desc *desc, u8 *dst,
+                                const u8 *src, unsigned int nbytes)
 {
-        fn(crypto_tfm_ctx(tfm), dst, src);
+        struct crypto_tfm *tfm = desc->tfm;
+        int bsize = crypto_tfm_alg_blocksize(tfm);
+        void (*fn)(void *, u8 *, const u8 *) = desc->crfn;
+        unsigned int done = 0;
+
+        do {
+                fn(crypto_tfm_ctx(tfm), dst, src);
+
+                src += bsize;
+                dst += bsize;
+        } while ((done += bsize) < nbytes);
+
+        return done;
 }
 
 static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
@@ -185,9 +223,13 @@ static int ecb_encrypt(struct crypto_tfm *tfm,
                        struct scatterlist *dst,
                        struct scatterlist *src, unsigned int nbytes)
 {
-        return crypt(tfm, dst, src, nbytes,
-                     tfm->__crt_alg->cra_cipher.cia_encrypt,
-                     ecb_process, NULL);
+        struct cipher_desc desc;
+
+        desc.tfm = tfm;
+        desc.crfn = tfm->__crt_alg->cra_cipher.cia_encrypt;
+        desc.prfn = ecb_process;
+
+        return crypt(&desc, dst, src, nbytes);
 }
 
 static int ecb_decrypt(struct crypto_tfm *tfm,
@@ -195,9 +237,13 @@ static int ecb_decrypt(struct crypto_tfm *tfm,
                        struct scatterlist *src,
                        unsigned int nbytes)
 {
-        return crypt(tfm, dst, src, nbytes,
-                     tfm->__crt_alg->cra_cipher.cia_decrypt,
-                     ecb_process, NULL);
+        struct cipher_desc desc;
+
+        desc.tfm = tfm;
+        desc.crfn = tfm->__crt_alg->cra_cipher.cia_decrypt;
+        desc.prfn = ecb_process;
+
+        return crypt(&desc, dst, src, nbytes);
 }
 
 static int cbc_encrypt(struct crypto_tfm *tfm,
@@ -205,9 +251,14 @@ static int cbc_encrypt(struct crypto_tfm *tfm,
                        struct scatterlist *src,
                        unsigned int nbytes)
 {
-        return crypt(tfm, dst, src, nbytes,
-                     tfm->__crt_alg->cra_cipher.cia_encrypt,
-                     cbc_process_encrypt, tfm->crt_cipher.cit_iv);
+        struct cipher_desc desc;
+
+        desc.tfm = tfm;
+        desc.crfn = tfm->__crt_alg->cra_cipher.cia_encrypt;
+        desc.prfn = cbc_process_encrypt;
+        desc.info = tfm->crt_cipher.cit_iv;
+
+        return crypt(&desc, dst, src, nbytes);
 }
 
 static int cbc_encrypt_iv(struct crypto_tfm *tfm,
@@ -215,9 +266,14 @@ static int cbc_encrypt_iv(struct crypto_tfm *tfm,
                           struct scatterlist *src,
                           unsigned int nbytes, u8 *iv)
 {
-        return crypt(tfm, dst, src, nbytes,
-                     tfm->__crt_alg->cra_cipher.cia_encrypt,
-                     cbc_process_encrypt, iv);
+        struct cipher_desc desc;
+
+        desc.tfm = tfm;
+        desc.crfn = tfm->__crt_alg->cra_cipher.cia_encrypt;
+        desc.prfn = cbc_process_encrypt;
+        desc.info = iv;
+
+        return crypt(&desc, dst, src, nbytes);
 }
 
 static int cbc_decrypt(struct crypto_tfm *tfm,
@@ -225,9 +281,14 @@ static int cbc_decrypt(struct crypto_tfm *tfm,
                        struct scatterlist *src,
                        unsigned int nbytes)
 {
-        return crypt(tfm, dst, src, nbytes,
-                     tfm->__crt_alg->cra_cipher.cia_decrypt,
-                     cbc_process_decrypt, tfm->crt_cipher.cit_iv);
+        struct cipher_desc desc;
+
+        desc.tfm = tfm;
+        desc.crfn = tfm->__crt_alg->cra_cipher.cia_decrypt;
+        desc.prfn = cbc_process_decrypt;
+        desc.info = tfm->crt_cipher.cit_iv;
+
+        return crypt(&desc, dst, src, nbytes);
 }
 
 static int cbc_decrypt_iv(struct crypto_tfm *tfm,
@@ -235,9 +296,14 @@ static int cbc_decrypt_iv(struct crypto_tfm *tfm,
                           struct scatterlist *src,
                           unsigned int nbytes, u8 *iv)
 {
-        return crypt(tfm, dst, src, nbytes,
-                     tfm->__crt_alg->cra_cipher.cia_decrypt,
-                     cbc_process_decrypt, iv);
+        struct cipher_desc desc;
+
+        desc.tfm = tfm;
+        desc.crfn = tfm->__crt_alg->cra_cipher.cia_decrypt;
+        desc.prfn = cbc_process_decrypt;
+        desc.info = iv;
+
+        return crypt(&desc, dst, src, nbytes);
 }
 
 static int nocrypt(struct crypto_tfm *tfm,
crypto/scatterwalk.c

@@ -100,7 +100,7 @@ void scatterwalk_done(struct scatter_walk *walk, int out, int more)
 int scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
                            size_t nbytes, int out)
 {
-        do {
+        while (nbytes > walk->len_this_page) {
                 memcpy_dir(buf, walk->data, walk->len_this_page, out);
                 buf += walk->len_this_page;
                 nbytes -= walk->len_this_page;
@@ -108,7 +108,7 @@ int scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
                 scatterwalk_unmap(walk, out);
                 scatterwalk_pagedone(walk, out, 1);
                 scatterwalk_map(walk, out);
-        } while (nbytes > walk->len_this_page);
+        }
 
         memcpy_dir(buf, walk->data, nbytes, out);
         return nbytes;
crypto/scatterwalk.h

@@ -40,10 +40,10 @@ static inline int scatterwalk_samebuf(struct scatter_walk *walk_in,
                walk_in->offset == walk_out->offset;
 }
 
-static inline int scatterwalk_across_pages(struct scatter_walk *walk,
-                                           unsigned int nbytes)
+static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk,
+                                             unsigned int nbytes)
 {
-        return nbytes > walk->len_this_page;
+        return nbytes > walk->len_this_page ? walk->len_this_page : nbytes;
 }
 
 static inline void scatterwalk_advance(struct scatter_walk *walk,