1da177e4c3
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
341 lines
8.5 KiB
C
/*
 * Cryptographic API.
 *
 * Cipher operations.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/scatterlist.h>
#include "internal.h"
#include "scatterwalk.h"

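/*
 * Callback types used by the generic crypt() loop below: cryptfn_t is
 * an algorithm's raw single-block transform (context, dst, src);
 * procfn_t applies one block in a given mode (ECB/CBC), combining the
 * raw transform with per-mode state such as the IV.
 */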
typedef void (cryptfn_t)(void *, u8 *, const u8 *);
typedef void (procfn_t)(struct crypto_tfm *, u8 *,
                        u8 *, cryptfn_t, void *);

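/*
 * Block XOR helpers for CBC chaining; one of these is installed as
 * cit_xor_block in crypto_init_cipher_ops() according to the
 * algorithm's block size (8 or 16 bytes).
 */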
static inline void xor_64(u8 *a, const u8 *b)
{
        ((u32 *)a)[0] ^= ((u32 *)b)[0];
        ((u32 *)a)[1] ^= ((u32 *)b)[1];
}

static inline void xor_128(u8 *a, const u8 *b)
{
        ((u32 *)a)[0] ^= ((u32 *)b)[0];
        ((u32 *)a)[1] ^= ((u32 *)b)[1];
        ((u32 *)a)[2] ^= ((u32 *)b)[2];
        ((u32 *)a)[3] ^= ((u32 *)b)[3];
}

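/*
 * A block that straddles a page boundary in the scatterlist cannot be
 * handed to the cipher directly.  prepare_src() copies such a block
 * into the caller's temporary buffer; prepare_dst() points at the
 * temporary buffer both for page-crossing blocks and for in-place
 * operation, so the source block is not clobbered while it is still
 * being read.
 */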
static inline void *prepare_src(struct scatter_walk *walk, int bsize,
                                void *tmp, int in_place)
{
        void *src = walk->data;
        int n = bsize;

        if (unlikely(scatterwalk_across_pages(walk, bsize))) {
                src = tmp;
                n = scatterwalk_copychunks(src, walk, bsize, 0);
        }
        scatterwalk_advance(walk, n);
        return src;
}

static inline void *prepare_dst(struct scatter_walk *walk, int bsize,
                                void *tmp, int in_place)
{
        void *dst = walk->data;

        if (unlikely(scatterwalk_across_pages(walk, bsize)) || in_place)
                dst = tmp;
        return dst;
}

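/*
 * complete_src() is intentionally empty: the source is only read, so
 * nothing has to be written back.  complete_dst() flushes the block
 * out of the temporary buffer, either chunk-wise across the page
 * boundary or with a plain memcpy for the in-place case, and advances
 * the output walk.
 */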
static inline void complete_src(struct scatter_walk *walk, int bsize,
                                void *src, int in_place)
{
}

static inline void complete_dst(struct scatter_walk *walk, int bsize,
                                void *dst, int in_place)
{
        int n = bsize;

        if (unlikely(scatterwalk_across_pages(walk, bsize)))
                n = scatterwalk_copychunks(dst, walk, bsize, 1);
        else if (in_place)
                memcpy(walk->data, dst, bsize);
        scatterwalk_advance(walk, n);
}

/*
 * Generic encrypt/decrypt wrapper for ciphers.  Handles operations
 * across multiple page boundaries by using temporary blocks.  In user
 * context, the kernel is given a chance to reschedule us between
 * page-sized runs of blocks.
 */
static int crypt(struct crypto_tfm *tfm,
                 struct scatterlist *dst,
                 struct scatterlist *src,
                 unsigned int nbytes, cryptfn_t crfn,
                 procfn_t prfn, void *info)
{
        struct scatter_walk walk_in, walk_out;
        const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
        u8 tmp_src[bsize];
        u8 tmp_dst[bsize];

        if (!nbytes)
                return 0;

        if (nbytes % bsize) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
                return -EINVAL;
        }

        scatterwalk_start(&walk_in, src);
        scatterwalk_start(&walk_out, dst);

        for (;;) {
                u8 *src_p, *dst_p;
                int in_place;

                scatterwalk_map(&walk_in, 0);
                scatterwalk_map(&walk_out, 1);

                in_place = scatterwalk_samebuf(&walk_in, &walk_out);

                do {
                        src_p = prepare_src(&walk_in, bsize, tmp_src,
                                            in_place);
                        dst_p = prepare_dst(&walk_out, bsize, tmp_dst,
                                            in_place);

                        prfn(tfm, dst_p, src_p, crfn, info);

                        complete_src(&walk_in, bsize, src_p, in_place);
                        complete_dst(&walk_out, bsize, dst_p, in_place);

                        nbytes -= bsize;
                } while (nbytes &&
                         !scatterwalk_across_pages(&walk_in, bsize) &&
                         !scatterwalk_across_pages(&walk_out, bsize));

                scatterwalk_done(&walk_in, 0, nbytes);
                scatterwalk_done(&walk_out, 1, nbytes);

                if (!nbytes)
                        return 0;

                crypto_yield(tfm);
        }
}

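/*
 * CBC mode, with "iv" carrying the previous ciphertext block:
 *
 *      encrypt:  C[i] = E_K(P[i] ^ C[i-1])
 *      decrypt:  P[i] = D_K(C[i]) ^ C[i-1]
 *
 * After each block the chaining value is refreshed from the new
 * (encrypt) or old (decrypt) ciphertext.
 */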
static void cbc_process_encrypt(struct crypto_tfm *tfm, u8 *dst, u8 *src,
                                cryptfn_t fn, void *info)
{
        u8 *iv = info;

        tfm->crt_u.cipher.cit_xor_block(iv, src);
        fn(crypto_tfm_ctx(tfm), dst, iv);
        memcpy(iv, dst, crypto_tfm_alg_blocksize(tfm));
}

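/*
 * Note the ordering on decrypt: the old ciphertext block in src must
 * survive the raw decryption so it can become the next chaining value;
 * for in-place operation this works because dst points at a temporary
 * block until complete_dst() writes it back.
 */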
static void cbc_process_decrypt(struct crypto_tfm *tfm, u8 *dst, u8 *src,
                                cryptfn_t fn, void *info)
{
        u8 *iv = info;

        fn(crypto_tfm_ctx(tfm), dst, src);
        tfm->crt_u.cipher.cit_xor_block(dst, iv);
        memcpy(iv, src, crypto_tfm_alg_blocksize(tfm));
}

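/* ECB applies the raw cipher to each block independently; no IV. */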
static void ecb_process(struct crypto_tfm *tfm, u8 *dst, u8 *src,
                        cryptfn_t fn, void *info)
{
        fn(crypto_tfm_ctx(tfm), dst, src);
}

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
        struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;

        if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        } else
                return cia->cia_setkey(crypto_tfm_ctx(tfm), key, keylen,
                                       &tfm->crt_flags);
}

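/*
 * Mode entry points: each wrapper binds crypt() to the algorithm's raw
 * encrypt/decrypt routine and the matching per-block processor.  The
 * plain CBC variants chain through the tfm's own IV (cit_iv); the _iv
 * variants use a caller-supplied IV instead.
 */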
static int ecb_encrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        return crypt(tfm, dst, src, nbytes,
                     tfm->__crt_alg->cra_cipher.cia_encrypt,
                     ecb_process, NULL);
}

static int ecb_decrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src,
                       unsigned int nbytes)
{
        return crypt(tfm, dst, src, nbytes,
                     tfm->__crt_alg->cra_cipher.cia_decrypt,
                     ecb_process, NULL);
}

static int cbc_encrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src,
                       unsigned int nbytes)
{
        return crypt(tfm, dst, src, nbytes,
                     tfm->__crt_alg->cra_cipher.cia_encrypt,
                     cbc_process_encrypt, tfm->crt_cipher.cit_iv);
}

static int cbc_encrypt_iv(struct crypto_tfm *tfm,
                          struct scatterlist *dst,
                          struct scatterlist *src,
                          unsigned int nbytes, u8 *iv)
{
        return crypt(tfm, dst, src, nbytes,
                     tfm->__crt_alg->cra_cipher.cia_encrypt,
                     cbc_process_encrypt, iv);
}

static int cbc_decrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src,
                       unsigned int nbytes)
{
        return crypt(tfm, dst, src, nbytes,
                     tfm->__crt_alg->cra_cipher.cia_decrypt,
                     cbc_process_decrypt, tfm->crt_cipher.cit_iv);
}

static int cbc_decrypt_iv(struct crypto_tfm *tfm,
                          struct scatterlist *dst,
                          struct scatterlist *src,
                          unsigned int nbytes, u8 *iv)
{
        return crypt(tfm, dst, src, nbytes,
                     tfm->__crt_alg->cra_cipher.cia_decrypt,
                     cbc_process_decrypt, iv);
}

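/*
 * CFB and CTR modes are recognised but not implemented in this file;
 * their entry points simply fail with -ENOSYS.
 */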
static int nocrypt(struct crypto_tfm *tfm,
                   struct scatterlist *dst,
                   struct scatterlist *src,
                   unsigned int nbytes)
{
        return -ENOSYS;
}

static int nocrypt_iv(struct crypto_tfm *tfm,
                      struct scatterlist *dst,
                      struct scatterlist *src,
                      unsigned int nbytes, u8 *iv)
{
        return -ENOSYS;
}

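/*
 * A transform with no mode flag set defaults to ECB; a request for
 * weak-key checking is carried over into crt_flags.
 */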
int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags)
{
        u32 mode = flags & CRYPTO_TFM_MODE_MASK;

        tfm->crt_cipher.cit_mode = mode ? mode : CRYPTO_TFM_MODE_ECB;
        if (flags & CRYPTO_TFM_REQ_WEAK_KEY)
                tfm->crt_flags = CRYPTO_TFM_REQ_WEAK_KEY;

        return 0;
}

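/*
 * Install the mode-specific operations and, for CBC, pick the XOR
 * helper for the block size and allocate the per-tfm IV buffer.
 */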
int crypto_init_cipher_ops(struct crypto_tfm *tfm)
{
        int ret = 0;
        struct cipher_tfm *ops = &tfm->crt_cipher;

        ops->cit_setkey = setkey;

        switch (tfm->crt_cipher.cit_mode) {
        case CRYPTO_TFM_MODE_ECB:
                ops->cit_encrypt = ecb_encrypt;
                ops->cit_decrypt = ecb_decrypt;
                break;

        case CRYPTO_TFM_MODE_CBC:
                ops->cit_encrypt = cbc_encrypt;
                ops->cit_decrypt = cbc_decrypt;
                ops->cit_encrypt_iv = cbc_encrypt_iv;
                ops->cit_decrypt_iv = cbc_decrypt_iv;
                break;

        case CRYPTO_TFM_MODE_CFB:
                ops->cit_encrypt = nocrypt;
                ops->cit_decrypt = nocrypt;
                ops->cit_encrypt_iv = nocrypt_iv;
                ops->cit_decrypt_iv = nocrypt_iv;
                break;

        case CRYPTO_TFM_MODE_CTR:
                ops->cit_encrypt = nocrypt;
                ops->cit_decrypt = nocrypt;
                ops->cit_encrypt_iv = nocrypt_iv;
                ops->cit_decrypt_iv = nocrypt_iv;
                break;

        default:
                BUG();
        }

        if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) {

                switch (crypto_tfm_alg_blocksize(tfm)) {
                case 8:
                        ops->cit_xor_block = xor_64;
                        break;

                case 16:
                        ops->cit_xor_block = xor_128;
                        break;

                default:
                        printk(KERN_WARNING "%s: block size %u not supported\n",
                               crypto_tfm_alg_name(tfm),
                               crypto_tfm_alg_blocksize(tfm));
                        ret = -EINVAL;
                        goto out;
                }

                ops->cit_ivsize = crypto_tfm_alg_blocksize(tfm);
                ops->cit_iv = kmalloc(ops->cit_ivsize, GFP_KERNEL);
                if (ops->cit_iv == NULL)
                        ret = -ENOMEM;
        }

out:
        return ret;
}

void crypto_exit_cipher_ops(struct crypto_tfm *tfm)
{
        if (tfm->crt_cipher.cit_iv)
                kfree(tfm->crt_cipher.cit_iv);
}
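
/*
 * Usage sketch (not part of this file): how a caller of this era's
 * kernel crypto API would reach the cit_* hooks installed above, via
 * the inline wrappers declared in <linux/crypto.h>.  The wrapper names
 * and scatterlist layout are assumptions based on that header, shown
 * for illustration only:
 *
 *      struct crypto_tfm *tfm = crypto_alloc_tfm("aes", CRYPTO_TFM_MODE_CBC);
 *      struct scatterlist sg;
 *
 *      if (tfm == NULL)
 *              return -ENOMEM;
 *      crypto_cipher_setkey(tfm, key, keylen);
 *      crypto_cipher_set_iv(tfm, iv, crypto_tfm_alg_ivsize(tfm));
 *
 *      sg.page = virt_to_page(buf);
 *      sg.offset = offset_in_page(buf);
 *      sg.length = len;                  (a multiple of the block size)
 *
 *      crypto_cipher_encrypt(tfm, &sg, &sg, len);    (in-place CBC)
 *      crypto_free_tfm(tfm);
 */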