40725181b7
This patch adds hooks for cipher algorithms to implement multi-block ECB/CBC operations directly. This is expected to provide significant performance boosts to the VIA Padlock. It could also be used for improving software implementations such as AES, where operating on multiple blocks at a time may enable certain optimisations.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
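To make the idea concrete, here is a minimal sketch of what such per-algorithm hooks could look like. The hook names (cia_encrypt_ecb and friends), the cipher_desc type and the signatures are assumptions for illustration, not necessarily the exact interface added by this commit:

	/* Sketch only: names and signatures are assumed, not verbatim. */
	struct cipher_desc {
		struct crypto_tfm *tfm;	/* the transform being driven */
		void *info;		/* chaining state, e.g. the CBC IV */
	};

	struct cipher_alg {
		/* Mandatory single-block primitives (existing). */
		void (*cia_encrypt)(void *ctx, u8 *dst, const u8 *src);
		void (*cia_decrypt)(void *ctx, u8 *dst, const u8 *src);

		/*
		 * Optional multi-block fast paths (new): each call covers
		 * nbytes, a multiple of the block size, so hardware such
		 * as the VIA Padlock can push a whole buffer through one
		 * operation.  Returns the number of bytes processed.
		 */
		unsigned int (*cia_encrypt_ecb)(const struct cipher_desc *desc,
						u8 *dst, const u8 *src,
						unsigned int nbytes);
		unsigned int (*cia_decrypt_ecb)(const struct cipher_desc *desc,
						u8 *dst, const u8 *src,
						unsigned int nbytes);
		unsigned int (*cia_encrypt_cbc)(const struct cipher_desc *desc,
						u8 *dst, const u8 *src,
						unsigned int nbytes);
		unsigned int (*cia_decrypt_cbc)(const struct cipher_desc *desc,
						u8 *dst, const u8 *src,
						unsigned int nbytes);
	};

Under this sketch, the generic ECB/CBC code would use a multi-block hook when the algorithm provides one and otherwise fall back to looping over the single-block cia_encrypt/cia_decrypt.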
87 lines
2.3 KiB
C
/*
 * Cryptographic API.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#ifndef _CRYPTO_INTERNAL_H
#define _CRYPTO_INTERNAL_H

#include <linux/crypto.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <asm/kmap_types.h>

/* Four kmap slots: user vs. softirq context, crossed with in vs. out. */
extern enum km_type crypto_km_types[];

/* Select a kmap slot from the current context and the data direction. */
static inline enum km_type crypto_kmap_type(int out)
{
	return crypto_km_types[(in_softirq() ? 2 : 0) + out];
}

/* Atomically map a page holding input (out == 0) or output (out == 1) data. */
static inline void *crypto_kmap(struct page *page, int out)
{
	return kmap_atomic(page, crypto_kmap_type(out));
}

static inline void crypto_kunmap(void *vaddr, int out)
{
	kunmap_atomic(vaddr, crypto_kmap_type(out));
}
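
/*
 * Usage sketch (illustrative only; process_blocks() is a hypothetical
 * helper, not part of this header).  A caller walking scatterlist data
 * maps a source and a destination page atomically, works on them, then
 * unmaps in reverse order:
 *
 *	u8 *src = crypto_kmap(src_page, 0);
 *	u8 *dst = crypto_kmap(dst_page, 1);
 *	process_blocks(dst, src, nbytes);
 *	crypto_kunmap(dst, 1);
 *	crypto_kunmap(src, 0);
 *
 * Distinct 'out' values select distinct kmap slots, so input and output
 * pages can be mapped at the same time in the same context.
 */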

/* Voluntarily reschedule between chunks of work when sleeping is allowed. */
static inline void crypto_yield(struct crypto_tfm *tfm)
{
	if (!in_atomic())
		cond_resched();
}

struct crypto_alg *crypto_alg_lookup(const char *name);

/* A far more intelligent version of this is planned.  For now, just
 * try an exact match on the name of the algorithm. */
static inline struct crypto_alg *crypto_alg_mod_lookup(const char *name)
{
	return try_then_request_module(crypto_alg_lookup(name), name);
}
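
/*
 * For reference, try_then_request_module() from <linux/kmod.h> retries
 * the lookup after asking modprobe to load a module named after the
 * algorithm.  Roughly (simplified sketch):
 *
 *	alg = crypto_alg_lookup(name);
 *	if (!alg) {
 *		request_module(name);
 *		alg = crypto_alg_lookup(name);
 *	}
 */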

#ifdef CONFIG_CRYPTO_HMAC
int crypto_alloc_hmac_block(struct crypto_tfm *tfm);
void crypto_free_hmac_block(struct crypto_tfm *tfm);
#else
static inline int crypto_alloc_hmac_block(struct crypto_tfm *tfm)
{
	return 0;
}

static inline void crypto_free_hmac_block(struct crypto_tfm *tfm)
{ }
#endif

#ifdef CONFIG_PROC_FS
void __init crypto_init_proc(void);
#else
static inline void crypto_init_proc(void)
{ }
#endif

/* Per-type setup: validate request flags, then install type-specific ops. */
int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags);
int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags);
int crypto_init_compress_flags(struct crypto_tfm *tfm, u32 flags);

int crypto_init_digest_ops(struct crypto_tfm *tfm);
int crypto_init_cipher_ops(struct crypto_tfm *tfm);
int crypto_init_compress_ops(struct crypto_tfm *tfm);

void crypto_exit_digest_ops(struct crypto_tfm *tfm);
void crypto_exit_cipher_ops(struct crypto_tfm *tfm);
void crypto_exit_compress_ops(struct crypto_tfm *tfm);

#endif	/* _CRYPTO_INTERNAL_H */