Mirror of https://github.com/adulau/aha.git (synced 2024-12-27 19:26:25 +00:00)

commit 48467641bc
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

43 changed files with 405 additions and 283 deletions
@@ -223,6 +223,7 @@ CAST5 algorithm contributors:
 TEA/XTEA algorithm contributors:
   Aaron Grothe
+  Michael Ringe

 Khazad algorithm contributors:
   Aaron Grothe
@@ -219,7 +219,7 @@ config CRYPTO_CAST6
	  described in RFC2612.

 config CRYPTO_TEA
-	tristate "TEA and XTEA cipher algorithms"
+	tristate "TEA, XTEA and XETA cipher algorithms"
	depends on CRYPTO
	help
	  TEA cipher algorithm.
@@ -232,6 +232,9 @@ config CRYPTO_TEA
	  the TEA algorithm to address a potential key weakness
	  in the TEA algorithm.

+	  Xtendend Encryption Tiny Algorithm is a mis-implementation
+	  of the XTEA algorithm for compatibility purposes.
+
 config CRYPTO_ARC4
	tristate "ARC4 cipher algorithm"
	depends on CRYPTO
@@ -66,7 +66,8 @@ static inline struct crypto_alg *crypto_alg_mod_lookup(const char *name)

 static int crypto_init_flags(struct crypto_tfm *tfm, u32 flags)
 {
-	tfm->crt_flags = 0;
+	tfm->crt_flags = flags & CRYPTO_TFM_REQ_MASK;
+	flags &= ~CRYPTO_TFM_REQ_MASK;

	switch (crypto_tfm_alg_type(tfm)) {
	case CRYPTO_ALG_TYPE_CIPHER:
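With the request bits now latched into tfm->crt_flags at allocation time, callers opt in to sleeping simply by ORing CRYPTO_TFM_REQ_MAY_SLEEP into the second argument of crypto_alloc_tfm(), as the cryptoloop, dm-crypt, airo, nfsd, sunrpc and seclvl hunks below all do. A minimal sketch of the calling convention (the "aes"/CBC choice here is just an example):

	/* Allocate a CBC AES tfm whose operations may sleep; the flag ends
	 * up in tfm->crt_flags and is tested later by crypto_yield(). */
	struct crypto_tfm *tfm = crypto_alloc_tfm("aes", CRYPTO_TFM_MODE_CBC |
							 CRYPTO_TFM_REQ_MAY_SLEEP);
	if (tfm == NULL)
		return -EINVAL;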
@@ -377,11 +377,7 @@ static int nocrypt_iv(struct crypto_tfm *tfm,
 int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags)
 {
	u32 mode = flags & CRYPTO_TFM_MODE_MASK;

	tfm->crt_cipher.cit_mode = mode ? mode : CRYPTO_TFM_MODE_ECB;
-	if (flags & CRYPTO_TFM_REQ_WEAK_KEY)
-		tfm->crt_flags = CRYPTO_TFM_REQ_WEAK_KEY;
-
	return 0;
 }
@@ -17,6 +17,7 @@
 #include <linux/interrupt.h>
 #include <linux/init.h>
+#include <linux/kernel.h>
 #include <linux/slab.h>
 #include <asm/kmap_types.h>

 extern enum km_type crypto_km_types[];

@@ -38,7 +39,7 @@ static inline void crypto_kunmap(void *vaddr, int out)

 static inline void crypto_yield(struct crypto_tfm *tfm)
 {
-	if (!in_atomic())
+	if (tfm->crt_flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		cond_resched();
 }
@@ -72,7 +72,7 @@ static char *check[] = {
	"des", "md5", "des3_ede", "rot13", "sha1", "sha256", "blowfish",
	"twofish", "serpent", "sha384", "sha512", "md4", "aes", "cast6",
	"arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
-	"khazad", "wp512", "wp384", "wp256", "tnepres", NULL
+	"khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", NULL
 };

 static void hexdump(unsigned char *buf, unsigned int len)

@@ -859,6 +859,10 @@ static void do_test(void)
		test_cipher ("anubis", MODE_CBC, ENCRYPT, anubis_cbc_enc_tv_template, ANUBIS_CBC_ENC_TEST_VECTORS);
		test_cipher ("anubis", MODE_CBC, DECRYPT, anubis_cbc_dec_tv_template, ANUBIS_CBC_ENC_TEST_VECTORS);

+		//XETA
+		test_cipher ("xeta", MODE_ECB, ENCRYPT, xeta_enc_tv_template, XETA_ENC_TEST_VECTORS);
+		test_cipher ("xeta", MODE_ECB, DECRYPT, xeta_dec_tv_template, XETA_DEC_TEST_VECTORS);
+
		test_hash("sha384", sha384_tv_template, SHA384_TEST_VECTORS);
		test_hash("sha512", sha512_tv_template, SHA512_TEST_VECTORS);
		test_hash("wp512", wp512_tv_template, WP512_TEST_VECTORS);

@@ -1016,6 +1020,11 @@ static void do_test(void)
	case 29:
		test_hash("tgr128", tgr128_tv_template, TGR128_TEST_VECTORS);
		break;
+
+	case 30:
+		test_cipher ("xeta", MODE_ECB, ENCRYPT, xeta_enc_tv_template, XETA_ENC_TEST_VECTORS);
+		test_cipher ("xeta", MODE_ECB, DECRYPT, xeta_dec_tv_template, XETA_DEC_TEST_VECTORS);
+		break;

 #ifdef CONFIG_CRYPTO_HMAC
	case 100:
crypto/tcrypt.h (138 lines changed)
@@ -2211,7 +2211,7 @@ static struct cipher_testvec xtea_enc_tv_template[] = {
		.klen	= 16,
		.input	= { [0 ... 8] = 0x00 },
		.ilen	= 8,
-		.result	= { 0xaa, 0x22, 0x96, 0xe5, 0x6c, 0x61, 0xf3, 0x45 },
+		.result	= { 0xd8, 0xd4, 0xe9, 0xde, 0xd9, 0x1e, 0x13, 0xf7 },
		.rlen	= 8,
	}, {
		.key	= { 0x2b, 0x02, 0x05, 0x68, 0x06, 0x14, 0x49, 0x76,
@@ -2219,31 +2219,31 @@ static struct cipher_testvec xtea_enc_tv_template[] = {
		.klen	= 16,
		.input	= { 0x74, 0x65, 0x73, 0x74, 0x20, 0x6d, 0x65, 0x2e },
		.ilen	= 8,
-		.result	= { 0x82, 0x3e, 0xeb, 0x35, 0xdc, 0xdd, 0xd9, 0xc3 },
+		.result	= { 0x94, 0xeb, 0xc8, 0x96, 0x84, 0x6a, 0x49, 0xa8 },
		.rlen	= 8,
	}, {
		.key	= { 0x09, 0x65, 0x43, 0x11, 0x66, 0x44, 0x39, 0x25,
			    0x51, 0x3a, 0x16, 0x10, 0x0a, 0x08, 0x12, 0x6e },
		.klen	= 16,
-		.input	= { 0x6c, 0x6f, 0x6e, 0x67, 0x65, 0x72, 0x5f, 0x74,
-			    0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x63, 0x74 },
+		.input	= { 0x3e, 0xce, 0xae, 0x22, 0x60, 0x56, 0xa8, 0x9d,
+			    0x77, 0x4d, 0xd4, 0xb4, 0x87, 0x24, 0xe3, 0x9a },
		.ilen	= 16,
		.result	= { 0xe2, 0x04, 0xdb, 0xf2, 0x89, 0x85, 0x9e, 0xea,
			    0x61, 0x35, 0xaa, 0xed, 0xb5, 0xcb, 0x71, 0x2c },
		.rlen	= 16,
	}, {
		.key	= { 0x4d, 0x76, 0x32, 0x17, 0x05, 0x3f, 0x75, 0x2c,
			    0x5d, 0x04, 0x16, 0x36, 0x15, 0x72, 0x63, 0x2f },
		.klen	= 16,
		.input	= { 0x54, 0x65, 0x61, 0x20, 0x69, 0x73, 0x20, 0x67,
			    0x6f, 0x6f, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20,
			    0x79, 0x6f, 0x75, 0x21, 0x21, 0x21, 0x20, 0x72,
			    0x65, 0x61, 0x6c, 0x6c, 0x79, 0x21, 0x21, 0x21 },
		.ilen	= 32,
-		.result	= { 0x0b, 0x03, 0xcd, 0x8a, 0xbe, 0x95, 0xfd, 0xb1,
-			    0xc1, 0x44, 0x91, 0x0b, 0xa5, 0xc9, 0x1b, 0xb4,
-			    0xa9, 0xda, 0x1e, 0x9e, 0xb1, 0x3e, 0x2a, 0x8f,
-			    0xea, 0xa5, 0x6a, 0x85, 0xd1, 0xf4, 0xa8, 0xa5 },
+		.result	= { 0x99, 0x81, 0x9f, 0x5d, 0x6f, 0x4b, 0x31, 0x3a,
+			    0x86, 0xff, 0x6f, 0xd0, 0xe3, 0x87, 0x70, 0x07,
+			    0x4d, 0xb8, 0xcf, 0xf3, 0x99, 0x50, 0xb3, 0xd4,
+			    0x73, 0xa2, 0xfa, 0xc9, 0x16, 0x59, 0x5d, 0x81 },
		.rlen	= 32,
	}
 };
@@ -2252,7 +2252,7 @@ static struct cipher_testvec xtea_dec_tv_template[] = {
	{
		.key	= { [0 ... 15] = 0x00 },
		.klen	= 16,
-		.input	= { 0xaa, 0x22, 0x96, 0xe5, 0x6c, 0x61, 0xf3, 0x45 },
+		.input	= { 0xd8, 0xd4, 0xe9, 0xde, 0xd9, 0x1e, 0x13, 0xf7 },
		.ilen	= 8,
		.result	= { [0 ... 8] = 0x00 },
		.rlen	= 8,
@@ -2260,7 +2260,7 @@ static struct cipher_testvec xtea_dec_tv_template[] = {
		.key	= { 0x2b, 0x02, 0x05, 0x68, 0x06, 0x14, 0x49, 0x76,
			    0x77, 0x5d, 0x0e, 0x26, 0x6c, 0x28, 0x78, 0x43 },
		.klen	= 16,
-		.input	= { 0x82, 0x3e, 0xeb, 0x35, 0xdc, 0xdd, 0xd9, 0xc3 },
+		.input	= { 0x94, 0xeb, 0xc8, 0x96, 0x84, 0x6a, 0x49, 0xa8 },
		.ilen	= 8,
		.result	= { 0x74, 0x65, 0x73, 0x74, 0x20, 0x6d, 0x65, 0x2e },
		.rlen	= 8,
@@ -2268,24 +2268,24 @@ static struct cipher_testvec xtea_dec_tv_template[] = {
		.key	= { 0x09, 0x65, 0x43, 0x11, 0x66, 0x44, 0x39, 0x25,
			    0x51, 0x3a, 0x16, 0x10, 0x0a, 0x08, 0x12, 0x6e },
		.klen	= 16,
-		.input	= { 0xe2, 0x04, 0xdb, 0xf2, 0x89, 0x85, 0x9e, 0xea,
-			    0x61, 0x35, 0xaa, 0xed, 0xb5, 0xcb, 0x71, 0x2c },
+		.input	= { 0x3e, 0xce, 0xae, 0x22, 0x60, 0x56, 0xa8, 0x9d,
+			    0x77, 0x4d, 0xd4, 0xb4, 0x87, 0x24, 0xe3, 0x9a },
		.ilen	= 16,
		.result	= { 0x6c, 0x6f, 0x6e, 0x67, 0x65, 0x72, 0x5f, 0x74,
			    0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x63, 0x74 },
		.rlen	= 16,
	}, {
		.key	= { 0x4d, 0x76, 0x32, 0x17, 0x05, 0x3f, 0x75, 0x2c,
			    0x5d, 0x04, 0x16, 0x36, 0x15, 0x72, 0x63, 0x2f },
		.klen	= 16,
-		.input	= { 0x0b, 0x03, 0xcd, 0x8a, 0xbe, 0x95, 0xfd, 0xb1,
-			    0xc1, 0x44, 0x91, 0x0b, 0xa5, 0xc9, 0x1b, 0xb4,
-			    0xa9, 0xda, 0x1e, 0x9e, 0xb1, 0x3e, 0x2a, 0x8f,
-			    0xea, 0xa5, 0x6a, 0x85, 0xd1, 0xf4, 0xa8, 0xa5 },
+		.input	= { 0x99, 0x81, 0x9f, 0x5d, 0x6f, 0x4b, 0x31, 0x3a,
+			    0x86, 0xff, 0x6f, 0xd0, 0xe3, 0x87, 0x70, 0x07,
+			    0x4d, 0xb8, 0xcf, 0xf3, 0x99, 0x50, 0xb3, 0xd4,
+			    0x73, 0xa2, 0xfa, 0xc9, 0x16, 0x59, 0x5d, 0x81 },
		.ilen	= 32,
		.result	= { 0x54, 0x65, 0x61, 0x20, 0x69, 0x73, 0x20, 0x67,
			    0x6f, 0x6f, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20,
			    0x79, 0x6f, 0x75, 0x21, 0x21, 0x21, 0x20, 0x72,
			    0x65, 0x61, 0x6c, 0x6c, 0x79, 0x21, 0x21, 0x21 },
		.rlen	= 32,
	}
@@ -2594,6 +2594,98 @@ static struct cipher_testvec anubis_cbc_dec_tv_template[] = {
	},
 };

+/*
+ * XETA test vectors
+ */
+#define XETA_ENC_TEST_VECTORS	4
+#define XETA_DEC_TEST_VECTORS	4
+
+static struct cipher_testvec xeta_enc_tv_template[] = {
+	{
+		.key	= { [0 ... 15] = 0x00 },
+		.klen	= 16,
+		.input	= { [0 ... 8] = 0x00 },
+		.ilen	= 8,
+		.result	= { 0xaa, 0x22, 0x96, 0xe5, 0x6c, 0x61, 0xf3, 0x45 },
+		.rlen	= 8,
+	}, {
+		.key	= { 0x2b, 0x02, 0x05, 0x68, 0x06, 0x14, 0x49, 0x76,
+			    0x77, 0x5d, 0x0e, 0x26, 0x6c, 0x28, 0x78, 0x43 },
+		.klen	= 16,
+		.input	= { 0x74, 0x65, 0x73, 0x74, 0x20, 0x6d, 0x65, 0x2e },
+		.ilen	= 8,
+		.result	= { 0x82, 0x3e, 0xeb, 0x35, 0xdc, 0xdd, 0xd9, 0xc3 },
+		.rlen	= 8,
+	}, {
+		.key	= { 0x09, 0x65, 0x43, 0x11, 0x66, 0x44, 0x39, 0x25,
+			    0x51, 0x3a, 0x16, 0x10, 0x0a, 0x08, 0x12, 0x6e },
+		.klen	= 16,
+		.input	= { 0x6c, 0x6f, 0x6e, 0x67, 0x65, 0x72, 0x5f, 0x74,
+			    0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x63, 0x74 },
+		.ilen	= 16,
+		.result	= { 0xe2, 0x04, 0xdb, 0xf2, 0x89, 0x85, 0x9e, 0xea,
+			    0x61, 0x35, 0xaa, 0xed, 0xb5, 0xcb, 0x71, 0x2c },
+		.rlen	= 16,
+	}, {
+		.key	= { 0x4d, 0x76, 0x32, 0x17, 0x05, 0x3f, 0x75, 0x2c,
+			    0x5d, 0x04, 0x16, 0x36, 0x15, 0x72, 0x63, 0x2f },
+		.klen	= 16,
+		.input	= { 0x54, 0x65, 0x61, 0x20, 0x69, 0x73, 0x20, 0x67,
+			    0x6f, 0x6f, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20,
+			    0x79, 0x6f, 0x75, 0x21, 0x21, 0x21, 0x20, 0x72,
+			    0x65, 0x61, 0x6c, 0x6c, 0x79, 0x21, 0x21, 0x21 },
+		.ilen	= 32,
+		.result	= { 0x0b, 0x03, 0xcd, 0x8a, 0xbe, 0x95, 0xfd, 0xb1,
+			    0xc1, 0x44, 0x91, 0x0b, 0xa5, 0xc9, 0x1b, 0xb4,
+			    0xa9, 0xda, 0x1e, 0x9e, 0xb1, 0x3e, 0x2a, 0x8f,
+			    0xea, 0xa5, 0x6a, 0x85, 0xd1, 0xf4, 0xa8, 0xa5 },
+		.rlen	= 32,
+	}
+};
+
+static struct cipher_testvec xeta_dec_tv_template[] = {
+	{
+		.key	= { [0 ... 15] = 0x00 },
+		.klen	= 16,
+		.input	= { 0xaa, 0x22, 0x96, 0xe5, 0x6c, 0x61, 0xf3, 0x45 },
+		.ilen	= 8,
+		.result	= { [0 ... 8] = 0x00 },
+		.rlen	= 8,
+	}, {
+		.key	= { 0x2b, 0x02, 0x05, 0x68, 0x06, 0x14, 0x49, 0x76,
+			    0x77, 0x5d, 0x0e, 0x26, 0x6c, 0x28, 0x78, 0x43 },
+		.klen	= 16,
+		.input	= { 0x82, 0x3e, 0xeb, 0x35, 0xdc, 0xdd, 0xd9, 0xc3 },
+		.ilen	= 8,
+		.result	= { 0x74, 0x65, 0x73, 0x74, 0x20, 0x6d, 0x65, 0x2e },
+		.rlen	= 8,
+	}, {
+		.key	= { 0x09, 0x65, 0x43, 0x11, 0x66, 0x44, 0x39, 0x25,
+			    0x51, 0x3a, 0x16, 0x10, 0x0a, 0x08, 0x12, 0x6e },
+		.klen	= 16,
+		.input	= { 0xe2, 0x04, 0xdb, 0xf2, 0x89, 0x85, 0x9e, 0xea,
+			    0x61, 0x35, 0xaa, 0xed, 0xb5, 0xcb, 0x71, 0x2c },
+		.ilen	= 16,
+		.result	= { 0x6c, 0x6f, 0x6e, 0x67, 0x65, 0x72, 0x5f, 0x74,
+			    0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x63, 0x74 },
+		.rlen	= 16,
+	}, {
+		.key	= { 0x4d, 0x76, 0x32, 0x17, 0x05, 0x3f, 0x75, 0x2c,
+			    0x5d, 0x04, 0x16, 0x36, 0x15, 0x72, 0x63, 0x2f },
+		.klen	= 16,
+		.input	= { 0x0b, 0x03, 0xcd, 0x8a, 0xbe, 0x95, 0xfd, 0xb1,
+			    0xc1, 0x44, 0x91, 0x0b, 0xa5, 0xc9, 0x1b, 0xb4,
+			    0xa9, 0xda, 0x1e, 0x9e, 0xb1, 0x3e, 0x2a, 0x8f,
+			    0xea, 0xa5, 0x6a, 0x85, 0xd1, 0xf4, 0xa8, 0xa5 },
+		.ilen	= 32,
+		.result	= { 0x54, 0x65, 0x61, 0x20, 0x69, 0x73, 0x20, 0x67,
+			    0x6f, 0x6f, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20,
+			    0x79, 0x6f, 0x75, 0x21, 0x21, 0x21, 0x20, 0x72,
+			    0x65, 0x61, 0x6c, 0x6c, 0x79, 0x21, 0x21, 0x21 },
+		.rlen	= 32,
+	}
+};
+
 /*
  * Compression stuff.
  */
crypto/tea.c (81 lines changed)
@@ -1,11 +1,15 @@
 /*
  * Cryptographic API.
  *
- * TEA and Xtended TEA Algorithms
+ * TEA, XTEA, and XETA crypto alogrithms
  *
  * The TEA and Xtended TEA algorithms were developed by David Wheeler
  * and Roger Needham at the Computer Laboratory of Cambridge University.
  *
+ * Due to the order of evaluation in XTEA many people have incorrectly
+ * implemented it.  XETA (XTEA in the wrong order), exists for
+ * compatibility with these implementations.
+ *
  * Copyright (c) 2004 Aaron Grothe ajgrothe@yahoo.com
  *
  * This program is free software; you can redistribute it and/or modify
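The whole bug is one misplaced pair of parentheses per round. A standalone sketch of the two encryption rounds side by side, lifted from the hunks below (variable names shortened; the delta constant is the usual XTEA 0x9e3779b9):

	#include <stdint.h>

	/* correct XTEA round: the key/sum term is XORed in as a unit */
	static void xtea_round(uint32_t *y, uint32_t z, uint32_t sum, const uint32_t k[4])
	{
		*y += ((z << 4 ^ z >> 5) + z) ^ (sum + k[sum & 3]);
	}

	/* XETA round: the historical mis-reading, with '+' and '^' regrouped;
	 * kept as a separate algorithm purely for compatibility */
	static void xeta_round(uint32_t *y, uint32_t z, uint32_t sum, const uint32_t k[4])
	{
		*y += (z << 4 ^ z >> 5) + (z ^ sum) + k[sum & 3];
	}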
@@ -153,9 +157,9 @@ static void xtea_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
	z = u32_in (src + 4);

	while (sum != limit) {
-		y += (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum&3];
+		y += ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum&3]);
		sum += XTEA_DELTA;
-		z += (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 &3];
+		z += ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 &3]);
	}

	u32_out (dst, y);
@@ -174,6 +178,51 @@ static void xtea_decrypt(void *ctx_arg, u8 *dst, const u8 *src)

	sum = XTEA_DELTA * XTEA_ROUNDS;

	while (sum) {
-		z -= (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 & 3];
+		z -= ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 & 3]);
		sum -= XTEA_DELTA;
-		y -= (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum & 3];
+		y -= ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum & 3]);
	}

	u32_out (dst, y);
	u32_out (dst + 4, z);

 }

+
+static void xeta_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
+{
+
+	u32 y, z, sum = 0;
+	u32 limit = XTEA_DELTA * XTEA_ROUNDS;
+
+	struct xtea_ctx *ctx = ctx_arg;
+
+	y = u32_in (src);
+	z = u32_in (src + 4);
+
+	while (sum != limit) {
+		y += (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum&3];
+		sum += XTEA_DELTA;
+		z += (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 &3];
+	}
+
+	u32_out (dst, y);
+	u32_out (dst + 4, z);
+
+}
+
+static void xeta_decrypt(void *ctx_arg, u8 *dst, const u8 *src)
+{
+
+	u32 y, z, sum;
+	struct tea_ctx *ctx = ctx_arg;
+
+	y = u32_in (src);
+	z = u32_in (src + 4);
+
+	sum = XTEA_DELTA * XTEA_ROUNDS;
+
+	while (sum) {
+		z -= (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 & 3];
+		sum -= XTEA_DELTA;
@@ -215,6 +264,21 @@ static struct crypto_alg xtea_alg = {
	.cia_decrypt		=	xtea_decrypt } }
 };

+static struct crypto_alg xeta_alg = {
+	.cra_name		=	"xeta",
+	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
+	.cra_blocksize		=	XTEA_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof (struct xtea_ctx),
+	.cra_module		=	THIS_MODULE,
+	.cra_list		=	LIST_HEAD_INIT(xtea_alg.cra_list),
+	.cra_u			=	{ .cipher = {
+	.cia_min_keysize	=	XTEA_KEY_SIZE,
+	.cia_max_keysize	=	XTEA_KEY_SIZE,
+	.cia_setkey		=	xtea_setkey,
+	.cia_encrypt		=	xeta_encrypt,
+	.cia_decrypt		=	xeta_decrypt } }
+};
+
 static int __init init(void)
 {
	int ret = 0;
@@ -229,6 +293,13 @@ static int __init init(void)
		goto out;
	}

+	ret = crypto_register_alg(&xeta_alg);
+	if (ret < 0) {
+		crypto_unregister_alg(&tea_alg);
+		crypto_unregister_alg(&xtea_alg);
+		goto out;
+	}
+
 out:
	return ret;
 }
@@ -237,12 +308,14 @@ static void __exit fini(void)
 {
	crypto_unregister_alg(&tea_alg);
	crypto_unregister_alg(&xtea_alg);
+	crypto_unregister_alg(&xeta_alg);
 }

 MODULE_ALIAS("xtea");
+MODULE_ALIAS("xeta");

 module_init(init);
 module_exit(fini);

 MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("TEA & XTEA Cryptographic Algorithms");
+MODULE_DESCRIPTION("TEA, XTEA & XETA Cryptographic Algorithms");
@@ -417,9 +417,9 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
		chan = (here[3] & uPD98401_AAL5_CHAN) >>
		    uPD98401_AAL5_CHAN_SHIFT;
		if (chan < zatm_dev->chans && zatm_dev->rx_map[chan]) {
-			int pos = ZATM_VCC(vcc)->pool;
-
+			int pos;
			vcc = zatm_dev->rx_map[chan];
+			pos = ZATM_VCC(vcc)->pool;
			if (skb == zatm_dev->last_free[pos])
				zatm_dev->last_free[pos] = NULL;
			skb_unlink(skb, zatm_dev->pool + pos);
@@ -57,9 +57,11 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
	mode = strsep(&cmsp, "-");

	if (mode == NULL || strcmp(mode, "cbc") == 0)
-		tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_CBC);
+		tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_CBC |
+					       CRYPTO_TFM_REQ_MAY_SLEEP);
	else if (strcmp(mode, "ecb") == 0)
-		tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_ECB);
+		tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_ECB |
+					       CRYPTO_TFM_REQ_MAY_SLEEP);
	if (tfm == NULL)
		return -EINVAL;
@@ -144,7 +144,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
	}

	/* Hash the cipher key with the given hash algorithm */
-	hash_tfm = crypto_alloc_tfm(opts, 0);
+	hash_tfm = crypto_alloc_tfm(opts, CRYPTO_TFM_REQ_MAY_SLEEP);
	if (hash_tfm == NULL) {
		ti->error = PFX "Error initializing ESSIV hash";
		return -EINVAL;

@@ -172,7 +172,8 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_tfm(crypto_tfm_alg_name(cc->tfm),
-				     CRYPTO_TFM_MODE_ECB);
+				     CRYPTO_TFM_MODE_ECB |
+				     CRYPTO_TFM_REQ_MAY_SLEEP);
	if (essiv_tfm == NULL) {
		ti->error = PFX "Error allocating crypto tfm for ESSIV";
		kfree(salt);

@@ -587,7 +588,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
		goto bad1;
	}

-	tfm = crypto_alloc_tfm(cipher, crypto_flags);
+	tfm = crypto_alloc_tfm(cipher, crypto_flags | CRYPTO_TFM_REQ_MAY_SLEEP);
	if (!tfm) {
		ti->error = PFX "Error allocating crypto tfm";
		goto bad1;
@@ -948,6 +948,7 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id, struct pt_regs *regs)
	u32 gem_status = readl(gp->regs + GREG_STAT);

	if (gem_status == 0) {
+		netif_poll_enable(dev);
		spin_unlock_irqrestore(&gp->lock, flags);
		return IRQ_NONE;
	}
@@ -1020,7 +1020,7 @@ struct gem {

	struct gem_init_block	*init_block;
	struct sk_buff		*rx_skbs[RX_RING_SIZE];
-	struct sk_buff		*tx_skbs[RX_RING_SIZE];
+	struct sk_buff		*tx_skbs[TX_RING_SIZE];
	dma_addr_t		gblock_dvma;

	struct pci_dev		*pdev;
@@ -66,8 +66,8 @@

 #define DRV_MODULE_NAME		"tg3"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"3.37"
-#define DRV_MODULE_RELDATE	"August 25, 2005"
+#define DRV_MODULE_VERSION	"3.38"
+#define DRV_MODULE_RELDATE	"September 1, 2005"

 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ -121,12 +121,9 @@
				 TG3_RX_RCB_RING_SIZE(tp))
 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
-#define TX_RING_GAP(TP)	\
-	(TG3_TX_RING_SIZE - (TP)->tx_pending)
 #define TX_BUFFS_AVAIL(TP)						\
-	(((TP)->tx_cons <= (TP)->tx_prod) ?				\
-	  (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :		\
-	  (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
+	((TP)->tx_pending -						\
+	 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

 #define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
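The rewritten TX_BUFFS_AVAIL leans on the power-of-two ring size: (prod - cons) masked with TG3_TX_RING_SIZE - 1 gives the number of in-flight descriptors even after the producer index wraps, so the old three-way conditional (and TX_RING_GAP) can go. A standalone illustration with invented numbers:

	#include <stdio.h>

	#define RING_SIZE 512	/* a power of two, like TG3_TX_RING_SIZE */

	static unsigned int tx_buffs_avail(unsigned int prod, unsigned int cons,
					   unsigned int pending)
	{
		/* unsigned subtraction wraps mod 2^32; the mask reduces it
		 * mod RING_SIZE, yielding the in-flight descriptor count */
		return pending - ((prod - cons) & (RING_SIZE - 1));
	}

	int main(void)
	{
		/* producer wrapped past the ring end: (5-510)&511 = 7 in
		 * flight, so 511 - 7 = 504 buffers are still available */
		printf("%u\n", tx_buffs_avail(5, 510, 511));
		return 0;
	}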
@@ -2880,9 +2877,13 @@ static void tg3_tx(struct tg3 *tp)

	tp->tx_cons = sw_idx;

-	if (netif_queue_stopped(tp->dev) &&
-	    (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
-		netif_wake_queue(tp->dev);
+	if (unlikely(netif_queue_stopped(tp->dev))) {
+		spin_lock(&tp->tx_lock);
+		if (netif_queue_stopped(tp->dev) &&
+		    (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
+			netif_wake_queue(tp->dev);
+		spin_unlock(&tp->tx_lock);
+	}
 }

 /* Returns size of skb allocated or < 0 on error.
@@ -3198,9 +3199,7 @@ static int tg3_poll(struct net_device *netdev, int *budget)

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
-		spin_lock(&tp->tx_lock);
		tg3_tx(tp);
-		spin_unlock(&tp->tx_lock);
	}

	/* run RX thread, within the bounds set by NAPI.
@@ -3716,8 +3715,11 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
-	if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
+	if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
		netif_stop_queue(dev);
+		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
+			netif_wake_queue(tp->dev);
+	}

 out_unlock:
	mmiowb();
@@ -18,6 +18,9 @@
 /*
  *  Changes:
  *
+ *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
+ *    Add TUNSETLINK ioctl to set the link encapsulation
+ *
  *  Mark Smith <markzzzsmith@yahoo.com.au>
  *    Use random_ether_addr() for tap MAC address.
  *

@@ -612,6 +615,18 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
		DBG(KERN_INFO "%s: owner set to %d\n", tun->dev->name, tun->owner);
		break;

+	case TUNSETLINK:
+		/* Only allow setting the type when the interface is down */
+		if (tun->dev->flags & IFF_UP) {
+			DBG(KERN_INFO "%s: Linktype set failed because interface is up\n",
+				tun->dev->name);
+			return -EBUSY;
+		} else {
+			tun->dev->type = (int) arg;
+			DBG(KERN_INFO "%s: linktype set to %d\n", tun->dev->name, tun->dev->type);
+		}
+		break;
+
 #ifdef TUN_DEBUG
	case TUNSETDEBUG:
		tun->debug = arg;
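For context, a userspace caller would exercise the new ioctl roughly like this — a hedged sketch, with error handling elided and ARPHRD_PPP picked arbitrarily as the new link type; the interface must be down or the kernel returns -EBUSY:

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/if.h>
	#include <linux/if_arp.h>
	#include <linux/if_tun.h>

	int tun_set_linktype(const char *name, int type)
	{
		struct ifreq ifr;
		int fd = open("/dev/net/tun", O_RDWR);

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
		ifr.ifr_flags = IFF_TUN;
		ioctl(fd, TUNSETIFF, &ifr);		/* attach to the device */

		return ioctl(fd, TUNSETLINK, type);	/* e.g. ARPHRD_PPP */
	}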
@@ -1308,7 +1308,7 @@ static int micsetup(struct airo_info *ai) {
	int i;

	if (ai->tfm == NULL)
-		ai->tfm = crypto_alloc_tfm("aes", 0);
+		ai->tfm = crypto_alloc_tfm("aes", CRYPTO_TFM_REQ_MAY_SLEEP);

	if (ai->tfm == NULL) {
		printk(KERN_ERR "airo: failed to load transform for AES\n");

@@ -2410,8 +2410,7 @@ void stop_airo_card( struct net_device *dev, int freeres )
		}
	}
 #ifdef MICSUPPORT
-	if (ai->tfm)
-		crypto_free_tfm(ai->tfm);
+	crypto_free_tfm(ai->tfm);
 #endif
	del_airo_dev( dev );
	free_netdev( dev );
@@ -93,7 +93,7 @@ nfs4_make_rec_clidname(char *dname, struct xdr_netobj *clname)

	dprintk("NFSD: nfs4_make_rec_clidname for %.*s\n",
			clname->len, clname->data);
-	tfm = crypto_alloc_tfm("md5", 0);
+	tfm = crypto_alloc_tfm("md5", CRYPTO_TFM_REQ_MAY_SLEEP);
	if (tfm == NULL)
		goto out;
	cksum.len = crypto_tfm_alg_digestsize(tfm);

@@ -114,8 +114,7 @@ nfs4_make_rec_clidname(char *dname, struct xdr_netobj *clname)
	kfree(cksum.data);
	status = nfs_ok;
 out:
-	if (tfm)
-		crypto_free_tfm(tfm);
+	crypto_free_tfm(tfm);
	return status;
 }
@@ -45,6 +45,7 @@
 #define CRYPTO_TFM_MODE_CTR		0x00000008

 #define CRYPTO_TFM_REQ_WEAK_KEY		0x00000100
+#define CRYPTO_TFM_REQ_MAY_SLEEP	0x00000200
 #define CRYPTO_TFM_RES_WEAK_KEY		0x00100000
 #define CRYPTO_TFM_RES_BAD_KEY_LEN	0x00200000
 #define CRYPTO_TFM_RES_BAD_KEY_SCHED	0x00400000
@@ -77,6 +77,7 @@ struct tun_struct {
 #define TUNSETIFF     _IOW('T', 202, int)
 #define TUNSETPERSIST _IOW('T', 203, int)
 #define TUNSETOWNER   _IOW('T', 204, int)
+#define TUNSETLINK    _IOW('T', 205, int)

 /* TUNSETIFF ifr flags */
 #define IFF_TUN		0x0001
@@ -958,7 +958,7 @@ static __inline__ int ip_vs_todrop(void)
  */
 #define IP_VS_FWD_METHOD(cp)  (cp->flags & IP_VS_CONN_F_FWD_MASK)

-extern __inline__ char ip_vs_fwd_tag(struct ip_vs_conn *cp)
+static inline char ip_vs_fwd_tag(struct ip_vs_conn *cp)
 {
	char fwd;

@@ -709,6 +709,12 @@ static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
	       sk_stream_mem_schedule(sk, skb->truesize, 1);
 }

+static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
+{
+	return size <= sk->sk_forward_alloc ||
+	       sk_stream_mem_schedule(sk, size, 0);
+}
+
 /* Used by processes to "lock" a socket state, so that
  * interrupts and bottom half handlers won't change it
  * from under us. It essentially blocks any incoming

@@ -1203,8 +1209,7 @@ static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
	skb = alloc_skb_fclone(size + hdr_len, gfp);
	if (skb) {
		skb->truesize += mem;
-		if (sk->sk_forward_alloc >= (int)skb->truesize ||
-		    sk_stream_mem_schedule(sk, skb->truesize, 0)) {
+		if (sk_stream_wmem_schedule(sk, skb->truesize)) {
			skb_reserve(skb, hdr_len);
			return skb;
		}

@@ -1227,10 +1232,8 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk)
 {
	struct page *page = NULL;

-	if (sk->sk_forward_alloc >= (int)PAGE_SIZE ||
-	    sk_stream_mem_schedule(sk, PAGE_SIZE, 0))
-		page = alloc_pages(sk->sk_allocation, 0);
-	else {
+	page = alloc_pages(sk->sk_allocation, 0);
+	if (!page) {
		sk->sk_prot->enter_memory_pressure();
		sk_stream_moderate_sndbuf(sk);
	}
@@ -454,6 +454,7 @@ extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
 extern void tcp_xmit_retransmit_queue(struct sock *);
 extern void tcp_simple_retransmit(struct sock *);
 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
+extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);

 extern void tcp_send_probe0(struct sock *);
 extern void tcp_send_partial(struct sock *);
@@ -1876,8 +1876,27 @@ static inline unsigned int dn_current_mss(struct sock *sk, int flags)
	return mss_now;
 }

+/*
+ * N.B. We get the timeout wrong here, but then we always did get it
+ * wrong before and this is another step along the road to correcting
+ * it. It ought to get updated each time we pass through the routine,
+ * but in practise it probably doesn't matter too much for now.
+ */
+static inline struct sk_buff *dn_alloc_send_pskb(struct sock *sk,
+			      unsigned long datalen, int noblock,
+			      int *errcode)
+{
+	struct sk_buff *skb = sock_alloc_send_skb(sk, datalen,
+						  noblock, errcode);
+	if (skb) {
+		skb->protocol = __constant_htons(ETH_P_DNA_RT);
+		skb->pkt_type = PACKET_OUTGOING;
+	}
+	return skb;
+}
+
 static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
	              struct msghdr *msg, size_t size)
 {
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);

@@ -1892,7 +1911,7 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
	struct dn_skb_cb *cb;
	size_t len;
	unsigned char fctype;
-	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
+	long timeo;

	if (flags & ~(MSG_TRYHARD|MSG_OOB|MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|MSG_MORE|MSG_CMSG_COMPAT))
		return -EOPNOTSUPP;

@@ -1900,18 +1919,21 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
	if (addr_len && (addr_len != sizeof(struct sockaddr_dn)))
		return -EINVAL;

+	lock_sock(sk);
+	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	/*
	 * The only difference between stream sockets and sequenced packet
	 * sockets is that the stream sockets always behave as if MSG_EOR
	 * has been set.
	 */
	if (sock->type == SOCK_STREAM) {
-		if (flags & MSG_EOR)
-			return -EINVAL;
+		if (flags & MSG_EOR) {
+			err = -EINVAL;
+			goto out;
+		}
		flags |= MSG_EOR;
	}

-	lock_sock(sk);

	err = dn_check_state(sk, addr, addr_len, &timeo, flags);
	if (err)

@@ -1980,8 +2002,12 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,

		/*
		 * Get a suitably sized skb.
+		 * 64 is a bit of a hack really, but its larger than any
+		 * link-layer headers and has served us well as a good
+		 * guess as to their real length.
		 */
-		skb = dn_alloc_send_skb(sk, &len, flags & MSG_DONTWAIT, timeo, &err);
+		skb = dn_alloc_send_pskb(sk, len + 64 + DN_MAX_NSP_DATA_HEADER,
+					 flags & MSG_DONTWAIT, &err);

		if (err)
			break;

@@ -1991,7 +2017,7 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,

		cb = DN_SKB_CB(skb);

-		skb_reserve(skb, DN_MAX_NSP_DATA_HEADER);
+		skb_reserve(skb, 64 + DN_MAX_NSP_DATA_HEADER);

		if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
			err = -EFAULT;
@@ -136,69 +136,6 @@ struct sk_buff *dn_alloc_skb(struct sock *sk, int size, int pri)
	return skb;
 }

-/*
- * Wrapper for the above, for allocs of data skbs. We try and get the
- * whole size thats been asked for (plus 11 bytes of header). If this
- * fails, then we try for any size over 16 bytes for SOCK_STREAMS.
- */
-struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock, long timeo, int *err)
-{
-	int space;
-	int len;
-	struct sk_buff *skb = NULL;
-
-	*err = 0;
-
-	while(skb == NULL) {
-		if (signal_pending(current)) {
-			*err = sock_intr_errno(timeo);
-			break;
-		}
-
-		if (sk->sk_shutdown & SEND_SHUTDOWN) {
-			*err = EINVAL;
-			break;
-		}
-
-		if (sk->sk_err)
-			break;
-
-		len = *size + 11;
-		space = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
-
-		if (space < len) {
-			if ((sk->sk_socket->type == SOCK_STREAM) &&
-			    (space >= (16 + 11)))
-				len = space;
-		}
-
-		if (space < len) {
-			set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
-			if (noblock) {
-				*err = EWOULDBLOCK;
-				break;
-			}
-
-			clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
-			SOCK_SLEEP_PRE(sk)
-
-			if ((sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc)) <
-			    len)
-				schedule();
-
-			SOCK_SLEEP_POST(sk)
-			continue;
-		}
-
-		if ((skb = dn_alloc_skb(sk, len, sk->sk_allocation)) == NULL)
-			continue;
-
-		*size = len - 11;
-	}
-
-	return skb;
-}
-
 /*
  * Calculate persist timer based upon the smoothed round
  * trip time and the variance. Backoff according to the
@@ -263,10 +263,8 @@ static int ah_init_state(struct xfrm_state *x)

 error:
	if (ahp) {
-		if (ahp->work_icv)
-			kfree(ahp->work_icv);
-		if (ahp->tfm)
-			crypto_free_tfm(ahp->tfm);
+		kfree(ahp->work_icv);
+		crypto_free_tfm(ahp->tfm);
		kfree(ahp);
	}
	return -EINVAL;

@@ -279,14 +277,10 @@ static void ah_destroy(struct xfrm_state *x)
	if (!ahp)
		return;

-	if (ahp->work_icv) {
-		kfree(ahp->work_icv);
-		ahp->work_icv = NULL;
-	}
-	if (ahp->tfm) {
-		crypto_free_tfm(ahp->tfm);
-		ahp->tfm = NULL;
-	}
+	kfree(ahp->work_icv);
+	ahp->work_icv = NULL;
+	crypto_free_tfm(ahp->tfm);
+	ahp->tfm = NULL;
	kfree(ahp);
 }

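This hunk and the matching ones in the esp, ipcomp, addrconf, sctp, sunrpc and airo sections all rest on the same contract: kfree(NULL) has always been a no-op, and crypto_free_tfm() was given the same NULL-safe behaviour, so the per-call-site guards are dead code. The idiom, as a hypothetical standalone wrapper (not kernel code):

	struct tfm;				/* opaque stand-in for struct crypto_tfm */
	void tfm_destroy(struct tfm *t);	/* assumed real teardown */

	/* NULL-safe free: the check lives here once, not at every call site */
	void tfm_free(struct tfm *t)
	{
		if (t)
			tfm_destroy(t);
	}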
@@ -343,22 +343,14 @@ static void esp_destroy(struct xfrm_state *x)
	if (!esp)
		return;

-	if (esp->conf.tfm) {
-		crypto_free_tfm(esp->conf.tfm);
-		esp->conf.tfm = NULL;
-	}
-	if (esp->conf.ivec) {
-		kfree(esp->conf.ivec);
-		esp->conf.ivec = NULL;
-	}
-	if (esp->auth.tfm) {
-		crypto_free_tfm(esp->auth.tfm);
-		esp->auth.tfm = NULL;
-	}
-	if (esp->auth.work_icv) {
-		kfree(esp->auth.work_icv);
-		esp->auth.work_icv = NULL;
-	}
+	crypto_free_tfm(esp->conf.tfm);
+	esp->conf.tfm = NULL;
+	kfree(esp->conf.ivec);
+	esp->conf.ivec = NULL;
+	crypto_free_tfm(esp->auth.tfm);
+	esp->auth.tfm = NULL;
+	kfree(esp->auth.work_icv);
+	esp->auth.work_icv = NULL;
	kfree(esp);
 }

@@ -345,8 +345,7 @@ static void ipcomp_free_tfms(struct crypto_tfm **tfms)

	for_each_cpu(cpu) {
		struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu);
-		if (tfm)
-			crypto_free_tfm(tfm);
+		crypto_free_tfm(tfm);
	}
	free_percpu(tfms);
 }
@@ -144,7 +144,7 @@ clusterip_config_init(struct ipt_clusterip_tgt_info *i, u_int32_t ip,
	memcpy(&c->clustermac, &i->clustermac, ETH_ALEN);
	c->num_total_nodes = i->num_total_nodes;
	c->num_local_nodes = i->num_local_nodes;
-	memcpy(&c->local_nodes, &i->local_nodes, sizeof(&c->local_nodes));
+	memcpy(&c->local_nodes, &i->local_nodes, sizeof(c->local_nodes));
	c->hash_mode = i->hash_mode;
	c->hash_initval = i->hash_initval;
	atomic_set(&c->refcount, 1);
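The one-character fix matters because sizeof(&c->local_nodes) is the size of a pointer, so the old memcpy copied only 4 or 8 bytes of the node array instead of all of it. A quick standalone demonstration (array size invented):

	#include <stdio.h>

	int main(void)
	{
		unsigned short local_nodes[16];

		printf("%zu\n", sizeof(local_nodes));	/* 32: the whole array */
		printf("%zu\n", sizeof(&local_nodes));	/* 4 or 8: just a pointer */
		return 0;
	}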
@@ -552,8 +552,7 @@ new_segment:
			tcp_mark_push(tp, skb);
			goto new_segment;
		}
-		if (sk->sk_forward_alloc < copy &&
-		    !sk_stream_mem_schedule(sk, copy, 0))
+		if (!sk_stream_wmem_schedule(sk, copy))
			goto wait_for_memory;

		if (can_coalesce) {
@@ -770,19 +769,23 @@ new_segment:
				if (off == PAGE_SIZE) {
					put_page(page);
					TCP_PAGE(sk) = page = NULL;
					TCP_OFF(sk) = off = 0;
				}
			} else
				BUG_ON(off);

+			if (copy > PAGE_SIZE - off)
+				copy = PAGE_SIZE - off;
+
+			if (!sk_stream_wmem_schedule(sk, copy))
+				goto wait_for_memory;
+
			if (!page) {
				/* Allocate new cache page. */
				if (!(page = sk_stream_alloc_page(sk)))
					goto wait_for_memory;
				off = 0;
			}

-			if (copy > PAGE_SIZE - off)
-				copy = PAGE_SIZE - off;
-
			/* Time to copy data. We are close to
			 * the end! */
@@ -923,14 +923,6 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
	int flag = 0;
	int i;

-	/* So, SACKs for already sent large segments will be lost.
-	 * Not good, but alternative is to resegment the queue. */
-	if (sk->sk_route_caps & NETIF_F_TSO) {
-		sk->sk_route_caps &= ~NETIF_F_TSO;
-		sock_set_flag(sk, SOCK_NO_LARGESEND);
-		tp->mss_cache = tp->mss_cache;
-	}
-
	if (!tp->sacked_out)
		tp->fackets_out = 0;
	prior_fackets = tp->fackets_out;
@@ -978,20 +970,40 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
			flag |= FLAG_DATA_LOST;

		sk_stream_for_retrans_queue(skb, sk) {
-			u8 sacked = TCP_SKB_CB(skb)->sacked;
-			int in_sack;
+			int in_sack, pcount;
+			u8 sacked;

			/* The retransmission queue is always in order, so
			 * we can short-circuit the walk early.
			 */
-			if(!before(TCP_SKB_CB(skb)->seq, end_seq))
+			if (!before(TCP_SKB_CB(skb)->seq, end_seq))
				break;

-			fack_count += tcp_skb_pcount(skb);
+			pcount = tcp_skb_pcount(skb);
+
+			if (pcount > 1 &&
+			    (after(start_seq, TCP_SKB_CB(skb)->seq) ||
+			     before(end_seq, TCP_SKB_CB(skb)->end_seq))) {
+				unsigned int pkt_len;
+
+				if (after(start_seq, TCP_SKB_CB(skb)->seq))
+					pkt_len = (start_seq -
+						   TCP_SKB_CB(skb)->seq);
+				else
+					pkt_len = (end_seq -
+						   TCP_SKB_CB(skb)->seq);
+				if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->tso_size))
+					break;
+				pcount = tcp_skb_pcount(skb);
+			}
+
+			fack_count += pcount;

			in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
				  !before(end_seq, TCP_SKB_CB(skb)->end_seq);

+			sacked = TCP_SKB_CB(skb)->sacked;
+
			/* Account D-SACK for retransmitted packet. */
			if ((dup_sack && in_sack) &&
			    (sacked & TCPCB_RETRANS) &&
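As a worked example of the new splitting logic (numbers invented): with tso_size 1000, an skb covering seq 1000..4000 has pcount 3. A SACK block 2000..5000 starts inside it, so pkt_len = start_seq - seq = 1000 and tcp_fragment() splits the skb at that byte; pcount is then re-read so fack_count only advances by what the SACK actually covers. The boundary math in isolation:

	/* Toy version of the pkt_len computation above: how many bytes of
	 * the skb lie outside the SACK block and must be split off. */
	static unsigned int sack_pkt_len(unsigned int seq, unsigned int start_seq,
					 unsigned int end_seq)
	{
		if (start_seq > seq)		/* SACK begins inside the skb */
			return start_seq - seq;
		return end_seq - seq;		/* SACK ends inside the skb */
	}
	/* sack_pkt_len(1000, 2000, 5000) == 1000 */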
@@ -428,11 +428,11 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned
  * packet to the list. This won't be called frequently, I hope.
  * Remember, these are still headerless SKBs at this point.
  */
-static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
+int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
 {
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *buff;
-	int nsize;
+	int nsize, old_factor;
	u16 flags;

	nsize = skb_headlen(skb) - len;
@@ -490,18 +490,29 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned
		tp->left_out -= tcp_skb_pcount(skb);
	}

+	old_factor = tcp_skb_pcount(skb);
+
	/* Fix up tso_factor for both original and new SKB. */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

-	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
-		tp->lost_out += tcp_skb_pcount(skb);
-		tp->left_out += tcp_skb_pcount(skb);
-	}
-
-	if (TCP_SKB_CB(buff)->sacked&TCPCB_LOST) {
-		tp->lost_out += tcp_skb_pcount(buff);
-		tp->left_out += tcp_skb_pcount(buff);
+	/* If this packet has been sent out already, we must
+	 * adjust the various packet counters.
+	 */
+	if (after(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
+		int diff = old_factor - tcp_skb_pcount(skb) -
+			tcp_skb_pcount(buff);
+
+		tp->packets_out -= diff;
+		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
+			tp->lost_out -= diff;
+			tp->left_out -= diff;
+		}
+		if (diff > 0) {
+			tp->fackets_out -= diff;
+			if ((int)tp->fackets_out < 0)
+				tp->fackets_out = 0;
+		}
	}

	/* Link BUFF into the send queue. */
@@ -1350,12 +1361,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
			BUG();
-
-		if (sk->sk_route_caps & NETIF_F_TSO) {
-			sk->sk_route_caps &= ~NETIF_F_TSO;
-			sock_set_flag(sk, SOCK_NO_LARGESEND);
-		}
-
		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
			return -ENOMEM;
	}
@@ -1370,22 +1375,8 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
		return -EAGAIN;

	if (skb->len > cur_mss) {
-		int old_factor = tcp_skb_pcount(skb);
-		int diff;
-
		if (tcp_fragment(sk, skb, cur_mss, cur_mss))
			return -ENOMEM; /* We'll try again later. */
-
-		/* New SKB created, account for it. */
-		diff = old_factor - tcp_skb_pcount(skb) -
-		       tcp_skb_pcount(skb->next);
-		tp->packets_out -= diff;
-
-		if (diff > 0) {
-			tp->fackets_out -= diff;
-			if ((int)tp->fackets_out < 0)
-				tp->fackets_out = 0;
-		}
	}

	/* Collapse two adjacent packets if worthwhile and we can. */
@@ -1993,12 +1984,6 @@ int tcp_write_wakeup(struct sock *sk)
			TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
			if (tcp_fragment(sk, skb, seg_size, mss))
				return -1;
-			/* SWS override triggered forced fragmentation.
-			 * Disable TSO, the connection is too sick. */
-			if (sk->sk_route_caps & NETIF_F_TSO) {
-				sock_set_flag(sk, SOCK_NO_LARGESEND);
-				sk->sk_route_caps &= ~NETIF_F_TSO;
-			}
		} else if (!tcp_skb_pcount(skb))
			tcp_set_skb_tso_segs(sk, skb, mss);

@@ -3593,10 +3593,8 @@ void __exit addrconf_cleanup(void)
	rtnl_unlock();

 #ifdef CONFIG_IPV6_PRIVACY
-	if (likely(md5_tfm != NULL)) {
-		crypto_free_tfm(md5_tfm);
-		md5_tfm = NULL;
-	}
+	crypto_free_tfm(md5_tfm);
+	md5_tfm = NULL;
 #endif

 #ifdef CONFIG_PROC_FS
@@ -401,10 +401,8 @@ static int ah6_init_state(struct xfrm_state *x)

 error:
	if (ahp) {
-		if (ahp->work_icv)
-			kfree(ahp->work_icv);
-		if (ahp->tfm)
-			crypto_free_tfm(ahp->tfm);
+		kfree(ahp->work_icv);
+		crypto_free_tfm(ahp->tfm);
		kfree(ahp);
	}
	return -EINVAL;

@@ -417,14 +415,10 @@ static void ah6_destroy(struct xfrm_state *x)
	if (!ahp)
		return;

-	if (ahp->work_icv) {
-		kfree(ahp->work_icv);
-		ahp->work_icv = NULL;
-	}
-	if (ahp->tfm) {
-		crypto_free_tfm(ahp->tfm);
-		ahp->tfm = NULL;
-	}
+	kfree(ahp->work_icv);
+	ahp->work_icv = NULL;
+	crypto_free_tfm(ahp->tfm);
+	ahp->tfm = NULL;
	kfree(ahp);
 }

@@ -276,22 +276,14 @@ static void esp6_destroy(struct xfrm_state *x)
	if (!esp)
		return;

-	if (esp->conf.tfm) {
-		crypto_free_tfm(esp->conf.tfm);
-		esp->conf.tfm = NULL;
-	}
-	if (esp->conf.ivec) {
-		kfree(esp->conf.ivec);
-		esp->conf.ivec = NULL;
-	}
-	if (esp->auth.tfm) {
-		crypto_free_tfm(esp->auth.tfm);
-		esp->auth.tfm = NULL;
-	}
-	if (esp->auth.work_icv) {
-		kfree(esp->auth.work_icv);
-		esp->auth.work_icv = NULL;
-	}
+	crypto_free_tfm(esp->conf.tfm);
+	esp->conf.tfm = NULL;
+	kfree(esp->conf.ivec);
+	esp->conf.ivec = NULL;
+	crypto_free_tfm(esp->auth.tfm);
+	esp->auth.tfm = NULL;
+	kfree(esp->auth.work_icv);
+	esp->auth.work_icv = NULL;
	kfree(esp);
 }

@@ -549,7 +549,7 @@ static void icmpv6_notify(struct sk_buff *skb, int type, int code, u32 info)
	read_lock(&raw_v6_lock);
	if ((sk = sk_head(&raw_v6_htable[hash])) != NULL) {
		while((sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr,
-					    skb->dev->ifindex))) {
+					    IP6CB(skb)->iif))) {
			rawv6_err(sk, skb, NULL, type, code, inner_offset, info);
			sk = sk_next(sk);
		}
@@ -341,8 +341,7 @@ static void ipcomp6_free_tfms(struct crypto_tfm **tfms)

	for_each_cpu(cpu) {
		struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu);
-		if (tfm)
-			crypto_free_tfm(tfm);
+		crypto_free_tfm(tfm);
	}
	free_percpu(tfms);
 }
@@ -166,7 +166,7 @@ int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
	if (sk == NULL)
		goto out;

-	sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr, skb->dev->ifindex);
+	sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr, IP6CB(skb)->iif);

	while (sk) {
		delivered = 1;

@@ -178,7 +178,7 @@ int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
			rawv6_rcv(sk, clone);
		}
		sk = __raw_v6_lookup(sk_next(sk), nexthdr, daddr, saddr,
-				     skb->dev->ifindex);
+				     IP6CB(skb)->iif);
	}
 out:
	read_unlock(&raw_v6_lock);
@@ -193,8 +193,7 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
	sctp_unhash_endpoint(ep);

	/* Free up the HMAC transform. */
-	if (sctp_sk(ep->base.sk)->hmac)
-		sctp_crypto_free_tfm(sctp_sk(ep->base.sk)->hmac);
+	sctp_crypto_free_tfm(sctp_sk(ep->base.sk)->hmac);

	/* Cleanup. */
	sctp_inq_free(&ep->base.inqueue);
@@ -4194,8 +4194,7 @@ out:
	sctp_release_sock(sk);
	return err;
 cleanup:
-	if (tfm)
-		sctp_crypto_free_tfm(tfm);
+	sctp_crypto_free_tfm(tfm);
	goto out;
 }

@@ -160,7 +160,7 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
				" unsupported checksum %d", cksumtype);
		goto out;
	}
-	if (!(tfm = crypto_alloc_tfm(cksumname, 0)))
+	if (!(tfm = crypto_alloc_tfm(cksumname, CRYPTO_TFM_REQ_MAY_SLEEP)))
		goto out;
	cksum->len = crypto_tfm_alg_digestsize(tfm);
	if ((cksum->data = kmalloc(cksum->len, GFP_KERNEL)) == NULL)

@@ -199,8 +199,7 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
	crypto_digest_final(tfm, cksum->data);
	code = 0;
 out:
-	if (tfm)
-		crypto_free_tfm(tfm);
+	crypto_free_tfm(tfm);
	return code;
 }

@@ -185,12 +185,9 @@ static void
 gss_delete_sec_context_kerberos(void *internal_ctx) {
	struct krb5_ctx *kctx = internal_ctx;

-	if (kctx->seq)
-		crypto_free_tfm(kctx->seq);
-	if (kctx->enc)
-		crypto_free_tfm(kctx->enc);
-	if (kctx->mech_used.data)
-		kfree(kctx->mech_used.data);
+	crypto_free_tfm(kctx->seq);
+	crypto_free_tfm(kctx->enc);
+	kfree(kctx->mech_used.data);
	kfree(kctx);
 }

@@ -214,14 +214,10 @@ static void
 gss_delete_sec_context_spkm3(void *internal_ctx) {
	struct spkm3_ctx *sctx = internal_ctx;

-	if(sctx->derived_integ_key)
-		crypto_free_tfm(sctx->derived_integ_key);
-	if(sctx->derived_conf_key)
-		crypto_free_tfm(sctx->derived_conf_key);
-	if(sctx->share_key.data)
-		kfree(sctx->share_key.data);
-	if(sctx->mech_used.data)
-		kfree(sctx->mech_used.data);
+	crypto_free_tfm(sctx->derived_integ_key);
+	crypto_free_tfm(sctx->derived_conf_key);
+	kfree(sctx->share_key.data);
+	kfree(sctx->mech_used.data);
	kfree(sctx);
 }

@@ -321,7 +321,7 @@ plaintext_to_sha1(unsigned char *hash, const char *plaintext, int len)
			   "bytes.\n", len, PAGE_SIZE);
		return -ENOMEM;
	}
-	tfm = crypto_alloc_tfm("sha1", 0);
+	tfm = crypto_alloc_tfm("sha1", CRYPTO_TFM_REQ_MAY_SLEEP);
	if (tfm == NULL) {
		seclvl_printk(0, KERN_ERR,
			      "Failed to load transform for SHA1\n");