mirror of https://github.com/adulau/aha.git (synced 2024-12-28 03:36:19 +00:00)
[CRYPTO] blkcipher: Fix handling of kmalloc page straddling
The function blkcipher_get_spot tries to return a buffer of the specified length that does not straddle a page. It has an off-by-one bug so it may advance a page unnecessarily.

What's worse, one of its callers doesn't provide a buffer that's sufficiently long for this operation.

This patch fixes both problems. Thanks to Bob Gilligan for diagnosing this problem and providing a fix.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
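To make the off-by-one concrete, here is a minimal userspace sketch (not kernel code): PAGE_SIZE, PAGE_MASK and offset_in_page() are defined as stand-ins for the kernel macros, and pointers are modelled as plain unsigned longs. It contrasts the old logic with the fixed logic for a region that ends exactly on a page boundary, the case the old check handles wrongly.

/* Minimal userspace sketch of the off-by-one (not kernel code).
 * PAGE_SIZE, PAGE_MASK and offset_in_page() are stand-ins for the
 * kernel macros; addresses are plain unsigned longs for illustration.
 */
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define PAGE_MASK		(~(PAGE_SIZE - 1))
#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

/* Old logic: advances whenever start + len has a "small" page offset,
 * which also fires when the region ends exactly on a page boundary. */
static unsigned long get_spot_old(unsigned long start, unsigned long len)
{
	if (offset_in_page(start + len) < len)
		return (start + len) & PAGE_MASK;
	return start;
}

/* Fixed logic: only the page of the last byte of the region matters. */
static unsigned long get_spot_new(unsigned long start, unsigned long len)
{
	unsigned long end_page = (start + len - 1) & PAGE_MASK;
	return start > end_page ? start : end_page;	/* i.e. max() */
}

int main(void)
{
	/* A 16-byte region ending exactly at a page boundary: it does not
	 * straddle the page, yet the old code advances to the next page. */
	unsigned long start = PAGE_SIZE - 16, len = 16;

	printf("old: %#lx (advanced unnecessarily)\n", get_spot_old(start, len));
	printf("new: %#lx (stays put)\n", get_spot_new(start, len));
	return 0;
}

Built with a plain cc and run, the old helper advances to the next page base (0x1000 here) even though the region fits in the current page, while the fixed helper keeps the original address (0xff0).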
parent b21010ed64
commit e4630f9fd8
1 changed file with 7 additions and 4 deletions
|
crypto/blkcipher.c
@@ -59,11 +59,13 @@ static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
 	scatterwalk_unmap(walk->dst.virt.addr, 1);
 }
 
+/* Get a spot of the specified length that does not straddle a page.
+ * The caller needs to ensure that there is enough space for this operation.
+ */
 static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
 {
-	if (offset_in_page(start + len) < len)
-		return (u8 *)((unsigned long)(start + len) & PAGE_MASK);
-	return start;
+	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
+	return max(start, end_page);
 }
 
 static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm,
@@ -155,7 +157,8 @@ static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
 	if (walk->buffer)
 		goto ok;
 
-	n = bsize * 2 + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
+	n = bsize * 3 - (alignmask + 1) +
+	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
 	walk->buffer = kmalloc(n, GFP_ATOMIC);
 	if (!walk->buffer)
 		return blkcipher_walk_done(desc, walk, -ENOMEM);
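As a rough sanity check of the new buffer size in the second hunk, the following hypothetical userspace simulation (not kernel code) assumes the caller lays out the kmalloc'ed buffer the way blkcipher_next_slow roughly does: align the pointer to alignmask + 1, place a bsize-byte spot for dst with blkcipher_get_spot, then a bsize-byte spot for src immediately after it. The parameters (bsize = 16, alignmask = 3, 4 KiB pages, kmalloc alignment of at least 4 bytes) are illustrative assumptions; with them the (alignmask & ~(crypto_tfm_ctx_alignment() - 1)) padding term is zero and drops out.

/* Hypothetical userspace check of the new size (not kernel code): with
 * bsize = 16, alignmask = 3, 4 KiB pages, and the allocation assumed to be
 * at least 4-byte aligned, brute-force every possible page offset of the
 * buffer under the caller layout described above.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Same logic as the fixed blkcipher_get_spot, on plain unsigned longs. */
static unsigned long get_spot(unsigned long start, unsigned long len)
{
	unsigned long end_page = (start + len - 1) & PAGE_MASK;
	return start > end_page ? start : end_page;	/* max() */
}

int main(void)
{
	const unsigned long bsize = 16, alignmask = 3, ctx_align = 4;
	unsigned long worst = 0;

	for (unsigned long buf = PAGE_SIZE; buf < 2 * PAGE_SIZE; buf += ctx_align) {
		/* Assumed caller layout: align, then carve dst and src spots. */
		unsigned long dst = (buf + alignmask) & ~alignmask;
		unsigned long src, used;

		dst = get_spot(dst, bsize);
		src = get_spot(dst + bsize, bsize);
		used = src + bsize - buf;
		if (used > worst)
			worst = used;
	}

	printf("worst-case bytes used: %lu\n", worst);			/* 44 */
	printf("old allocation (bsize * 2): %lu\n", bsize * 2);		/* 32 */
	printf("new allocation (bsize * 3 - (alignmask + 1)): %lu\n",
	       bsize * 3 - (alignmask + 1));				/* 44 */
	return 0;
}

Brute-forcing every page offset of the buffer gives a worst case of 44 bytes, which exceeds the old allocation of bsize * 2 = 32 and exactly matches the new bsize * 3 - (alignmask + 1) = 44, illustrating why the caller's old allocation was not sufficiently long.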