[PATCH] Transform kmem_cache_alloc()+memset(0) -> kmem_cache_zalloc().
Replace appropriate pairs of "kmem_cache_alloc()" + "memset(0)" with the
corresponding "kmem_cache_zalloc()" call.

Signed-off-by: Robert P. J. Day <rpjday@mindspring.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Andi Kleen <ak@muc.de>
Cc: Roland McGrath <roland@redhat.com>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Cc: Greg KH <greg@kroah.com>
Acked-by: Joel Becker <Joel.Becker@oracle.com>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Jan Kara <jack@ucw.cz>
Cc: Michael Halcrow <mhalcrow@us.ibm.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Stephen Smalley <sds@tycho.nsa.gov>
Cc: James Morris <jmorris@namei.org>
Cc: Chris Wright <chrisw@sous-sol.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1b135431ab
commit c376222960

39 changed files with 48 additions and 103 deletions
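The transformation itself is mechanical: wherever an object freshly allocated from a slab cache is immediately zeroed with a full-size memset(), the pair collapses into a single kmem_cache_zalloc() call, which returns an already-zeroed object. A minimal before/after sketch of the pattern (struct foo and foo_cache are illustrative names, not taken from the patch):

struct foo {
	int bar;
};

static struct kmem_cache *foo_cache;	/* created elsewhere via kmem_cache_create() */

/* Old pattern: allocate from the cache, then zero the object by hand. */
static struct foo *foo_alloc_old(void)
{
	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);

	if (f)
		memset(f, 0, sizeof(*f));
	return f;
}

/* New pattern: one call, and the object comes back zero-filled. */
static struct foo *foo_alloc_new(void)
{
	return kmem_cache_zalloc(foo_cache, GFP_KERNEL);
}

The conversion is only valid where the memset() covers the whole object and nothing touches the memory in between; call sites that zero a different length or only clear selected fields are left as they are.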
@@ -91,9 +91,8 @@ ia64_elf32_init (struct pt_regs *regs)
 	 * it with privilege level 3 because the IVE uses non-privileged accesses to these
 	 * tables. IA-32 segmentation is used to protect against IA-32 accesses to them.
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
-		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
 		vma->vm_start = IA32_GDT_OFFSET;
 		vma->vm_end = vma->vm_start + PAGE_SIZE;
@@ -117,9 +116,8 @@ ia64_elf32_init (struct pt_regs *regs)
 	 * code is locked in specific gate page, which is pointed by pretcode
 	 * when setup_frame_ia32
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
-		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
 		vma->vm_start = IA32_GATE_OFFSET;
 		vma->vm_end = vma->vm_start + PAGE_SIZE;
@@ -142,9 +140,8 @@ ia64_elf32_init (struct pt_regs *regs)
 	 * Install LDT as anonymous memory. This gives us all-zero segment descriptors
 	 * until a task modifies them via modify_ldt().
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
-		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
 		vma->vm_start = IA32_LDT_OFFSET;
 		vma->vm_end = vma->vm_start + PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
@@ -214,12 +211,10 @@ ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
 	bprm->loader += stack_base;
 	bprm->exec += stack_base;
 
-	mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	mpnt = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (!mpnt)
 		return -ENOMEM;
 
-	memset(mpnt, 0, sizeof(*mpnt));
-
 	down_write(&current->mm->mmap_sem);
 	{
 		mpnt->vm_mm = current->mm;

@@ -2301,12 +2301,11 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon
 	DPRINT(("smpl_buf @%p\n", smpl_buf));
 
 	/* allocate vma */
-	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (!vma) {
 		DPRINT(("Cannot allocate vma\n"));
 		goto error_kmem;
 	}
-	memset(vma, 0, sizeof(*vma));
 
 	/*
 	 * partially initialize the vma for the sampling buffer

@@ -176,9 +176,8 @@ ia64_init_addr_space (void)
 	 * the problem. When the process attempts to write to the register backing store
 	 * for the first time, it will get a SEGFAULT in this case.
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
-		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
 		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
 		vma->vm_end = vma->vm_start + PAGE_SIZE;
@@ -195,9 +194,8 @@ ia64_init_addr_space (void)
 
 	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
 	if (!(current->personality & MMAP_PAGE_ZERO)) {
-		vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+		vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 		if (vma) {
-			memset(vma, 0, sizeof(*vma));
 			vma->vm_mm = current->mm;
 			vma->vm_end = PAGE_SIZE;
 			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);

@@ -300,12 +300,10 @@ int ia32_setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top,
 	bprm->loader += stack_base;
 	bprm->exec += stack_base;
 
-	mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	mpnt = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (!mpnt)
 		return -ENOMEM;
 
-	memset(mpnt, 0, sizeof(*mpnt));
-
 	down_write(&mm->mmap_sem);
 	{
 		mpnt->vm_mm = mm;

@@ -134,14 +134,13 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
 	if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
 		return ERR_PTR(-EINVAL);
 
-	my_cq = kmem_cache_alloc(cq_cache, GFP_KERNEL);
+	my_cq = kmem_cache_zalloc(cq_cache, GFP_KERNEL);
 	if (!my_cq) {
 		ehca_err(device, "Out of memory for ehca_cq struct device=%p",
 			 device);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	memset(my_cq, 0, sizeof(struct ehca_cq));
 	memset(&param, 0, sizeof(struct ehca_alloc_cq_parms));
 
 	spin_lock_init(&my_cq->spinlock);

@@ -53,9 +53,8 @@ static struct ehca_mr *ehca_mr_new(void)
 {
 	struct ehca_mr *me;
 
-	me = kmem_cache_alloc(mr_cache, GFP_KERNEL);
+	me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
 	if (me) {
-		memset(me, 0, sizeof(struct ehca_mr));
 		spin_lock_init(&me->mrlock);
 	} else
 		ehca_gen_err("alloc failed");
@@ -72,9 +71,8 @@ static struct ehca_mw *ehca_mw_new(void)
 {
 	struct ehca_mw *me;
 
-	me = kmem_cache_alloc(mw_cache, GFP_KERNEL);
+	me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
 	if (me) {
-		memset(me, 0, sizeof(struct ehca_mw));
 		spin_lock_init(&me->mwlock);
 	} else
 		ehca_gen_err("alloc failed");

@@ -50,14 +50,13 @@ struct ib_pd *ehca_alloc_pd(struct ib_device *device,
 {
 	struct ehca_pd *pd;
 
-	pd = kmem_cache_alloc(pd_cache, GFP_KERNEL);
+	pd = kmem_cache_zalloc(pd_cache, GFP_KERNEL);
 	if (!pd) {
 		ehca_err(device, "device=%p context=%p out of memory",
 			 device, context);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	memset(pd, 0, sizeof(struct ehca_pd));
 	pd->ownpid = current->tgid;
 
 	/*

@@ -450,13 +450,12 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
 	if (pd->uobject && udata)
 		context = pd->uobject->context;
 
-	my_qp = kmem_cache_alloc(qp_cache, GFP_KERNEL);
+	my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
 	if (!my_qp) {
 		ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	memset(my_qp, 0, sizeof(struct ehca_qp));
 	memset (&parms, 0, sizeof(struct ehca_alloc_qp_parms));
 	spin_lock_init(&my_qp->spinlock_s);
 	spin_lock_init(&my_qp->spinlock_r);

@@ -1052,10 +1052,9 @@ static inline struct asd_ascb *asd_ascb_alloc(struct asd_ha_struct *asd_ha,
 	struct asd_ascb *ascb;
 	unsigned long flags;
 
-	ascb = kmem_cache_alloc(asd_ascb_cache, gfp_flags);
+	ascb = kmem_cache_zalloc(asd_ascb_cache, gfp_flags);
 
 	if (ascb) {
-		memset(ascb, 0, sizeof(*ascb));
 		ascb->dma_scb.size = sizeof(struct scb);
 		ascb->dma_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool,
 						     gfp_flags,

@@ -388,10 +388,9 @@ int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
 	int err = 0;
 	int write = (data_direction == DMA_TO_DEVICE);
 
-	sioc = kmem_cache_alloc(scsi_io_context_cache, gfp);
+	sioc = kmem_cache_zalloc(scsi_io_context_cache, gfp);
 	if (!sioc)
 		return DRIVER_ERROR << 24;
-	memset(sioc, 0, sizeof(*sioc));
 
 	req = blk_get_request(sdev->request_queue, write, gfp);
 	if (!req)

@@ -2163,9 +2163,8 @@ static void etrax_usb_add_to_bulk_sb_list(struct urb *urb, int epid)
 
 	maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
 
-	sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_FLAG);
+	sb_desc = kmem_cache_zalloc(usb_desc_cache, SLAB_FLAG);
 	assert(sb_desc != NULL);
-	memset(sb_desc, 0, sizeof(USB_SB_Desc_t));
 
 
 	if (usb_pipeout(urb->pipe)) {

@@ -624,12 +624,10 @@ static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
 {
 	struct urb_priv *urbp;
 
-	urbp = kmem_cache_alloc(uhci_up_cachep, GFP_ATOMIC);
+	urbp = kmem_cache_zalloc(uhci_up_cachep, GFP_ATOMIC);
 	if (!urbp)
 		return NULL;
 
-	memset((void *)urbp, 0, sizeof(*urbp));
-
 	urbp->urb = urb;
 	urb->hcpriv = urbp;
 

fs/aio.c | 3 +--

@@ -211,11 +211,10 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 	if ((unsigned long)nr_events > aio_max_nr)
 		return ERR_PTR(-EAGAIN);
 
-	ctx = kmem_cache_alloc(kioctx_cachep, GFP_KERNEL);
+	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
 	if (!ctx)
 		return ERR_PTR(-ENOMEM);
 
-	memset(ctx, 0, sizeof(*ctx));
 	ctx->max_reqs = nr_events;
 	mm = ctx->mm = current->mm;
 	atomic_inc(&mm->mm_count);

@@ -72,11 +72,10 @@ static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent * pare
 {
 	struct configfs_dirent * sd;
 
-	sd = kmem_cache_alloc(configfs_dir_cachep, GFP_KERNEL);
+	sd = kmem_cache_zalloc(configfs_dir_cachep, GFP_KERNEL);
 	if (!sd)
 		return NULL;
 
-	memset(sd, 0, sizeof(*sd));
 	atomic_set(&sd->s_count, 1);
 	INIT_LIST_HEAD(&sd->s_links);
 	INIT_LIST_HEAD(&sd->s_children);

@@ -76,9 +76,7 @@ struct dlm_lkb *allocate_lkb(struct dlm_ls *ls)
 {
 	struct dlm_lkb *lkb;
 
-	lkb = kmem_cache_alloc(lkb_cache, GFP_KERNEL);
-	if (lkb)
-		memset(lkb, 0, sizeof(*lkb));
+	lkb = kmem_cache_zalloc(lkb_cache, GFP_KERNEL);
 	return lkb;
 }
 

@@ -600,11 +600,10 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
 {
 	struct dquot *dquot;
 
-	dquot = kmem_cache_alloc(dquot_cachep, GFP_NOFS);
+	dquot = kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
 	if(!dquot)
 		return NODQUOT;
 
-	memset((caddr_t)dquot, 0, sizeof(struct dquot));
 	mutex_init(&dquot->dq_lock);
 	INIT_LIST_HEAD(&dquot->dq_free);
 	INIT_LIST_HEAD(&dquot->dq_inuse);

@@ -1332,13 +1332,13 @@ int ecryptfs_write_headers(struct dentry *ecryptfs_dentry,
 		goto out;
 	}
 	/* Released in this function */
-	page_virt = kmem_cache_alloc(ecryptfs_header_cache_0, GFP_USER);
+	page_virt = kmem_cache_zalloc(ecryptfs_header_cache_0, GFP_USER);
 	if (!page_virt) {
 		ecryptfs_printk(KERN_ERR, "Out of memory\n");
 		rc = -ENOMEM;
 		goto out;
 	}
-	memset(page_virt, 0, PAGE_CACHE_SIZE);
+
 	rc = ecryptfs_write_headers_virt(page_virt, crypt_stat,
 					 ecryptfs_dentry);
 	if (unlikely(rc)) {

@@ -251,7 +251,7 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
 	int lower_flags;
 
 	/* Released in ecryptfs_release or end of function if failure */
-	file_info = kmem_cache_alloc(ecryptfs_file_info_cache, GFP_KERNEL);
+	file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL);
 	ecryptfs_set_file_private(file, file_info);
 	if (!file_info) {
 		ecryptfs_printk(KERN_ERR,
@@ -259,7 +259,6 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
 		rc = -ENOMEM;
 		goto out;
 	}
-	memset(file_info, 0, sizeof(*file_info));
 	lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry);
 	crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
 	mount_crypt_stat = &ecryptfs_superblock_to_private(

@@ -361,8 +361,7 @@ static struct dentry *ecryptfs_lookup(struct inode *dir, struct dentry *dentry,
 		goto out;
 	}
 	/* Released in this function */
-	page_virt =
-		(char *)kmem_cache_alloc(ecryptfs_header_cache_2,
+	page_virt = kmem_cache_zalloc(ecryptfs_header_cache_2,
 					 GFP_USER);
 	if (!page_virt) {
 		rc = -ENOMEM;
@@ -370,7 +369,7 @@ static struct dentry *ecryptfs_lookup(struct inode *dir, struct dentry *dentry,
 				"Cannot ecryptfs_kmalloc a page\n");
 		goto out_dput;
 	}
-	memset(page_virt, 0, PAGE_CACHE_SIZE);
+
 	rc = ecryptfs_read_header_region(page_virt, lower_dentry, nd->mnt);
 	crypt_stat = &ecryptfs_inode_to_private(dentry->d_inode)->crypt_stat;
 	if (!ECRYPTFS_CHECK_FLAG(crypt_stat->flags, ECRYPTFS_POLICY_APPLIED))

@@ -207,14 +207,12 @@ parse_tag_3_packet(struct ecryptfs_crypt_stat *crypt_stat,
 	/* Released: wipe_auth_tok_list called in ecryptfs_parse_packet_set or
 	 * at end of function upon failure */
 	auth_tok_list_item =
-		kmem_cache_alloc(ecryptfs_auth_tok_list_item_cache, GFP_KERNEL);
+		kmem_cache_zalloc(ecryptfs_auth_tok_list_item_cache, GFP_KERNEL);
 	if (!auth_tok_list_item) {
 		ecryptfs_printk(KERN_ERR, "Unable to allocate memory\n");
 		rc = -ENOMEM;
 		goto out;
 	}
-	memset(auth_tok_list_item, 0,
-	       sizeof(struct ecryptfs_auth_tok_list_item));
 	(*new_auth_tok) = &auth_tok_list_item->auth_tok;
 
 	/* check for body size - one to two bytes */

@@ -378,15 +378,13 @@ ecryptfs_fill_super(struct super_block *sb, void *raw_data, int silent)
 
 	/* Released in ecryptfs_put_super() */
 	ecryptfs_set_superblock_private(sb,
-					kmem_cache_alloc(ecryptfs_sb_info_cache,
+					kmem_cache_zalloc(ecryptfs_sb_info_cache,
 							 GFP_KERNEL));
 	if (!ecryptfs_superblock_to_private(sb)) {
 		ecryptfs_printk(KERN_WARNING, "Out of memory\n");
 		rc = -ENOMEM;
 		goto out;
 	}
-	memset(ecryptfs_superblock_to_private(sb), 0,
-	       sizeof(struct ecryptfs_sb_info));
 	sb->s_op = &ecryptfs_sops;
 	/* Released through deactivate_super(sb) from get_sb_nodev */
 	sb->s_root = d_alloc(NULL, &(const struct qstr) {
@@ -402,7 +400,7 @@ ecryptfs_fill_super(struct super_block *sb, void *raw_data, int silent)
 	/* Released in d_release when dput(sb->s_root) is called */
 	/* through deactivate_super(sb) from get_sb_nodev() */
 	ecryptfs_set_dentry_private(sb->s_root,
-				    kmem_cache_alloc(ecryptfs_dentry_info_cache,
+				    kmem_cache_zalloc(ecryptfs_dentry_info_cache,
 						     GFP_KERNEL));
 	if (!ecryptfs_dentry_to_private(sb->s_root)) {
 		ecryptfs_printk(KERN_ERR,
@@ -410,8 +408,6 @@ ecryptfs_fill_super(struct super_block *sb, void *raw_data, int silent)
 		rc = -ENOMEM;
 		goto out;
 	}
-	memset(ecryptfs_dentry_to_private(sb->s_root), 0,
-	       sizeof(struct ecryptfs_dentry_info));
 	rc = 0;
 out:
 	/* Should be able to rely on deactivate_super called from

@@ -405,12 +405,10 @@ int setup_arg_pages(struct linux_binprm *bprm,
 	bprm->loader += stack_base;
 	bprm->exec += stack_base;
 
-	mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	mpnt = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (!mpnt)
 		return -ENOMEM;
 
-	memset(mpnt, 0, sizeof(*mpnt));
-
 	down_write(&mm->mmap_sem);
 	{
 		mpnt->vm_mm = mm;

@@ -282,8 +282,7 @@ void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
 		return;
 	}
 
-	bd = kmem_cache_alloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL),
-	memset(bd, 0, sizeof(struct gfs2_bufdata));
+	bd = kmem_cache_zalloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL),
 	bd->bd_bh = bh;
 	bd->bd_gl = gl;
 

@@ -53,9 +53,8 @@ static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
 
 struct vfsmount *alloc_vfsmnt(const char *name)
 {
-	struct vfsmount *mnt = kmem_cache_alloc(mnt_cache, GFP_KERNEL);
+	struct vfsmount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
 	if (mnt) {
-		memset(mnt, 0, sizeof(struct vfsmount));
 		atomic_set(&mnt->mnt_count, 1);
 		INIT_LIST_HEAD(&mnt->mnt_hash);
 		INIT_LIST_HEAD(&mnt->mnt_child);

@@ -61,7 +61,7 @@ static struct smb_request *smb_do_alloc_request(struct smb_sb_info *server,
 	struct smb_request *req;
 	unsigned char *buf = NULL;
 
-	req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
+	req = kmem_cache_zalloc(req_cachep, GFP_KERNEL);
 	VERBOSE("allocating request: %p\n", req);
 	if (!req)
 		goto out;
@@ -74,7 +74,6 @@ static struct smb_request *smb_do_alloc_request(struct smb_sb_info *server,
 		}
 	}
 
-	memset(req, 0, sizeof(struct smb_request));
 	req->rq_buffer = buf;
 	req->rq_bufsize = bufsize;
 	req->rq_server = server;

@@ -37,11 +37,10 @@ static struct sysfs_dirent * __sysfs_new_dirent(void * element)
 {
 	struct sysfs_dirent * sd;
 
-	sd = kmem_cache_alloc(sysfs_dir_cachep, GFP_KERNEL);
+	sd = kmem_cache_zalloc(sysfs_dir_cachep, GFP_KERNEL);
 	if (!sd)
 		return NULL;
 
-	memset(sd, 0, sizeof(*sd));
 	atomic_set(&sd->s_count, 1);
 	atomic_set(&sd->s_event, 1);
 	INIT_LIST_HEAD(&sd->s_children);

@@ -558,10 +558,9 @@ struct sas_task {
 static inline struct sas_task *sas_alloc_task(gfp_t flags)
 {
 	extern struct kmem_cache *sas_task_cache;
-	struct sas_task *task = kmem_cache_alloc(sas_task_cache, flags);
+	struct sas_task *task = kmem_cache_zalloc(sas_task_cache, flags);
 
 	if (task) {
-		memset(task, 0, sizeof(*task));
 		INIT_LIST_HEAD(&task->list);
 		spin_lock_init(&task->task_state_lock);
 		task->task_state_flags = SAS_TASK_STATE_PENDING;

@@ -399,10 +399,9 @@ EXPORT_SYMBOL_GPL(register_posix_clock);
 static struct k_itimer * alloc_posix_timer(void)
 {
 	struct k_itimer *tmr;
-	tmr = kmem_cache_alloc(posix_timers_cache, GFP_KERNEL);
+	tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);
 	if (!tmr)
 		return tmr;
-	memset(tmr, 0, sizeof (struct k_itimer));
 	if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
 		kmem_cache_free(posix_timers_cache, tmr);
 		tmr = NULL;

@@ -132,10 +132,9 @@ void * dst_alloc(struct dst_ops * ops)
 		if (ops->gc())
 			return NULL;
 	}
-	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
+	dst = kmem_cache_zalloc(ops->kmem_cachep, GFP_ATOMIC);
 	if (!dst)
 		return NULL;
-	memset(dst, 0, ops->entry_size);
 	atomic_set(&dst->__refcnt, 0);
 	dst->ops = ops;
 	dst->lastuse = jiffies;

@@ -251,12 +251,10 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl)
 		goto out_entries;
 	}
 
-	n = kmem_cache_alloc(tbl->kmem_cachep, GFP_ATOMIC);
+	n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
 	if (!n)
 		goto out_entries;
 
-	memset(n, 0, tbl->entry_size);
-
 	skb_queue_head_init(&n->arp_queue);
 	rwlock_init(&n->lock);
 	n->updated = n->used = now;

@@ -593,12 +593,10 @@ create:
 
 replace:
 	err = -ENOBUFS;
-	new_f = kmem_cache_alloc(dn_hash_kmem, GFP_KERNEL);
+	new_f = kmem_cache_zalloc(dn_hash_kmem, GFP_KERNEL);
 	if (new_f == NULL)
 		goto out;
 
-	memset(new_f, 0, sizeof(struct dn_fib_node));
-
 	new_f->fn_key = key;
 	new_f->fn_type = type;
 	new_f->fn_scope = r->rtm_scope;

@@ -479,20 +479,18 @@ static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp)
  */
 static struct mfc_cache *ipmr_cache_alloc(void)
 {
-	struct mfc_cache *c=kmem_cache_alloc(mrt_cachep, GFP_KERNEL);
+	struct mfc_cache *c=kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
 	if(c==NULL)
 		return NULL;
-	memset(c, 0, sizeof(*c));
 	c->mfc_un.res.minvif = MAXVIFS;
 	return c;
 }
 
 static struct mfc_cache *ipmr_cache_alloc_unres(void)
 {
-	struct mfc_cache *c=kmem_cache_alloc(mrt_cachep, GFP_ATOMIC);
+	struct mfc_cache *c=kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
 	if(c==NULL)
 		return NULL;
-	memset(c, 0, sizeof(*c));
 	skb_queue_head_init(&c->mfc_un.unres.unresolved);
 	c->mfc_un.unres.expires = jiffies + 10*HZ;
 	return c;

@@ -603,13 +603,12 @@ ip_vs_conn_new(int proto, __be32 caddr, __be16 cport, __be32 vaddr, __be16 vport
 	struct ip_vs_conn *cp;
 	struct ip_vs_protocol *pp = ip_vs_proto_get(proto);
 
-	cp = kmem_cache_alloc(ip_vs_conn_cachep, GFP_ATOMIC);
+	cp = kmem_cache_zalloc(ip_vs_conn_cachep, GFP_ATOMIC);
 	if (cp == NULL) {
 		IP_VS_ERR_RL("ip_vs_conn_new: no memory available.\n");
 		return NULL;
 	}
 
-	memset(cp, 0, sizeof(*cp));
 	INIT_LIST_HEAD(&cp->c_list);
 	init_timer(&cp->timer);
 	cp->timer.data = (unsigned long)cp;

@@ -638,14 +638,13 @@ struct ip_conntrack *ip_conntrack_alloc(struct ip_conntrack_tuple *orig,
 		}
 	}
 
-	conntrack = kmem_cache_alloc(ip_conntrack_cachep, GFP_ATOMIC);
+	conntrack = kmem_cache_zalloc(ip_conntrack_cachep, GFP_ATOMIC);
 	if (!conntrack) {
 		DEBUGP("Can't allocate conntrack.\n");
 		atomic_dec(&ip_conntrack_count);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	memset(conntrack, 0, sizeof(*conntrack));
 	atomic_set(&conntrack->ct_general.use, 1);
 	conntrack->ct_general.destroy = destroy_conntrack;
 	conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;

@@ -150,8 +150,7 @@ static __inline__ struct fib6_node * node_alloc(void)
 {
 	struct fib6_node *fn;
 
-	if ((fn = kmem_cache_alloc(fib6_node_kmem, GFP_ATOMIC)) != NULL)
-		memset(fn, 0, sizeof(struct fib6_node));
+	fn = kmem_cache_zalloc(fib6_node_kmem, GFP_ATOMIC);
 
 	return fn;
 }

@@ -979,11 +979,10 @@ struct sctp_chunk *sctp_chunkify(struct sk_buff *skb,
 {
 	struct sctp_chunk *retval;
 
-	retval = kmem_cache_alloc(sctp_chunk_cachep, GFP_ATOMIC);
+	retval = kmem_cache_zalloc(sctp_chunk_cachep, GFP_ATOMIC);
 
 	if (!retval)
 		goto nodata;
-	memset(retval, 0, sizeof(struct sctp_chunk));
 
 	if (!sk) {
 		SCTP_DEBUG_PRINTK("chunkifying skb %p w/o an sk\n", skb);

@@ -332,11 +332,10 @@ static struct avc_node *avc_alloc_node(void)
 {
 	struct avc_node *node;
 
-	node = kmem_cache_alloc(avc_node_cachep, GFP_ATOMIC);
+	node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC);
 	if (!node)
 		goto out;
 
-	memset(node, 0, sizeof(*node));
 	INIT_RCU_HEAD(&node->rhead);
 	INIT_LIST_HEAD(&node->list);
 	atomic_set(&node->ae.used, 1);

@@ -181,11 +181,10 @@ static int inode_alloc_security(struct inode *inode)
 	struct task_security_struct *tsec = current->security;
 	struct inode_security_struct *isec;
 
-	isec = kmem_cache_alloc(sel_inode_cache, GFP_KERNEL);
+	isec = kmem_cache_zalloc(sel_inode_cache, GFP_KERNEL);
 	if (!isec)
 		return -ENOMEM;
 
-	memset(isec, 0, sizeof(*isec));
 	mutex_init(&isec->lock);
 	INIT_LIST_HEAD(&isec->list);
 	isec->inode = inode;

@@ -36,10 +36,9 @@ avtab_insert_node(struct avtab *h, int hvalue,
 		  struct avtab_key *key, struct avtab_datum *datum)
 {
 	struct avtab_node * newnode;
-	newnode = kmem_cache_alloc(avtab_node_cachep, GFP_KERNEL);
+	newnode = kmem_cache_zalloc(avtab_node_cachep, GFP_KERNEL);
 	if (newnode == NULL)
 		return NULL;
-	memset(newnode, 0, sizeof(struct avtab_node));
 	newnode->key = *key;
 	newnode->datum = *datum;
 	if (prev) {