block: prevent possible io_context->refcount overflow

Currently io_context has an atomic_t (32-bit) as its refcount.  In the case
of cfq, a reference to the io_context is taken for each device against
which a task does I/O.  And when multiple processes share an io_context
(CLONE_IO), each of them also holds a reference to the same io_context.

Theoretically, the maximum number of processes sharing the same io_context
plus the number of disks/cfq_data referring to that io_context could
overflow the 32-bit counter on a very high-end machine.

Even though it is an improbable case, let us make it atomic_long_t.

Signed-off-by: Nikanth Karthikesan <knikanth@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Author: Nikanth Karthikesan <knikanth@suse.de>
Date:   2009-06-10 12:57:06 -07:00
Committer: Jens Axboe <jens.axboe@oracle.com>
parent 1d589bb16b
commit d9c7d394a8

3 files changed, 10 insertions(+), 10 deletions(-)
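To make the commit message's reasoning concrete, here is a minimal
userspace sketch of the get/put refcounting pattern the patch widens.
struct obj and the helper names are invented for illustration, and C11
atomics stand in for the kernel's atomic_long_t; the point is only that a
32-bit count wraps once it passes 2^31 - 1 holders, while long gives
64-bit headroom on LP64 machines.

#include <stdatomic.h>
#include <stdlib.h>

/* Illustrative stand-in for struct io_context; only the counter matters. */
struct obj {
	atomic_long refcount;	/* was 32-bit: at most 2^31 - 1 holders */
};

static struct obj *obj_alloc(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (o)
		atomic_init(&o->refcount, 1);	/* creator holds the first ref */
	return o;
}

static void obj_get(struct obj *o)
{
	atomic_fetch_add(&o->refcount, 1);	/* one per sharer: task or device */
}

static void obj_put(struct obj *o)
{
	/* fetch_sub returns the old value; 1 means this was the last ref */
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		free(o);
}

int main(void)
{
	struct obj *o = obj_alloc();

	if (!o)
		return 1;
	obj_get(o);	/* e.g. a second task sharing the context */
	obj_put(o);	/* that task exits */
	obj_put(o);	/* creator's put: count hits zero, o is freed */
	return 0;
}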

--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c

@@ -35,9 +35,9 @@ int put_io_context(struct io_context *ioc)
 	if (ioc == NULL)
 		return 1;
 
-	BUG_ON(atomic_read(&ioc->refcount) == 0);
+	BUG_ON(atomic_long_read(&ioc->refcount) == 0);
 
-	if (atomic_dec_and_test(&ioc->refcount)) {
+	if (atomic_long_dec_and_test(&ioc->refcount)) {
 		rcu_read_lock();
 		if (ioc->aic && ioc->aic->dtor)
 			ioc->aic->dtor(ioc->aic);
@@ -90,7 +90,7 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
 
 	ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
 	if (ret) {
-		atomic_set(&ret->refcount, 1);
+		atomic_long_set(&ret->refcount, 1);
 		atomic_set(&ret->nr_tasks, 1);
 		spin_lock_init(&ret->lock);
 		ret->ioprio_changed = 0;
@@ -151,7 +151,7 @@ struct io_context *get_io_context(gfp_t gfp_flags, int node)
 		ret = current_io_context(gfp_flags, node);
 		if (unlikely(!ret))
 			break;
-	} while (!atomic_inc_not_zero(&ret->refcount));
+	} while (!atomic_long_inc_not_zero(&ret->refcount));
 
 	return ret;
 }
@@ -163,8 +163,8 @@ void copy_io_context(struct io_context **pdst, struct io_context **psrc)
 	struct io_context *dst = *pdst;
 
 	if (src) {
-		BUG_ON(atomic_read(&src->refcount) == 0);
-		atomic_inc(&src->refcount);
+		BUG_ON(atomic_long_read(&src->refcount) == 0);
+		atomic_long_inc(&src->refcount);
 		put_io_context(dst);
 		*pdst = src;
 	}
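The do/while loop in get_io_context() above hinges on
atomic_long_inc_not_zero(): a reference is taken only while the count is
still non-zero, so a lookup that races with the final put_io_context()
backs off instead of resurrecting a dying io_context.  Below is a rough
userspace approximation of that primitive using a C11 compare-and-swap
loop; it mirrors the semantics, not the kernel's implementation.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Increment *v unless it is zero; report whether the increment happened. */
static bool long_inc_not_zero(atomic_long *v)
{
	long old = atomic_load(v);

	do {
		if (old == 0)
			return false;	/* lost the race with the last put */
	} while (!atomic_compare_exchange_weak(v, &old, old + 1));

	return true;
}

int main(void)
{
	atomic_long live, dying;

	atomic_init(&live, 3);
	atomic_init(&dying, 0);
	/* prints "live: 1 dying: 0": only the live object can be shared */
	printf("live: %d dying: %d\n",
	       long_inc_not_zero(&live), long_inc_not_zero(&dying));
	return 0;
}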

--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c

@@ -1282,7 +1282,7 @@ static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	if (!cfqd->active_cic) {
 		struct cfq_io_context *cic = RQ_CIC(rq);
 
-		atomic_inc(&cic->ioc->refcount);
+		atomic_long_inc(&cic->ioc->refcount);
 		cfqd->active_cic = cic;
 	}
 }

--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h

@@ -64,7 +64,7 @@ struct cfq_io_context {
  * and kmalloc'ed. These could be shared between processes.
  */
 struct io_context {
-	atomic_t refcount;
+	atomic_long_t refcount;
 	atomic_t nr_tasks;
 
 	/* all the fields below are protected by this lock */
@@ -91,8 +91,8 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
 	 * if ref count is zero, don't allow sharing (ioc is going away, it's
 	 * a race).
 	 */
-	if (ioc && atomic_inc_not_zero(&ioc->refcount)) {
-		atomic_inc(&ioc->nr_tasks);
+	if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
+		atomic_long_inc(&ioc->refcount);
 		return ioc;
 	}
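Finally, a sketch of the two-counter scheme visible in struct io_context
above: refcount pins the structure's memory (tasks, cfq per-device state,
and cfq's active_cic cursor each hold one), while nr_tasks counts live
tasks.  Note in passing that the hunk above, as committed, bumps refcount
on both lines of ioc_task_link() and drops the nr_tasks increment.  The
code below is illustrative only; struct ctx and all helper names are
invented, and C11 atomics stand in for the kernel's types.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Two counters, two jobs: refcount controls when the memory goes away,
 * nr_tasks tells the scheduler when no live task uses the context.  Only
 * refcount needs the long range, since devices add to it as well.
 */
struct ctx {
	atomic_long refcount;
	atomic_int nr_tasks;
};

static void ctx_task_exit(struct ctx *c)
{
	if (atomic_fetch_sub(&c->nr_tasks, 1) == 1)
		puts("last task gone: per-task scheduler state can be torn down");
	if (atomic_fetch_sub(&c->refcount, 1) == 1) {
		puts("last reference gone: context freed");
		free(c);
	}
}

int main(void)
{
	struct ctx *c = malloc(sizeof(*c));

	if (!c)
		return 1;
	atomic_init(&c->refcount, 2);	/* one task + one device reference */
	atomic_init(&c->nr_tasks, 1);

	ctx_task_exit(c);	/* task exits; the device ref still pins c */

	if (atomic_fetch_sub(&c->refcount, 1) == 1)	/* device drops its ref */
		free(c);
	return 0;
}

Keeping the two counts separate is what lets a task's exit release
scheduler state while a device reference still keeps the memory valid.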