ring-buffer: add reader lock
Impact: serialize reader accesses to individual CPU ring buffers

The ring buffer code expects only one reader at a time, but currently
it puts that requirement on the caller. This is not strong enough, so
this patch adds a "reader_lock" that serializes access to the reader
API of the ring buffer.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent f536aafc5a
commit f83c9d0fe4

1 changed file with 79 additions and 32 deletions
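In short, the public reader entry points now take a per-cpu "reader_lock" before calling the internal, lock-free helpers, so concurrent readers of the same CPU buffer are serialized inside the ring buffer itself rather than by the caller. A minimal caller-side sketch of the new contract (illustrative only; process_event() is a hypothetical consumer hook, not a kernel API):

/*
 * Two threads running this loop against the same cpu are now safe:
 * ring_buffer_consume() takes cpu_buffer->reader_lock internally.
 * Before this patch, the caller had to provide that mutual exclusion.
 */
static int reader_thread(void *arg)
{
	struct ring_buffer *buffer = arg;
	struct ring_buffer_event *event;
	u64 ts;

	/* drain everything currently readable on CPU 0 */
	while ((event = ring_buffer_consume(buffer, 0, &ts)))
		process_event(event, ts);	/* hypothetical */

	return 0;
}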
@@ -154,6 +154,7 @@ static inline int test_time_stamp(u64 delta)
 struct ring_buffer_per_cpu {
 	int cpu;
 	struct ring_buffer *buffer;
+	spinlock_t reader_lock; /* serialize readers */
 	raw_spinlock_t lock;
 	struct lock_class_key lock_key;
 	struct list_head pages;
@@ -321,6 +322,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 
 	cpu_buffer->cpu = cpu;
 	cpu_buffer->buffer = buffer;
+	spin_lock_init(&cpu_buffer->reader_lock);
 	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 	INIT_LIST_HEAD(&cpu_buffer->pages);
 
@@ -1476,6 +1478,9 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
 {
 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
 	/* Iterator usage is expected to have record disabled */
 	if (list_empty(&cpu_buffer->reader_page->list)) {
@@ -1489,6 +1494,8 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
 		iter->read_stamp = cpu_buffer->read_stamp;
 	else
 		iter->read_stamp = iter->head_page->time_stamp;
+
+	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
 
 /**
@@ -1707,17 +1714,8 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
 		rb_advance_iter(iter);
 }
 
-/**
- * ring_buffer_peek - peek at the next event to be read
- * @buffer: The ring buffer to read
- * @cpu: The cpu to peak at
- * @ts: The timestamp counter of this event.
- *
- * This will return the event that will be read next, but does
- * not consume the data.
- */
-struct ring_buffer_event *
-ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
+static struct ring_buffer_event *
+rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event;
@@ -1779,16 +1777,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	return NULL;
 }
 
-/**
- * ring_buffer_iter_peek - peek at the next event to be read
- * @iter: The ring buffer iterator
- * @ts: The timestamp counter of this event.
- *
- * This will return the event that will be read next, but does
- * not increment the iterator.
- */
-struct ring_buffer_event *
-ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
+static struct ring_buffer_event *
+rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 {
 	struct ring_buffer *buffer;
 	struct ring_buffer_per_cpu *cpu_buffer;
@@ -1849,6 +1839,51 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	return NULL;
 }
 
+/**
+ * ring_buffer_peek - peek at the next event to be read
+ * @buffer: The ring buffer to read
+ * @cpu: The cpu to peak at
+ * @ts: The timestamp counter of this event.
+ *
+ * This will return the event that will be read next, but does
+ * not consume the data.
+ */
+struct ring_buffer_event *
+ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
+{
+	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+	struct ring_buffer_event *event;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	event = rb_buffer_peek(buffer, cpu, ts);
+	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+	return event;
+}
+
+/**
+ * ring_buffer_iter_peek - peek at the next event to be read
+ * @iter: The ring buffer iterator
+ * @ts: The timestamp counter of this event.
+ *
+ * This will return the event that will be read next, but does
+ * not increment the iterator.
+ */
+struct ring_buffer_event *
+ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
+{
+	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+	struct ring_buffer_event *event;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	event = rb_iter_peek(iter, ts);
+	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+	return event;
+}
+
 /**
  * ring_buffer_consume - return an event and consume it
  * @buffer: The ring buffer to get the next event from
@@ -1860,19 +1895,24 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 struct ring_buffer_event *
 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 {
-	struct ring_buffer_per_cpu *cpu_buffer;
+	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	struct ring_buffer_event *event;
+	unsigned long flags;
 
 	if (!cpu_isset(cpu, buffer->cpumask))
 		return NULL;
 
-	event = ring_buffer_peek(buffer, cpu, ts);
-	if (!event)
-		return NULL;
+	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+
+	event = rb_buffer_peek(buffer, cpu, ts);
+	if (!event)
+		goto out;
 
-	cpu_buffer = buffer->buffers[cpu];
 	rb_advance_reader(cpu_buffer);
 
+ out:
+	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
 	return event;
 }
@@ -1909,11 +1949,11 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	atomic_inc(&cpu_buffer->record_disabled);
 	synchronize_sched();
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	__raw_spin_lock(&cpu_buffer->lock);
 	ring_buffer_iter_reset(iter);
 	__raw_spin_unlock(&cpu_buffer->lock);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	return iter;
 }
@@ -1945,12 +1985,17 @@ struct ring_buffer_event *
 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 {
 	struct ring_buffer_event *event;
+	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+	unsigned long flags;
 
-	event = ring_buffer_iter_peek(iter, ts);
+	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	event = rb_iter_peek(iter, ts);
 	if (!event)
-		return NULL;
+		goto out;
 
 	rb_advance_iter(iter);
+ out:
+	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	return event;
 }
@@ -1999,13 +2044,15 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	if (!cpu_isset(cpu, buffer->cpumask))
 		return;
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+
 	__raw_spin_lock(&cpu_buffer->lock);
 
 	rb_reset_cpu(cpu_buffer);
 
 	__raw_spin_unlock(&cpu_buffer->lock);
-	local_irq_restore(flags);
+
+	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
 
 /**
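A note on lock nesting, visible in the ring_buffer_read_start() and ring_buffer_reset_cpu() hunks above: wherever both locks are held, the new reader_lock is taken first, with interrupts disabled via spin_lock_irqsave() (replacing the bare local_irq_save()/local_irq_restore() pair), and the pre-existing raw per-cpu lock is nested inside it. A condensed sketch of the ordering (not a complete function):

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);	/* outer: serialize readers */
	__raw_spin_lock(&cpu_buffer->lock);			/* inner: protect buffer state */
	/* ... reset the buffer or move the reader page ... */
	__raw_spin_unlock(&cpu_buffer->lock);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

Taking the two locks in the same order everywhere avoids deadlock between readers, and the irqsave variant preserves the interrupt-disabled section the old code relied on.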