mirror of
https://github.com/adulau/aha.git
synced 2024-12-28 03:36:19 +00:00
ring-buffer: Move access to commit_page up into function used
With the change in the way we process commits — where a commit only happens at the outermost level — we no longer need to worry about a commit ending after rb_start_commit() has been called. The code used to grab the commit page before the tail page to prevent a possible race, but this race no longer exists with the rb_start_commit()/rb_end_commit() interface. Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
This commit is contained in:
parent
8b2a5dac78
commit
5a50e33cc9
1 changed file with 3 additions and 6 deletions
|
@ -1785,9 +1785,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
|
||||||
static struct ring_buffer_event *
|
static struct ring_buffer_event *
|
||||||
rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
|
rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
|
||||||
unsigned long length, unsigned long tail,
|
unsigned long length, unsigned long tail,
|
||||||
struct buffer_page *commit_page,
|
|
||||||
struct buffer_page *tail_page, u64 *ts)
|
struct buffer_page *tail_page, u64 *ts)
|
||||||
{
|
{
|
||||||
|
struct buffer_page *commit_page = cpu_buffer->commit_page;
|
||||||
struct ring_buffer *buffer = cpu_buffer->buffer;
|
struct ring_buffer *buffer = cpu_buffer->buffer;
|
||||||
struct buffer_page *next_page;
|
struct buffer_page *next_page;
|
||||||
int ret;
|
int ret;
|
||||||
|
@ -1890,13 +1890,10 @@ static struct ring_buffer_event *
|
||||||
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
|
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
|
||||||
unsigned type, unsigned long length, u64 *ts)
|
unsigned type, unsigned long length, u64 *ts)
|
||||||
{
|
{
|
||||||
struct buffer_page *tail_page, *commit_page;
|
struct buffer_page *tail_page;
|
||||||
struct ring_buffer_event *event;
|
struct ring_buffer_event *event;
|
||||||
unsigned long tail, write;
|
unsigned long tail, write;
|
||||||
|
|
||||||
commit_page = cpu_buffer->commit_page;
|
|
||||||
/* we just need to protect against interrupts */
|
|
||||||
barrier();
|
|
||||||
tail_page = cpu_buffer->tail_page;
|
tail_page = cpu_buffer->tail_page;
|
||||||
write = local_add_return(length, &tail_page->write);
|
write = local_add_return(length, &tail_page->write);
|
||||||
|
|
||||||
|
@ -1907,7 +1904,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
|
||||||
/* See if we shot pass the end of this buffer page */
|
/* See if we shot pass the end of this buffer page */
|
||||||
if (write > BUF_PAGE_SIZE)
|
if (write > BUF_PAGE_SIZE)
|
||||||
return rb_move_tail(cpu_buffer, length, tail,
|
return rb_move_tail(cpu_buffer, length, tail,
|
||||||
commit_page, tail_page, ts);
|
tail_page, ts);
|
||||||
|
|
||||||
/* We reserved something on the buffer */
|
/* We reserved something on the buffer */
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue