mirror of
https://github.com/adulau/aha.git
synced 2024-12-28 03:36:19 +00:00
perf_counter: fix perf_poll()
Impact: fix kerneltop 100% CPU usage Only return a poll event when there's actually been one, poll_wait() doesn't actually wait for the waitq you pass it, it only enqueues you on it. Only once all FDs have been iterated and none of them returned a poll-event will it schedule(). Also make it return POLL_HUP when there's no mmap() area to read from. Further, fix a silly bug in the write code. Reported-by: Mike Galbraith <efault@gmx.de> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Paul Mackerras <paulus@samba.org> Cc: Arjan van de Ven <arjan@infradead.org> Orig-LKML-Reference: <1237897096.24918.181.camel@twins> Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
f66c6b2066
commit
c7138f37f9
2 changed files with 13 additions and 2 deletions
|
@ -246,6 +246,7 @@ struct file;
|
|||
struct perf_mmap_data {
|
||||
struct rcu_head rcu_head;
|
||||
int nr_pages;
|
||||
atomic_t wakeup;
|
||||
atomic_t head;
|
||||
struct perf_counter_mmap_page *user_page;
|
||||
void *data_pages[0];
|
||||
|
|
|
@ -1161,7 +1161,16 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
|
|||
static unsigned int perf_poll(struct file *file, poll_table *wait)
|
||||
{
|
||||
struct perf_counter *counter = file->private_data;
|
||||
unsigned int events = POLLIN;
|
||||
struct perf_mmap_data *data;
|
||||
unsigned int events;
|
||||
|
||||
rcu_read_lock();
|
||||
data = rcu_dereference(counter->data);
|
||||
if (data)
|
||||
events = atomic_xchg(&data->wakeup, 0);
|
||||
else
|
||||
events = POLL_HUP;
|
||||
rcu_read_unlock();
|
||||
|
||||
poll_wait(file, &counter->waitq, wait);
|
||||
|
||||
|
@ -1425,7 +1434,7 @@ static int perf_output_write(struct perf_counter *counter, int nmi,
|
|||
|
||||
do {
|
||||
offset = head = atomic_read(&data->head);
|
||||
head += sizeof(u64);
|
||||
head += size;
|
||||
} while (atomic_cmpxchg(&data->head, offset, head) != offset);
|
||||
|
||||
wakeup = (offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT);
|
||||
|
@ -1446,6 +1455,7 @@ static int perf_output_write(struct perf_counter *counter, int nmi,
|
|||
* generate a poll() wakeup for every page boundary crossed
|
||||
*/
|
||||
if (wakeup) {
|
||||
atomic_xchg(&data->wakeup, POLL_IN);
|
||||
__perf_counter_update_userpage(counter, data);
|
||||
if (nmi) {
|
||||
counter->wakeup_pending = 1;
|
||||
|
|
Loading…
Reference in a new issue