static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;
+static atomic_t nr_mmap_tracking __read_mostly;
+static atomic_t nr_munmap_tracking __read_mostly;
+static atomic_t nr_comm_tracking __read_mostly;
+
+int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
+
/*
* Mutex for (sysadmin-configurable) counter reservations:
*/
/*
* Architecture provided APIs - weak aliases:
*/
-extern __weak const struct hw_perf_counter_ops *
-hw_perf_counter_init(struct perf_counter *counter)
+extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
return NULL;
}
return;
counter->state = PERF_COUNTER_STATE_INACTIVE;
- counter->tstamp_stopped = ctx->time_now;
- counter->hw_ops->disable(counter);
+ counter->tstamp_stopped = ctx->time;
+ counter->pmu->disable(counter);
counter->oncpu = -1;
if (!is_software_counter(counter))
if (ctx->task && cpuctx->task_ctx != ctx)
return;
- curr_rq_lock_irq_save(&flags);
- spin_lock(&ctx->lock);
+ spin_lock_irqsave(&ctx->lock, flags);
counter_sched_out(counter, cpuctx, ctx);
perf_max_counters - perf_reserved_percpu);
}
- spin_unlock(&ctx->lock);
- curr_rq_unlock_irq_restore(&flags);
+ spin_unlock_irqrestore(&ctx->lock, flags);
}
spin_unlock_irq(&ctx->lock);
}
-/*
- * Get the current time for this context.
- * If this is a task context, we use the task's task clock,
- * or for a per-cpu context, we use the cpu clock.
- */
-static u64 get_context_time(struct perf_counter_context *ctx, int update)
+static inline u64 perf_clock(void)
{
- struct task_struct *curr = ctx->task;
-
- if (!curr)
- return cpu_clock(smp_processor_id());
-
- return __task_delta_exec(curr, update) + curr->se.sum_exec_runtime;
+ return cpu_clock(smp_processor_id());
}
/*
* Update the record of the current time in a context.
*/
-static void update_context_time(struct perf_counter_context *ctx, int update)
+static void update_context_time(struct perf_counter_context *ctx)
{
- ctx->time_now = get_context_time(ctx, update) - ctx->time_lost;
+ u64 now = perf_clock();
+
+ ctx->time += now - ctx->timestamp;
+ ctx->timestamp = now;
}
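
The timekeeping rework above replaces the old task-clock arithmetic with a plain accumulate-on-update scheme: ctx->time grows by the delta since the last ctx->timestamp each time update_context_time() runs, and sched-in simply resets ctx->timestamp so time spent scheduled out is never accumulated. A minimal userspace sketch of the same pattern, using clock_gettime() in place of perf_clock() (all names here are illustrative only):

    #include <stdint.h>
    #include <time.h>

    struct ctx_time {
        uint64_t time;          /* accumulated "context" time, in ns */
        uint64_t timestamp;     /* clock value at the last update */
    };

    static uint64_t clock_ns(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
    }

    static void ctx_update_time(struct ctx_time *ctx)
    {
        uint64_t now = clock_ns();

        ctx->time += now - ctx->timestamp;  /* advance only by the delta */
        ctx->timestamp = now;
    }
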
/*
struct perf_counter_context *ctx = counter->ctx;
u64 run_end;
- if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
- counter->total_time_enabled = ctx->time_now -
- counter->tstamp_enabled;
- if (counter->state == PERF_COUNTER_STATE_INACTIVE)
- run_end = counter->tstamp_stopped;
- else
- run_end = ctx->time_now;
- counter->total_time_running = run_end - counter->tstamp_running;
- }
+ if (counter->state < PERF_COUNTER_STATE_INACTIVE)
+ return;
+
+ counter->total_time_enabled = ctx->time - counter->tstamp_enabled;
+
+ if (counter->state == PERF_COUNTER_STATE_INACTIVE)
+ run_end = counter->tstamp_stopped;
+ else
+ run_end = ctx->time;
+
+ counter->total_time_running = run_end - counter->tstamp_running;
}
/*
if (ctx->task && cpuctx->task_ctx != ctx)
return;
- curr_rq_lock_irq_save(&flags);
- spin_lock(&ctx->lock);
+ spin_lock_irqsave(&ctx->lock, flags);
/*
* If the counter is on, turn it off.
* If it is in error state, leave it in error state.
*/
if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
- update_context_time(ctx, 1);
+ update_context_time(ctx);
update_counter_times(counter);
if (counter == counter->group_leader)
group_sched_out(counter, cpuctx, ctx);
counter->state = PERF_COUNTER_STATE_OFF;
}
- spin_unlock(&ctx->lock);
- curr_rq_unlock_irq_restore(&flags);
+ spin_unlock_irqrestore(&ctx->lock, flags);
}
/*
*/
smp_wmb();
- if (counter->hw_ops->enable(counter)) {
+ if (counter->pmu->enable(counter)) {
counter->state = PERF_COUNTER_STATE_INACTIVE;
counter->oncpu = -1;
return -EAGAIN;
}
- counter->tstamp_running += ctx->time_now - counter->tstamp_stopped;
+ counter->tstamp_running += ctx->time - counter->tstamp_stopped;
if (!is_software_counter(counter))
cpuctx->active_oncpu++;
list_add_counter(counter, ctx);
ctx->nr_counters++;
counter->prev_state = PERF_COUNTER_STATE_OFF;
- counter->tstamp_enabled = ctx->time_now;
- counter->tstamp_running = ctx->time_now;
- counter->tstamp_stopped = ctx->time_now;
+ counter->tstamp_enabled = ctx->time;
+ counter->tstamp_running = ctx->time;
+ counter->tstamp_stopped = ctx->time;
}
/*
if (ctx->task && cpuctx->task_ctx != ctx)
return;
- curr_rq_lock_irq_save(&flags);
- spin_lock(&ctx->lock);
- update_context_time(ctx, 1);
+ spin_lock_irqsave(&ctx->lock, flags);
+ update_context_time(ctx);
/*
* Protect the list operation against NMI by disabling the
unlock:
hw_perf_restore(perf_flags);
- spin_unlock(&ctx->lock);
- curr_rq_unlock_irq_restore(&flags);
+ spin_unlock_irqrestore(&ctx->lock, flags);
}
/*
if (ctx->task && cpuctx->task_ctx != ctx)
return;
- curr_rq_lock_irq_save(&flags);
- spin_lock(&ctx->lock);
- update_context_time(ctx, 1);
+ spin_lock_irqsave(&ctx->lock, flags);
+ update_context_time(ctx);
counter->prev_state = counter->state;
if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
goto unlock;
counter->state = PERF_COUNTER_STATE_INACTIVE;
- counter->tstamp_enabled = ctx->time_now - counter->total_time_enabled;
+ counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
/*
* If the counter is in a group and isn't the group leader,
}
unlock:
- spin_unlock(&ctx->lock);
- curr_rq_unlock_irq_restore(&flags);
+ spin_unlock_irqrestore(&ctx->lock, flags);
}
/*
*/
if (counter->state == PERF_COUNTER_STATE_OFF) {
counter->state = PERF_COUNTER_STATE_INACTIVE;
- counter->tstamp_enabled = ctx->time_now -
- counter->total_time_enabled;
+ counter->tstamp_enabled =
+ ctx->time - counter->total_time_enabled;
}
out:
spin_unlock_irq(&ctx->lock);
}
+static void perf_counter_refresh(struct perf_counter *counter, int refresh)
+{
+ atomic_add(refresh, &counter->event_limit);
+ perf_counter_enable(counter);
+}
+
/*
* Enable a counter and all its children.
*/
ctx->is_active = 0;
if (likely(!ctx->nr_counters))
goto out;
- update_context_time(ctx, 0);
+ update_context_time(ctx);
flags = hw_perf_save_disable();
if (ctx->nr_active) {
if (likely(!cpuctx->task_ctx))
return;
+ update_context_time(ctx);
+
regs = task_pt_regs(task);
- perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs);
+ perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs, 0);
__perf_counter_sched_out(ctx, cpuctx);
cpuctx->task_ctx = NULL;
if (likely(!ctx->nr_counters))
goto out;
- /*
- * Add any time since the last sched_out to the lost time
- * so it doesn't get included in the total_time_enabled and
- * total_time_running measures for counters in the context.
- */
- ctx->time_lost = get_context_time(ctx, 0) - ctx->time_now;
+ ctx->timestamp = perf_clock();
flags = hw_perf_save_disable();
if (likely(!ctx->nr_counters))
return 0;
- curr_rq_lock_irq_save(&flags);
+ local_irq_save(flags);
cpu = smp_processor_id();
- /* force the update of the task clock: */
- __task_delta_exec(curr, 1);
-
perf_counter_task_sched_out(curr, cpu);
spin_lock(&ctx->lock);
hw_perf_restore(perf_flags);
- spin_unlock(&ctx->lock);
-
- curr_rq_unlock_irq_restore(&flags);
+ spin_unlock_irqrestore(&ctx->lock, flags);
return 0;
}
if (likely(!ctx->nr_counters))
return 0;
- curr_rq_lock_irq_save(&flags);
+ local_irq_save(flags);
cpu = smp_processor_id();
- /* force the update of the task clock: */
- __task_delta_exec(curr, 1);
-
perf_counter_task_sched_out(curr, cpu);
spin_lock(&ctx->lock);
if (counter->state > PERF_COUNTER_STATE_OFF)
continue;
counter->state = PERF_COUNTER_STATE_INACTIVE;
- counter->tstamp_enabled = ctx->time_now -
- counter->total_time_enabled;
+ counter->tstamp_enabled =
+ ctx->time - counter->total_time_enabled;
counter->hw_event.disabled = 0;
}
hw_perf_restore(perf_flags);
perf_counter_task_sched_in(curr, cpu);
- curr_rq_unlock_irq_restore(&flags);
+ local_irq_restore(flags);
return 0;
}
struct perf_counter_context *ctx = counter->ctx;
unsigned long flags;
- curr_rq_lock_irq_save(&flags);
+ local_irq_save(flags);
if (ctx->is_active)
- update_context_time(ctx, 1);
- counter->hw_ops->read(counter);
+ update_context_time(ctx);
+ counter->pmu->read(counter);
update_counter_times(counter);
- curr_rq_unlock_irq_restore(&flags);
+ local_irq_restore(flags);
}
static u64 perf_counter_read(struct perf_counter *counter)
*/
if (cpu != -1) {
/* Must be root to operate on a CPU counter: */
- if (!capable(CAP_SYS_ADMIN))
+ if (sysctl_perf_counter_priv && !capable(CAP_SYS_ADMIN))
return ERR_PTR(-EACCES);
if (cpu < 0 || cpu > num_possible_cpus())
{
perf_pending_sync(counter);
+ if (counter->hw_event.mmap)
+ atomic_dec(&nr_mmap_tracking);
+ if (counter->hw_event.munmap)
+ atomic_dec(&nr_munmap_tracking);
+ if (counter->hw_event.comm)
+ atomic_dec(&nr_comm_tracking);
+
if (counter->destroy)
counter->destroy(counter);
case PERF_COUNTER_IOC_DISABLE:
perf_counter_disable_family(counter);
break;
+ case PERF_COUNTER_IOC_REFRESH:
+ perf_counter_refresh(counter, arg);
+ break;
default:
err = -ENOTTY;
}
*/
preempt_disable();
++userpg->lock;
- smp_wmb();
+ barrier();
userpg->index = counter->hw.idx;
userpg->offset = atomic64_read(&counter->count);
if (counter->state == PERF_COUNTER_STATE_ACTIVE)
userpg->offset -= atomic64_read(&counter->hw.prev_count);
- smp_wmb();
+ barrier();
++userpg->lock;
preempt_enable();
unlock:
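
For reference, the lock field bumped on either side of the update above acts as a seqcount-style generation counter in the mmap()ed control page; a userspace reader is expected to retry roughly as in the sketch below. The field names follow struct perf_counter_mmap_page, but the struct stand-in and the loop itself are illustrative, not copied from the headers:

    #include <stdint.h>

    struct mmap_page {                   /* stand-in for the control page */
        volatile uint32_t lock;
        volatile uint32_t index;
        volatile uint64_t offset;
    };

    #define barrier() __asm__ __volatile__("" ::: "memory")

    static void read_counter_page(struct mmap_page *pg,
                                  uint32_t *index, uint64_t *offset)
    {
        uint32_t seq;

        do {
            seq = pg->lock;
            barrier();

            *index  = pg->index;         /* hw counter index (0: not running) */
            *offset = pg->offset;        /* software-visible part of the count */

            barrier();
        } while (pg->lock != seq);       /* writer was active: retry */
    }
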
if (atomic_dec_and_mutex_lock(&counter->mmap_count,
&counter->mmap_mutex)) {
+ vma->vm_mm->locked_vm -= counter->data->nr_pages + 1;
perf_mmap_data_free(counter);
mutex_unlock(&counter->mmap_mutex);
}
}
static struct vm_operations_struct perf_mmap_vmops = {
- .open = perf_mmap_open,
+ .open = perf_mmap_open,
.close = perf_mmap_close,
.fault = perf_mmap_fault,
};
if (vma->vm_pgoff != 0)
return -EINVAL;
- locked = vma_size >> PAGE_SHIFT;
- locked += vma->vm_mm->locked_vm;
+ mutex_lock(&counter->mmap_mutex);
+ if (atomic_inc_not_zero(&counter->mmap_count)) {
+ if (nr_pages != counter->data->nr_pages)
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ locked = vma->vm_mm->locked_vm;
+ locked += nr_pages + 1;
lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
lock_limit >>= PAGE_SHIFT;
- if ((locked > lock_limit) && !capable(CAP_IPC_LOCK))
- return -EPERM;
-
- mutex_lock(&counter->mmap_mutex);
- if (atomic_inc_not_zero(&counter->mmap_count))
- goto out;
+ if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
+ ret = -EPERM;
+ goto unlock;
+ }
WARN_ON(counter->data);
ret = perf_mmap_data_alloc(counter, nr_pages);
- if (!ret)
- atomic_set(&counter->mmap_count, 1);
-out:
+ if (ret)
+ goto unlock;
+
+ atomic_set(&counter->mmap_count, 1);
+ vma->vm_mm->locked_vm += nr_pages + 1;
+unlock:
mutex_unlock(&counter->mmap_mutex);
vma->vm_flags &= ~VM_MAYWRITE;
return ret;
}
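
The mlock accounting above now charges the ring-buffer pages plus the control page (nr_pages + 1) to mm->locked_vm and checks the total against RLIMIT_MEMLOCK before allocating. A toy calculation of that check; the numbers are made up and only the arithmetic mirrors the code:

    #include <stdio.h>

    int main(void)
    {
        unsigned long page_shift = 12;            /* assume 4 KiB pages */
        unsigned long rlim_memlock = 64 * 1024;   /* RLIMIT_MEMLOCK in bytes */
        unsigned long already_locked = 8;         /* pages this mm has locked */
        unsigned long nr_pages = 8;               /* requested data pages */

        unsigned long locked = already_locked + nr_pages + 1; /* +1 control page */
        unsigned long lock_limit = rlim_memlock >> page_shift; /* 16 pages */

        printf("locked=%lu limit=%lu -> %s\n", locked, lock_limit,
               locked > lock_limit ? "-EPERM unless CAP_IPC_LOCK" : "ok");
        return 0;
    }
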
+static int perf_fasync(int fd, struct file *filp, int on)
+{
+ struct perf_counter *counter = filp->private_data;
+ struct inode *inode = filp->f_path.dentry->d_inode;
+ int retval;
+
+ mutex_lock(&inode->i_mutex);
+ retval = fasync_helper(fd, filp, on, &counter->fasync);
+ mutex_unlock(&inode->i_mutex);
+
+ if (retval < 0)
+ return retval;
+
+ return 0;
+}
+
static const struct file_operations perf_fops = {
.release = perf_release,
.read = perf_read,
.unlocked_ioctl = perf_ioctl,
.compat_ioctl = perf_ioctl,
.mmap = perf_mmap,
+ .fasync = perf_fasync,
};
/*
rcu_read_lock();
data = rcu_dereference(counter->data);
if (data) {
- (void)atomic_xchg(&data->wakeup, POLL_IN);
+ atomic_set(&data->wakeup, POLL_IN);
/*
* Ensure all data writes are issued before updating the
* user-space data head information. The matching rmb()
rcu_read_unlock();
wake_up_all(&counter->waitq);
+
+ if (counter->pending_kill) {
+ kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
+ counter->pending_kill = 0;
+ }
}
/*
* single linked list and use cmpxchg() to add entries lockless.
*/
-#define PENDING_TAIL ((struct perf_wakeup_entry *)-1UL)
+static void perf_pending_counter(struct perf_pending_entry *entry)
+{
+ struct perf_counter *counter = container_of(entry,
+ struct perf_counter, pending);
+
+ if (counter->pending_disable) {
+ counter->pending_disable = 0;
+ perf_counter_disable(counter);
+ }
-static DEFINE_PER_CPU(struct perf_wakeup_entry *, perf_wakeup_head) = {
+ if (counter->pending_wakeup) {
+ counter->pending_wakeup = 0;
+ perf_counter_wakeup(counter);
+ }
+}
+
+#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
+
+static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
PENDING_TAIL,
};
-static void perf_pending_queue(struct perf_counter *counter)
+static void perf_pending_queue(struct perf_pending_entry *entry,
+ void (*func)(struct perf_pending_entry *))
{
- struct perf_wakeup_entry **head;
- struct perf_wakeup_entry *prev, *next;
+ struct perf_pending_entry **head;
- if (cmpxchg(&counter->wakeup.next, NULL, PENDING_TAIL) != NULL)
+ if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
return;
- head = &get_cpu_var(perf_wakeup_head);
+ entry->func = func;
+
+ head = &get_cpu_var(perf_pending_head);
do {
- prev = counter->wakeup.next = *head;
- next = &counter->wakeup;
- } while (cmpxchg(head, prev, next) != prev);
+ entry->next = *head;
+ } while (cmpxchg(head, entry->next, entry) != entry->next);
set_perf_counter_pending();
- put_cpu_var(perf_wakeup_head);
+ put_cpu_var(perf_pending_head);
}
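
perf_pending_queue() above is a lock-free push onto a per-CPU singly linked list, which is what makes it safe from NMI context; the NULL -> PENDING_TAIL cmpxchg on entry->next doubles as an "already queued" flag. The same pattern, compressed into a compilable userspace sketch with C11 atomics (one global head instead of a per-CPU one, and all names invented for illustration):

    #include <stdatomic.h>
    #include <stddef.h>

    struct pending_entry {
        _Atomic(struct pending_entry *) next;
        void (*func)(struct pending_entry *);
    };

    #define PENDING_TAIL ((struct pending_entry *)-1UL)

    static _Atomic(struct pending_entry *) pending_head = PENDING_TAIL;

    static void pending_queue(struct pending_entry *entry,
                              void (*func)(struct pending_entry *))
    {
        struct pending_entry *expected = NULL;
        struct pending_entry *old;

        /* NULL -> PENDING_TAIL claims the entry; a concurrent second caller bails */
        if (!atomic_compare_exchange_strong(&entry->next, &expected, PENDING_TAIL))
            return;

        entry->func = func;

        /* lock-free push onto the singly linked list */
        old = atomic_load(&pending_head);
        do {
            atomic_store(&entry->next, old);
        } while (!atomic_compare_exchange_weak(&pending_head, &old, entry));
    }
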
static int __perf_pending_run(void)
{
- struct perf_wakeup_entry *list;
+ struct perf_pending_entry *list;
int nr = 0;
- list = xchg(&__get_cpu_var(perf_wakeup_head), PENDING_TAIL);
+ list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
while (list != PENDING_TAIL) {
- struct perf_counter *counter = container_of(list,
- struct perf_counter, wakeup);
+ void (*func)(struct perf_pending_entry *);
+ struct perf_pending_entry *entry = list;
list = list->next;
- counter->wakeup.next = NULL;
+ func = entry->func;
+ entry->next = NULL;
/*
* Ensure we observe the unqueue before we issue the wakeup,
* so that we won't be waiting forever.
*/
smp_wmb();
- perf_counter_wakeup(counter);
+ func(entry);
nr++;
}
* so that we do not miss the wakeup. -- see perf_pending_handle()
*/
smp_rmb();
- return counter->wakeup.next == NULL;
+ return counter->pending.next == NULL;
}
static void perf_pending_sync(struct perf_counter *counter)
__perf_pending_run();
}
+/*
+ * Callchain support -- arch specific
+ */
+
+__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+{
+ return NULL;
+}
+
/*
* Output
*/
unsigned int offset;
unsigned int head;
int wakeup;
+ int nmi;
+ int overflow;
};
+static inline void __perf_output_wakeup(struct perf_output_handle *handle)
+{
+ if (handle->nmi) {
+ handle->counter->pending_wakeup = 1;
+ perf_pending_queue(&handle->counter->pending,
+ perf_pending_counter);
+ } else
+ perf_counter_wakeup(handle->counter);
+}
+
static int perf_output_begin(struct perf_output_handle *handle,
- struct perf_counter *counter, unsigned int size)
+ struct perf_counter *counter, unsigned int size,
+ int nmi, int overflow)
{
struct perf_mmap_data *data;
unsigned int offset, head;
if (!data)
goto out;
+ handle->counter = counter;
+ handle->nmi = nmi;
+ handle->overflow = overflow;
+
if (!data->nr_pages)
- goto out;
+ goto fail;
do {
offset = head = atomic_read(&data->head);
head += size;
} while (atomic_cmpxchg(&data->head, offset, head) != offset);
- handle->counter = counter;
handle->data = data;
handle->offset = offset;
handle->head = head;
return 0;
+fail:
+ __perf_output_wakeup(handle);
out:
rcu_read_unlock();
#define perf_output_put(handle, x) \
perf_output_copy((handle), &(x), sizeof(x))
-static void perf_output_end(struct perf_output_handle *handle, int nmi)
+static void perf_output_end(struct perf_output_handle *handle)
{
- if (handle->wakeup) {
- if (nmi)
- perf_pending_queue(handle->counter);
- else
- perf_counter_wakeup(handle->counter);
- }
+ int wakeup_events = handle->counter->hw_event.wakeup_events;
+
+ if (handle->overflow && wakeup_events) {
+ int events = atomic_inc_return(&handle->data->events);
+ if (events >= wakeup_events) {
+ atomic_sub(wakeup_events, &handle->data->events);
+ __perf_output_wakeup(handle);
+ }
+ } else if (handle->wakeup)
+ __perf_output_wakeup(handle);
rcu_read_unlock();
}
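
The wakeup_events handling in perf_output_end() batches notifications: every overflow record bumps data->events, and a poll()/SIGIO wakeup is only delivered once wakeup_events of them have accumulated. A small runnable model of just that branch (function and variable names are made up):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int events;

    /* models only the handle->overflow && wakeup_events case */
    static int should_wakeup(int wakeup_events)
    {
        if (atomic_fetch_add(&events, 1) + 1 >= wakeup_events) {
            atomic_fetch_sub(&events, wakeup_events);
            return 1;                       /* deliver the wakeup */
        }
        return 0;                           /* keep accumulating */
    }

    int main(void)
    {
        for (int i = 1; i <= 10; i++)       /* wakes up on records 4 and 8 */
            printf("record %2d -> wakeup=%d\n", i, should_wakeup(4));
        return 0;
    }
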
-static int perf_output_write(struct perf_counter *counter, int nmi,
- void *buf, ssize_t size)
+static void perf_counter_output(struct perf_counter *counter,
+ int nmi, struct pt_regs *regs, u64 addr)
{
- struct perf_output_handle handle;
int ret;
+ u64 record_type = counter->hw_event.record_type;
+ struct perf_output_handle handle;
+ struct perf_event_header header;
+ u64 ip;
+ struct {
+ u32 pid, tid;
+ } tid_entry;
+ struct {
+ u64 event;
+ u64 counter;
+ } group_entry;
+ struct perf_callchain_entry *callchain = NULL;
+ int callchain_size = 0;
+ u64 time;
+
+ header.type = 0;
+ header.size = sizeof(header);
+
+ header.misc = PERF_EVENT_MISC_OVERFLOW;
+ header.misc |= user_mode(regs) ?
+ PERF_EVENT_MISC_USER : PERF_EVENT_MISC_KERNEL;
+
+ if (record_type & PERF_RECORD_IP) {
+ ip = instruction_pointer(regs);
+ header.type |= PERF_RECORD_IP;
+ header.size += sizeof(ip);
+ }
+
+ if (record_type & PERF_RECORD_TID) {
+ /* namespace issues */
+ tid_entry.pid = current->group_leader->pid;
+ tid_entry.tid = current->pid;
+
+ header.type |= PERF_RECORD_TID;
+ header.size += sizeof(tid_entry);
+ }
+
+ if (record_type & PERF_RECORD_TIME) {
+ /*
+ * Maybe do better on x86 and provide cpu_clock_nmi()
+ */
+ time = sched_clock();
+
+ header.type |= PERF_RECORD_TIME;
+ header.size += sizeof(u64);
+ }
+
+ if (record_type & PERF_RECORD_ADDR) {
+ header.type |= PERF_RECORD_ADDR;
+ header.size += sizeof(u64);
+ }
+
+ if (record_type & PERF_RECORD_GROUP) {
+ header.type |= PERF_RECORD_GROUP;
+ header.size += sizeof(u64) +
+ counter->nr_siblings * sizeof(group_entry);
+ }
+
+ if (record_type & PERF_RECORD_CALLCHAIN) {
+ callchain = perf_callchain(regs);
- ret = perf_output_begin(&handle, counter, size);
+ if (callchain) {
+ callchain_size = (1 + callchain->nr) * sizeof(u64);
+
+ header.type |= PERF_RECORD_CALLCHAIN;
+ header.size += callchain_size;
+ }
+ }
+
+ ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
if (ret)
- goto out;
+ return;
- perf_output_copy(&handle, buf, size);
- perf_output_end(&handle, nmi);
+ perf_output_put(&handle, header);
-out:
- return ret;
-}
+ if (record_type & PERF_RECORD_IP)
+ perf_output_put(&handle, ip);
-static void perf_output_simple(struct perf_counter *counter,
- int nmi, struct pt_regs *regs)
-{
- unsigned int size;
- struct {
- struct perf_event_header header;
- u64 ip;
- u32 pid, tid;
- } event;
+ if (record_type & PERF_RECORD_TID)
+ perf_output_put(&handle, tid_entry);
- event.header.type = PERF_EVENT_IP;
- event.ip = instruction_pointer(regs);
+ if (record_type & PERF_RECORD_TIME)
+ perf_output_put(&handle, time);
- size = sizeof(event);
+ if (record_type & PERF_RECORD_ADDR)
+ perf_output_put(&handle, addr);
- if (counter->hw_event.include_tid) {
- /* namespace issues */
- event.pid = current->group_leader->pid;
- event.tid = current->pid;
+ if (record_type & PERF_RECORD_GROUP) {
+ struct perf_counter *leader, *sub;
+ u64 nr = counter->nr_siblings;
- event.header.type |= __PERF_EVENT_TID;
- } else
- size -= sizeof(u64);
+ perf_output_put(&handle, nr);
+
+ leader = counter->group_leader;
+ list_for_each_entry(sub, &leader->sibling_list, list_entry) {
+ if (sub != counter)
+ sub->pmu->read(sub);
+
+ group_entry.event = sub->hw_event.config;
+ group_entry.counter = atomic64_read(&sub->count);
+
+ perf_output_put(&handle, group_entry);
+ }
+ }
- event.header.size = size;
+ if (callchain)
+ perf_output_copy(&handle, callchain, callchain_size);
- perf_output_write(counter, nmi, &event, size);
+ perf_output_end(&handle);
}
-static void perf_output_group(struct perf_counter *counter, int nmi)
-{
- struct perf_output_handle handle;
- struct perf_event_header header;
- struct perf_counter *leader, *sub;
- unsigned int size;
+/*
+ * comm tracking
+ */
+
+struct perf_comm_event {
+ struct task_struct *task;
+ char *comm;
+ int comm_size;
+
struct {
- u64 event;
- u64 counter;
- } entry;
- int ret;
+ struct perf_event_header header;
+
+ u32 pid;
+ u32 tid;
+ } event;
+};
- size = sizeof(header) + counter->nr_siblings * sizeof(entry);
+static void perf_counter_comm_output(struct perf_counter *counter,
+ struct perf_comm_event *comm_event)
+{
+ struct perf_output_handle handle;
+ int size = comm_event->event.header.size;
+ int ret = perf_output_begin(&handle, counter, size, 0, 0);
- ret = perf_output_begin(&handle, counter, size);
if (ret)
return;
- header.type = PERF_EVENT_GROUP;
- header.size = size;
+ perf_output_put(&handle, comm_event->event);
+ perf_output_copy(&handle, comm_event->comm,
+ comm_event->comm_size);
+ perf_output_end(&handle);
+}
- perf_output_put(&handle, header);
+static int perf_counter_comm_match(struct perf_counter *counter,
+ struct perf_comm_event *comm_event)
+{
+ if (counter->hw_event.comm &&
+ comm_event->event.header.type == PERF_EVENT_COMM)
+ return 1;
- leader = counter->group_leader;
- list_for_each_entry(sub, &leader->sibling_list, list_entry) {
- if (sub != counter)
- sub->hw_ops->read(sub);
+ return 0;
+}
+
+static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
+ struct perf_comm_event *comm_event)
+{
+ struct perf_counter *counter;
- entry.event = sub->hw_event.config;
- entry.counter = atomic64_read(&sub->count);
+ if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
+ return;
- perf_output_put(&handle, entry);
+ rcu_read_lock();
+ list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
+ if (perf_counter_comm_match(counter, comm_event))
+ perf_counter_comm_output(counter, comm_event);
}
+ rcu_read_unlock();
+}
+
+static void perf_counter_comm_event(struct perf_comm_event *comm_event)
+{
+ struct perf_cpu_context *cpuctx;
+ unsigned int size;
+ char *comm = comm_event->task->comm;
+
+ size = ALIGN(strlen(comm)+1, sizeof(u64));
- perf_output_end(&handle, nmi);
+ comm_event->comm = comm;
+ comm_event->comm_size = size;
+
+ comm_event->event.header.size = sizeof(comm_event->event) + size;
+
+ cpuctx = &get_cpu_var(perf_cpu_context);
+ perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
+ put_cpu_var(perf_cpu_context);
+
+ perf_counter_comm_ctx(&current->perf_counter_ctx, comm_event);

}
-void perf_counter_output(struct perf_counter *counter,
- int nmi, struct pt_regs *regs)
+void perf_counter_comm(struct task_struct *task)
{
- switch (counter->hw_event.record_type) {
- case PERF_RECORD_SIMPLE:
- return;
+ struct perf_comm_event comm_event;
- case PERF_RECORD_IRQ:
- perf_output_simple(counter, nmi, regs);
- break;
+ if (!atomic_read(&nr_comm_tracking))
+ return;
+
+ comm_event = (struct perf_comm_event){
+ .task = task,
+ .event = {
+ .header = { .type = PERF_EVENT_COMM, },
+ .pid = task->group_leader->pid,
+ .tid = task->pid,
+ },
+ };
- case PERF_RECORD_GROUP:
- perf_output_group(counter, nmi);
- break;
- }
+ perf_counter_comm_event(&comm_event);
}
/*
{
struct perf_output_handle handle;
int size = mmap_event->event.header.size;
- int ret = perf_output_begin(&handle, counter, size);
+ int ret = perf_output_begin(&handle, counter, size, 0, 0);
if (ret)
return;
perf_output_put(&handle, mmap_event->event);
perf_output_copy(&handle, mmap_event->file_name,
mmap_event->file_size);
- perf_output_end(&handle, 0);
+ perf_output_end(&handle);
}
static int perf_counter_mmap_match(struct perf_counter *counter,
name = strncpy(tmp, "//enomem", sizeof(tmp));
goto got_name;
}
- name = dentry_path(file->f_dentry, buf, PATH_MAX);
+ name = d_path(&file->f_path, buf, PATH_MAX);
if (IS_ERR(name)) {
name = strncpy(tmp, "//toolong", sizeof(tmp));
goto got_name;
}
got_name:
- size = ALIGN(strlen(name), sizeof(u64));
+ size = ALIGN(strlen(name)+1, sizeof(u64));
mmap_event->file_name = name;
mmap_event->file_size = size;
void perf_counter_mmap(unsigned long addr, unsigned long len,
unsigned long pgoff, struct file *file)
{
- struct perf_mmap_event mmap_event = {
+ struct perf_mmap_event mmap_event;
+
+ if (!atomic_read(&nr_mmap_tracking))
+ return;
+
+ mmap_event = (struct perf_mmap_event){
.file = file,
.event = {
.header = { .type = PERF_EVENT_MMAP, },
void perf_counter_munmap(unsigned long addr, unsigned long len,
unsigned long pgoff, struct file *file)
{
- struct perf_mmap_event mmap_event = {
+ struct perf_mmap_event mmap_event;
+
+ if (!atomic_read(&nr_munmap_tracking))
+ return;
+
+ mmap_event = (struct perf_mmap_event){
.file = file,
.event = {
.header = { .type = PERF_EVENT_MUNMAP, },
perf_counter_mmap_event(&mmap_event);
}
+/*
+ * Generic counter overflow handling.
+ */
+
+int perf_counter_overflow(struct perf_counter *counter,
+ int nmi, struct pt_regs *regs, u64 addr)
+{
+ int events = atomic_read(&counter->event_limit);
+ int ret = 0;
+
+ counter->pending_kill = POLL_IN;
+ if (events && atomic_dec_and_test(&counter->event_limit)) {
+ ret = 1;
+ counter->pending_kill = POLL_HUP;
+ if (nmi) {
+ counter->pending_disable = 1;
+ perf_pending_queue(&counter->pending,
+ perf_pending_counter);
+ } else
+ perf_counter_disable(counter);
+ }
+
+ perf_counter_output(counter, nmi, regs, addr);
+ return ret;
+}
+
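
perf_counter_overflow() ties the output path to the new event_limit/IOC_REFRESH mechanism: userspace arms a number of events, each overflow decrements the limit, and when it reaches zero the counter soft-disables itself and signals POLL_HUP instead of POLL_IN. A rough userspace model of just that countdown (not the kernel code; names invented for illustration):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int event_limit;          /* armed via PERF_COUNTER_IOC_REFRESH */

    /* returns 1 when the counter should stop (POLL_HUP), 0 to keep going (POLL_IN) */
    static int overflow(void)
    {
        if (atomic_load(&event_limit) &&
            atomic_fetch_sub(&event_limit, 1) == 1)
            return 1;
        return 0;
    }

    int main(void)
    {
        atomic_fetch_add(&event_limit, 3);  /* like ioctl(fd, PERF_COUNTER_IOC_REFRESH, 3) */
        for (int i = 1; i <= 5; i++)        /* stops on the third overflow */
            printf("overflow %d -> stop=%d\n", i, overflow());
        return 0;
    }
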
/*
* Generic software counter infrastructure
*/
static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
{
+ enum hrtimer_restart ret = HRTIMER_RESTART;
struct perf_counter *counter;
struct pt_regs *regs;
counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
- counter->hw_ops->read(counter);
+ counter->pmu->read(counter);
regs = get_irq_regs();
/*
!counter->hw_event.exclude_user)
regs = task_pt_regs(current);
- if (regs)
- perf_counter_output(counter, 0, regs);
+ if (regs) {
+ if (perf_counter_overflow(counter, 0, regs, 0))
+ ret = HRTIMER_NORESTART;
+ }
hrtimer_forward_now(hrtimer, ns_to_ktime(counter->hw.irq_period));
- return HRTIMER_RESTART;
+ return ret;
}
static void perf_swcounter_overflow(struct perf_counter *counter,
- int nmi, struct pt_regs *regs)
+ int nmi, struct pt_regs *regs, u64 addr)
{
perf_swcounter_update(counter);
perf_swcounter_set_period(counter);
- perf_counter_output(counter, nmi, regs);
+ if (perf_counter_overflow(counter, nmi, regs, addr))
+ /* soft-disable the counter */
+ ;
+
}
static int perf_swcounter_match(struct perf_counter *counter,
}
static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
- int nmi, struct pt_regs *regs)
+ int nmi, struct pt_regs *regs, u64 addr)
{
int neg = atomic64_add_negative(nr, &counter->hw.count);
if (counter->hw.irq_period && !neg)
- perf_swcounter_overflow(counter, nmi, regs);
+ perf_swcounter_overflow(counter, nmi, regs, addr);
}
static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
enum perf_event_types type, u32 event,
- u64 nr, int nmi, struct pt_regs *regs)
+ u64 nr, int nmi, struct pt_regs *regs,
+ u64 addr)
{
struct perf_counter *counter;
rcu_read_lock();
list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
if (perf_swcounter_match(counter, type, event, regs))
- perf_swcounter_add(counter, nr, nmi, regs);
+ perf_swcounter_add(counter, nr, nmi, regs, addr);
}
rcu_read_unlock();
}
}
static void __perf_swcounter_event(enum perf_event_types type, u32 event,
- u64 nr, int nmi, struct pt_regs *regs)
+ u64 nr, int nmi, struct pt_regs *regs,
+ u64 addr)
{
struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
int *recursion = perf_swcounter_recursion_context(cpuctx);
(*recursion)++;
barrier();
- perf_swcounter_ctx_event(&cpuctx->ctx, type, event, nr, nmi, regs);
+ perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
+ nr, nmi, regs, addr);
if (cpuctx->task_ctx) {
perf_swcounter_ctx_event(cpuctx->task_ctx, type, event,
- nr, nmi, regs);
+ nr, nmi, regs, addr);
}
barrier();
put_cpu_var(perf_cpu_context);
}
-void perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs)
+void
+perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
{
- __perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs);
+ __perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs, addr);
}
static void perf_swcounter_read(struct perf_counter *counter)
perf_swcounter_update(counter);
}
-static const struct hw_perf_counter_ops perf_ops_generic = {
+static const struct pmu perf_ops_generic = {
.enable = perf_swcounter_enable,
.disable = perf_swcounter_disable,
.read = perf_swcounter_read,
cpu_clock_perf_counter_update(counter);
}
-static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
+static const struct pmu perf_ops_cpu_clock = {
.enable = cpu_clock_perf_counter_enable,
.disable = cpu_clock_perf_counter_disable,
.read = cpu_clock_perf_counter_read,
* Software counter: task time clock
*/
-/*
- * Called from within the scheduler:
- */
-static u64 task_clock_perf_counter_val(struct perf_counter *counter, int update)
-{
- struct task_struct *curr = counter->task;
- u64 delta;
-
- delta = __task_delta_exec(curr, update);
-
- return curr->se.sum_exec_runtime + delta;
-}
-
static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
{
u64 prev;
s64 delta;
- prev = atomic64_read(&counter->hw.prev_count);
-
- atomic64_set(&counter->hw.prev_count, now);
-
+ prev = atomic64_xchg(&counter->hw.prev_count, now);
delta = now - prev;
-
atomic64_add(delta, &counter->count);
}
static int task_clock_perf_counter_enable(struct perf_counter *counter)
{
struct hw_perf_counter *hwc = &counter->hw;
+ u64 now;
- atomic64_set(&hwc->prev_count, task_clock_perf_counter_val(counter, 0));
+ now = counter->ctx->time;
+
+ atomic64_set(&hwc->prev_count, now);
hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hwc->hrtimer.function = perf_swcounter_hrtimer;
if (hwc->irq_period) {
static void task_clock_perf_counter_disable(struct perf_counter *counter)
{
hrtimer_cancel(&counter->hw.hrtimer);
- task_clock_perf_counter_update(counter,
- task_clock_perf_counter_val(counter, 0));
+ task_clock_perf_counter_update(counter, counter->ctx->time);
+
}
static void task_clock_perf_counter_read(struct perf_counter *counter)
{
- task_clock_perf_counter_update(counter,
- task_clock_perf_counter_val(counter, 1));
+ u64 time;
+
+ if (!in_nmi()) {
+ update_context_time(counter->ctx);
+ time = counter->ctx->time;
+ } else {
+ u64 now = perf_clock();
+ u64 delta = now - counter->ctx->timestamp;
+ time = counter->ctx->time + delta;
+ }
+
+ task_clock_perf_counter_update(counter, time);
}
-static const struct hw_perf_counter_ops perf_ops_task_clock = {
+static const struct pmu perf_ops_task_clock = {
.enable = task_clock_perf_counter_enable,
.disable = task_clock_perf_counter_disable,
.read = task_clock_perf_counter_read,
cpu_migrations_perf_counter_update(counter);
}
-static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
+static const struct pmu perf_ops_cpu_migrations = {
.enable = cpu_migrations_perf_counter_enable,
.disable = cpu_migrations_perf_counter_disable,
.read = cpu_migrations_perf_counter_read,
if (!regs)
regs = task_pt_regs(current);
- __perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs);
+ __perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs, 0);
}
+EXPORT_SYMBOL_GPL(perf_tpcounter_event);
extern int ftrace_profile_enable(int);
extern void ftrace_profile_disable(int);
ftrace_profile_disable(perf_event_id(&counter->hw_event));
}
-static const struct hw_perf_counter_ops *
-tp_perf_counter_init(struct perf_counter *counter)
+static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
{
int event_id = perf_event_id(&counter->hw_event);
int ret;
return &perf_ops_generic;
}
#else
-static const struct hw_perf_counter_ops *
-tp_perf_counter_init(struct perf_counter *counter)
+static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
{
return NULL;
}
#endif
-static const struct hw_perf_counter_ops *
-sw_perf_counter_init(struct perf_counter *counter)
+static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
{
struct perf_counter_hw_event *hw_event = &counter->hw_event;
- const struct hw_perf_counter_ops *hw_ops = NULL;
+ const struct pmu *pmu = NULL;
struct hw_perf_counter *hwc = &counter->hw;
/*
*/
switch (perf_event_id(&counter->hw_event)) {
case PERF_COUNT_CPU_CLOCK:
- hw_ops = &perf_ops_cpu_clock;
+ pmu = &perf_ops_cpu_clock;
if (hw_event->irq_period && hw_event->irq_period < 10000)
hw_event->irq_period = 10000;
* use the cpu_clock counter instead.
*/
if (counter->ctx->task)
- hw_ops = &perf_ops_task_clock;
+ pmu = &perf_ops_task_clock;
else
- hw_ops = &perf_ops_cpu_clock;
+ pmu = &perf_ops_cpu_clock;
if (hw_event->irq_period && hw_event->irq_period < 10000)
hw_event->irq_period = 10000;
case PERF_COUNT_PAGE_FAULTS_MIN:
case PERF_COUNT_PAGE_FAULTS_MAJ:
case PERF_COUNT_CONTEXT_SWITCHES:
- hw_ops = &perf_ops_generic;
+ pmu = &perf_ops_generic;
break;
case PERF_COUNT_CPU_MIGRATIONS:
if (!counter->hw_event.exclude_kernel)
- hw_ops = &perf_ops_cpu_migrations;
+ pmu = &perf_ops_cpu_migrations;
break;
}
- if (hw_ops)
+ if (pmu)
hwc->irq_period = hw_event->irq_period;
- return hw_ops;
+ return pmu;
}
/*
struct perf_counter *group_leader,
gfp_t gfpflags)
{
- const struct hw_perf_counter_ops *hw_ops;
+ const struct pmu *pmu;
struct perf_counter *counter;
long err;
counter->cpu = cpu;
counter->hw_event = *hw_event;
counter->group_leader = group_leader;
- counter->hw_ops = NULL;
+ counter->pmu = NULL;
counter->ctx = ctx;
counter->state = PERF_COUNTER_STATE_INACTIVE;
if (hw_event->disabled)
counter->state = PERF_COUNTER_STATE_OFF;
- hw_ops = NULL;
+ pmu = NULL;
if (perf_event_raw(hw_event)) {
- hw_ops = hw_perf_counter_init(counter);
+ pmu = hw_perf_counter_init(counter);
goto done;
}
switch (perf_event_type(hw_event)) {
case PERF_TYPE_HARDWARE:
- hw_ops = hw_perf_counter_init(counter);
+ pmu = hw_perf_counter_init(counter);
break;
case PERF_TYPE_SOFTWARE:
- hw_ops = sw_perf_counter_init(counter);
+ pmu = sw_perf_counter_init(counter);
break;
case PERF_TYPE_TRACEPOINT:
- hw_ops = tp_perf_counter_init(counter);
+ pmu = tp_perf_counter_init(counter);
break;
}
done:
err = 0;
- if (!hw_ops)
+ if (!pmu)
err = -EINVAL;
- else if (IS_ERR(hw_ops))
- err = PTR_ERR(hw_ops);
+ else if (IS_ERR(pmu))
+ err = PTR_ERR(pmu);
if (err) {
kfree(counter);
return ERR_PTR(err);
}
- counter->hw_ops = hw_ops;
+ counter->pmu = pmu;
+
+ if (counter->hw_event.mmap)
+ atomic_inc(&nr_mmap_tracking);
+ if (counter->hw_event.munmap)
+ atomic_inc(&nr_munmap_tracking);
+ if (counter->hw_event.comm)
+ atomic_inc(&nr_comm_tracking);
return counter;
}
* Be careful about zapping the list - IRQ/NMI context
* could still be processing it:
*/
- curr_rq_lock_irq_save(&flags);
+ local_irq_save(flags);
perf_flags = hw_perf_save_disable();
cpuctx = &__get_cpu_var(perf_cpu_context);
child_ctx->nr_counters--;
hw_perf_restore(perf_flags);
- curr_rq_unlock_irq_restore(&flags);
+ local_irq_restore(flags);
}
parent_counter = child_counter->parent;