/*
 * Virtual Processor Dispatch Trace Log
 *
 * (C) Copyright IBM Corporation 2009
 *
 * Author: Jeremy Kerr <jk@ozlabs.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/smp.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <asm/lppaca.h>
#include <asm/debugfs.h>
#include <asm/plpar_wrappers.h>
#include <asm/machdep.h>
struct dtl {
        struct dtl_entry        *buf;
        struct dentry           *file;
        int                     cpu;
        int                     buf_entries;
        u64                     last_idx;
        spinlock_t              lock;
};
static DEFINE_PER_CPU(struct dtl, cpu_dtl);

static u8 dtl_event_mask = DTL_LOG_ALL;
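/*
 * For reference, the event mask bits come from asm/lppaca.h; the values
 * below are what that header defined at the time of writing, so re-check
 * them against the header rather than trusting this comment:
 *
 *      DTL_LOG_CEDE    0x1     vcpu ceded
 *      DTL_LOG_PREEMPT 0x2     vcpu preempted
 *      DTL_LOG_FAULT   0x4     vcpu dispatched after a fault
 *      DTL_LOG_ALL     0x7     all of the above
 */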
/*
 * Size of per-cpu log buffers. Firmware requires that the buffer does
 * not cross a 4k boundary.
 */
static u32 dtl_buf_entries = N_DISPATCH_LOG;
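/*
 * For scale: asm/lppaca.h defines N_DISPATCH_LOG as
 * DISPATCH_LOG_BYTES / sizeof(struct dtl_entry). Assuming the usual
 * 4096-byte log and 48-byte entries, that is 4096 / 48 = 85 entries per
 * cpu; verify these numbers against the header before relying on them.
 */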
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
struct dtl_ring {
        u64             write_index;
        struct dtl_entry *write_ptr;
        struct dtl_entry *buf;
        struct dtl_entry *buf_end;
        u8              saved_dtl_mask;
};

static DEFINE_PER_CPU(struct dtl_ring, dtl_rings);

static atomic_t dtl_count;
/*
 * The cpu accounting code controls the DTL ring buffer, and we get
 * given entries as they are processed.
 */
static void consume_dtle(struct dtl_entry *dtle, u64 index)
{
        struct dtl_ring *dtlr = this_cpu_ptr(&dtl_rings);
        struct dtl_entry *wp = dtlr->write_ptr;
        struct lppaca *vpa = local_paca->lppaca_ptr;

        if (!wp)
                return;

        *wp = *dtle;
        barrier();

        /* check for hypervisor ring buffer overflow, ignore this entry if so */
        if (index + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx))
                return;

        ++wp;
        if (wp == dtlr->buf_end)
                wp = dtlr->buf;
        dtlr->write_ptr = wp;

        /* incrementing write_index makes the new entry visible */
        smp_wmb();

        ++dtlr->write_index;
}
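/*
 * A lock-free reader of this ring would pair with the smp_wmb() above
 * roughly as follows (illustrative sketch only; the reader in this file
 * instead samples write_index under dtl->lock via dtl_current_index()):
 *
 *      u64 idx = READ_ONCE(dtlr->write_index);
 *      smp_rmb();
 *      ...entries before idx are now safe to copy out of dtlr->buf...
 */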
static int dtl_start(struct dtl *dtl)
{
        struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

        dtlr->buf = dtl->buf;
        dtlr->buf_end = dtl->buf + dtl->buf_entries;
        dtlr->write_index = 0;

        /* setting write_ptr enables logging into our buffer */
        smp_wmb();
        dtlr->write_ptr = dtl->buf;

        /* enable event logging */
        dtlr->saved_dtl_mask = lppaca_of(dtl->cpu).dtl_enable_mask;
        lppaca_of(dtl->cpu).dtl_enable_mask |= dtl_event_mask;

        dtl_consumer = consume_dtle;
        atomic_inc(&dtl_count);
        return 0;
}
static void dtl_stop(struct dtl *dtl)
{
        struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

        dtlr->write_ptr = NULL;
        smp_wmb();

        dtlr->buf = NULL;

        /* restore dtl_enable_mask */
        lppaca_of(dtl->cpu).dtl_enable_mask = dtlr->saved_dtl_mask;

        if (atomic_dec_and_test(&dtl_count))
                dtl_consumer = NULL;
}

static u64 dtl_current_index(struct dtl *dtl)
{
        return per_cpu(dtl_rings, dtl->cpu).write_index;
}
#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

static int dtl_start(struct dtl *dtl)
{
        unsigned long addr;
        int ret, hwcpu;

        /* Register our dtl buffer with the hypervisor. The HV expects the
         * buffer size to be passed in the second word of the buffer */
        ((u32 *)dtl->buf)[1] = cpu_to_be32(DISPATCH_LOG_BYTES);

        hwcpu = get_hard_smp_processor_id(dtl->cpu);
        addr = __pa(dtl->buf);
        ret = register_dtl(hwcpu, addr);
        if (ret) {
                printk(KERN_WARNING "%s: DTL registration for cpu %d (hw %d) "
                       "failed with %d\n", __func__, dtl->cpu, hwcpu, ret);
                return -EIO;
        }

        /* set our initial buffer indices */
        lppaca_of(dtl->cpu).dtl_idx = 0;

        /* ensure that our updates to the lppaca fields have occurred before
         * we actually enable the logging */
        smp_wmb();

        /* enable event logging */
        lppaca_of(dtl->cpu).dtl_enable_mask = dtl_event_mask;

        return 0;
}
static void dtl_stop(struct dtl *dtl)
{
        int hwcpu = get_hard_smp_processor_id(dtl->cpu);

        lppaca_of(dtl->cpu).dtl_enable_mask = 0x0;

        unregister_dtl(hwcpu);
}

static u64 dtl_current_index(struct dtl *dtl)
{
        return be64_to_cpu(lppaca_of(dtl->cpu).dtl_idx);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
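/*
 * For context: register_dtl()/unregister_dtl() come from
 * asm/plpar_wrappers.h and are thin wrappers around the H_REGISTER_VPA
 * hypercall with the DTL register/deregister subfunction codes, roughly
 * (a sketch of the wrapper, not its authoritative definition):
 *
 *      static inline long register_dtl(unsigned long cpu, unsigned long vpa)
 *      {
 *              return vpa_call(H_VPA_REG_DTL, cpu, vpa);
 *      }
 */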
static int dtl_enable(struct dtl *dtl)
{
        long int n_entries;
        long int rc;
        struct dtl_entry *buf = NULL;

        if (!dtl_cache)
                return -ENOMEM;

        /* only allow one reader */
        if (dtl->buf)
                return -EBUSY;

        n_entries = dtl_buf_entries;
        buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu));
        if (!buf) {
                printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
                                __func__, dtl->cpu);
                return -ENOMEM;
        }

        spin_lock(&dtl->lock);
        rc = -EBUSY;
        if (!dtl->buf) {
                /* store the original allocation size for use during read */
                dtl->buf_entries = n_entries;
                dtl->buf = buf;
                dtl->last_idx = 0;
                rc = dtl_start(dtl);
                if (rc)
                        dtl->buf = NULL;
        }
        spin_unlock(&dtl->lock);

        if (rc)
                kmem_cache_free(dtl_cache, buf);

        return rc;
}
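/*
 * Note the shape of dtl_enable(): the buffer is allocated before taking
 * dtl->lock (GFP_KERNEL allocations may sleep), then dtl->buf is
 * re-checked under the lock so two racing opens cannot both install a
 * buffer; the loser drops the lock and frees its unused allocation.
 */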
static void dtl_disable(struct dtl *dtl)
{
        spin_lock(&dtl->lock);
        dtl_stop(dtl);
        kmem_cache_free(dtl_cache, dtl->buf);
        dtl->buf = NULL;
        dtl->buf_entries = 0;
        spin_unlock(&dtl->lock);
}
/* file interface */

static int dtl_file_open(struct inode *inode, struct file *filp)
{
        struct dtl *dtl = inode->i_private;
        int rc;

        rc = dtl_enable(dtl);
        if (rc)
                return rc;

        filp->private_data = dtl;
        return 0;
}

static int dtl_file_release(struct inode *inode, struct file *filp)
{
        struct dtl *dtl = inode->i_private;
        dtl_disable(dtl);
        return 0;
}
static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len,
                loff_t *pos)
{
        long int rc, n_read, n_req, read_size;
        struct dtl *dtl;
        u64 cur_idx, last_idx, i;

        if ((len % sizeof(struct dtl_entry)) != 0)
                return -EINVAL;

        dtl = filp->private_data;

        /* requested number of entries to read */
        n_req = len / sizeof(struct dtl_entry);

        /* actual number of entries read */
        n_read = 0;

        spin_lock(&dtl->lock);

        cur_idx = dtl_current_index(dtl);
        last_idx = dtl->last_idx;

        if (last_idx + dtl->buf_entries <= cur_idx)
                last_idx = cur_idx - dtl->buf_entries + 1;

        if (last_idx + n_req > cur_idx)
                n_req = cur_idx - last_idx;

        if (n_req > 0)
                dtl->last_idx = last_idx + n_req;

        spin_unlock(&dtl->lock);

        if (n_req <= 0)
                /* we're up to date */
                return 0;

        i = last_idx % dtl->buf_entries;

        /* read the tail of the buffer if we've wrapped */
        if (i + n_req > dtl->buf_entries) {
                read_size = dtl->buf_entries - i;

                rc = copy_to_user(buf, &dtl->buf[i],
                                read_size * sizeof(struct dtl_entry));
                if (rc)
                        return -EFAULT;

                i = 0;
                n_req -= read_size;
                n_read += read_size;
                buf += read_size * sizeof(struct dtl_entry);
        }

        /* .. and now the head */
        rc = copy_to_user(buf, &dtl->buf[i], n_req * sizeof(struct dtl_entry));
        if (rc)
                return -EFAULT;

        n_read += n_req;

        return n_read * sizeof(struct dtl_entry);
}
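/*
 * Illustrative userspace consumer (not part of this file; the path
 * assumes debugfs is mounted at /sys/kernel/debug and the entry layout
 * comes from asm/lppaca.h). Reads must be a multiple of
 * sizeof(struct dtl_entry), open() itself starts the logging, and
 * read() returns 0 once the log is drained:
 *
 *      int fd = open("/sys/kernel/debug/powerpc/dtl/cpu-0", O_RDONLY);
 *      struct dtl_entry ent;
 *
 *      while (read(fd, &ent, sizeof(ent)) == sizeof(ent))
 *              printf("dispatch reason: %u\n", ent.dispatch_reason);
 */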
static const struct file_operations dtl_fops = {
        .open           = dtl_file_open,
        .release        = dtl_file_release,
        .read           = dtl_file_read,
        .llseek         = no_llseek,
};
static struct dentry *dtl_dir;
static int dtl_setup_file(struct dtl *dtl)
{
        char name[10];

        sprintf(name, "cpu-%d", dtl->cpu);

        dtl->file = debugfs_create_file(name, 0400, dtl_dir, dtl, &dtl_fops);
        if (!dtl->file)
                return -ENOMEM;

        return 0;
}
static int dtl_init(void)
{
        struct dentry *event_mask_file, *buf_entries_file;
        int rc, i;

        if (!firmware_has_feature(FW_FEATURE_SPLPAR))
                return -ENODEV;

        /* set up common debugfs structure */

        rc = -ENOMEM;
        dtl_dir = debugfs_create_dir("dtl", powerpc_debugfs_root);
        if (!dtl_dir) {
                printk(KERN_WARNING "%s: can't create dtl root dir\n",
                                __func__);
                goto err;
        }

        event_mask_file = debugfs_create_x8("dtl_event_mask", 0600,
                                dtl_dir, &dtl_event_mask);
        buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0400,
                                dtl_dir, &dtl_buf_entries);

        if (!event_mask_file || !buf_entries_file) {
                printk(KERN_WARNING "%s: can't create dtl files\n", __func__);
                goto err_remove_dir;
        }

        /* set up the per-cpu log structures */
        for_each_possible_cpu(i) {
                struct dtl *dtl = &per_cpu(cpu_dtl, i);
                spin_lock_init(&dtl->lock);
                dtl->cpu = i;

                rc = dtl_setup_file(dtl);
                if (rc)
                        goto err_remove_dir;
        }

        return 0;

err_remove_dir:
        debugfs_remove_recursive(dtl_dir);
err:
        return rc;
}
machine_arch_initcall(pseries, dtl_init);
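/*
 * Resulting debugfs layout (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *      powerpc/dtl/dtl_event_mask      0600, hex mask of DTL_LOG_* bits
 *      powerpc/dtl/dtl_buf_entries     0400, per-cpu buffer size in entries
 *      powerpc/dtl/cpu-N               0400, one binary log per possible cpu
 *
 * e.g. `dd if=/sys/kernel/debug/powerpc/dtl/cpu-0 of=dtl.bin bs=48` reads
 * whole entries at a time (the 48-byte entry size is an assumption to
 * verify against struct dtl_entry).
 */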