};
struct ring_buffer_cpu_meta {
- unsigned long kaslr_addr;
unsigned long first_buffer;
unsigned long head_buffer;
unsigned long commit_buffer;
struct ring_buffer_meta *meta;
- unsigned long kaslr_addr;
-
unsigned int subbuf_size;
unsigned int subbuf_order;
unsigned int max_data_size;
}
}
-static void rb_meta_init_text_addr(struct ring_buffer_cpu_meta *meta)
-{
-#ifdef CONFIG_RANDOMIZE_BASE
- meta->kaslr_addr = kaslr_offset();
-#else
- meta->kaslr_addr = 0;
-#endif
-}
-
static void rb_range_meta_init(struct trace_buffer *buffer, int nr_pages, int scratch_size)
{
struct ring_buffer_cpu_meta *meta;
meta->first_buffer += delta;
meta->head_buffer += delta;
meta->commit_buffer += delta;
- buffer->kaslr_addr = meta->kaslr_addr;
continue;
}
subbuf = rb_subbufs_from_meta(meta);
meta->first_buffer = (unsigned long)subbuf;
- rb_meta_init_text_addr(meta);
/*
* The buffers[] array holds the order of the sub-buffers
scratch_size, key);
}
-/**
- * ring_buffer_last_boot_delta - return the delta offset from last boot
- * @buffer: The buffer to return the delta from
- * @text: Return text delta
- * @data: Return data delta
- *
- * Returns: The true if the delta is non zero
- */
-bool ring_buffer_last_boot_delta(struct trace_buffer *buffer, unsigned long *kaslr_addr)
-{
- if (!buffer)
- return false;
-
- if (!buffer->kaslr_addr)
- return false;
-
- *kaslr_addr = buffer->kaslr_addr;
-
- return true;
-}
-
void *ring_buffer_meta_scratch(struct trace_buffer *buffer, unsigned int *size)
{
+ struct ring_buffer_meta *meta;
+ void *ptr;
+
if (!buffer || !buffer->meta)
return NULL;
- *size = PAGE_SIZE - sizeof(*buffer->meta);
- return (void *)buffer->meta + sizeof(*buffer->meta);
+ meta = buffer->meta;
+ ptr = (void *)ALIGN((unsigned long)meta + sizeof(*meta), sizeof(long));
+
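+ /* Scratch space spans from the aligned end of the meta header up to buffers_offset */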
+ if (size)
+ *size = (void *)meta + meta->buffers_offset - ptr;
+
+ return ptr;
}
/**
void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
- struct ring_buffer_cpu_meta *meta;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return;
atomic_dec(&cpu_buffer->record_disabled);
atomic_dec(&cpu_buffer->resize_disabled);
- /* Make sure persistent meta now uses this buffer's addresses */
- meta = rb_range_meta(buffer, 0, cpu_buffer->cpu);
- if (meta)
- rb_meta_init_text_addr(meta);
-
mutex_unlock(&buffer->mutex);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
{
struct ring_buffer_per_cpu *cpu_buffer;
- struct ring_buffer_cpu_meta *meta;
int cpu;
/* prevent another thread from changing buffer sizes */
reset_disabled_cpu_buffer(cpu_buffer);
- /* Make sure persistent meta now uses this buffer's addresses */
- meta = rb_range_meta(buffer, 0, cpu_buffer->cpu);
- if (meta)
- rb_meta_init_text_addr(meta);
-
atomic_dec(&cpu_buffer->record_disabled);
atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled);
}
return __tracing_resize_ring_buffer(tr, size, cpu_id);
}
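+/*
+ * Layout of the persistent ring buffer's scratch area as used by the
+ * tracer. Currently it only stores the KASLR offset of the kernel that
+ * last updated the buffer.
+ */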
+struct trace_scratch {
+ unsigned long kaslr_addr;
+};
+
static void update_last_data(struct trace_array *tr)
{
+ struct trace_scratch *tscratch;
+
if (!(tr->flags & TRACE_ARRAY_FL_LAST_BOOT))
return;
/* Using current data now */
tr->text_delta = 0;
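+ /* If this buffer has no persistent scratch area, there is nothing to update */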
+ if (!tr->scratch)
+ return;
+
+ tscratch = tr->scratch;
+
+ /* Set the persistent ring buffer meta data to this address */
+#ifdef CONFIG_RANDOMIZE_BASE
+ tscratch->kaslr_addr = kaslr_offset();
+#else
+ tscratch->kaslr_addr = 0;
+#endif
tr->flags &= ~TRACE_ARRAY_FL_LAST_BOOT;
}
tracing_last_boot_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
struct trace_array *tr = filp->private_data;
+ struct trace_scratch *tscratch = tr->scratch;
struct seq_buf seq;
char buf[64];
* Otherwise it shows the KASLR address from the previous boot which
* should not be the same as the current boot.
*/
- if (tr->flags & TRACE_ARRAY_FL_LAST_BOOT)
- seq_buf_printf(&seq, "%lx\t[kernel]\n", tr->kaslr_addr);
+ if (tscratch && (tr->flags & TRACE_ARRAY_FL_LAST_BOOT))
+ seq_buf_printf(&seq, "%lx\t[kernel]\n", tscratch->kaslr_addr);
else
seq_buf_puts(&seq, "# Current\n");
allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
{
enum ring_buffer_flags rb_flags;
+ struct trace_scratch *tscratch;
+ unsigned int scratch_size;
rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
if (tr->range_addr_start && tr->range_addr_size) {
buf->buffer = ring_buffer_alloc_range(size, rb_flags, 0,
tr->range_addr_start,
- tr->range_addr_size, 0);
+ tr->range_addr_size,
+ sizeof(*tscratch));
+
+ tscratch = ring_buffer_meta_scratch(buf->buffer, &scratch_size);
+ if (tscratch) {
+ tr->scratch = tscratch;
+ tr->scratch_size = scratch_size;
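+ /* Use the KASLR offset saved by the previous boot to compute this boot's text delta */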
#ifdef CONFIG_RANDOMIZE_BASE
- if (ring_buffer_last_boot_delta(buf->buffer, &tr->kaslr_addr))
- tr->text_delta = kaslr_offset() - tr->kaslr_addr;
+ if (tscratch->kaslr_addr)
+ tr->text_delta = kaslr_offset() - tscratch->kaslr_addr;
#endif
+ }
/*
* This is basically the same as a mapped buffer,
* with the same restrictions.