ring-buffer: Use subbuf_order for buffer page masking
The comparisons to PAGE_SIZE were all converted to use the
buffer->subbuf_order, but the use of PAGE_MASK was missed.
Convert all the PAGE_MASK usages over to:
(PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1
Link: https://lore.kernel.org/linux-trace-kernel/20231219173800.66eefb7a@gandalf.local.home
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Tzvetomir Stoyanov <tz.stoyanov@gmail.com>
Cc: Vincent Donnefort <vdonnefort@google.com>
Cc: Kent Overstreet <kent.overstreet@gmail.com>
Fixes: 139f840021 ("ring-buffer: Page size per ring buffer")
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
commit 3cb3091138
parent 2f84b39f48
1 changed file with 11 additions and 8 deletions
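For readers unfamiliar with the masking idiom: the low bits of an event's address are its offset within the sub-buffer, and clearing them yields the sub-buffer's base address; PAGE_MASK only does this correctly when a sub-buffer is exactly one page (subbuf_order == 0). A minimal stand-alone sketch of the arithmetic (not kernel code; PAGE_SIZE and the addresses below are hypothetical):

/* demo.c - hypothetical stand-alone sketch, not kernel code */
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long subbuf_order = 2;                        /* 4 pages = 16 KiB per sub-buffer */
	unsigned long mask = (PAGE_SIZE << subbuf_order) - 1;  /* 0x3fff */
	unsigned long addr = 0x7f0000012345UL;                 /* made-up event address */

	printf("offset within sub-buffer: 0x%lx\n", addr & mask);  /* 0x2345 */
	printf("sub-buffer base address:  0x%lx\n", addr & ~mask); /* 0x7f0000010000 */
	return 0;
}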
kernel/trace/ring_buffer.c
@@ -2269,11 +2269,13 @@ rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
 }
 
 static __always_inline unsigned
-rb_event_index(struct ring_buffer_event *event)
+rb_event_index(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event)
 {
 	unsigned long addr = (unsigned long)event;
 
-	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
+	addr &= (PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1;
+
+	return addr - BUF_PAGE_HDR_SIZE;
 }
 
 static void rb_inc_iter(struct ring_buffer_iter *iter)
@@ -2646,7 +2648,8 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 
 /* Slow path */
 static struct ring_buffer_event *
-rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs)
+rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
+		  struct ring_buffer_event *event, u64 delta, bool abs)
 {
 	if (abs)
 		event->type_len = RINGBUF_TYPE_TIME_STAMP;
@@ -2654,7 +2657,7 @@ rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs)
 		event->type_len = RINGBUF_TYPE_TIME_EXTEND;
 
 	/* Not the first event on the page, or not delta? */
-	if (abs || rb_event_index(event)) {
+	if (abs || rb_event_index(cpu_buffer, event)) {
 		event->time_delta = delta & TS_MASK;
 		event->array[0] = delta >> TS_SHIFT;
 	} else {
@@ -2728,7 +2731,7 @@ static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
 		if (!abs)
 			info->delta = 0;
 	}
-	*event = rb_add_time_stamp(*event, info->delta, abs);
+	*event = rb_add_time_stamp(cpu_buffer, *event, info->delta, abs);
 	*length -= RB_LEN_TIME_EXTEND;
 	*delta = 0;
 }
@@ -2812,10 +2815,10 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
 	struct buffer_page *bpage;
 	unsigned long addr;
 
-	new_index = rb_event_index(event);
+	new_index = rb_event_index(cpu_buffer, event);
 	old_index = new_index + rb_event_ts_length(event);
 	addr = (unsigned long)event;
-	addr &= PAGE_MASK;
+	addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1);
 
 	bpage = READ_ONCE(cpu_buffer->tail_page);
 
@@ -3726,7 +3729,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
 	struct buffer_page *bpage = cpu_buffer->commit_page;
 	struct buffer_page *start;
 
-	addr &= PAGE_MASK;
+	addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1);
 
 	/* Do the likely case first */
 	if (likely(bpage->page == (void *)addr)) {
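To see why the old `addr &= PAGE_MASK` breaks in rb_try_to_discard() and rb_decrement_entry() once a sub-buffer spans several pages, here is a worked example with hypothetical numbers (PAGE_SIZE = 4096, subbuf_order = 1, a sub-buffer based at 0x10000):

#include <assert.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long subbuf_order = 1;   /* 8 KiB sub-buffer */
	unsigned long addr = 0x11234UL;   /* event in the sub-buffer's second page */

	/* Old mask: lands on the page boundary at 0x11000, the middle of the
	 * sub-buffer, so a comparison against the sub-buffer's start (as in
	 * rb_decrement_entry()'s bpage->page check) would never match. */
	assert((addr & PAGE_MASK) == 0x11000UL);

	/* Order-aware mask: recovers the sub-buffer base at 0x10000. */
	assert((addr & ~((PAGE_SIZE << subbuf_order) - 1)) == 0x10000UL);
	return 0;
}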