locking, tracing: Annotate tracing locks as raw
The tracing locks can be taken in atomic context and therefore
cannot become sleeping locks on -rt - annotate them as raw.

In mainline this change merely documents the low-level nature of
these locks - otherwise there is no functional difference: lockdep
and sparse checking will work as usual.

Signed-off-by: Thomas Gleixner <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Thomas Gleixner (KAGA-KOKO) authored and Ingo Molnar committed Sep 13, 2011
1 parent 740969f commit 5389f6f
Showing 3 changed files with 34 additions and 34 deletions.
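
The conversion below is mechanical: every affected spinlock_t becomes a raw_spinlock_t, and each lock/unlock call on it gains a raw_ prefix. On PREEMPT_RT a plain spinlock_t is substituted by a sleeping lock, which must not be acquired in atomic context, while a raw_spinlock_t keeps truly spinning on both mainline and -rt. A minimal sketch of the pattern, with made-up names (my_lock, my_count and my_atomic_path() are illustrative, not part of this commit):

#include <linux/spinlock.h>

/* was: static DEFINE_SPINLOCK(my_lock); */
static DEFINE_RAW_SPINLOCK(my_lock);
static int my_count;

static void my_atomic_path(void)
{
        unsigned long flags;

        /* the raw_* calls mirror the plain spin_lock_* API one to one */
        raw_spin_lock_irqsave(&my_lock, flags);   /* was: spin_lock_irqsave() */
        my_count++;
        raw_spin_unlock_irqrestore(&my_lock, flags);
}

Lockdep tracks raw spinlocks just like ordinary ones, so the annotation loses no debugging coverage.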
52 changes: 26 additions & 26 deletions kernel/trace/ring_buffer.c
@@ -478,7 +478,7 @@ struct ring_buffer_per_cpu {
int cpu;
atomic_t record_disabled;
struct ring_buffer *buffer;
-spinlock_t reader_lock; /* serialize readers */
+raw_spinlock_t reader_lock; /* serialize readers */
arch_spinlock_t lock;
struct lock_class_key lock_key;
struct list_head *pages;
@@ -1062,7 +1062,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)

cpu_buffer->cpu = cpu;
cpu_buffer->buffer = buffer;
-spin_lock_init(&cpu_buffer->reader_lock);
+raw_spin_lock_init(&cpu_buffer->reader_lock);
lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

@@ -1259,7 +1259,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
struct list_head *p;
unsigned i;

-spin_lock_irq(&cpu_buffer->reader_lock);
+raw_spin_lock_irq(&cpu_buffer->reader_lock);
rb_head_page_deactivate(cpu_buffer);

for (i = 0; i < nr_pages; i++) {
@@ -1277,7 +1277,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
rb_check_pages(cpu_buffer);

out:
-spin_unlock_irq(&cpu_buffer->reader_lock);
+raw_spin_unlock_irq(&cpu_buffer->reader_lock);
}

static void
@@ -1288,7 +1288,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
struct list_head *p;
unsigned i;

-spin_lock_irq(&cpu_buffer->reader_lock);
+raw_spin_lock_irq(&cpu_buffer->reader_lock);
rb_head_page_deactivate(cpu_buffer);

for (i = 0; i < nr_pages; i++) {
@@ -1303,7 +1303,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
rb_check_pages(cpu_buffer);

out:
-spin_unlock_irq(&cpu_buffer->reader_lock);
+raw_spin_unlock_irq(&cpu_buffer->reader_lock);
}

/**
@@ -2804,9 +2804,9 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)

cpu_buffer = iter->cpu_buffer;

-spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
rb_iter_reset(iter);
-spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);

@@ -3265,12 +3265,12 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
again:
local_irq_save(flags);
if (dolock)
-spin_lock(&cpu_buffer->reader_lock);
+raw_spin_lock(&cpu_buffer->reader_lock);
event = rb_buffer_peek(cpu_buffer, ts, lost_events);
if (event && event->type_len == RINGBUF_TYPE_PADDING)
rb_advance_reader(cpu_buffer);
if (dolock)
-spin_unlock(&cpu_buffer->reader_lock);
+raw_spin_unlock(&cpu_buffer->reader_lock);
local_irq_restore(flags);

if (event && event->type_len == RINGBUF_TYPE_PADDING)
@@ -3295,9 +3295,9 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
unsigned long flags;

again:
-spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
event = rb_iter_peek(iter, ts);
-spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

if (event && event->type_len == RINGBUF_TYPE_PADDING)
goto again;
@@ -3337,7 +3337,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
cpu_buffer = buffer->buffers[cpu];
local_irq_save(flags);
if (dolock)
-spin_lock(&cpu_buffer->reader_lock);
+raw_spin_lock(&cpu_buffer->reader_lock);

event = rb_buffer_peek(cpu_buffer, ts, lost_events);
if (event) {
@@ -3346,7 +3346,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
}

if (dolock)
-spin_unlock(&cpu_buffer->reader_lock);
+raw_spin_unlock(&cpu_buffer->reader_lock);
local_irq_restore(flags);

out:
@@ -3438,11 +3438,11 @@ ring_buffer_read_start(struct ring_buffer_iter *iter)

cpu_buffer = iter->cpu_buffer;

-spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
arch_spin_lock(&cpu_buffer->lock);
rb_iter_reset(iter);
arch_spin_unlock(&cpu_buffer->lock);
-spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_start);

@@ -3477,7 +3477,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
unsigned long flags;

-spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
again:
event = rb_iter_peek(iter, ts);
if (!event)
@@ -3488,7 +3488,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)

rb_advance_iter(iter);
out:
-spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

return event;
}
@@ -3557,7 +3557,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)

atomic_inc(&cpu_buffer->record_disabled);

-spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
goto out;
@@ -3569,7 +3569,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
arch_spin_unlock(&cpu_buffer->lock);

out:
-spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

atomic_dec(&cpu_buffer->record_disabled);
}
@@ -3607,10 +3607,10 @@ int ring_buffer_empty(struct ring_buffer *buffer)
cpu_buffer = buffer->buffers[cpu];
local_irq_save(flags);
if (dolock)
-spin_lock(&cpu_buffer->reader_lock);
+raw_spin_lock(&cpu_buffer->reader_lock);
ret = rb_per_cpu_empty(cpu_buffer);
if (dolock)
-spin_unlock(&cpu_buffer->reader_lock);
+raw_spin_unlock(&cpu_buffer->reader_lock);
local_irq_restore(flags);

if (!ret)
@@ -3641,10 +3641,10 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
cpu_buffer = buffer->buffers[cpu];
local_irq_save(flags);
if (dolock)
-spin_lock(&cpu_buffer->reader_lock);
+raw_spin_lock(&cpu_buffer->reader_lock);
ret = rb_per_cpu_empty(cpu_buffer);
if (dolock)
-spin_unlock(&cpu_buffer->reader_lock);
+raw_spin_unlock(&cpu_buffer->reader_lock);
local_irq_restore(flags);

return ret;
@@ -3841,7 +3841,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
if (!bpage)
goto out;

-spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

reader = rb_get_reader_page(cpu_buffer);
if (!reader)
@@ -3964,7 +3964,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);

out_unlock:
-spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

out:
return ret;
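
The read-side paths above (ring_buffer_peek(), ring_buffer_consume(), ring_buffer_empty() and ring_buffer_empty_cpu()) share one locking shape that this commit leaves intact apart from the lock type: interrupts are always disabled, but reader_lock is taken only when the caller decided it is safe (the dolock flag, which the callers clear in contexts such as NMI where taking the lock could deadlock). Condensed for illustration - do_read_work() is a placeholder, not a function in this file:

        unsigned long flags;

        local_irq_save(flags);          /* always run the window atomically */
        if (dolock)                     /* lock only when it cannot deadlock */
                raw_spin_lock(&cpu_buffer->reader_lock);
        do_read_work(cpu_buffer);
        if (dolock)
                raw_spin_unlock(&cpu_buffer->reader_lock);
        local_irq_restore(flags);

Because interrupts are already off, the plain raw_spin_lock()/raw_spin_unlock() variants suffice inside the window; no irqsave variant is needed.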
10 changes: 5 additions & 5 deletions kernel/trace/trace.c
@@ -341,7 +341,7 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE;

static int trace_stop_count;
-static DEFINE_SPINLOCK(tracing_start_lock);
+static DEFINE_RAW_SPINLOCK(tracing_start_lock);

static void wakeup_work_handler(struct work_struct *work)
{
@@ -960,7 +960,7 @@ void tracing_start(void)
if (tracing_disabled)
return;

-spin_lock_irqsave(&tracing_start_lock, flags);
+raw_spin_lock_irqsave(&tracing_start_lock, flags);
if (--trace_stop_count) {
if (trace_stop_count < 0) {
/* Someone screwed up their debugging */
@@ -985,7 +985,7 @@ void tracing_start(void)

ftrace_start();
out:
-spin_unlock_irqrestore(&tracing_start_lock, flags);
+raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
}

/**
@@ -1000,7 +1000,7 @@ void tracing_stop(void)
unsigned long flags;

ftrace_stop();
-spin_lock_irqsave(&tracing_start_lock, flags);
+raw_spin_lock_irqsave(&tracing_start_lock, flags);
if (trace_stop_count++)
goto out;

@@ -1018,7 +1018,7 @@ void tracing_stop(void)
arch_spin_unlock(&ftrace_max_lock);

out:
-spin_unlock_irqrestore(&tracing_start_lock, flags);
+raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
}

void trace_stop_cmdline_recording(void);
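
tracing_start() and tracing_stop() nest: trace_stop_count is a depth counter guarded by tracing_start_lock, so only the outermost stop/start pair actually toggles tracing - inner pairs merely adjust the count. A sketch of that scheme reduced to its locking; do_stop(), do_start() and the *_sketch() functions are stand-ins for the buffer-toggling code elided above, not kernel APIs:

static void do_stop(void);      /* stands in for disabling the buffers */
static void do_start(void);     /* stands in for re-enabling them */

static void tracing_stop_sketch(void)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&tracing_start_lock, flags);
        if (trace_stop_count++)         /* already stopped: just nest deeper */
                goto out;
        do_stop();                      /* outermost stop does the real work */
out:
        raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
}

static void tracing_start_sketch(void)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&tracing_start_lock, flags);
        if (--trace_stop_count) {
                if (trace_stop_count < 0) {
                        /* unbalanced start: repair the count */
                        trace_stop_count = 0;
                }
                goto out;
        }
        do_start();                     /* outermost start re-enables tracing */
out:
        raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
}

These paths can be entered from atomic context - the very reason for this commit - so the guarding lock has to remain a real spinning lock on -rt.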
6 changes: 3 additions & 3 deletions kernel/trace/trace_irqsoff.c
@@ -23,7 +23,7 @@ static int tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

-static DEFINE_SPINLOCK(max_trace_lock);
+static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
TRACER_IRQS_OFF = (1 << 1),
@@ -321,7 +321,7 @@ check_critical_timing(struct trace_array *tr,
if (!report_latency(delta))
goto out;

-spin_lock_irqsave(&max_trace_lock, flags);
+raw_spin_lock_irqsave(&max_trace_lock, flags);

/* check if we are still the max latency */
if (!report_latency(delta))
@@ -344,7 +344,7 @@ check_critical_timing(struct trace_array *tr,
max_sequence++;

out_unlock:
-spin_unlock_irqrestore(&max_trace_lock, flags);
+raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
data->critical_sequence = max_sequence;
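
check_critical_timing() uses an optimistic double-check around max_trace_lock: report_latency() is evaluated once without the lock so the common no-new-maximum case stays cheap, then re-evaluated under the lock because another CPU may have recorded a larger latency in between. Reduced to the pattern - check_and_record() and record_max() are illustrative stand-ins; only report_latency() and max_trace_lock exist in this file:

static void check_and_record(struct trace_array *tr, u64 delta)
{
        unsigned long flags;

        if (!report_latency(delta))     /* cheap unlocked test first */
                return;

        raw_spin_lock_irqsave(&max_trace_lock, flags);

        /* recheck: another CPU may have raised the maximum meanwhile */
        if (report_latency(delta))
                record_max(tr, delta);  /* placeholder for the real update */

        raw_spin_unlock_irqrestore(&max_trace_lock, flags);
}

This runs in the irqsoff tracer's measurement path, typically with interrupts already disabled, so a sleeping lock would be unusable here on -rt - hence the raw annotation.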
