[PATCH] work around ppc64 bootup bug by making mutex-debugging save/restore irqs

It seems ppc64 wants to lock mutexes in early bootup code, with interrupts
disabled, and expects interrupts to stay disabled afterwards; otherwise it
crashes.

Work around this bug by making the mutex-debugging lock/unlock variants save
and restore the irq flags instead of unconditionally re-enabling interrupts.
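
In essence, the debug wrappers used to bracket the internal debug_mutex_lock
with a bare local_irq_disable()/local_irq_enable() pair, so every unlock
re-enabled interrupts regardless of the state the caller was in. The patch
switches them to the save/restore discipline. A simplified before/after
sketch (the debug_mutex_on checks and preempt_check_resched() of the real
macros are omitted for brevity):

	/* Before: unlock unconditionally re-enables interrupts, even when
	 * the caller (e.g. early ppc64 boot code) needs them to stay off. */
	#define debug_spin_lock(lock) \
		do { local_irq_disable(); spin_lock(lock); } while (0)
	#define debug_spin_unlock(lock) \
		do { spin_unlock(lock); local_irq_enable(); } while (0)

	/* After: lock saves the caller's irq state into flags, and unlock
	 * restores exactly that state, so irqs-off callers stay irqs-off. */
	#define debug_spin_lock_save(lock, flags) \
		do { local_irq_save(flags); spin_lock(lock); } while (0)
	#define debug_spin_unlock_restore(lock, flags) \
		do { spin_unlock(lock); local_irq_restore(flags); } while (0)

A caller that enters with interrupts disabled therefore leaves with them
still disabled, which is exactly what the early ppc64 boot path expects.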

Signed-off-by: Ingo Molnar <[email protected]>
Cc: Benjamin Herrenschmidt <[email protected]>
Cc: Paul Mackerras <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Ingo Molnar authored and Linus Torvalds committed Jun 26, 2006
Parent: 20c5426 · Commit: 1fb00c6
Showing 4 changed files with 27 additions and 37 deletions.
kernel/mutex-debug.c: 6 additions, 6 deletions
@@ -153,13 +153,13 @@ void show_held_locks(struct task_struct *filter)
 			continue;
 		count++;
 		cursor = curr->next;
-		debug_spin_lock_restore(&debug_mutex_lock, flags);
+		debug_spin_unlock_restore(&debug_mutex_lock, flags);
 
 		printk("\n#%03d: ", count);
 		printk_lock(lock, filter ? 0 : 1);
 		goto next;
 	}
-	debug_spin_lock_restore(&debug_mutex_lock, flags);
+	debug_spin_unlock_restore(&debug_mutex_lock, flags);
 	printk("\n");
 }
 
@@ -316,7 +316,7 @@ void mutex_debug_check_no_locks_held(struct task_struct *task)
 			continue;
 		list_del_init(curr);
 		DEBUG_OFF();
-		debug_spin_lock_restore(&debug_mutex_lock, flags);
+		debug_spin_unlock_restore(&debug_mutex_lock, flags);
 
 		printk("BUG: %s/%d, lock held at task exit time!\n",
 			task->comm, task->pid);
@@ -325,7 +325,7 @@ void mutex_debug_check_no_locks_held(struct task_struct *task)
 		printk("exiting task is not even the owner??\n");
 		return;
 	}
-	debug_spin_lock_restore(&debug_mutex_lock, flags);
+	debug_spin_unlock_restore(&debug_mutex_lock, flags);
 }
 
 /*
@@ -352,7 +352,7 @@ void mutex_debug_check_no_locks_freed(const void *from, unsigned long len)
 			continue;
 		list_del_init(curr);
 		DEBUG_OFF();
-		debug_spin_lock_restore(&debug_mutex_lock, flags);
+		debug_spin_unlock_restore(&debug_mutex_lock, flags);
 
 		printk("BUG: %s/%d, active lock [%p(%p-%p)] freed!\n",
 			current->comm, current->pid, lock, from, to);
@@ -362,7 +362,7 @@ void mutex_debug_check_no_locks_freed(const void *from, unsigned long len)
 		printk("freeing task is not even the owner??\n");
 		return;
 	}
-	debug_spin_lock_restore(&debug_mutex_lock, flags);
+	debug_spin_unlock_restore(&debug_mutex_lock, flags);
 }
 
 /*
kernel/mutex-debug.h: 5 additions, 20 deletions
@@ -46,50 +46,35 @@ extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 extern void debug_mutex_unlock(struct mutex *lock);
 extern void debug_mutex_init(struct mutex *lock, const char *name);
 
-#define debug_spin_lock(lock)				\
-	do {						\
-		local_irq_disable();			\
-		if (debug_mutex_on)			\
-			spin_lock(lock);		\
-	} while (0)
-
-#define debug_spin_unlock(lock)				\
-	do {						\
-		if (debug_mutex_on)			\
-			spin_unlock(lock);		\
-		local_irq_enable();			\
-		preempt_check_resched();		\
-	} while (0)
-
 #define debug_spin_lock_save(lock, flags)		\
 	do {						\
 		local_irq_save(flags);			\
 		if (debug_mutex_on)			\
 			spin_lock(lock);		\
 	} while (0)
 
-#define debug_spin_lock_restore(lock, flags)		\
+#define debug_spin_unlock_restore(lock, flags)		\
 	do {						\
 		if (debug_mutex_on)			\
 			spin_unlock(lock);		\
 		local_irq_restore(flags);		\
 		preempt_check_resched();		\
 	} while (0)
 
-#define spin_lock_mutex(lock)				\
+#define spin_lock_mutex(lock, flags)			\
 	do {						\
 		struct mutex *l = container_of(lock, struct mutex, wait_lock); \
 							\
 		DEBUG_WARN_ON(in_interrupt());		\
-		debug_spin_lock(&debug_mutex_lock);	\
+		debug_spin_lock_save(&debug_mutex_lock, flags); \
 		spin_lock(lock);			\
 		DEBUG_WARN_ON(l->magic != l);		\
 	} while (0)
 
-#define spin_unlock_mutex(lock)				\
+#define spin_unlock_mutex(lock, flags)			\
 	do {						\
 		spin_unlock(lock);			\
-		debug_spin_unlock(&debug_mutex_lock);	\
+		debug_spin_unlock_restore(&debug_mutex_lock, flags); \
 	} while (0)
 
 #define DEBUG_OFF()					\
kernel/mutex.c: 12 additions, 9 deletions
@@ -125,10 +125,11 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
 	struct task_struct *task = current;
 	struct mutex_waiter waiter;
 	unsigned int old_val;
+	unsigned long flags;
 
 	debug_mutex_init_waiter(&waiter);
 
-	spin_lock_mutex(&lock->wait_lock);
+	spin_lock_mutex(&lock->wait_lock, flags);
 
 	debug_mutex_add_waiter(lock, &waiter, task->thread_info, ip);
 
@@ -157,17 +158,17 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
 		if (unlikely(state == TASK_INTERRUPTIBLE &&
 					signal_pending(task))) {
 			mutex_remove_waiter(lock, &waiter, task->thread_info);
-			spin_unlock_mutex(&lock->wait_lock);
+			spin_unlock_mutex(&lock->wait_lock, flags);
 
 			debug_mutex_free_waiter(&waiter);
 			return -EINTR;
 		}
 		__set_task_state(task, state);
 
 		/* didnt get the lock, go to sleep: */
-		spin_unlock_mutex(&lock->wait_lock);
+		spin_unlock_mutex(&lock->wait_lock, flags);
 		schedule();
-		spin_lock_mutex(&lock->wait_lock);
+		spin_lock_mutex(&lock->wait_lock, flags);
 	}
 
 	/* got the lock - rejoice! */
@@ -178,7 +179,7 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
 	if (likely(list_empty(&lock->wait_list)))
 		atomic_set(&lock->count, 0);
 
-	spin_unlock_mutex(&lock->wait_lock);
+	spin_unlock_mutex(&lock->wait_lock, flags);
 
 	debug_mutex_free_waiter(&waiter);
 
@@ -203,10 +204,11 @@ static fastcall noinline void
 __mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
+	unsigned long flags;
 
 	DEBUG_WARN_ON(lock->owner != current_thread_info());
 
-	spin_lock_mutex(&lock->wait_lock);
+	spin_lock_mutex(&lock->wait_lock, flags);
 
 	/*
 	 * some architectures leave the lock unlocked in the fastpath failure
@@ -231,7 +233,7 @@ __mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
 
 	debug_mutex_clear_owner(lock);
 
-	spin_unlock_mutex(&lock->wait_lock);
+	spin_unlock_mutex(&lock->wait_lock, flags);
 }
 
 /*
@@ -276,9 +278,10 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__)
 static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
+	unsigned long flags;
 	int prev;
 
-	spin_lock_mutex(&lock->wait_lock);
+	spin_lock_mutex(&lock->wait_lock, flags);
 
 	prev = atomic_xchg(&lock->count, -1);
 	if (likely(prev == 1))
@@ -287,7 +290,7 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
 	if (likely(list_empty(&lock->wait_list)))
 		atomic_set(&lock->count, 0);
 
-	spin_unlock_mutex(&lock->wait_lock);
+	spin_unlock_mutex(&lock->wait_lock, flags);
 
 	return prev == 1;
 }
kernel/mutex.h: 4 additions, 2 deletions
@@ -9,8 +9,10 @@
  * !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
  */
 
-#define spin_lock_mutex(lock)			spin_lock(lock)
-#define spin_unlock_mutex(lock)		spin_unlock(lock)
+#define spin_lock_mutex(lock, flags) \
+		do { spin_lock(lock); (void)(flags); } while (0)
+#define spin_unlock_mutex(lock, flags) \
+		do { spin_unlock(lock); (void)(flags); } while (0)
 #define mutex_remove_waiter(lock, waiter, ti) \
 	__list_del((waiter)->list.prev, (waiter)->list.next)
 
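A side note on the !CONFIG_DEBUG_MUTEXES stubs above: the (void)(flags)
cast "uses" the otherwise-unused flags argument, so non-debug builds keep
the same two-argument macro signature without triggering unused-variable
warnings for the flags locals added in kernel/mutex.c. A minimal standalone
illustration of the idiom (the names here are hypothetical, not from the
kernel tree):

	#include <stdio.h>

	/* "Use" an argument without generating any code for it: */
	#define touch_unused(arg)	do { (void)(arg); } while (0)

	int main(void)
	{
		unsigned long flags = 0;	/* only really needed in a debug build */

		touch_unused(flags);		/* no warning, no code emitted */
		printf("unused-argument idiom compiles cleanly\n");
		return 0;
	}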
