Skip to content

Commit

Permalink
[PATCH] mutex: some cleanups
Browse files Browse the repository at this point in the history
Turn some macros into inline functions, which adds proper type checking
and makes them more readable.  Also a minor comment adjustment.

Signed-off-by: Nicolas Pitre <[email protected]>
Acked-by: Ingo Molnar <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
Nicolas Pitre authored and Linus Torvalds committed Mar 31, 2006
1 parent a58e00e commit e358c1a
Show file tree
Hide file tree
Showing 2 changed files with 33 additions and 30 deletions.
30 changes: 16 additions & 14 deletions include/asm-generic/mutex-dec.h
Original file line number Diff line number Diff line change
Expand Up @@ -17,13 +17,14 @@
* it wasn't 1 originally. This function MUST leave the value lower than
* 1 even when the "1" assertion wasn't true.
*/
/*
 * Old macro form (superseded by a type-checked static inline): decrement
 * *count; a negative result means the lock was contended, so divert to
 * fail_fn (the slowpath), otherwise issue a memory barrier so the
 * critical section is ordered after the acquire.
 */
#define __mutex_fastpath_lock(count, fail_fn) \
do { \
if (unlikely(atomic_dec_return(count) < 0)) \
fail_fn(count); \
else \
smp_mb(); \
} while (0)
/*
 * Inline replacement for the macro above: same logic, but the compiler
 * now type-checks both the counter and the slowpath callback.
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
{
/* result < 0 means the count was not 1: contended, take the slowpath */
if (unlikely(atomic_dec_return(count) < 0))
fail_fn(count);
else
/* uncontended acquire: barrier orders the critical section after it */
smp_mb();
}

/**
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
Expand All @@ -36,7 +37,7 @@ do { \
* or anything the slow path function returns.
*/
static inline int
__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_dec_return(count) < 0))
return fail_fn(count);
Expand All @@ -59,12 +60,13 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
* __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
* to return 0 otherwise.
*/
/*
 * Old macro form (superseded by a type-checked static inline): barrier
 * first so the critical section completes before release, then increment
 * *count; a result <= 0 means waiters are queued, so call fail_fn.
 */
#define __mutex_fastpath_unlock(count, fail_fn) \
do { \
smp_mb(); \
if (unlikely(atomic_inc_return(count) <= 0)) \
fail_fn(count); \
} while (0)
/*
 * Inline replacement for the macro above: same logic, with proper type
 * checking of the counter and the slowpath callback.
 */
static inline void
__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
{
/* barrier before release: critical section must not leak past unlock */
smp_mb();
/* result <= 0 indicates waiters: hand off to the slowpath */
if (unlikely(atomic_inc_return(count) <= 0))
fail_fn(count);
}

#define __mutex_slowpath_needs_to_unlock() 1

Expand Down
33 changes: 17 additions & 16 deletions include/asm-generic/mutex-xchg.h
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
*
* Generic implementation of the mutex fastpath, based on xchg().
*
* NOTE: An xchg based implementation is less optimal than an atomic
* NOTE: An xchg based implementation might be less optimal than an atomic
* decrement/increment based implementation. If your architecture
* has a reasonable atomic dec/inc then you should probably use
* asm-generic/mutex-dec.h instead, or you could open-code an
Expand All @@ -22,14 +22,14 @@
* wasn't 1 originally. This function MUST leave the value lower than 1
* even when the "1" assertion wasn't true.
*/
/*
 * Old macro form (superseded by a type-checked static inline): swap 0
 * into *count; if the old value was not 1 the lock was contended, so
 * take the slowpath via fail_fn, otherwise issue a memory barrier so
 * the critical section is ordered after the acquire.
 */
#define __mutex_fastpath_lock(count, fail_fn) \
do { \
if (unlikely(atomic_xchg(count, 0) != 1)) \
fail_fn(count); \
else \
smp_mb(); \
} while (0)


/*
 * Inline replacement for the macro above: same logic, but the compiler
 * now type-checks both the counter and the slowpath callback.
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
{
/* old value != 1 means someone else holds or contends: slowpath */
if (unlikely(atomic_xchg(count, 0) != 1))
fail_fn(count);
else
/* uncontended acquire: barrier orders the critical section after it */
smp_mb();
}

/**
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
Expand All @@ -42,7 +42,7 @@ do { \
* or anything the slow path function returns
*/
static inline int
__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_xchg(count, 0) != 1))
return fail_fn(count);
Expand All @@ -64,12 +64,13 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
* __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
* to return 0 otherwise.
*/
/*
 * Old macro form (superseded by a type-checked static inline): barrier
 * first so the critical section completes before release, then swap 1
 * into *count; a nonzero old value means waiters exist, so call fail_fn.
 */
#define __mutex_fastpath_unlock(count, fail_fn) \
do { \
smp_mb(); \
if (unlikely(atomic_xchg(count, 1) != 0)) \
fail_fn(count); \
} while (0)
/*
 * Inline replacement for the macro above: same logic, with proper type
 * checking of the counter and the slowpath callback.
 */
static inline void
__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
{
/* barrier before release: critical section must not leak past unlock */
smp_mb();
/* old value != 0 indicates waiters: hand off to the slowpath */
if (unlikely(atomic_xchg(count, 1) != 0))
fail_fn(count);
}

#define __mutex_slowpath_needs_to_unlock() 0

Expand Down

0 comments on commit e358c1a

Please sign in to comment.