Skip to content

Commit

Permalink
atomic: move atomic_add_unless to generic code
Browse files Browse the repository at this point in the history
This is in preparation for more generic atomic primitives based on
__atomic_add_unless.

Signed-off-by: Arun Sharma <[email protected]>
Signed-off-by: Hans-Christian Egtvedt <[email protected]>
Reviewed-by: Eric Dumazet <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: David Miller <[email protected]>
Acked-by: Mike Frysinger <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
Arun Sharma authored and torvalds committed Jul 26, 2011
1 parent 6006349 commit f24219b
Show file tree
Hide file tree
Showing 26 changed files with 109 additions and 102 deletions.
10 changes: 5 additions & 5 deletions arch/alpha/include/asm/atomic.h
Original file line number Diff line number Diff line change
Expand Up @@ -176,15 +176,15 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
* __atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
Expand All @@ -196,7 +196,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
break;
c = old;
}
return c != (u);
return c;
}


Expand All @@ -207,7 +207,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
Expand Down
4 changes: 2 additions & 2 deletions arch/arm/include/asm/atomic.h
Original file line number Diff line number Diff line change
Expand Up @@ -208,14 +208,14 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/*
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not @u.
 * Returns the old value of @v; callers compare it against @u to
 * tell whether the add happened.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	/* Retry until cmpxchg installs c+a over our snapshot c, or the
	 * snapshot equals @u (in which case we must not add). */
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}

#define atomic_inc(v) atomic_add(1, v)
Expand Down
57 changes: 25 additions & 32 deletions arch/avr32/include/asm/atomic.h
Original file line number Diff line number Diff line change
Expand Up @@ -78,70 +78,63 @@ static inline int atomic_add_return(int i, atomic_t *v)
/*
 * atomic_sub_unless - sub unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to subtract from v...
 * @u: ...unless v is equal to u.
 *
 * Atomically subtract @a from @v, so long as it was not @u.
 */
static inline void atomic_sub_unless(atomic_t *v, int a, int u)
{
	int tmp;

	/* ssrf 5 + ld.w/stcond is the AVR32 load-locked/store-conditional
	 * sequence; brne 1b retries if the store lost the reservation. */
	asm volatile(
		"/* atomic_sub_unless */\n"
		"1:	ssrf	5\n"
		"	ld.w	%0, %2\n"
		"	cp.w	%0, %4\n"
		"	breq	1f\n"
		"	sub	%0, %3\n"
		"	stcond	%1, %0\n"
		"	brne	1b\n"
		"1:"
		: "=&r"(tmp), "=o"(v->counter)
		: "m"(v->counter), "rKs21"(a), "rKs21"(u)
		: "cc", "memory");
}

/*
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int tmp, old = atomic_read(v);

	/* Small constant addends fit the sub instruction's Ks21 immediate,
	 * so reuse atomic_sub_unless with the negated value. */
	if (__builtin_constant_p(a) && (a >= -1048575) && (a <= 1048576))
		atomic_sub_unless(v, -a, u);
	else {
		asm volatile(
			"/* __atomic_add_unless */\n"
			"1:	ssrf	5\n"
			"	ld.w	%0, %2\n"
			"	cp.w	%0, %4\n"
			"	breq	1f\n"
			"	add	%0, %3\n"
			"	stcond	%1, %0\n"
			"	brne	1b\n"
			"1:"
			: "=&r"(tmp), "=o"(v->counter)
			: "m"(v->counter), "r"(a), "ir"(u)
			: "cc", "memory");
	}

	/* NOTE(review): old is sampled before the LL/SC loop, so it may not
	 * be the exact value the add was applied to — matches upstream. */
	return old;
}

/*
Expand Down
4 changes: 2 additions & 2 deletions arch/blackfin/include/asm/atomic.h
Original file line number Diff line number Diff line change
Expand Up @@ -89,13 +89,13 @@ static inline void atomic_set_mask(int mask, atomic_t *v)
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/*
 * __atomic_add_unless - atomically add @a to @v, so long as @v was not @u.
 * Expands to the old value of @v; callers compare it against @u to tell
 * whether the add happened.
 */
#define __atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
		c = old;					\
	c;							\
})

/*
Expand Down
4 changes: 2 additions & 2 deletions arch/cris/include/asm/atomic.h
Original file line number Diff line number Diff line change
Expand Up @@ -138,7 +138,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_add_unless(atomic_t *v, int a, int u)
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int ret;
unsigned long flags;
Expand All @@ -148,7 +148,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
if (ret != u)
v->counter += a;
cris_atomic_restore(v, flags);
return ret != u;
return ret;
}

/* Atomic operations are already serializing */
Expand Down
4 changes: 2 additions & 2 deletions arch/frv/include/asm/atomic.h
Original file line number Diff line number Diff line change
Expand Up @@ -241,7 +241,7 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
#define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
#define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))

static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
Expand All @@ -253,7 +253,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
break;
c = old;
}
return c != (u);
return c;
}


Expand Down
4 changes: 2 additions & 2 deletions arch/h8300/include/asm/atomic.h
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_add_unless(atomic_t *v, int a, int u)
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int ret;
unsigned long flags;
Expand All @@ -114,7 +114,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
if (ret != u)
v->counter += a;
local_irq_restore(flags);
return ret != u;
return ret;
}

static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v)
Expand Down
4 changes: 2 additions & 2 deletions arch/ia64/include/asm/atomic.h
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,7 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
Expand All @@ -102,7 +102,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
break;
c = old;
}
return c != (u);
return c;
}


Expand Down
8 changes: 4 additions & 4 deletions arch/m32r/include/asm/atomic.h
Original file line number Diff line number Diff line change
Expand Up @@ -239,15 +239,15 @@ static __inline__ int atomic_dec_return(atomic_t *v)
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
* __atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
Expand All @@ -259,7 +259,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
break;
c = old;
}
return c != (u);
return c;
}


Expand Down
4 changes: 2 additions & 2 deletions arch/m68k/include/asm/atomic.h
Original file line number Diff line number Diff line change
Expand Up @@ -183,7 +183,7 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
__asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
}

static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
Expand All @@ -195,7 +195,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
break;
c = old;
}
return c != (u);
return c;
}


Expand Down
10 changes: 5 additions & 5 deletions arch/mips/include/asm/atomic.h
Original file line number Diff line number Diff line change
Expand Up @@ -303,15 +303,15 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))

/**
* __atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
Expand All @@ -323,7 +323,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
break;
c = old;
}
return c != (u);
return c;
}

#define atomic_dec_return(v) atomic_sub_return(1, (v))
Expand Down Expand Up @@ -679,7 +679,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
Expand Down
4 changes: 2 additions & 2 deletions arch/mn10300/include/asm/atomic.h
Original file line number Diff line number Diff line change
Expand Up @@ -260,13 +260,13 @@ static inline void atomic_dec(atomic_t *v)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)

/*
 * __atomic_add_unless - atomically add @a to @v, so long as @v was not @u.
 * Expands to the old value of @v; callers compare it against @u to tell
 * whether the add happened.
 */
#define __atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
		c = old;					\
	c;							\
})


Expand Down
10 changes: 5 additions & 5 deletions arch/parisc/include/asm/atomic.h
Original file line number Diff line number Diff line change
Expand Up @@ -197,15 +197,15 @@ static __inline__ int atomic_read(const atomic_t *v)
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
* __atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
Expand All @@ -217,7 +217,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
break;
c = old;
}
return c != (u);
return c;
}


Expand Down Expand Up @@ -316,7 +316,7 @@ atomic64_read(const atomic64_t *v)
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
Expand Down
Loading

0 comments on commit f24219b

Please sign in to comment.