Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:
 "The main changes in this cycle were:

   - a big round of FUTEX_UNLOCK_PI improvements, fixes, cleanups and
     general restructuring

   - lockdep updates such as new checks for lock_downgrade()

   - introduce the new atomic_try_cmpxchg() locking API and use it to
     optimize refcount code generation (a minimal before/after sketch
     follows this list)

   - ... plus misc fixes, updates and cleanups"
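
The atomic_try_cmpxchg() work is the centerpiece of the code-generation
changes in the diff below: the primitive returns a bool and, on failure,
writes the value it observed back through the 'old' pointer, so callers no
longer need an explicit reload in their retry loops. A minimal sketch of the
before/after shape, using only APIs introduced in this merge (the helper
names are illustrative; the real change is in the ATOMIC_FETCH_OP hunks
below):

/* Before: each failed cmpxchg() needs an explicit reload of the
 * expected value before retrying.
 */
static inline int fetch_add_cmpxchg(atomic_t *v, int i)
{
	int old, val = atomic_read(v);

	for (;;) {
		old = atomic_cmpxchg(v, val, val + i);
		if (old == val)
			break;
		val = old;		/* reload and retry */
	}
	return old;
}

/* After: atomic_try_cmpxchg() refreshes 'val' on failure itself, so
 * the loop body is empty and the compiler can keep the value that
 * CMPXCHG already left in a register.
 */
static inline int fetch_add_try_cmpxchg(atomic_t *v, int i)
{
	int val = atomic_read(v);

	do {
	} while (!atomic_try_cmpxchg(v, &val, val + i));
	return val;
}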

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (38 commits)
  MAINTAINERS: Add FUTEX SUBSYSTEM
  futex: Clarify mark_wake_futex memory barrier usage
  futex: Fix small (and harmless looking) inconsistencies
  futex: Avoid freeing an active timer
  rtmutex: Plug preempt count leak in rt_mutex_futex_unlock()
  rtmutex: Fix more prio comparisons
  rtmutex: Fix PI chain order integrity
  sched,tracing: Update trace_sched_pi_setprio()
  sched/rtmutex: Refactor rt_mutex_setprio()
  rtmutex: Clean up
  sched/deadline/rtmutex: Dont miss the dl_runtime/dl_period update
  sched/rtmutex/deadline: Fix a PI crash for deadline tasks
  rtmutex: Deboost before waking up the top waiter
  locking/ww-mutex: Limit stress test to 2 seconds
  locking/atomic: Fix atomic_try_cmpxchg() semantics
  lockdep: Fix per-cpu static objects
  futex: Drop hb->lock before enqueueing on the rtmutex
  futex: Futex_unlock_pi() determinism
  futex: Rework futex_lock_pi() to use rt_mutex_*_proxy_lock()
  futex,rt_mutex: Restructure rt_mutex_finish_proxy_lock()
  ...
torvalds committed May 2, 2017
2 parents 3527d3e + 59cd42c commit 207fb8c
Showing 29 changed files with 1,231 additions and 610 deletions.
17 changes: 17 additions & 0 deletions MAINTAINERS
@@ -5415,6 +5415,23 @@ F: fs/fuse/
F: include/uapi/linux/fuse.h
F: Documentation/filesystems/fuse.txt

FUTEX SUBSYSTEM
M: Thomas Gleixner <[email protected]>
M: Ingo Molnar <[email protected]>
R: Peter Zijlstra <[email protected]>
R: Darren Hart <[email protected]>
L: [email protected]
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
S: Maintained
F: kernel/futex.c
F: kernel/futex_compat.c
F: include/asm-generic/futex.h
F: include/linux/futex.h
F: include/uapi/linux/futex.h
F: tools/testing/selftests/futex/
F: tools/perf/bench/futex*
F: Documentation/*futex*

FUTURE DOMAIN TMC-16x0 SCSI DRIVER (16-bit)
M: Rik Faith <[email protected]>
L: [email protected]
33 changes: 15 additions & 18 deletions arch/x86/include/asm/atomic.h
@@ -186,6 +186,12 @@ static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return cmpxchg(&v->counter, old, new);
}

#define atomic_try_cmpxchg atomic_try_cmpxchg
static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
return try_cmpxchg(&v->counter, old, new);
}

static inline int atomic_xchg(atomic_t *v, int new)
{
return xchg(&v->counter, new);
@@ -201,16 +207,12 @@ static inline void atomic_##op(int i, atomic_t *v) \
}

#define ATOMIC_FETCH_OP(op, c_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
int old, val = atomic_read(v); \
for (;;) { \
old = atomic_cmpxchg(v, val, val c_op i); \
if (old == val) \
break; \
val = old; \
} \
return old; \
int val = atomic_read(v); \
do { \
} while (!atomic_try_cmpxchg(v, &val, val c_op i)); \
return val; \
}

#define ATOMIC_OPS(op, c_op) \
@@ -236,16 +238,11 @@ ATOMIC_OPS(xor, ^)
*/
static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
for (;;) {
if (unlikely(c == (u)))
break;
old = atomic_cmpxchg((v), c, c + (a));
if (likely(old == c))
int c = atomic_read(v);
do {
if (unlikely(c == u))
break;
c = old;
}
} while (!atomic_try_cmpxchg(v, &c, c + a));
return c;
}

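The caller-visible contract used by __atomic_add_unless() above:
atomic_try_cmpxchg(v, &old, new) returns true on success and, on failure,
overwrites *old with the value currently in the counter. A hedged usage
sketch with a hypothetical saturating statistics counter (the helper and
its name are illustrative, not part of this series):

/* Increment a counter unless it is already saturated at INT_MAX.
 * 'old' is refreshed by atomic_try_cmpxchg() on every failed round,
 * so no atomic_read() is needed inside the loop.
 */
static bool stat_inc_unless_saturated(atomic_t *stat)
{
	int old = atomic_read(stat);

	do {
		if (unlikely(old == INT_MAX))
			return false;	/* saturated: leave as-is */
	} while (!atomic_try_cmpxchg(stat, &old, old + 1));
	return true;
}
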
46 changes: 19 additions & 27 deletions arch/x86/include/asm/atomic64_64.h
@@ -176,6 +176,12 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
return cmpxchg(&v->counter, old, new);
}

#define atomic64_try_cmpxchg atomic64_try_cmpxchg
static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, long *old, long new)
{
return try_cmpxchg(&v->counter, old, new);
}

static inline long atomic64_xchg(atomic64_t *v, long new)
{
return xchg(&v->counter, new);
@@ -192,17 +198,12 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
*/
static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
{
long c, old;
c = atomic64_read(v);
for (;;) {
if (unlikely(c == (u)))
break;
old = atomic64_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c != (u);
long c = atomic64_read(v);
do {
if (unlikely(c == u))
return false;
} while (!atomic64_try_cmpxchg(v, &c, c + a));
return true;
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
@@ -216,17 +217,12 @@ static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
*/
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
long c, old, dec;
c = atomic64_read(v);
for (;;) {
long dec, c = atomic64_read(v);
do {
dec = c - 1;
if (unlikely(dec < 0))
break;
old = atomic64_cmpxchg((v), c, dec);
if (likely(old == c))
break;
c = old;
}
} while (!atomic64_try_cmpxchg(v, &c, dec));
return dec;
}

@@ -242,14 +238,10 @@ static inline void atomic64_##op(long i, atomic64_t *v) \
#define ATOMIC64_FETCH_OP(op, c_op) \
static inline long atomic64_fetch_##op(long i, atomic64_t *v) \
{ \
long old, val = atomic64_read(v); \
for (;;) { \
old = atomic64_cmpxchg(v, val, val c_op i); \
if (old == val) \
break; \
val = old; \
} \
return old; \
long val = atomic64_read(v); \
do { \
} while (!atomic64_try_cmpxchg(v, &val, val c_op i)); \
return val; \
}

#define ATOMIC64_OPS(op, c_op) \
70 changes: 70 additions & 0 deletions arch/x86/include/asm/cmpxchg.h
@@ -153,6 +153,76 @@ extern void __add_wrong_size(void)
#define cmpxchg_local(ptr, old, new) \
__cmpxchg_local(ptr, old, new, sizeof(*(ptr)))


#define __raw_try_cmpxchg(_ptr, _pold, _new, size, lock) \
({ \
bool success; \
__typeof__(_ptr) _old = (_pold); \
__typeof__(*(_ptr)) __old = *_old; \
__typeof__(*(_ptr)) __new = (_new); \
switch (size) { \
case __X86_CASE_B: \
{ \
volatile u8 *__ptr = (volatile u8 *)(_ptr); \
asm volatile(lock "cmpxchgb %[new], %[ptr]" \
CC_SET(z) \
: CC_OUT(z) (success), \
[ptr] "+m" (*__ptr), \
[old] "+a" (__old) \
: [new] "q" (__new) \
: "memory"); \
break; \
} \
case __X86_CASE_W: \
{ \
volatile u16 *__ptr = (volatile u16 *)(_ptr); \
asm volatile(lock "cmpxchgw %[new], %[ptr]" \
CC_SET(z) \
: CC_OUT(z) (success), \
[ptr] "+m" (*__ptr), \
[old] "+a" (__old) \
: [new] "r" (__new) \
: "memory"); \
break; \
} \
case __X86_CASE_L: \
{ \
volatile u32 *__ptr = (volatile u32 *)(_ptr); \
asm volatile(lock "cmpxchgl %[new], %[ptr]" \
CC_SET(z) \
: CC_OUT(z) (success), \
[ptr] "+m" (*__ptr), \
[old] "+a" (__old) \
: [new] "r" (__new) \
: "memory"); \
break; \
} \
case __X86_CASE_Q: \
{ \
volatile u64 *__ptr = (volatile u64 *)(_ptr); \
asm volatile(lock "cmpxchgq %[new], %[ptr]" \
CC_SET(z) \
: CC_OUT(z) (success), \
[ptr] "+m" (*__ptr), \
[old] "+a" (__old) \
: [new] "r" (__new) \
: "memory"); \
break; \
} \
default: \
__cmpxchg_wrong_size(); \
} \
if (unlikely(!success)) \
*_old = __old; \
likely(success); \
})

#define __try_cmpxchg(ptr, pold, new, size) \
__raw_try_cmpxchg((ptr), (pold), (new), (size), LOCK_PREFIX)

#define try_cmpxchg(ptr, pold, new) \
__try_cmpxchg((ptr), (pold), (new), sizeof(*(ptr)))

/*
* xadd() adds "inc" to "*ptr" and atomically returns the previous
* value of "*ptr".
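The asm in __raw_try_cmpxchg() uses CC_SET(z)/CC_OUT(z) to hand ZF straight
to the compiler as the boolean result, and implements the same contract as
C11's atomic_compare_exchange_strong() (and GCC's
__atomic_compare_exchange_n() builtin): success as a bool, with the observed
value written back through the 'expected' pointer on failure. A standalone,
non-kernel sketch of that contract for comparison:

#include <stdatomic.h>
#include <stdbool.h>

/* Portable analogue of try_cmpxchg(): returns true on success; on
 * failure, *expected is updated to the value actually found, exactly
 * like the kernel macro above.
 */
static bool try_cmpxchg_like(_Atomic int *ptr, int *expected, int desired)
{
	return atomic_compare_exchange_strong(ptr, expected, desired);
}
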
46 changes: 46 additions & 0 deletions include/linux/atomic.h
@@ -423,6 +423,29 @@
#endif
#endif /* atomic_cmpxchg_relaxed */

#ifndef atomic_try_cmpxchg

#define __atomic_try_cmpxchg(type, _p, _po, _n) \
({ \
typeof(_po) __po = (_po); \
typeof(*(_po)) __r, __o = *__po; \
__r = atomic_cmpxchg##type((_p), __o, (_n)); \
if (unlikely(__r != __o)) \
*__po = __r; \
likely(__r == __o); \
})

#define atomic_try_cmpxchg(_p, _po, _n) __atomic_try_cmpxchg(, _p, _po, _n)
#define atomic_try_cmpxchg_relaxed(_p, _po, _n) __atomic_try_cmpxchg(_relaxed, _p, _po, _n)
#define atomic_try_cmpxchg_acquire(_p, _po, _n) __atomic_try_cmpxchg(_acquire, _p, _po, _n)
#define atomic_try_cmpxchg_release(_p, _po, _n) __atomic_try_cmpxchg(_release, _p, _po, _n)

#else /* atomic_try_cmpxchg */
#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg
#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg
#define atomic_try_cmpxchg_release atomic_try_cmpxchg
#endif /* atomic_try_cmpxchg */

/* cmpxchg_relaxed */
#ifndef cmpxchg_relaxed
#define cmpxchg_relaxed cmpxchg
@@ -996,6 +1019,29 @@ static inline int atomic_dec_if_positive(atomic_t *v)
#endif
#endif /* atomic64_cmpxchg_relaxed */

#ifndef atomic64_try_cmpxchg

#define __atomic64_try_cmpxchg(type, _p, _po, _n) \
({ \
typeof(_po) __po = (_po); \
typeof(*(_po)) __r, __o = *__po; \
__r = atomic64_cmpxchg##type((_p), __o, (_n)); \
if (unlikely(__r != __o)) \
*__po = __r; \
likely(__r == __o); \
})

#define atomic64_try_cmpxchg(_p, _po, _n) __atomic64_try_cmpxchg(, _p, _po, _n)
#define atomic64_try_cmpxchg_relaxed(_p, _po, _n) __atomic64_try_cmpxchg(_relaxed, _p, _po, _n)
#define atomic64_try_cmpxchg_acquire(_p, _po, _n) __atomic64_try_cmpxchg(_acquire, _p, _po, _n)
#define atomic64_try_cmpxchg_release(_p, _po, _n) __atomic64_try_cmpxchg(_release, _p, _po, _n)

#else /* atomic64_try_cmpxchg */
#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg
#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg
#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg
#endif /* atomic64_try_cmpxchg */

#ifndef atomic64_andnot
static inline void atomic64_andnot(long long i, atomic64_t *v)
{
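For architectures without a native try_cmpxchg, the atomic_try_cmpxchg()
and atomic64_try_cmpxchg() fallbacks above build it from cmpxchg() plus a
write-back on mismatch. Roughly, the macro expands to the equivalent of
this sketch (a reading aid, not kernel code):

static inline bool generic_try_cmpxchg(atomic_t *v, int *old, int new)
{
	int o = *old;
	int r = atomic_cmpxchg(v, o, new);

	if (unlikely(r != o))
		*old = r;	/* report the value we lost to */
	return likely(r == o);
}
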
1 change: 1 addition & 0 deletions include/linux/init_task.h
@@ -181,6 +181,7 @@ extern struct cred init_cred;
#ifdef CONFIG_RT_MUTEXES
# define INIT_RT_MUTEXES(tsk) \
.pi_waiters = RB_ROOT, \
.pi_top_task = NULL, \
.pi_waiters_leftmost = NULL,
#else
# define INIT_RT_MUTEXES(tsk)
3 changes: 3 additions & 0 deletions include/linux/lockdep.h
@@ -361,6 +361,8 @@ static inline void lock_set_subclass(struct lockdep_map *lock,
lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);
@@ -411,6 +413,7 @@ static inline void lockdep_on(void)

# define lock_acquire(l, s, t, r, c, n, i) do { } while (0)
# define lock_release(l, n, i) do { } while (0)
# define lock_downgrade(l, i) do { } while (0)
# define lock_set_class(l, n, k, s, i) do { } while (0)
# define lock_set_subclass(l, s, i) do { } while (0)
# define lockdep_set_current_reclaim_state(g) do { } while (0)
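lock_downgrade() gives lockdep a first-class annotation for the
write-to-read downgrade path (as done by downgrade_write() on an
rw_semaphore), rather than modeling it as a release followed by a fresh
read acquire. A hedged usage sketch; the rwsem calls are existing kernel
APIs, the scenario and function are illustrative:

/* Populate something under the write lock, then downgrade and keep
 * reading. With lock_downgrade(), lockdep tracks the downgrade as a
 * state change on the held lock, so a mismatched release (e.g. an
 * up_write() issued after the downgrade) can be flagged.
 */
static void fill_then_read(struct rw_semaphore *sem)
{
	down_write(sem);
	/* ... populate ... */
	downgrade_write(sem);	/* still held, now for read */
	/* ... read-side work ... */
	up_read(sem);		/* not up_write() */
}
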
6 changes: 6 additions & 0 deletions include/linux/module.h
@@ -493,6 +493,7 @@ static inline int module_is_live(struct module *mod)
struct module *__module_text_address(unsigned long addr);
struct module *__module_address(unsigned long addr);
bool is_module_address(unsigned long addr);
bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr);
bool is_module_percpu_address(unsigned long addr);
bool is_module_text_address(unsigned long addr);

@@ -660,6 +661,11 @@ static inline bool is_module_percpu_address(unsigned long addr)
return false;
}

static inline bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
{
return false;
}

static inline bool is_module_text_address(unsigned long addr)
{
return false;
1 change: 1 addition & 0 deletions include/linux/percpu.h
@@ -110,6 +110,7 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
#endif

extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
extern bool is_kernel_percpu_address(unsigned long addr);

#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
19 changes: 19 additions & 0 deletions include/linux/refcount.h
@@ -6,17 +6,36 @@
#include <linux/spinlock.h>
#include <linux/kernel.h>

/**
* refcount_t - variant of atomic_t specialized for reference counts
* @refs: atomic_t counter field
*
* The counter saturates at UINT_MAX and will not move once
* there. This avoids wrapping the counter and causing 'spurious'
* use-after-free bugs.
*/
typedef struct refcount_struct {
atomic_t refs;
} refcount_t;

#define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), }

/**
* refcount_set - set a refcount's value
* @r: the refcount
* @n: value to which the refcount will be set
*/
static inline void refcount_set(refcount_t *r, unsigned int n)
{
atomic_set(&r->refs, n);
}

/**
* refcount_read - get a refcount's value
* @r: the refcount
*
* Return: the refcount's value
*/
static inline unsigned int refcount_read(const refcount_t *r)
{
return atomic_read(&r->refs);
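The new kernel-doc states the design point explicitly: refcount_t saturates
at UINT_MAX and stays there, trading exactness at the extreme for protection
against overflow-induced use-after-free. A brief usage sketch with a
hypothetical object; refcount_inc()/refcount_dec_and_test() belong to the
same API, though their bodies are outside this hunk:

/* Hypothetical refcounted object using the documented API. */
struct widget {
	refcount_t ref;
	/* ... payload ... */
};

static struct widget *widget_get(struct widget *w)
{
	refcount_inc(&w->ref);		/* saturates rather than wrapping */
	return w;
}

static void widget_put(struct widget *w)
{
	if (refcount_dec_and_test(&w->ref))	/* true when count hits zero */
		kfree(w);
}
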
2 changes: 2 additions & 0 deletions include/linux/sched.h
@@ -779,6 +779,8 @@ struct task_struct {
/* PI waiters blocked on a rt_mutex held by this task: */
struct rb_root pi_waiters;
struct rb_node *pi_waiters_leftmost;
/* Updated under owner's pi_lock and rq lock */
struct task_struct *pi_top_task;
/* Deadlock detection and priority inheritance handling: */
struct rt_mutex_waiter *pi_blocked_on;
#endif