Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:
 "The main changes in this cycle were:

   - rwsem micro-optimizations (Davidlohr Bueso)

   - Improve the implementation and optimize the performance of
     percpu-rwsems. (Peter Zijlstra)

   - Convert all lglock users to better facilities such as percpu-rwsems
     or percpu-spinlocks and remove lglocks. (Peter Zijlstra; a sketch of
     the replacement pattern follows this changelog)

   - Remove the ticket (spin)lock implementation. (Peter Zijlstra)

   - Korean translation of memory-barriers.txt and related fixes to the
     English document. (SeongJae Park)

   - misc fixes and cleanups"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
  x86/cmpxchg, locking/atomics: Remove superfluous definitions
  x86, locking/spinlocks: Remove ticket (spin)lock implementation
  locking/lglock: Remove lglock implementation
  stop_machine: Remove stop_cpus_lock and lg_double_lock/unlock()
  fs/locks: Use percpu_down_read_preempt_disable()
  locking/percpu-rwsem: Add down_read_preempt_disable()
  fs/locks: Replace lg_local with a per-cpu spinlock
  fs/locks: Replace lg_global with a percpu-rwsem
  locking/percpu-rwsem: Add DEFINE_STATIC_PERCPU_RWSEM and percpu_rwsem_assert_held()
  locking/pv-qspinlock: Use cmpxchg_release() in __pv_queued_spin_unlock()
  locking/rwsem, x86: Drop a bogus cc clobber
  futex: Add some more function commentary
  locking/hung_task: Show all locks
  locking/rwsem: Scan the wait_list for readers only once
  locking/rwsem: Remove a few useless comments
  locking/rwsem: Return void in __rwsem_mark_wake()
  locking, rcu, cgroup: Avoid synchronize_sched() in __cgroup_procs_write()
  locking/Documentation: Add Korean translation
  locking/Documentation: Fix a typo of example result
  locking/Documentation: Fix wrong section reference
  ...
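
One commit in the list above, "locking/pv-qspinlock: Use cmpxchg_release() in __pv_queued_spin_unlock()", weakens the unlock-side compare-and-swap from a fully ordered cmpxchg() to release ordering, which is all an unlock needs. A stand-alone C11 analogue of that ordering argument (a sketch, not the kernel code):

#include <stdatomic.h>
#include <stdbool.h>

/* Unlock fast path: clear a locked byte (1 -> 0) if uncontended.
 * Success needs only RELEASE ordering; the failure case can be relaxed. */
static bool unlock_if_uncontended(atomic_uchar *locked)
{
	unsigned char expected = 1;	/* cf. _Q_LOCKED_VAL in the kernel */

	return atomic_compare_exchange_strong_explicit(
			locked, &expected, 0,
			memory_order_release, memory_order_relaxed);
}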
torvalds committed Oct 3, 2016
2 parents de956b8 + 0864507 commit 00bcf5c
Showing 31 changed files with 3,540 additions and 1,337 deletions.
3,135 changes: 3,135 additions & 0 deletions Documentation/ko_KR/memory-barriers.txt


166 changes: 0 additions & 166 deletions Documentation/locking/lglock.txt

This file was deleted.

5 changes: 3 additions & 2 deletions Documentation/memory-barriers.txt
@@ -609,7 +609,7 @@ A data-dependency barrier must also order against dependent writes:
The data-dependency barrier must order the read into Q with the store
into *Q. This prohibits this outcome:

-	(Q == B) && (B == 4)
+	(Q == &B) && (B == 4)

Please note that this pattern should be rare. After all, the whole point
of dependency ordering is to -prevent- writes to the data structure, along
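
For context, the fixed line sits in this example from the surrounding memory-barriers.txt text (initial state: A == 1, B == 2, C == 3, P == &A, Q == &C), condensed here:

	CPU 1			CPU 2
	===============		===============
	B = 4;
	<write barrier>
	WRITE_ONCE(P, &B);
				Q = READ_ONCE(P);
				<data dependency barrier>
				*Q = 5;

The barrier must order the read into Q against the later store into *Q, which is exactly why the prohibited outcome compares Q against the address &B rather than the value B.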
@@ -1928,6 +1928,7 @@ There are some more advanced barrier functions:

See Documentation/DMA-API.txt for more information on consistent memory.

+
MMIO WRITE BARRIER
------------------

@@ -2075,7 +2076,7 @@ systems, and so cannot be counted on in such a situation to actually achieve
anything at all - especially with respect to I/O accesses - unless combined
with interrupt disabling operations.

-See also the section on "Inter-CPU locking barrier effects".
+See also the section on "Inter-CPU acquiring barrier effects".


As an example, consider the following:
3 changes: 1 addition & 2 deletions arch/x86/Kconfig
@@ -705,7 +705,6 @@ config PARAVIRT_DEBUG
config PARAVIRT_SPINLOCKS
bool "Paravirtualization layer for spinlocks"
depends on PARAVIRT && SMP
-	select UNINLINE_SPIN_UNLOCK if !QUEUED_SPINLOCKS
---help---
Paravirtualized spinlocks allow a pvops backend to replace the
spinlock implementation with something virtualization-friendly
@@ -718,7 +717,7 @@ config PARAVIRT_SPINLOCKS

config QUEUED_LOCK_STAT
bool "Paravirt queued spinlock statistics"
-	depends on PARAVIRT_SPINLOCKS && DEBUG_FS && QUEUED_SPINLOCKS
+	depends on PARAVIRT_SPINLOCKS && DEBUG_FS
---help---
Enable the collection of statistical data on the slowpath
behavior of paravirtualized queued spinlocks and report
44 changes: 0 additions & 44 deletions arch/x86/include/asm/cmpxchg.h
@@ -158,53 +158,9 @@ extern void __add_wrong_size(void)
* value of "*ptr".
*
* xadd() is locked when multiple CPUs are online
- * xadd_sync() is always locked
- * xadd_local() is never locked
*/
#define __xadd(ptr, inc, lock) __xchg_op((ptr), (inc), xadd, lock)
#define xadd(ptr, inc) __xadd((ptr), (inc), LOCK_PREFIX)
-#define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
-#define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
-
-#define __add(ptr, inc, lock)					\
-	({							\
-		__typeof__ (*(ptr)) __ret = (inc);		\
-		switch (sizeof(*(ptr))) {			\
-		case __X86_CASE_B:				\
-			asm volatile (lock "addb %b1, %0\n"	\
-				: "+m" (*(ptr)) : "qi" (inc)	\
-				: "memory", "cc");		\
-			break;					\
-		case __X86_CASE_W:				\
-			asm volatile (lock "addw %w1, %0\n"	\
-				: "+m" (*(ptr)) : "ri" (inc)	\
-				: "memory", "cc");		\
-			break;					\
-		case __X86_CASE_L:				\
-			asm volatile (lock "addl %1, %0\n"	\
-				: "+m" (*(ptr)) : "ri" (inc)	\
-				: "memory", "cc");		\
-			break;					\
-		case __X86_CASE_Q:				\
-			asm volatile (lock "addq %1, %0\n"	\
-				: "+m" (*(ptr)) : "ri" (inc)	\
-				: "memory", "cc");		\
-			break;					\
-		default:					\
-			__add_wrong_size();			\
-		}						\
-		__ret;						\
-	})
-
-/*
- * add_*() adds "inc" to "*ptr"
- *
- * __add() takes a lock prefix
- * add_smp() is locked when multiple CPUs are online
- * add_sync() is always locked
- */
-#define add_smp(ptr, inc)	__add((ptr), (inc), LOCK_PREFIX)
-#define add_sync(ptr, inc)	__add((ptr), (inc), "lock; ")

#define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2) \
({ \
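
The deleted xadd_sync(), xadd_local(), __add(), add_smp() and add_sync() variants were superfluous per the commit title; the surviving xadd() is a locked fetch-and-add that returns the previous value. A user-space analogue for illustration (a GCC/Clang builtin, not the kernel macro):

#include <stdio.h>

int main(void)
{
	int counter = 0;

	/* Like xadd(): atomically add and return the previous value. */
	int old = __atomic_fetch_add(&counter, 5, __ATOMIC_SEQ_CST);

	printf("old=%d new=%d\n", old, counter);	/* old=0 new=5 */
	return 0;
}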
18 changes: 0 additions & 18 deletions arch/x86/include/asm/paravirt.h
@@ -661,8 +661,6 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

-#ifdef CONFIG_QUEUED_SPINLOCKS
-
static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
u32 val)
{
@@ -684,22 +682,6 @@ static __always_inline void pv_kick(int cpu)
PVOP_VCALL1(pv_lock_ops.kick, cpu);
}

-#else /* !CONFIG_QUEUED_SPINLOCKS */
-
-static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
-							__ticket_t ticket)
-{
-	PVOP_VCALLEE2(pv_lock_ops.lock_spinning, lock, ticket);
-}
-
-static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
-							__ticket_t ticket)
-{
-	PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
-}
-
-#endif /* CONFIG_QUEUED_SPINLOCKS */
-
#endif /* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
7 changes: 0 additions & 7 deletions arch/x86/include/asm/paravirt_types.h
@@ -301,23 +301,16 @@ struct pv_mmu_ops {
struct arch_spinlock;
#ifdef CONFIG_SMP
#include <asm/spinlock_types.h>
-#else
-typedef u16 __ticket_t;
#endif

struct qspinlock;

struct pv_lock_ops {
-#ifdef CONFIG_QUEUED_SPINLOCKS
void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
struct paravirt_callee_save queued_spin_unlock;

void (*wait)(u8 *ptr, u8 val);
void (*kick)(int cpu);
-#else /* !CONFIG_QUEUED_SPINLOCKS */
-	struct paravirt_callee_save lock_spinning;
-	void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
-#endif /* !CONFIG_QUEUED_SPINLOCKS */
};

/* This contains all the paravirt structures: we get a convenient
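
Net effect of the two paravirt hunks above: with the ticket variants gone, pv_lock_ops is qspinlock-only. Reassembled here from the surviving context lines (a sketch of the resulting struct, not a verbatim copy of the header):

struct pv_lock_ops {
	void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
	struct paravirt_callee_save queued_spin_unlock;

	void (*wait)(u8 *ptr, u8 val);
	void (*kick)(int cpu);
};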
2 changes: 1 addition & 1 deletion arch/x86/include/asm/rwsem.h
@@ -154,7 +154,7 @@ static inline bool __down_write_trylock(struct rw_semaphore *sem)
: "+m" (sem->count), "=&a" (tmp0), "=&r" (tmp1),
CC_OUT(e) (result)
: "er" (RWSEM_ACTIVE_WRITE_BIAS)
: "memory", "cc");
: "memory");
return result;
}

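
Why the "cc" clobber is bogus here: CC_OUT(e) makes the condition code itself an asm output on compilers with flag-output support, and a flag output cannot be combined with a blanket "cc" clobber; on x86, GCC treats the flags as clobbered by every asm statement anyway. A stand-alone sketch of the flag-output idiom (x86-64, GCC 6+; the function and names are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Decrement a counter and report whether it hit zero, reading ZF
 * directly as an asm flag output instead of clobbering "cc". */
static bool dec_and_test(unsigned long *v)
{
	bool result;

	asm volatile("decq %[val]"
		     : [val] "+m" (*v), "=@cce" (result)	/* e: ZF set */
		     : : "memory");
	return result;
}

int main(void)
{
	unsigned long v = 1;

	printf("%d\n", dec_and_test(&v));	/* prints 1 */
	return 0;
}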
(remaining file diffs not shown)
