Skip to content

Commit

Permalink
rwsem: fully separate code paths to wake writers vs readers
Browse files Browse the repository at this point in the history
This is in preparation for later changes in the series.

In __rwsem_do_wake(), the first queued waiter is checked first in order to
determine whether it's a writer or a reader.  The code paths diverge at
this point.  The code that checks and increments the rwsem active count is
duplicated on both sides - the point is that later changes in the series
will be able to independently modify both sides.

Signed-off-by: Michel Lespinasse <[email protected]>
Acked-by: David Howells <[email protected]>
Cc: Mike Waychison <[email protected]>
Cc: Suleiman Souhlal <[email protected]>
Cc: Ying Han <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
walken-google authored and torvalds committed Aug 10, 2010
1 parent 7721fea commit 345af7b
Showing 1 changed file with 34 additions and 27 deletions.
61 changes: 34 additions & 27 deletions lib/rwsem.c
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ struct rwsem_waiter {
* - if we come here from up_xxxx(), then:
* - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
* - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
* - there must be someone on the queue
* - there must be someone on the queue
* - the spinlock must be held by the caller
* - woken process blocks are discarded from the list after having task zeroed
* - writers are only woken if downgrading is false
Expand All @@ -54,26 +54,23 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
struct list_head *next;
signed long oldcount, woken, loop;

waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
goto readers_only;

if (downgrading)
goto dont_wake_writers;
goto out;

/* if we came through an up_xxxx() call, we only wake someone up
* if we can transition the active part of the count from 0 -> 1
/* There's a writer at the front of the queue - try to grant it the
* write lock. However, we only wake this writer if we can transition
* the active part of the count from 0 -> 1
*/
try_again:
try_again_write:
oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
- RWSEM_ACTIVE_BIAS;
if (oldcount & RWSEM_ACTIVE_MASK)
goto undo;

waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

/* try to grant a single write lock if there's a writer at the front
* of the queue - note we leave the 'active part' of the count
* incremented by 1 and the waiting part incremented by 0x00010000
*/
if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
goto readers_only;
/* Someone grabbed the sem already */
goto undo_write;

/* We must be careful not to touch 'waiter' after we set ->task = NULL.
* It is allocated on the waiter's stack and may become invalid at
Expand All @@ -87,18 +84,24 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
put_task_struct(tsk);
goto out;

/* don't want to wake any writers */
dont_wake_writers:
waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
goto out;
readers_only:
if (downgrading)
goto wake_readers;

/* if we came through an up_xxxx() call, we only wake someone up
* if we can transition the active part of the count from 0 -> 1 */
try_again_read:
oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
- RWSEM_ACTIVE_BIAS;
if (oldcount & RWSEM_ACTIVE_MASK)
/* Someone grabbed the sem already */
goto undo_read;

/* grant an infinite number of read locks to the readers at the front
* of the queue
* - note we increment the 'active part' of the count by the number of
* readers before waking any processes up
wake_readers:
/* Grant an infinite number of read locks to the readers at the front
* of the queue. Note we increment the 'active part' of the count by
* the number of readers before waking any processes up.
*/
readers_only:
woken = 0;
do {
woken++;
Expand Down Expand Up @@ -138,10 +141,14 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)

/* undo the change to the active count, but check for a transition
* 1->0 */
undo:
undo_write:
if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK)
goto out;
goto try_again_write;
undo_read:
if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK)
goto out;
goto try_again;
goto try_again_read;
}

/*
Expand Down

0 comments on commit 345af7b

Please sign in to comment.