locking/rwsem: Add ACQUIRE comments
author    Peter Zijlstra <peterz@infradead.org>
          Thu, 18 Jul 2019 13:08:53 +0000 (15:08 +0200)
committer Ingo Molnar <mingo@kernel.org>
          Thu, 25 Jul 2019 13:39:25 +0000 (15:39 +0200)
Since we just reviewed read_slowpath for ACQUIRE correctness, add a
few comments to retain our findings.
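
The ACQUIRE in question pairs the load-acquire on waiter.task in the
sleeper's wait loop with the store-release in rwsem_mark_wake(). As an
illustration (not part of the patch), here is a minimal userspace sketch
of that pairing; C11 atomics and pthreads stand in for the kernel's
smp_load_acquire()/smp_store_release(), and all names are hypothetical:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int lock_granted;		/* payload published by the waker */
static _Atomic(void *) waiter_task;	/* stands in for waiter.task      */

/* Waker side, analogous to rwsem_mark_wake(): perform the grant, then
 * publish it by clearing waiter_task with a store-release. */
static void *waker(void *arg)
{
	lock_granted = 1;
	atomic_store_explicit(&waiter_task, NULL, memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t t;
	int queued = 1;

	atomic_store(&waiter_task, &queued);	/* non-NULL: still waiting */
	pthread_create(&t, NULL, waker, NULL);

	/* Sleeper side, analogous to the wait loop in
	 * rwsem_down_read_slowpath(): the load-acquire pairs with the
	 * store-release above, so once NULL is observed, lock_granted == 1
	 * is guaranteed to be visible as well. */
	while (atomic_load_explicit(&waiter_task, memory_order_acquire))
		;				/* schedule() in the real code */

	printf("lock_granted = %d\n", lock_granted);	/* always 1 */
	pthread_join(t, NULL);
	return 0;
}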

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/locking/rwsem.c

index 571938887cc884d2afe9ab9d110354b73ad19aa4..bd0f0d05724ca943649788f5a72bd1c36f0bb567 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -1004,6 +1004,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
        atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
        adjustment = 0;
        if (rwsem_optimistic_spin(sem, false)) {
+               /* rwsem_optimistic_spin() implies ACQUIRE on success */
                /*
                 * Wake up other readers in the wait list if the front
                 * waiter is a reader.
@@ -1018,6 +1019,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
                }
                return sem;
        } else if (rwsem_reader_phase_trylock(sem, waiter.last_rowner)) {
+               /* rwsem_reader_phase_trylock() implies ACQUIRE on success */
                return sem;
        }
 
@@ -1071,10 +1073,10 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
        wake_up_q(&wake_q);
 
        /* wait to be given the lock */
-       while (true) {
+       for (;;) {
                set_current_state(state);
                if (!smp_load_acquire(&waiter.task)) {
-                       /* Orders against rwsem_mark_wake()'s smp_store_release() */
+                       /* Matches rwsem_mark_wake()'s smp_store_release(). */
                        break;
                }
                if (signal_pending_state(state, current)) {
@@ -1082,6 +1084,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
                        if (waiter.task)
                                goto out_nolock;
                        raw_spin_unlock_irq(&sem->wait_lock);
+                       /* Ordered by sem->wait_lock against rwsem_mark_wake(). */
                        break;
                }
                schedule();
@@ -1091,6 +1094,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
        __set_current_state(TASK_RUNNING);
        lockevent_inc(rwsem_rlock);
        return sem;
+
 out_nolock:
        list_del(&waiter.list);
        if (list_empty(&sem->wait_list)) {
@@ -1131,8 +1135,10 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
 
        /* do optimistic spinning and steal lock if possible */
        if (rwsem_can_spin_on_owner(sem, RWSEM_WR_NONSPINNABLE) &&
-           rwsem_optimistic_spin(sem, true))
+           rwsem_optimistic_spin(sem, true)) {
+               /* rwsem_optimistic_spin() implies ACQUIRE on success */
                return sem;
+       }
 
        /*
         * Disable reader optimistic spinning for this rwsem after
@@ -1192,9 +1198,11 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
 wait:
        /* wait until we successfully acquire the lock */
        set_current_state(state);
-       while (true) {
-               if (rwsem_try_write_lock(sem, wstate))
+       for (;;) {
+               if (rwsem_try_write_lock(sem, wstate)) {
+                       /* rwsem_try_write_lock() implies ACQUIRE on success */
                        break;
+               }
 
                raw_spin_unlock_irq(&sem->wait_lock);
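
One further illustration, again not part of the patch: the
signal_pending_state() hunk above relies on sem->wait_lock rather than a
load-acquire, because rwsem_mark_wake() writes waiter.task while holding
that same lock. A hypothetical pthreads analogue of that lock-provided
ordering (all names illustrative):

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
static void *waiter_task = &wait_lock;	/* non-NULL: still queued */

/* Waker analogue of rwsem_mark_wake(): waiter_task is cleared while
 * holding wait_lock. */
static void mark_wake(void)
{
	pthread_mutex_lock(&wait_lock);
	waiter_task = NULL;
	pthread_mutex_unlock(&wait_lock);
}

/* Sleeper's signal path: the re-check also runs under wait_lock, so the
 * mutex alone orders it against mark_wake(); no explicit acquire needed. */
static int recheck_under_lock(void)
{
	int granted;

	pthread_mutex_lock(&wait_lock);
	granted = (waiter_task == NULL);
	pthread_mutex_unlock(&wait_lock);
	return granted;
}

int main(void)
{
	mark_wake();
	return recheck_under_lock() ? 0 : 1;	/* exits 0: grant observed */
}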