On Wed, 25 Jan 2023 19:36:28 -0500 Waiman Long <longman@xxxxxxxxxx>
static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
Nit: once the waiter is signaled, the waiter structure on its stack may be destroyed (the waiting task can return and reclaim it),
{
- unsigned long flags;
DEFINE_WAKE_Q(wake_q);
+ unsigned long flags;
+ unsigned long count;
raw_spin_lock_irqsave(&sem->wait_lock, flags);
- if (!list_empty(&sem->wait_list))
- rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
+ if (list_empty(&sem->wait_list))
+ goto unlock_out;
+
+ /*
+ * If the rwsem is free and handoff flag is set with wait_lock held,
+ * no other CPUs can take an active lock.
+ */
+ count = atomic_long_read(&sem->count);
+ if (!(count & RWSEM_LOCK_MASK) && (count & RWSEM_FLAG_HANDOFF)) {
+ /*
+ * Since rwsem_mark_wake() will handle the handoff to reader
+ * properly, we don't need to do anything extra for reader.
+ * Special handoff processing will only be needed for writer.
+ */
+ struct rwsem_waiter *waiter = rwsem_first_waiter(sem);
+ long adj = RWSEM_WRITER_LOCKED - RWSEM_FLAG_HANDOFF;
+
+ if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
+ atomic_long_set(&sem->owner, (long)waiter->task);
+ atomic_long_add(adj, &sem->count);
+ wake_q_add(&wake_q, waiter->task);
+ rwsem_del_waiter(sem, waiter);
+ waiter->task = NULL; /* Signal the handoff */
so use smp_store_release() instead.
+ goto unlock_out;
+ }
+ }
+ rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
+unlock_out:
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
wake_up_q(&wake_q);
--
2.31.1