Re: 2.6.15-rt17

From: Esben Nielsen
Date: Sat Feb 25 2006 - 17:23:20 EST


On Sat, 25 Feb 2006, Esben Nielsen wrote:

> On Sat, 25 Feb 2006, Steven Rostedt wrote:
> [...]
> This time around wake_up_process_mutex() wasn't called when it ought to
> be... Now that I think about it there still is a problem with the
> patch I sent: First the priority is set down, then the task is woken up.
> But then it can't continue to de-boost the next task... Let me write a
> test with 4 tasks and 3 locks to demonstrate.
>
Ok, attached is the test and a better patch which solves the problem.

Esben

>
> > > I have attached the patch against 2.6.17-rt17 (and therefore also
> > > included the previous patch) along with the updated tester and tests.
> > >
> > > Esben
> >
> > I'll take a look at this tomorrow.
> >
> > -- Steve
> >
> > >
> > >
> > > > > That was why I had _reversed_ the lock ordering relative to normal in the
> > > > > original patch: First lock task->pi_lock. Assign lock. Lock
> > > > > lock->wait_lock. Then unlock task->pi_lock. Now it is safe to refer to
> > > > > lock. To avoid deadlocks I used _raw_spin_trylock() when locking the 2.
> > > > > lock.
> > > >
> > > > Stupid me. I messed that one up. Should show up in the next -rt
> > > >
> > > > Thanks
> > > >
> > > > tglx
> > > >
> > > >
> > >
> > >
> > -
> > To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> > the body of a message to majordomo@xxxxxxxxxxxxxxx
> > More majordomo info at http://vger.kernel.org/majordomo-info.html
> > Please read the FAQ at http://www.tux.org/lkml/
> >
>
> -
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to majordomo@xxxxxxxxxxxxxxx
> More majordomo info at http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at http://www.tux.org/lkml/
>
threads: 4 3 2 1
lockintr 1 + + +
+ lockintr 2 + +
+ + lockintr 3 +
+ lockintr 3 + +
lockintr 2 + + +
test: prio 4 prio 3 prio 2 prio 1
test: - - + +
+ + + lockintr 1
test: prio 1 prio 1 prio 1 prio 1
test: - - + -

+ + signal 4 +
test: - - + +
test: prio 4 prio 3 prio 2 prio 1

--- linux-2.6.15-rt17/kernel/rt.c.orig 2006-02-22 16:53:44.000000000 +0100
+++ linux-2.6.15-rt17/kernel/rt.c 2006-02-25 23:20:34.000000000 +0100
@@ -873,10 +873,19 @@ pid_t get_blocked_on(task_t *task)
}

lock = task->blocked_on->lock;
+
+ /*
+ * Now we have to take lock->wait_lock _before_ releasing
+ * task->pi_lock. Otherwise lock can be deallocated while we are
+ * referring to it as the subsystem has no way of knowing about us
+ * hanging around in here.
+ */
+ if (!_raw_spin_trylock(&lock->wait_lock)) {
+ _raw_spin_unlock(&task->pi_lock);
+ goto try_again;
+ }
_raw_spin_unlock(&task->pi_lock);

- if (!_raw_spin_trylock(&lock->wait_lock))
- goto try_again;

owner = lock_owner(lock);
if (owner)
@@ -964,14 +973,52 @@ static inline int calc_pi_prio(task_t *t
}

/*
- * Adjust priority of a task
+ * Adjust priority of a task.
*/
-static void adjust_prio(task_t *task)
+static void adjust_prio_no_wakeup(task_t *task)
{
int prio = calc_pi_prio(task);

- if (task->prio != prio)
+ if (task->prio != prio) {
+ mutex_setprio(task, prio);
+ }
+}
+
+/*
+ * Adjust priority of a task and wake it up if the prio is changed
+ * and it is blocked on a mutex
+ */
+static void adjust_prio_wakeup(task_t *task)
+{
+ int prio = calc_pi_prio(task);
+
+ if (task->prio < prio) {
+ if (task->blocked_on) {
+ /*
+ * The owner will have the blocked field set if it is
+ * blocked on a lock. So in this case we want to wake
+ * the owner up so it can boost who it is blocked on.
+ *
+ * We have to wait with lowering its priority until this is done
+ * or we risk letting another high-priority task hang around.
+ */
+ wake_up_process_mutex(task);
+ }
+ else {
+ mutex_setprio(task, prio);
+ }
+ }
+ else if (task->prio > prio) {
mutex_setprio(task, prio);
+ if (task->blocked_on) {
+ /*
+ * The owner will have the blocked field set if it is
+ * blocked on a lock. So in this case we want to wake
+ * the owner up so it can boost who it is blocked on.
+ */
+ wake_up_process_mutex(task);
+ }
+ }
}

/*
@@ -1001,6 +1048,7 @@ static long task_blocks_on_lock(struct r

/* Enqueue the task into the lock waiter list */
_raw_spin_lock(&current->pi_lock);
+ adjust_prio_no_wakeup(current);
current->blocked_on = waiter;
waiter->lock = lock;
waiter->task = current;
@@ -1034,16 +1082,7 @@ static long task_blocks_on_lock(struct r

/* Add the new top priority waiter to the owners waiter list */
plist_add(&waiter->pi_list, &owner->pi_waiters);
- adjust_prio(owner);
-
- /*
- * The owner will have the blocked field set if it is
- * blocked on a lock. So in this case we want to wake
- * the owner up so it can boost who it is blocked on.
- */
- if (owner->blocked_on)
- wake_up_process_mutex(owner);
-
+ adjust_prio_wakeup(owner);
_raw_spin_unlock(&owner->pi_lock);
return ret;
}
@@ -1139,7 +1178,7 @@ static void remove_waiter(struct rt_mute
next = lock_first_waiter(lock);
plist_add(&next->pi_list, &owner->pi_waiters);
}
- adjust_prio(owner);
+ adjust_prio_wakeup(owner);
_raw_spin_unlock(&owner->pi_lock);
}
}
@@ -1201,7 +1240,7 @@ static void release_lock(struct rt_mutex

/* Readjust priority, when necessary. */
_raw_spin_lock(&current->pi_lock);
- adjust_prio(current);
+ adjust_prio_no_wakeup(current);
_raw_spin_unlock(&current->pi_lock);
}

@@ -1453,7 +1492,7 @@ static int __sched down_rtsem(struct rt_
* PI boost has to go
*/
_raw_spin_lock(&current->pi_lock);
- adjust_prio(current);
+ adjust_prio_no_wakeup(current);
_raw_spin_unlock(&current->pi_lock);
}
trace_unlock_irqrestore(&tracelock, flags);