Have you thought about odd corner cases?

/*
+ * when a semaphore is modified, we want to retry the series of operations
+ * for anyone that was blocking on that semaphore. This breaks down into
+ * a few different common operations:
+ *
+ * 1) One modification releases one or more waiters for zero.
+ * 2) Many waiters are trying to get a single lock, only one will get it.
+ * 3) Many modifications to the count will succeed.
+ *
Does sorting preserve the behavior userspace expects?

SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
unsigned, nsops, const struct timespec __user *, timeout)
{
@@ -1129,6 +1306,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
struct sem_queue queue;
unsigned long jiffies_left = 0;
struct ipc_namespace *ns;
+ struct sem *blocker = NULL;
+ LIST_HEAD(pending);
ns = current->nsproxy->ipc_ns;
@@ -1168,6 +1347,14 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
alter = 1;
}
+ /*
+ * try_atomic_semop takes all the locks of all the semaphores in
+ * the sops array. We have to make sure we don't deadlock if userland
+ * happens to send them out of order, so we sort them by semnum.
+ */
+ if (nsops> 1)
+ sort(sops, nsops, sizeof(*sops), sembuf_compare, NULL);
+