[PATCH 4/8] net: af_netlink: fix possible nl_table_lock deadlock

From: Gautham R Shenoy
Date: Tue Apr 29 2008 - 09:01:46 EST


Subject: net: af_netlink: fix possible nl_table_lock deadlock

From: Gautham R Shenoy <ego@xxxxxxxxxx>

=========================================================
[ INFO: possible irq lock inversion dependency detected ]
2.6.25-rc3 #61
---------------------------------------------------------
swapper/0 just changed the state of lock:
(&tb->tb6_lock){-+..}, at: [<c049a5ab>] __ip6_ins_rt+0x1e/0x3c
but this lock took another, soft-read-irq-unsafe lock in the past:
(nl_table_lock){..-?}

and interrupts could create inverse lock ordering between them.

Full lockdep report at
http://gautham.shenoy.googlepages.com/nl_table_lock_irq_inversion.log

This patch fixes the problem by making the read-side acquisition of
nl_table_lock softirq-safe.
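
For illustration only (not part of the patch): nl_table_lock is taken for
reading with softirqs enabled, while a softirq path (__ip6_ins_rt holding
tb->tb6_lock) can end up in netlink code that takes the same lock, which is
the inverse ordering lockdep warns about. A minimal sketch of the read side
before and after the change, using a hypothetical example_lock instead of
the real netlink data structures:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_lock);	/* stand-in for nl_table_lock */

/* Before: softirqs stay enabled across the read-side critical section,
 * so softirq code that (indirectly) needs this lock can interrupt here. */
static void example_read_side_unsafe(void)
{
	read_lock(&example_lock);
	/* ... walk data also reachable from softirq context ... */
	read_unlock(&example_lock);
}

/* After: the _bh variants disable softirqs on this CPU for the duration
 * of the critical section, so the inversion cannot happen locally. */
static void example_read_side_safe(void)
{
	read_lock_bh(&example_lock);
	/* ... same walk ... */
	read_unlock_bh(&example_lock);
}

(The helper names above are made up; the actual change just switches the
existing read_lock()/read_unlock() calls on nl_table_lock to their _bh
variants, as in the diff below.)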

Signed-off-by: Gautham R Shenoy <ego@xxxxxxxxxx>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@xxxxxxxxx>
Cc: Eric Dumazet <dada1@xxxxxxxxxxxxx>
Cc: David S. Miller <davem@xxxxxxxxxxxxx>
---

net/netlink/af_netlink.c | 16 ++++++++--------
1 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 1ab0da2..7ade317 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -205,9 +205,9 @@ netlink_lock_table(void)
 {
 	/* read_lock() synchronizes us to netlink_table_grab */
 
-	read_lock(&nl_table_lock);
+	read_lock_bh(&nl_table_lock);
 	atomic_inc(&nl_table_users);
-	read_unlock(&nl_table_lock);
+	read_unlock_bh(&nl_table_lock);
 }
 
 static inline void
@@ -225,7 +225,7 @@ static inline struct sock *netlink_lookup(struct net *net, int protocol,
 	struct sock *sk;
 	struct hlist_node *node;
 
-	read_lock(&nl_table_lock);
+	read_lock_bh(&nl_table_lock);
 	head = nl_pid_hashfn(hash, pid);
 	sk_for_each(sk, node, head) {
 		if ((sk->sk_net == net) && (nlk_sk(sk)->pid == pid)) {
@@ -235,7 +235,7 @@ static inline struct sock *netlink_lookup(struct net *net, int protocol,
 	}
 	sk = NULL;
 found:
-	read_unlock(&nl_table_lock);
+	read_unlock_bh(&nl_table_lock);
 	return sk;
 }
 
@@ -1078,12 +1078,12 @@ void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
 	info.group = group;
 	info.code = code;
 
-	read_lock(&nl_table_lock);
+	read_lock_bh(&nl_table_lock);
 
 	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
 		do_one_set_err(sk, &info);
 
-	read_unlock(&nl_table_lock);
+	read_unlock_bh(&nl_table_lock);
 }
 
 /* must be called with netlink table grabbed */
@@ -1776,7 +1776,7 @@ static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
 static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
 	__acquires(nl_table_lock)
 {
-	read_lock(&nl_table_lock);
+	read_lock_bh(&nl_table_lock);
 	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
 }
 
@@ -1825,7 +1825,7 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 static void netlink_seq_stop(struct seq_file *seq, void *v)
 	__releases(nl_table_lock)
 {
-	read_unlock(&nl_table_lock);
+	read_unlock_bh(&nl_table_lock);
 }
 
 
--
Thanks and Regards
gautham