Re: [patch] race-fix for bottom-half-functions [Re: Subtle trouble in remove_bh().]

Andrea Arcangeli (andrea@e-mind.com)
Wed, 20 Jan 1999 07:50:00 +0100 (CET)


On Fri, 15 Jan 1999, Andrea Arcangeli wrote:

> Agreed, it will make the code more robust with a tiny overhead. I repost
> here the _whole_ fix for the bottom-half-function SMP races. I think this
> should go in fast so that maintainers of other archs don't have to fix
> things twice ;)

Whoops, I made a small mistake in it, sorry. In remove_bh() we must not
wmb() but synchronize_bh() instead, because a bh could be running that has
already passed the bh_mask check. The wmb() is still needed in the
init_bh() case though ;)
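
To make the race explicit, here is roughly what the consumer path in
kernel/softirq.c does (a simplified sketch only, not part of the patch):

	unsigned long active;
	void (**bh)(void);

	/* sample the mask: get_active_bhs() is (bh_mask & bh_active) */
	active = get_active_bhs();
	clear_active_bhs(active);
	bh = bh_base;
	do {
		if (active & 1)
			(*bh)();	/* bh_base[] must still be valid here */
		bh++;
		active >>= 1;
	} while (active);

So init_bh() has to publish bh_base[nr] before setting the bh_mask bit
(that's the wmb()), and remove_bh() has to wait for handlers that may
already have sampled the old mask before clearing bh_base[nr] (that's the
synchronize_bh()).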

Here is the fixed patch, rediffed against clean pre8in-testing:

Index: linux/include/asm-i386/softirq.h
diff -u linux/include/asm-i386/softirq.h:1.1.1.1 linux/include/asm-i386/softirq.h:1.1.2.2
--- linux/include/asm-i386/softirq.h:1.1.1.1 Mon Jan 18 02:27:17 1999
+++ linux/include/asm-i386/softirq.h Wed Jan 20 07:41:42 1999
@@ -9,24 +9,6 @@
#define get_active_bhs() (bh_mask & bh_active)
#define clear_active_bhs(x) atomic_clear_mask((x),&bh_active)

-extern inline void init_bh(int nr, void (*routine)(void))
-{
- bh_base[nr] = routine;
- atomic_set(&bh_mask_count[nr], 0);
- bh_mask |= 1 << nr;
-}
-
-extern inline void remove_bh(int nr)
-{
- bh_base[nr] = NULL;
- bh_mask &= ~(1 << nr);
-}
-
-extern inline void mark_bh(int nr)
-{
- set_bit(nr, &bh_active);
-}
-
#ifdef __SMP__

/*
@@ -90,21 +72,49 @@

#endif /* SMP */

+extern inline void init_bh(int nr, void (*routine)(void))
+{
+ bh_base[nr] = routine;
+ bh_mask_count[nr] = 0;
+ wmb();
+ bh_mask |= 1 << nr;
+}
+
+extern inline void remove_bh(int nr)
+{
+ bh_mask &= ~(1 << nr);
+ synchronize_bh();
+ bh_base[nr] = NULL;
+}
+
+extern inline void mark_bh(int nr)
+{
+ set_bit(nr, &bh_active);
+}
+
/*
* These use a mask count to correctly handle
* nested disable/enable calls
*/
extern inline void disable_bh(int nr)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&bh_lock, flags);
bh_mask &= ~(1 << nr);
- atomic_inc(&bh_mask_count[nr]);
+ bh_mask_count[nr]++;
+ spin_unlock_irqrestore(&bh_lock, flags);
synchronize_bh();
}

extern inline void enable_bh(int nr)
{
- if (atomic_dec_and_test(&bh_mask_count[nr]))
+ unsigned long flags;
+
+ spin_lock_irqsave(&bh_lock, flags);
+ if (!--bh_mask_count[nr])
bh_mask |= 1 << nr;
+ spin_unlock_irqrestore(&bh_lock, flags);
}

#endif /* __ASM_SOFTIRQ_H */
Index: linux/include/linux/interrupt.h
diff -u linux/include/linux/interrupt.h:1.1.1.1 linux/include/linux/interrupt.h:1.1.2.1
--- linux/include/linux/interrupt.h:1.1.1.1 Mon Jan 18 02:27:09 1999
+++ linux/include/linux/interrupt.h Mon Jan 18 02:32:58 1999
@@ -17,7 +17,8 @@

extern volatile unsigned char bh_running;

-extern atomic_t bh_mask_count[32];
+extern spinlock_t bh_lock;
+extern int bh_mask_count[32];
extern unsigned long bh_active;
extern unsigned long bh_mask;
extern void (*bh_base[32])(void);
Index: linux/kernel/softirq.c
diff -u linux/kernel/softirq.c:1.1.1.1 linux/kernel/softirq.c:1.1.2.1
--- linux/kernel/softirq.c:1.1.1.1 Mon Jan 18 02:27:00 1999
+++ linux/kernel/softirq.c Mon Jan 18 02:32:52 1999
@@ -20,7 +20,8 @@

/* intr_count died a painless death... -DaveM */

-atomic_t bh_mask_count[32];
+spinlock_t bh_lock = SPIN_LOCK_UNLOCKED;
+int bh_mask_count[32];
unsigned long bh_active = 0;
unsigned long bh_mask = 0;
void (*bh_base[32])(void);
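
For reference, the nesting that the mask count in disable_bh()/enable_bh()
has to handle (and that bh_lock now serializes across CPUs) is the usual
driver pattern, e.g. (purely illustrative) with TIMER_BH:

	disable_bh(TIMER_BH);	/* count 0 -> 1, bh_mask bit cleared   */
	disable_bh(TIMER_BH);	/* count 1 -> 2, still masked          */
	/* ... section that must not race with the timer bh ... */
	enable_bh(TIMER_BH);	/* count 2 -> 1, still masked          */
	enable_bh(TIMER_BH);	/* count 1 -> 0, bh_mask bit set again */

Only the outermost enable_bh() reenables the bh; the spinlock keeps the
count and the mask consistent when two CPUs do this at the same time.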

Andrea Arcangeli
