[PATCH] bitops: Use volatile in generic atomic bitops.

From: Will Newton
Date: Wed Jul 27 2011 - 08:21:25 EST


The generic atomic bitops currently cast away the volatile qualifier
from the pointer passed to them. This permits the compiler to move the
access to the bit word outside of the critical section, which makes
the bitops no longer interrupt-safe. Remove the cast and declare the
derived pointer volatile so that every access to the bit word happens
inside the critical section.
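
To illustrate the failure mode, here is a minimal standalone C sketch
(not part of the patch; lock()/unlock() are hypothetical stand-ins for
_atomic_spin_lock_irqsave()/_atomic_spin_unlock_irqrestore()). Once the
volatile qualifier is cast away, the compiler is entitled to treat *p
as ordinary memory and, if the locking helpers are inlined, cache or
move the read-modify-write across them; keeping the qualifier pins the
access inside the lock:

static inline void lock(void)   { /* acquire lock, disable irqs */ }
static inline void unlock(void) { /* enable irqs, release lock */ }

/* Old behaviour: the cast strips volatile, so the compiler may cache
 * or reorder the access to *p relative to inlined lock()/unlock(). */
static void set_bit_sketch(unsigned int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1UL << (nr % (8 * sizeof(unsigned long)));
	unsigned long *p = (unsigned long *)addr
			   + nr / (8 * sizeof(unsigned long));

	lock();
	*p |= mask;
	unlock();
}

/* Fixed behaviour: pointer arithmetic on the volatile-qualified
 * pointer preserves the qualifier, so the load and store of *p must
 * happen exactly where written, inside the critical section. */
static void set_bit_fixed(unsigned int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1UL << (nr % (8 * sizeof(unsigned long)));
	volatile unsigned long *p = addr + nr / (8 * sizeof(unsigned long));

	lock();
	*p |= mask;
	unlock();
}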

Signed-off-by: Will Newton <will.newton@xxxxxxxxxx>
---
include/asm-generic/bitops/atomic.h | 12 ++++++------
1 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index ecc44a8..57e4b1f 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -65,7 +65,7 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
static inline void set_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ volatile unsigned long *p = addr + BIT_WORD(nr);
unsigned long flags;

_atomic_spin_lock_irqsave(p, flags);
@@ -86,7 +86,7 @@ static inline void set_bit(int nr, volatile unsigned long *addr)
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ volatile unsigned long *p = addr + BIT_WORD(nr);
unsigned long flags;

_atomic_spin_lock_irqsave(p, flags);
@@ -107,7 +107,7 @@ static inline void clear_bit(int nr, volatile unsigned long *addr)
static inline void change_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ volatile unsigned long *p = addr + BIT_WORD(nr);
unsigned long flags;

_atomic_spin_lock_irqsave(p, flags);
@@ -127,7 +127,7 @@ static inline void change_bit(int nr, volatile unsigned long *addr)
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ volatile unsigned long *p = addr + BIT_WORD(nr);
unsigned long old;
unsigned long flags;

@@ -151,7 +151,7 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ volatile unsigned long *p = addr + BIT_WORD(nr);
unsigned long old;
unsigned long flags;

@@ -174,7 +174,7 @@ static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ volatile unsigned long *p = addr + BIT_WORD(nr);
unsigned long old;
unsigned long flags;

--
1.7.3.4
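
As a worked example of the addressing the patch preserves (assuming
BITS_PER_LONG == 64, which the patch itself does not state):

	set_bit(70, addr);
	/* BIT_WORD(70) == 70 / 64 == 1: addr[1] is locked and updated */
	/* BIT_MASK(70) == 1UL << (70 % 64) == 1UL << 6 == 0x40 */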