[ 058/171 ] ext4: use atomic64_t for the per-flexbg free_clusters count

From: Steven Rostedt
Date: Thu Apr 11 2013 - 16:26:39 EST


3.6.11.2 stable review patch.
If anyone has any objections, please let me know.

------------------

From: Theodore Ts'o <tytso@xxxxxxx>

[ Upstream commit 90ba983f6889e65a3b506b30dc606aa9d1d46cd2 ]

A user with an 8TB+ file system and a very large flexbg size (> 65536)
could cause the atomic_t used in struct flex_groups to overflow. This
was detected by the PaX security patchset:

http://forums.grsecurity.net/viewtopic.php?f=3&t=3289&p=12551#p12551
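
To see where those limits come from: with the default 4KiB block size a
block group covers 2^15 clusters (128MiB), so once the flexbg size
passes 2^16 a single flex group can hold more than 2^31 free clusters,
past the positive range of the 32-bit atomic_t. That also explains the
8TB+ figure: 2^31 clusters of 4KiB each is 8TiB. A minimal user-space
sketch of the wrap (plain C for illustration, not kernel code; the
constants are assumptions matching the report above):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Default 4KiB blocks: 2^15 clusters per 128MiB block group. */
	int64_t clusters_per_group = 1 << 15;
	/* A flexbg size just past 65536, as in the report. */
	int64_t flexbg_size = (1 << 16) + 1;

	/* True free-cluster count of one fully free flex group. */
	int64_t total = clusters_per_group * flexbg_size;

	/* Truncation is implementation-defined in ISO C, but wraps on
	 * the usual two's-complement targets, as atomic_t did here. */
	int32_t old_counter = (int32_t)total;

	printf("true count: %lld\n", (long long)total);  /* 2147516416 */
	printf("atomic_t:   %d\n", old_counter);         /* negative   */
	printf("atomic64_t: %lld\n", (long long)total);  /* fits fine  */
	return 0;
}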

This bug was introduced in commit 9f24e4208f7e, so it's been around
since 2.6.30. :-(

Fix this by using an atomic64_t for free_clusters in struct
flex_groups, and by widening the matching counter in struct
orlov_stats to a __u64.
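
Note that the patch moves free_clusters to the front of both structs
rather than widening it in place; leading with the 64-bit member keeps
it naturally aligned with no internal padding, which is presumably why
the fields were reordered (there is one struct flex_groups per flex
group, so the array size adds up). A quick layout sketch (plain C on
an LP64 target; the fake_* stand-in types are illustrative, not the
kernel definitions):

#include <stdio.h>
#include <stdint.h>

typedef struct { int32_t counter; } fake_atomic_t;
typedef struct { int64_t counter; } fake_atomic64_t;

/* 64-bit member in the middle: padding before and after it on LP64. */
struct flex_groups_middle {
	fake_atomic_t	free_inodes;
	fake_atomic64_t	free_clusters;
	fake_atomic_t	used_dirs;
};

/* 64-bit member first, as in the patch: no internal padding. */
struct flex_groups_first {
	fake_atomic64_t	free_clusters;
	fake_atomic_t	free_inodes;
	fake_atomic_t	used_dirs;
};

int main(void)
{
	printf("middle: %zu bytes\n", sizeof(struct flex_groups_middle)); /* 24 */
	printf("first:  %zu bytes\n", sizeof(struct flex_groups_first));  /* 16 */
	return 0;
}

On 32-bit machines without native 64-bit atomics, the atomic64_* ops
fall back to the kernel's generic spinlock-based implementation, so the
wider counter stays correct there too, just with a little more
overhead.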

Signed-off-by: "Theodore Ts'o" <tytso@xxxxxxx>
Reviewed-by: Lukas Czerner <lczerner@xxxxxxxxxx>
Cc: stable@xxxxxxxxxxxxxxx
Signed-off-by: Steven Rostedt <rostedt@xxxxxxxxxxx>
---
 fs/ext4/ext4.h    |  6 +++---
 fs/ext4/ialloc.c  |  4 ++--
 fs/ext4/mballoc.c | 12 ++++++------
 fs/ext4/super.c   |  4 ++--
 4 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index b686b43..1b3df8f 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -329,9 +329,9 @@ struct ext4_group_desc
  */

 struct flex_groups {
-	atomic_t free_inodes;
-	atomic_t free_clusters;
-	atomic_t used_dirs;
+	atomic64_t free_clusters;
+	atomic_t free_inodes;
+	atomic_t used_dirs;
 };

 #define EXT4_BG_INODE_UNINIT	0x0001 /* Inode table/bitmap not in use */
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index cc2d77c..c690ff9 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -324,8 +324,8 @@ error_return:
 }

 struct orlov_stats {
+	__u64 free_clusters;
 	__u32 free_inodes;
-	__u32 free_clusters;
 	__u32 used_dirs;
 };

@@ -342,7 +342,7 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,

 	if (flex_size > 1) {
 		stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
-		stats->free_clusters = atomic_read(&flex_group[g].free_clusters);
+		stats->free_clusters = atomic64_read(&flex_group[g].free_clusters);
 		stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
 		return;
 	}
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index b26410c..87b5519 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2820,8 +2820,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
 	if (sbi->s_log_groups_per_flex) {
 		ext4_group_t flex_group = ext4_flex_group(sbi,
 							  ac->ac_b_ex.fe_group);
-		atomic_sub(ac->ac_b_ex.fe_len,
-			   &sbi->s_flex_groups[flex_group].free_clusters);
+		atomic64_sub(ac->ac_b_ex.fe_len,
+			     &sbi->s_flex_groups[flex_group].free_clusters);
 	}

 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
@@ -4670,8 +4670,8 @@ do_more:

 	if (sbi->s_log_groups_per_flex) {
 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
-		atomic_add(count_clusters,
-			   &sbi->s_flex_groups[flex_group].free_clusters);
+		atomic64_add(count_clusters,
+			     &sbi->s_flex_groups[flex_group].free_clusters);
 	}

 	ext4_mb_unload_buddy(&e4b);
@@ -4815,8 +4815,8 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,

 	if (sbi->s_log_groups_per_flex) {
 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
-		atomic_add(EXT4_B2C(sbi, blocks_freed),
-			   &sbi->s_flex_groups[flex_group].free_clusters);
+		atomic64_add(EXT4_B2C(sbi, blocks_freed),
+			     &sbi->s_flex_groups[flex_group].free_clusters);
 	}

 	ext4_mb_unload_buddy(&e4b);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 2b5fb60..f581886 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1945,8 +1945,8 @@ static int ext4_fill_flex_info(struct super_block *sb)
 		flex_group = ext4_flex_group(sbi, i);
 		atomic_add(ext4_free_inodes_count(sb, gdp),
 			   &sbi->s_flex_groups[flex_group].free_inodes);
-		atomic_add(ext4_free_group_clusters(sb, gdp),
-			   &sbi->s_flex_groups[flex_group].free_clusters);
+		atomic64_add(ext4_free_group_clusters(sb, gdp),
+			     &sbi->s_flex_groups[flex_group].free_clusters);
 		atomic_add(ext4_used_dirs_count(sb, gdp),
 			   &sbi->s_flex_groups[flex_group].used_dirs);
 	}
--
1.7.10.4

