[PATCH 4.9 129/165] x86/mce/amd: Publish the bank pointer only after setup has succeeded

From: Greg Kroah-Hartman
Date: Thu Feb 27 2020 - 08:52:12 EST


From: Borislav Petkov <bp@xxxxxxx>

commit 6e5cf31fbe651bed7ba1df768f2e123531132417 upstream.

threshold_create_bank() creates a bank descriptor per MCA error
thresholding counter which can be controlled via sysfs. It publishes
the pointer to that bank in a per-CPU variable and then goes on to
create additional thresholding blocks if the bank has any.

However, that creation of additional blocks in
allocate_threshold_blocks() can fail, in which case the bank gets freed
while the already published per-CPU pointer still refers to it, leading
to a use-after-free through that pointer.

Therefore, publish that pointer only after all blocks have been set up
successfully.
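
In other words, the rule being enforced is: construct the object
completely first, make it globally visible last. A minimal userspace
sketch of that ordering follows; the names in it (struct bank, banks[],
setup_blocks(), create_bank()) are illustrative only and are not the
kernel code touched by this patch:

/*
 * Minimal userspace sketch of the "publish only after setup has
 * succeeded" ordering.  All names are illustrative; they do not
 * correspond to the kernel code below.
 */
#include <stdio.h>
#include <stdlib.h>

struct bank {
	int nblocks;		/* stands in for the thresholding blocks */
};

static struct bank *banks[4];	/* stands in for the per-CPU bank array */

/* May fail, like allocate_threshold_blocks() in the real code. */
static int setup_blocks(struct bank *b)
{
	b->nblocks = 1;
	return 0;		/* would return a negative errno on failure */
}

static int create_bank(unsigned int idx)
{
	struct bank *b = calloc(1, sizeof(*b));
	int err;

	if (!b)
		return -1;

	/*
	 * Buggy ordering would be:
	 *
	 *	banks[idx] = b;                    // published too early
	 *	err = setup_blocks(b);
	 *	if (err) { free(b); return err; }  // banks[idx] now dangles
	 *
	 * Correct ordering: finish all setup first, publish last.
	 */
	err = setup_blocks(b);
	if (err) {
		free(b);
		return err;	/* nothing was published, nothing dangles */
	}

	banks[idx] = b;		/* readers only ever see a complete object */
	return 0;
}

int main(void)
{
	if (create_bank(0))
		return 1;
	printf("bank 0 has %d block(s)\n", banks[0]->nblocks);
	return 0;
}

The diff below applies exactly this reordering to
threshold_create_bank(): the per_cpu(threshold_banks, cpu)[bank]
assignment moves from before the allocate_threshold_blocks() call to
after it has returned successfully.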

Fixes: 019f34fccfd5 ("x86, MCE, AMD: Move shared bank to node descriptor")
Reported-by: Saar Amar <Saar.Amar@xxxxxxxxxxxxx>
Reported-by: Dan Carpenter <dan.carpenter@xxxxxxxxxx>
Signed-off-by: Borislav Petkov <bp@xxxxxxx>
Cc: <stable@xxxxxxxxxxxxxxx>
Link: http://lkml.kernel.org/r/20200128140846.phctkvx5btiexvbx@xxxxxxxxxxxxx
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

---
arch/x86/kernel/cpu/mcheck/mce_amd.c | 33 ++++++++++++++++-----------------
1 file changed, 16 insertions(+), 17 deletions(-)

--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -879,8 +879,9 @@ static const char *get_name(unsigned int
 	return buf_mcatype;
 }
 
-static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
-				     unsigned int block, u32 address)
+static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb,
+				     unsigned int bank, unsigned int block,
+				     u32 address)
 {
 	struct threshold_block *b = NULL;
 	u32 low, high;
@@ -924,16 +925,12 @@ static int allocate_threshold_blocks(uns

 	INIT_LIST_HEAD(&b->miscj);
 
-	if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
-		list_add(&b->miscj,
-			 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
-	} else {
-		per_cpu(threshold_banks, cpu)[bank]->blocks = b;
-	}
+	if (tb->blocks)
+		list_add(&b->miscj, &tb->blocks->miscj);
+	else
+		tb->blocks = b;
 
-	err = kobject_init_and_add(&b->kobj, &threshold_ktype,
-				   per_cpu(threshold_banks, cpu)[bank]->kobj,
-				   get_name(bank, b));
+	err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(bank, b));
 	if (err)
 		goto out_free;
 recurse:
@@ -941,7 +938,7 @@ recurse:
 	if (!address)
 		return 0;
 
-	err = allocate_threshold_blocks(cpu, bank, block, address);
+	err = allocate_threshold_blocks(cpu, tb, bank, block, address);
 	if (err)
 		goto out_free;
 
@@ -1026,8 +1023,6 @@ static int threshold_create_bank(unsigne
 		goto out_free;
 	}
 
-	per_cpu(threshold_banks, cpu)[bank] = b;
-
 	if (is_shared_bank(bank)) {
 		atomic_set(&b->cpus, 1);
 
@@ -1038,9 +1033,13 @@ static int threshold_create_bank(unsigne
 		}
 	}
 
-	err = allocate_threshold_blocks(cpu, bank, 0, msr_ops.misc(bank));
-	if (!err)
-		goto out;
+	err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank));
+	if (err)
+		goto out_free;
+
+	per_cpu(threshold_banks, cpu)[bank] = b;
+
+	return 0;
 
 out_free:
 	kfree(b);