[PATCH 4.4 103/105] Revert "x86/mm/pat: Ensure cpa->pfn only contains page frame numbers"

From: Greg Kroah-Hartman
Date: Fri Dec 15 2017 - 05:20:11 EST


4.4-stable review patch. If anyone has any objections, please let me know.

------------------

From: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

This reverts commit 87e2bd898d3a79a8c609f183180adac47879a2a4 which is
commit edc3b9129cecd0f0857112136f5b8b1bc1d45918 upstream.

Turns out there were too many other issues with this patch to make it
viable for the stable tree.

Reported-by: Ben Hutchings <ben.hutchings@xxxxxxxxxxxxxxx>
Cc: Matt Fleming <matt@xxxxxxxxxxxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Ard Biesheuvel <ard.biesheuvel@xxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Brian Gerst <brgerst@xxxxxxxxx>
Cc: Dave Jones <davej@xxxxxxxxxxxxxxxxx>
Cc: Denys Vlasenko <dvlasenk@xxxxxxxxxx>
Cc: H. Peter Anvin <hpa@xxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@xxxxxxxxx>
Cc: Stephen Smalley <sds@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Toshi Kani <toshi.kani@xxxxxx>
Cc: linux-efi@xxxxxxxxxxxxxxx
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: "Ghannam, Yazen" <Yazen.Ghannam@xxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
arch/x86/mm/pageattr.c | 17 +++++++++++------
arch/x86/platform/efi/efi_64.c | 16 ++++++----------
2 files changed, 17 insertions(+), 16 deletions(-)

--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -911,10 +911,15 @@ static void populate_pte(struct cpa_data
pte = pte_offset_kernel(pmd, start);

while (num_pages-- && start < end) {
- set_pte(pte, pfn_pte(cpa->pfn, pgprot));
+
+ /* deal with the NX bit */
+ if (!(pgprot_val(pgprot) & _PAGE_NX))
+ cpa->pfn &= ~_PAGE_NX;
+
+ set_pte(pte, pfn_pte(cpa->pfn >> PAGE_SHIFT, pgprot));

start += PAGE_SIZE;
- cpa->pfn++;
+ cpa->pfn += PAGE_SIZE;
pte++;
}
}
@@ -970,11 +975,11 @@ static int populate_pmd(struct cpa_data

pmd = pmd_offset(pud, start);

- set_pmd(pmd, __pmd(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
+ set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE |
massage_pgprot(pmd_pgprot)));

start += PMD_SIZE;
- cpa->pfn += PMD_SIZE >> PAGE_SHIFT;
+ cpa->pfn += PMD_SIZE;
cur_pages += PMD_SIZE >> PAGE_SHIFT;
}

@@ -1043,11 +1048,11 @@ static int populate_pud(struct cpa_data
* Map everything starting from the Gb boundary, possibly with 1G pages
*/
while (end - start >= PUD_SIZE) {
- set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
+ set_pud(pud, __pud(cpa->pfn | _PAGE_PSE |
massage_pgprot(pud_pgprot)));

start += PUD_SIZE;
- cpa->pfn += PUD_SIZE >> PAGE_SHIFT;
+ cpa->pfn += PUD_SIZE;
cur_pages += PUD_SIZE >> PAGE_SHIFT;
pud++;
}
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -143,7 +143,7 @@ void efi_sync_low_kernel_mappings(void)

int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
- unsigned long pfn, text;
+ unsigned long text;
struct page *page;
unsigned npages;
pgd_t *pgd;
@@ -160,8 +160,7 @@ int __init efi_setup_page_tables(unsigne
* and ident-map those pages containing the map before calling
* phys_efi_set_virtual_address_map().
*/
- pfn = pa_memmap >> PAGE_SHIFT;
- if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, _PAGE_NX)) {
+ if (kernel_map_pages_in_pgd(pgd, pa_memmap, pa_memmap, num_pages, _PAGE_NX)) {
pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
return 1;
}
@@ -186,9 +185,8 @@ int __init efi_setup_page_tables(unsigne

npages = (_end - _text) >> PAGE_SHIFT;
text = __pa(_text);
- pfn = text >> PAGE_SHIFT;

- if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, 0)) {
+ if (kernel_map_pages_in_pgd(pgd, text >> PAGE_SHIFT, text, npages, 0)) {
pr_err("Failed to map kernel text 1:1\n");
return 1;
}
@@ -206,14 +204,12 @@ void __init efi_cleanup_page_tables(unsi
static void __init __map_region(efi_memory_desc_t *md, u64 va)
{
pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
- unsigned long flags = 0;
- unsigned long pfn;
+ unsigned long pf = 0;

if (!(md->attribute & EFI_MEMORY_WB))
- flags |= _PAGE_PCD;
+ pf |= _PAGE_PCD;

- pfn = md->phys_addr >> PAGE_SHIFT;
- if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
+ if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va, md->num_pages, pf))
pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
md->phys_addr, va);
}