[PATCH] arm64: fix compat cacheflush syscall with device address

From: Tian Tao
Date: Tue Apr 21 2020 - 04:09:45 EST


An issue has been observed on our Kunpeng 916 systems when using a PCI
Express GPU: a 32-bit application running on a 64-bit kernel issues the
compat cacheflush syscall on a memory address that lies within a PCI
BAR of the GPU. Cache maintenance on that device mapping results in an
illegal operation and a subsequent crash.

Fix this by walking the page tables and skipping the flush for any page
that is not mapped with the Normal (cacheable) memory attribute.
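
For reference, a minimal reproducer sketch of the failing sequence. The
sysfs BAR path below is a hypothetical placeholder; __ARM_NR_cacheflush
(0x0f0002) is the arm32 private cacheflush syscall, so this must be
built as a 32-bit ARM binary (e.g. with arm-linux-gnueabihf-gcc):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

#define __ARM_NR_cacheflush 0x0f0002	/* arm32 private syscall */

int main(void)
{
	/* Placeholder sysfs path for the GPU's BAR. */
	int fd = open("/sys/bus/pci/devices/0000:81:00.0/resource0", O_RDWR);
	if (fd < 0)
		return 1;

	char *bar = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, 0);
	if (bar == MAP_FAILED)
		return 1;

	/* Cache maintenance on the device mapping crashes without this patch. */
	syscall(__ARM_NR_cacheflush, bar, bar + 4096, 0);
	return 0;
}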

Signed-off-by: Tian Tao <tiantao6@xxxxxxxxxxxxx>
Signed-off-by: Lixin Chen <chenlixin1@xxxxxxxxxx>
Reviewed-by: Jonathan Cameron <jonathan.cameron@xxxxxxxxxx>
---
arch/arm64/kernel/sys_compat.c | 90 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 90 insertions(+)

diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index 3c18c24..6c07944 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -15,12 +15,95 @@
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
+#include <linux/hugetlb.h>

#include <asm/cacheflush.h>
#include <asm/system_misc.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>

+/*
+ * Check whether @vaddr is mapped in current->mm with the Normal
+ * (cacheable) memory attribute. Returns 1 if it is, 0 if the page
+ * is absent or mapped as Device/non-cacheable memory.
+ */
+static long __check_pt_cacheable(unsigned long vaddr)
+{
+	struct mm_struct *mm = current->mm;
+	pgd_t *pgd;
+	p4d_t *p4d;
+	pud_t *pud;
+	pudval_t pudval;
+	pmd_t *pmd;
+	pmdval_t pmdval;
+	pte_t *pte;
+	pteval_t pteval;
+	pgprot_t pgprot;
+
+	spin_lock(&mm->page_table_lock);
+	pgd = pgd_offset(mm, vaddr);
+	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+		goto no_page;
+
+	p4d = p4d_offset(pgd, vaddr);
+	if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
+		goto no_page;
+
+	pud = pud_offset(p4d, vaddr);
+	if (pud_none(*pud))
+		goto no_page;
+	/*
+	 * Test for a block entry before pud_bad()/pmd_bad(): block
+	 * entries do not have the table bit set, so they would be
+	 * rejected as bad otherwise.
+	 */
+	if (pud_huge(*pud)) {
+		pudval = pud_val(*pud);
+		pgprot = __pgprot(pudval);
+		goto out;
+	}
+	if (unlikely(pud_bad(*pud)))
+		goto no_page;
+
+	pmd = pmd_offset(pud, vaddr);
+	if (pmd_none(*pmd))
+		goto no_page;
+	if (pmd_huge(*pmd)) {
+		pmdval = pmd_val(*pmd);
+		pgprot = __pgprot(pmdval);
+		goto out;
+	}
+	if (unlikely(pmd_bad(*pmd)))
+		goto no_page;
+
+	pte = pte_offset_map(pmd, vaddr);
+	if (pte_none(*pte) || !pte_present(*pte)) {
+		pte_unmap(pte);
+		goto no_page;
+	}
+	pteval = pte_val(*pte);
+	pte_unmap(pte);
+	pgprot = __pgprot(pteval);
+
+out:
+	/*
+	 * Only the memory attribute index matters: cache maintenance
+	 * by VA on a Device mapping (e.g. a PCI BAR) is what crashes.
+	 */
+	pgprot_val(pgprot) &= PTE_ATTRINDX_MASK;
+	if (pgprot_val(pgprot) != PTE_ATTRINDX(MT_NORMAL)) {
+		pr_debug("non-cacheable page, pgprot value=0x%llx\n",
+			 pgprot_val(pgprot));
+		goto no_page;
+	}
+	spin_unlock(&mm->page_table_lock);
+	return 1;
+
+no_page:
+	spin_unlock(&mm->page_table_lock);
+	return 0;
+}
+
static long
__do_compat_cache_op(unsigned long start, unsigned long end)
{
@@ -32,6 +115,13 @@ __do_compat_cache_op(unsigned long start, unsigned long end)
if (fatal_signal_pending(current))
return 0;

+		/* Skip the flush if the page is mapped non-cacheable. */
+		if (!__check_pt_cacheable(start)) {
+			cond_resched();
+			start += chunk;
+			continue;
+		}
+
if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
/*
* The workaround requires an inner-shareable tlbi.
--
2.7.4