[PATCH RFC 2/5] ARM: introduce arm_dma_direct

From: Nicolas Saenz Julienne
Date: Mon Oct 14 2019 - 14:32:03 EST


ARM devices may end up using either the arch's custom dma-mapping
implementation or dma-direct/swiotlb, depending on how the kernel is
built. This is not good enough, as we need to be able to control a
device's DMA ops based on the specific machine configuration, not only
on compile-time options.

Centralise control over the DMA ops selection in arm_dma_direct, a
global variable set during init to reflect the machine configuration.
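
For illustration only, a rough sketch of where this could go next (not
part of this patch, and assuming memblock information is already
available at that point in early init): the flag could be derived from
the actual memory layout rather than from CONFIG_ARM_LPAE alone, e.g.:

	/*
	 * Hypothetical follow-up, sketch only: switch to dma-direct /
	 * swiotlb whenever RAM actually extends beyond the 32-bit
	 * boundary. Needs <linux/memblock.h> and <linux/dma-mapping.h>.
	 */
	if (IS_ENABLED(CONFIG_ARM_LPAE) &&
	    memblock_end_of_DRAM() - 1 > DMA_BIT_MASK(32))
		arm_dma_direct = true;

This patch itself keeps the existing CONFIG_ARM_LPAE check, only moved
into a single place (setup_dma_zone() below).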

Signed-off-by: Nicolas Saenz Julienne <nsaenzjulienne@xxxxxxx>
---
arch/arm/include/asm/dma-mapping.h | 3 ++-
arch/arm/include/asm/dma.h | 2 ++
arch/arm/mm/dma-mapping.c | 10 ++--------
arch/arm/mm/init.c | 13 +++++++++++++
4 files changed, 19 insertions(+), 9 deletions(-)

diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index bdd80ddbca34..b19af5c55bee 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -8,6 +8,7 @@
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>

+#include <asm/dma.h>
#include <asm/memory.h>

#include <xen/xen.h>
@@ -18,7 +19,7 @@ extern const struct dma_map_ops arm_coherent_dma_ops;

static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
- if (IS_ENABLED(CONFIG_MMU) && !IS_ENABLED(CONFIG_ARM_LPAE))
+ if (IS_ENABLED(CONFIG_MMU) && !arm_dma_direct)
return &arm_dma_ops;
return NULL;
}
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
index a81dda65c576..d386719c53cd 100644
--- a/arch/arm/include/asm/dma.h
+++ b/arch/arm/include/asm/dma.h
@@ -14,6 +14,8 @@
(PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; })
#endif

+extern bool arm_dma_direct __ro_after_init;
+
#ifdef CONFIG_ISA_DMA_API
/*
* This is used to support drivers written for the x86 ISA DMA API.
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 13ef9f131975..172eea707cf7 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -27,6 +27,7 @@
#include <linux/sizes.h>
#include <linux/cma.h>

+#include <asm/dma.h>
#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
@@ -1100,14 +1101,7 @@ int arm_dma_supported(struct device *dev, u64 mask)

static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
{
- /*
- * When CONFIG_ARM_LPAE is set, physical address can extend above
- * 32-bits, which then can't be addressed by devices that only support
- * 32-bit DMA.
- * Use the generic dma-direct / swiotlb ops code in that case, as that
- * handles bounce buffering for us.
- */
- if (IS_ENABLED(CONFIG_ARM_LPAE))
+ if (arm_dma_direct)
return NULL;
return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
}
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index b4be3baa83d4..0a63379a4d1a 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -105,8 +105,21 @@ static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
}
#endif

+bool arm_dma_direct __ro_after_init;
+EXPORT_SYMBOL(arm_dma_direct);
+
void __init setup_dma_zone(const struct machine_desc *mdesc)
{
+ /*
+ * When CONFIG_ARM_LPAE is set, physical address can extend above
+ * 32-bits, which then can't be addressed by devices that only support
+ * 32-bit DMA.
+ * Use the generic dma-direct / swiotlb ops code in that case, as that
+ * handles bounce buffering for us.
+ */
+ if (IS_ENABLED(CONFIG_ARM_LPAE))
+ arm_dma_direct = true;
+
#ifdef CONFIG_ZONE_DMA
if (mdesc->dma_zone_size) {
arm_dma_zone_size = mdesc->dma_zone_size;
--
2.23.0