[PATCH v2 4/7] drm/gpuvm: Add a helper to check if two VA can be merged

From: Caterina Shablia
Date: Wed Jul 02 2025 - 19:38:35 EST


From: Boris Brezillon <boris.brezillon@xxxxxxxxxxxxx>

We are going to add flags/properties that will impact the VA merging
ability. Instead of sprinkling tests all over the place in
__drm_gpuvm_sm_map(), let's add a helper aggregating all these checks
and call it for every existing VA we walk through in the
__drm_gpuvm_sm_map() loop.
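
To illustrate the rule the new can_merge() helper encodes, here is a
self-contained userspace sketch with stand-in types (illustrative only,
not part of the patch): two mappings merge only when they reference the
same GEM object, carry identical flags, are contiguous or overlapping
in VA space, and their GEM offset delta matches their VA delta.

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Stand-ins for the kernel structures, just enough for the example. */
  struct gem_obj { int dummy; };

  struct va_mapping {
          uint64_t addr, range;   /* VA span */
          struct gem_obj *obj;    /* backing GEM object */
          uint64_t offset;        /* offset into the GEM object */
          uint32_t flags;
  };

  /* Same rule as the kernel-side can_merge() below. */
  static bool mappings_can_merge(const struct va_mapping *a,
                                 const struct va_mapping *b)
  {
          if (a->obj != b->obj || !a->obj)
                  return false;
          if (a->flags != b->flags)
                  return false;
          if (a->addr > b->addr) {        /* order the two mappings */
                  const struct va_mapping *tmp = a;
                  a = b;
                  b = tmp;
          }
          if (b->addr > a->addr + a->range)
                  return false;           /* gap between the mappings */
          return b->addr - a->addr == b->offset - a->offset;
  }

  int main(void)
  {
          struct gem_obj obj;
          struct va_mapping a = { .addr = 0x1000, .range = 0x1000,
                                  .obj = &obj, .offset = 0 };
          struct va_mapping b = { .addr = 0x2000, .range = 0x1000,
                                  .obj = &obj, .offset = 0x1000 };
          struct va_mapping c = { .addr = 0x2000, .range = 0x1000,
                                  .obj = &obj, .offset = 0x4000 };

          printf("a+b: %d\n", mappings_can_merge(&a, &b)); /* 1: offsets line up */
          printf("a+c: %d\n", mappings_can_merge(&a, &c)); /* 0: offset delta != VA delta */
          return 0;
  }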

Signed-off-by: Boris Brezillon <boris.brezillon@xxxxxxxxxxxxx>
Signed-off-by: Caterina Shablia <caterina.shablia@xxxxxxxxxxxxx>
---
drivers/gpu/drm/drm_gpuvm.c | 47 +++++++++++++++++++++++++++++--------
1 file changed, 37 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index ae201d45e6b8..2df04dfcb6ef 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -2098,12 +2098,48 @@ op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
return fn->sm_step_unmap(&op, priv);
}

+static bool can_merge(struct drm_gpuvm *gpuvm, const struct drm_gpuva *a,
+ const struct drm_gpuva *b)
+{
+ /* Only GEM-based mappings can be merged, and they must point to
+ * the same GEM object.
+ */
+ if (a->gem.obj != b->gem.obj || !a->gem.obj)
+ return false;
+
+ /* Let's keep things simple for now and force all flags to match. */
+ if (a->flags != b->flags)
+ return false;
+
+ /* Order VAs for the rest of the checks. */
+ if (a->va.addr > b->va.addr)
+ swap(a, b);
+
+ /* We assume the caller already checked that VAs overlap or are
+ * contiguous.
+ */
+ if (drm_WARN_ON(gpuvm->drm, b->va.addr > a->va.addr + a->va.range))
+ return false;
+
+ /* We intentionally ignore u64 underflows because all we care about
+ * here is whether the VA diff matches the GEM offset diff.
+ */
+ return b->va.addr - a->va.addr == b->gem.offset - a->gem.offset;
+}
+
static int
__drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
const struct drm_gpuvm_ops *ops, void *priv,
const struct drm_gpuvm_map_req *req)
{
struct drm_gpuva *va, *next;
+ struct drm_gpuva reqva = {
+ .va.addr = req->va.addr,
+ .va.range = req->va.range,
+ .gem.offset = req->gem.offset,
+ .gem.obj = req->gem.obj,
+ .flags = req->flags,
+ };
u64 req_end = req->va.addr + req->va.range;
int ret;

@@ -2116,12 +2152,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
u64 addr = va->va.addr;
u64 range = va->va.range;
u64 end = addr + range;
- bool merge = !!va->gem.obj;
+ bool merge = can_merge(gpuvm, va, &reqva);

if (addr == req->va.addr) {
- merge &= obj == req->gem.obj &&
- offset == req->gem.offset;
-
if (end == req_end) {
ret = op_unmap_cb(ops, priv, va, merge);
if (ret)
@@ -2163,8 +2196,6 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
};
struct drm_gpuva_op_unmap u = { .va = va };

- merge &= obj == req->gem.obj &&
- offset + ls_range == req->gem.offset;
u.keep = merge;

if (end == req_end) {
@@ -2196,10 +2227,6 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
break;
}
} else if (addr > req->va.addr) {
- merge &= obj == req->gem.obj &&
- offset == req->gem.offset +
- (addr - req->va.addr);
-
if (end == req_end) {
ret = op_unmap_cb(ops, priv, va, merge);
if (ret)
--
2.47.2