[PATCH v2 2/4] dmabuf: Add cgroup charge transfer function

From: T.J. Mercier
Date: Mon Jan 23 2023 - 14:18:18 EST


The dma_buf_transfer_charge function provides a way for a process to
transfer the charge for a buffer to a different cgroup. This is
essential for cases where a central allocator process performs
allocations on behalf of various subsystems, hands the fd over to the
client that requested the memory, and then drops all of its references
to the allocated memory.

Signed-off-by: T.J. Mercier <tjmercier@xxxxxxxxxx>
---
drivers/dma-buf/dma-buf.c | 56 ++++++++++++++++++++++++++++++++++++++
include/linux/dma-buf.h | 1 +
include/linux/memcontrol.h | 5 ++++
3 files changed, 62 insertions(+)
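
The ownership handoff in dma_buf_transfer_charge() below is lock-free:
the target cgroup is charged first, then cmpxchg() swaps dmabuf->memcg,
and only afterwards is the old cgroup uncharged; a caller that loses
the cmpxchg() (because its cgroup is not, or is no longer, the recorded
owner) backs out the tentative charge and gets -EPERM. Reduced to a
minimal sketch, with a hypothetical "struct owner" standing in for
struct mem_cgroup:

        #include <linux/atomic.h>
        #include <linux/errno.h>

        struct owner;   /* illustrative stand-in for struct mem_cgroup */

        static int handoff_owner(struct owner **slot, struct owner *cur,
                                 struct owner *next)
        {
                /* Publish the new owner only if *slot still holds the expected one. */
                if (cmpxchg(slot, cur, next) != cur)
                        return -EPERM;  /* raced with, or never was, the owner */
                return 0;
        }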

diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index a6a8cb5cb32d..ac3d02a7ecf8 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -11,6 +11,7 @@
* refining of this idea.
*/

+#include <linux/atomic.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
@@ -1626,6 +1627,61 @@ void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, DMA_BUF);

+/**
+ * dma_buf_transfer_charge - Change the cgroup to which the provided dma_buf is charged.
+ * @dmabuf_file: [in] file for buffer whose charge will be migrated to a different cgroup
+ * @target: [in] the task_struct of the destination process for the cgroup charge
+ *
+ * Only a task that belongs to the cgroup the buffer is currently charged to
+ * may call this function; otherwise it returns -EPERM.
+ *
+ * Returns 0 on success, or a negative errno code otherwise.
+ */
+int dma_buf_transfer_charge(struct file *dmabuf_file, struct task_struct *target)
+{
+ struct mem_cgroup *current_cg, *target_cg;
+ struct dma_buf *dmabuf;
+ unsigned int nr_pages;
+ int ret = 0;
+
+ if (!IS_ENABLED(CONFIG_MEMCG))
+ return 0;
+
+ if (WARN_ON(!dmabuf_file) || WARN_ON(!target))
+ return -EINVAL;
+
+ if (!is_dma_buf_file(dmabuf_file))
+ return -EBADF;
+ dmabuf = dmabuf_file->private_data;
+
+ nr_pages = PAGE_ALIGN(dmabuf->size) / PAGE_SIZE;
+ current_cg = mem_cgroup_from_task(current);
+ target_cg = get_mem_cgroup_from_mm(target->mm);
+
+ if (current_cg == target_cg)
+ goto skip_transfer;
+
+ if (!mem_cgroup_charge_dmabuf(target_cg, nr_pages, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto skip_transfer;
+ }
+
+ if (cmpxchg(&dmabuf->memcg, current_cg, target_cg) != current_cg) {
+ /* Only the current owner can transfer the charge */
+ ret = -EPERM;
+ mem_cgroup_uncharge_dmabuf(target_cg, nr_pages);
+ goto skip_transfer;
+ }
+
+ mem_cgroup_uncharge_dmabuf(current_cg, nr_pages);
+ mem_cgroup_put(current_cg); /* unref from buffer - buffer keeps new ref to target_cg */
+ return 0;
+
+skip_transfer:
+ mem_cgroup_put(target_cg);
+ return ret;
+}
+
#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 1f0ffb8e4bf5..f25eb8e60fb2 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -634,4 +634,5 @@ int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map);
void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map);
int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map);
void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map);
+int dma_buf_transfer_charge(struct file *dmabuf_file, struct task_struct *target);
#endif /* __DMA_BUF_H__ */
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index c10b8565fdbf..009298a446fe 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1335,6 +1335,11 @@ struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
return NULL;
}

+static inline struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
+{
+ return NULL;
+}
+
static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
}
--
2.39.0.246.g2a6d74b583-goog