Re: [PATCHv4 10/11] videobuf2: add begin/end cpu_access callbacks to dma-sg

From: Hans Verkuil
Date: Fri Mar 06 2020 - 09:04:10 EST


On 02/03/2020 05:12, Sergey Senozhatsky wrote:
> Provide begin_cpu_access() and end_cpu_access() dma_buf_ops
> callbacks for cache synchronisation on exported buffers.
>
> V4L2_FLAG_MEMORY_NON_CONSISTENT has no effect on dma-sg buffers.
> dma-sg allocates memory using the page allocator directly, so
> there is no memory consistency guarantee.
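
These begin/end callbacks are what the DMA_BUF_IOCTL_SYNC ioctl on the
exported buffer ends up calling, so userspace brackets its CPU accesses
roughly like this (a minimal sketch; dmabuf_fd is a hypothetical fd
obtained with VIDIOC_EXPBUF, and the mapping comes from mmap()):

#include <sys/ioctl.h>
#include <linux/dma-buf.h>

/* dmabuf_fd: hypothetical fd exported with VIDIOC_EXPBUF */
static void cpu_touch_buffer(int dmabuf_fd)
{
	struct dma_buf_sync sync = {
		.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW,
	};

	/* invalidate CPU caches: reaches begin_cpu_access() */
	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);

	/* ... CPU reads/writes the mmap()ed buffer here ... */

	/* write back CPU caches: reaches end_cpu_access() */
	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
}
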
>
> Signed-off-by: Sergey Senozhatsky <senozhatsky@xxxxxxxxxxxx>
> ---
>  .../media/common/videobuf2/videobuf2-dma-sg.c | 28 +++++++++++++++++++
>  1 file changed, 28 insertions(+)
>
> diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
> index 6db60e9d5183..ddc67c9aaedb 100644
> --- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
> +++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
> @@ -120,6 +120,12 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
>  	buf->num_pages = size >> PAGE_SHIFT;
>  	buf->dma_sgt = &buf->sg_table;
>
> +	/*
> +	 * NOTE: dma-sg allocates memory using the page allocator directly, so
> +	 * there is no memory consistency guarantee, hence dma-sg ignores DMA
> +	 * attributes passed from the upper layer. That means that
> +	 * V4L2_FLAG_MEMORY_NON_CONSISTENT has no effect on dma-sg buffers.
> +	 */
>  	buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *),
>  				    GFP_KERNEL | __GFP_ZERO);
>  	if (!buf->pages)
> @@ -470,6 +476,26 @@ static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
>  	vb2_dma_sg_put(dbuf->priv);
>  }
>
> +static int vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
> +						   enum dma_data_direction direction)

I suggest you use this style to avoid checkpatch warnings (the second
argument can't be aligned with the open parenthesis without going past
80 columns):

static int
vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
				       enum dma_data_direction direction)

> +{
> +	struct vb2_dma_sg_buf *buf = dbuf->priv;
> +	struct sg_table *sgt = buf->dma_sgt;
> +
> +	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
> +	return 0;
> +}
> +
> +static int vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
> +						 enum dma_data_direction direction)

Ditto.
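
i.e., the same split for the second function:

static int
vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
				     enum dma_data_direction direction)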

Regards,

Hans

> +{
> +	struct vb2_dma_sg_buf *buf = dbuf->priv;
> +	struct sg_table *sgt = buf->dma_sgt;
> +
> +	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
> +	return 0;
> +}
> +
>  static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
>  {
>  	struct vb2_dma_sg_buf *buf = dbuf->priv;
> @@ -488,6 +514,8 @@ static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
>  	.detach = vb2_dma_sg_dmabuf_ops_detach,
>  	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
>  	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
> +	.begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access,
> +	.end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access,
>  	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
>  	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
>  	.release = vb2_dma_sg_dmabuf_ops_release,
>