author     Mauro Carvalho Chehab <mchehab@s-opensource.com>  2017-12-21 08:29:39 -0500
committer  Mauro Carvalho Chehab <mchehab@s-opensource.com>  2017-12-28 11:17:29 -0500
commit     03fbdb2fc2b8bb27b0ee0534fd3e9c57cdc3854a (patch)
tree       cccf7f6813d7279f2d9ad884e33a21128244df23 /drivers/media/common
parent     4021053ed52d57a2a1a3ec8355408fb290a23d9f (diff)
media: move videobuf2 to drivers/media/common
Now that VB2 is used by both V4L2 and DVB core, move it to the common part of the subsystem.

Signed-off-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
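For context, here is a minimal sketch of how a driver consumes the vb2 API that this patch relocates. It is not part of the patch: the mydrv_* names, the fixed buffer size, and the choice of the vmalloc allocator are illustrative assumptions; a real driver picks the vb2_mem_ops matching its DMA constraints.

/* Hypothetical vb2 consumer (illustrative only, not part of this commit). */
#include <linux/mutex.h>
#include <media/videobuf2-v4l2.h>	/* vb2_queue_init(), struct vb2_v4l2_buffer */
#include <media/videobuf2-vmalloc.h>	/* vb2_vmalloc_memops */

#define MYDRV_BUF_SIZE (640 * 480 * 2)	/* assumed fixed frame size */

static int mydrv_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
			     unsigned int *num_planes, unsigned int sizes[],
			     struct device *alloc_devs[])
{
	/* One plane of a fixed size; the core may still lower *num_buffers. */
	*num_planes = 1;
	sizes[0] = MYDRV_BUF_SIZE;
	return 0;
}

static void mydrv_buf_queue(struct vb2_buffer *vb)
{
	/* A real driver would hand the buffer to hardware here. */
	vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
}

static const struct vb2_ops mydrv_vb2_ops = {
	.queue_setup	= mydrv_queue_setup,
	.buf_queue	= mydrv_buf_queue,
	.wait_prepare	= vb2_ops_wait_prepare,
	.wait_finish	= vb2_ops_wait_finish,
};

static int mydrv_init_vb2(struct vb2_queue *q, struct mutex *lock)
{
	q->type			= V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes		= VB2_MMAP | VB2_USERPTR;
	q->buf_struct_size	= sizeof(struct vb2_v4l2_buffer);
	q->ops			= &mydrv_vb2_ops;
	q->mem_ops		= &vb2_vmalloc_memops;
	q->timestamp_flags	= V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->min_buffers_needed	= 2;
	q->lock			= lock;
	return vb2_queue_init(q);
}

Only the header locations stay the same; the objects backing them are now built from drivers/media/common/videobuf/, as the diffstat below shows.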
Diffstat (limited to 'drivers/media/common')
-rw-r--r--  drivers/media/common/Kconfig                           |    1
-rw-r--r--  drivers/media/common/Makefile                          |    2
-rw-r--r--  drivers/media/common/videobuf/Kconfig                  |   31
-rw-r--r--  drivers/media/common/videobuf/Makefile                 |    7
-rw-r--r--  drivers/media/common/videobuf/videobuf2-core.c         | 2607
-rw-r--r--  drivers/media/common/videobuf/videobuf2-dma-contig.c   |  787
-rw-r--r--  drivers/media/common/videobuf/videobuf2-dma-sg.c       |  669
-rw-r--r--  drivers/media/common/videobuf/videobuf2-dvb.c          |  345
-rw-r--r--  drivers/media/common/videobuf/videobuf2-memops.c       |  135
-rw-r--r--  drivers/media/common/videobuf/videobuf2-v4l2.c         |  966
-rw-r--r--  drivers/media/common/videobuf/videobuf2-vmalloc.c      |  452
11 files changed, 6001 insertions(+), 1 deletion(-)
diff --git a/drivers/media/common/Kconfig b/drivers/media/common/Kconfig
index 326df0ad75c0..cdfc905967dc 100644
--- a/drivers/media/common/Kconfig
+++ b/drivers/media/common/Kconfig
@@ -16,6 +16,7 @@ config CYPRESS_FIRMWARE
tristate "Cypress firmware helper routines"
depends on USB
+source "drivers/media/common/videobuf/Kconfig"
source "drivers/media/common/b2c2/Kconfig"
source "drivers/media/common/saa7146/Kconfig"
source "drivers/media/common/siano/Kconfig"
diff --git a/drivers/media/common/Makefile b/drivers/media/common/Makefile
index 2d1b0a025084..f24b5ed39982 100644
--- a/drivers/media/common/Makefile
+++ b/drivers/media/common/Makefile
@@ -1,4 +1,4 @@
-obj-y += b2c2/ saa7146/ siano/ v4l2-tpg/
+obj-y += b2c2/ saa7146/ siano/ v4l2-tpg/ videobuf/
obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o
obj-$(CONFIG_VIDEO_TVEEPROM) += tveeprom.o
obj-$(CONFIG_CYPRESS_FIRMWARE) += cypress_firmware.o
diff --git a/drivers/media/common/videobuf/Kconfig b/drivers/media/common/videobuf/Kconfig
new file mode 100644
index 000000000000..5df05250de94
--- /dev/null
+++ b/drivers/media/common/videobuf/Kconfig
@@ -0,0 +1,31 @@
+# Used by drivers that need Videobuf2 modules
+config VIDEOBUF2_CORE
+ select DMA_SHARED_BUFFER
+ tristate
+
+config VIDEOBUF2_MEMOPS
+ tristate
+ select FRAME_VECTOR
+
+config VIDEOBUF2_DMA_CONTIG
+ tristate
+ depends on HAS_DMA
+ select VIDEOBUF2_CORE
+ select VIDEOBUF2_MEMOPS
+ select DMA_SHARED_BUFFER
+
+config VIDEOBUF2_VMALLOC
+ tristate
+ select VIDEOBUF2_CORE
+ select VIDEOBUF2_MEMOPS
+ select DMA_SHARED_BUFFER
+
+config VIDEOBUF2_DMA_SG
+ tristate
+ depends on HAS_DMA
+ select VIDEOBUF2_CORE
+ select VIDEOBUF2_MEMOPS
+
+config VIDEOBUF2_DVB
+ tristate
+ select VIDEOBUF2_CORE
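None of the new symbols carries a prompt: they are meant to be select-ed by driver Kconfig entries rather than enabled by hand. A hedged sketch of what such a driver entry could look like follows (VIDEO_MYDRV is made up for illustration; only the select lines reflect the file above):

config VIDEO_MYDRV
	tristate "Hypothetical capture driver"
	depends on VIDEO_V4L2
	select VIDEOBUF2_VMALLOC
	help
	  Example only: selecting VIDEOBUF2_VMALLOC pulls in
	  VIDEOBUF2_CORE and VIDEOBUF2_MEMOPS through the entries
	  defined in drivers/media/common/videobuf/Kconfig.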
diff --git a/drivers/media/common/videobuf/Makefile b/drivers/media/common/videobuf/Makefile
new file mode 100644
index 000000000000..19de5ccda20b
--- /dev/null
+++ b/drivers/media/common/videobuf/Makefile
@@ -0,0 +1,7 @@
+
+obj-$(CONFIG_VIDEOBUF2_CORE) += videobuf2-core.o videobuf2-v4l2.o
+obj-$(CONFIG_VIDEOBUF2_MEMOPS) += videobuf2-memops.o
+obj-$(CONFIG_VIDEOBUF2_VMALLOC) += videobuf2-vmalloc.o
+obj-$(CONFIG_VIDEOBUF2_DMA_CONTIG) += videobuf2-dma-contig.o
+obj-$(CONFIG_VIDEOBUF2_DMA_SG) += videobuf2-dma-sg.o
+obj-$(CONFIG_VIDEOBUF2_DVB) += videobuf2-dvb.o
diff --git a/drivers/media/common/videobuf/videobuf2-core.c b/drivers/media/common/videobuf/videobuf2-core.c
new file mode 100644
index 000000000000..a8589d96ef72
--- /dev/null
+++ b/drivers/media/common/videobuf/videobuf2-core.c
@@ -0,0 +1,2607 @@
+/*
+ * videobuf2-core.c - video buffer 2 core framework
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Pawel Osciak <pawel@osciak.com>
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * The vb2_thread implementation was based on code from videobuf-dvb.c:
+ * (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+
+#include <media/videobuf2-core.h>
+#include <media/v4l2-mc.h>
+
+#include <trace/events/vb2.h>
+
+static int debug;
+module_param(debug, int, 0644);
+
+#define dprintk(level, fmt, arg...) \
+ do { \
+ if (debug >= level) \
+ pr_info("vb2-core: %s: " fmt, __func__, ## arg); \
+ } while (0)
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+
+/*
+ * If advanced debugging is on, then count how often each op is called
+ * successfully, which can either be per-buffer or per-queue.
+ *
+ * This makes it easy to check that the 'init' and 'cleanup'
+ * (and variations thereof) stay balanced.
+ */
+
+#define log_memop(vb, op) \
+ dprintk(2, "call_memop(%p, %d, %s)%s\n", \
+ (vb)->vb2_queue, (vb)->index, #op, \
+ (vb)->vb2_queue->mem_ops->op ? "" : " (nop)")
+
+#define call_memop(vb, op, args...) \
+({ \
+ struct vb2_queue *_q = (vb)->vb2_queue; \
+ int err; \
+ \
+ log_memop(vb, op); \
+ err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0; \
+ if (!err) \
+ (vb)->cnt_mem_ ## op++; \
+ err; \
+})
+
+#define call_ptr_memop(vb, op, args...) \
+({ \
+ struct vb2_queue *_q = (vb)->vb2_queue; \
+ void *ptr; \
+ \
+ log_memop(vb, op); \
+ ptr = _q->mem_ops->op ? _q->mem_ops->op(args) : NULL; \
+ if (!IS_ERR_OR_NULL(ptr)) \
+ (vb)->cnt_mem_ ## op++; \
+ ptr; \
+})
+
+#define call_void_memop(vb, op, args...) \
+({ \
+ struct vb2_queue *_q = (vb)->vb2_queue; \
+ \
+ log_memop(vb, op); \
+ if (_q->mem_ops->op) \
+ _q->mem_ops->op(args); \
+ (vb)->cnt_mem_ ## op++; \
+})
+
+#define log_qop(q, op) \
+ dprintk(2, "call_qop(%p, %s)%s\n", q, #op, \
+ (q)->ops->op ? "" : " (nop)")
+
+#define call_qop(q, op, args...) \
+({ \
+ int err; \
+ \
+ log_qop(q, op); \
+ err = (q)->ops->op ? (q)->ops->op(args) : 0; \
+ if (!err) \
+ (q)->cnt_ ## op++; \
+ err; \
+})
+
+#define call_void_qop(q, op, args...) \
+({ \
+ log_qop(q, op); \
+ if ((q)->ops->op) \
+ (q)->ops->op(args); \
+ (q)->cnt_ ## op++; \
+})
+
+#define log_vb_qop(vb, op, args...) \
+ dprintk(2, "call_vb_qop(%p, %d, %s)%s\n", \
+ (vb)->vb2_queue, (vb)->index, #op, \
+ (vb)->vb2_queue->ops->op ? "" : " (nop)")
+
+#define call_vb_qop(vb, op, args...) \
+({ \
+ int err; \
+ \
+ log_vb_qop(vb, op); \
+ err = (vb)->vb2_queue->ops->op ? \
+ (vb)->vb2_queue->ops->op(args) : 0; \
+ if (!err) \
+ (vb)->cnt_ ## op++; \
+ err; \
+})
+
+#define call_void_vb_qop(vb, op, args...) \
+({ \
+ log_vb_qop(vb, op); \
+ if ((vb)->vb2_queue->ops->op) \
+ (vb)->vb2_queue->ops->op(args); \
+ (vb)->cnt_ ## op++; \
+})
+
+#else
+
+#define call_memop(vb, op, args...) \
+ ((vb)->vb2_queue->mem_ops->op ? \
+ (vb)->vb2_queue->mem_ops->op(args) : 0)
+
+#define call_ptr_memop(vb, op, args...) \
+ ((vb)->vb2_queue->mem_ops->op ? \
+ (vb)->vb2_queue->mem_ops->op(args) : NULL)
+
+#define call_void_memop(vb, op, args...) \
+ do { \
+ if ((vb)->vb2_queue->mem_ops->op) \
+ (vb)->vb2_queue->mem_ops->op(args); \
+ } while (0)
+
+#define call_qop(q, op, args...) \
+ ((q)->ops->op ? (q)->ops->op(args) : 0)
+
+#define call_void_qop(q, op, args...) \
+ do { \
+ if ((q)->ops->op) \
+ (q)->ops->op(args); \
+ } while (0)
+
+#define call_vb_qop(vb, op, args...) \
+ ((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0)
+
+#define call_void_vb_qop(vb, op, args...) \
+ do { \
+ if ((vb)->vb2_queue->ops->op) \
+ (vb)->vb2_queue->ops->op(args); \
+ } while (0)
+
+#endif
+
+#define call_bufop(q, op, args...) \
+({ \
+ int ret = 0; \
+ if (q && q->buf_ops && q->buf_ops->op) \
+ ret = q->buf_ops->op(args); \
+ ret; \
+})
+
+#define call_void_bufop(q, op, args...) \
+({ \
+ if (q && q->buf_ops && q->buf_ops->op) \
+ q->buf_ops->op(args); \
+})
+
+static void __vb2_queue_cancel(struct vb2_queue *q);
+static void __enqueue_in_driver(struct vb2_buffer *vb);
+
+/*
+ * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
+ */
+static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ void *mem_priv;
+ int plane;
+ int ret = -ENOMEM;
+
+ /*
+ * Allocate memory for all planes in this buffer
+ * NOTE: mmapped areas should be page aligned
+ */
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ unsigned long size = PAGE_ALIGN(vb->planes[plane].length);
+
+ mem_priv = call_ptr_memop(vb, alloc,
+ q->alloc_devs[plane] ? : q->dev,
+ q->dma_attrs, size, q->dma_dir, q->gfp_flags);
+ if (IS_ERR_OR_NULL(mem_priv)) {
+ if (mem_priv)
+ ret = PTR_ERR(mem_priv);
+ goto free;
+ }
+
+ /* Associate allocator private data with this plane */
+ vb->planes[plane].mem_priv = mem_priv;
+ }
+
+ return 0;
+free:
+ /* Free already allocated memory if one of the allocations failed */
+ for (; plane > 0; --plane) {
+ call_void_memop(vb, put, vb->planes[plane - 1].mem_priv);
+ vb->planes[plane - 1].mem_priv = NULL;
+ }
+
+ return ret;
+}
+
+/*
+ * __vb2_buf_mem_free() - free memory of the given buffer
+ */
+static void __vb2_buf_mem_free(struct vb2_buffer *vb)
+{
+ unsigned int plane;
+
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ call_void_memop(vb, put, vb->planes[plane].mem_priv);
+ vb->planes[plane].mem_priv = NULL;
+ dprintk(3, "freed plane %d of buffer %d\n", plane, vb->index);
+ }
+}
+
+/*
+ * __vb2_buf_userptr_put() - release userspace memory associated with
+ * a USERPTR buffer
+ */
+static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
+{
+ unsigned int plane;
+
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ if (vb->planes[plane].mem_priv)
+ call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
+ vb->planes[plane].mem_priv = NULL;
+ }
+}
+
+/*
+ * __vb2_plane_dmabuf_put() - release memory associated with
+ * a DMABUF shared plane
+ */
+static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
+{
+ if (!p->mem_priv)
+ return;
+
+ if (p->dbuf_mapped)
+ call_void_memop(vb, unmap_dmabuf, p->mem_priv);
+
+ call_void_memop(vb, detach_dmabuf, p->mem_priv);
+ dma_buf_put(p->dbuf);
+ p->mem_priv = NULL;
+ p->dbuf = NULL;
+ p->dbuf_mapped = 0;
+}
+
+/*
+ * __vb2_buf_dmabuf_put() - release memory associated with
+ * a DMABUF shared buffer
+ */
+static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
+{
+ unsigned int plane;
+
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ __vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
+}
+
+/*
+ * __setup_offsets() - setup unique offsets ("cookies") for every plane in
+ * the buffer.
+ */
+static void __setup_offsets(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ unsigned int plane;
+ unsigned long off = 0;
+
+ if (vb->index) {
+ struct vb2_buffer *prev = q->bufs[vb->index - 1];
+ struct vb2_plane *p = &prev->planes[prev->num_planes - 1];
+
+ off = PAGE_ALIGN(p->m.offset + p->length);
+ }
+
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ vb->planes[plane].m.offset = off;
+
+ dprintk(3, "buffer %d, plane %d offset 0x%08lx\n",
+ vb->index, plane, off);
+
+ off += vb->planes[plane].length;
+ off = PAGE_ALIGN(off);
+ }
+}
+
+/*
+ * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type)
+ * video buffer memory for all buffers/planes on the queue and initializes the
+ * queue
+ *
+ * Returns the number of buffers successfully allocated.
+ */
+static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
+ unsigned int num_buffers, unsigned int num_planes,
+ const unsigned plane_sizes[VB2_MAX_PLANES])
+{
+ unsigned int buffer, plane;
+ struct vb2_buffer *vb;
+ int ret;
+
+ for (buffer = 0; buffer < num_buffers; ++buffer) {
+ /* Allocate videobuf buffer structures */
+ vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
+ if (!vb) {
+ dprintk(1, "memory alloc for buffer struct failed\n");
+ break;
+ }
+
+ vb->state = VB2_BUF_STATE_DEQUEUED;
+ vb->vb2_queue = q;
+ vb->num_planes = num_planes;
+ vb->index = q->num_buffers + buffer;
+ vb->type = q->type;
+ vb->memory = memory;
+ for (plane = 0; plane < num_planes; ++plane) {
+ vb->planes[plane].length = plane_sizes[plane];
+ vb->planes[plane].min_length = plane_sizes[plane];
+ }
+ q->bufs[vb->index] = vb;
+
+ /* Allocate video buffer memory for the MMAP type */
+ if (memory == VB2_MEMORY_MMAP) {
+ ret = __vb2_buf_mem_alloc(vb);
+ if (ret) {
+ dprintk(1, "failed allocating memory for buffer %d\n",
+ buffer);
+ q->bufs[vb->index] = NULL;
+ kfree(vb);
+ break;
+ }
+ __setup_offsets(vb);
+ /*
+ * Call the driver-provided buffer initialization
+ * callback, if given. An error in initialization
+ * results in queue setup failure.
+ */
+ ret = call_vb_qop(vb, buf_init, vb);
+ if (ret) {
+ dprintk(1, "buffer %d %p initialization failed\n",
+ buffer, vb);
+ __vb2_buf_mem_free(vb);
+ q->bufs[vb->index] = NULL;
+ kfree(vb);
+ break;
+ }
+ }
+ }
+
+ dprintk(1, "allocated %d buffers, %d plane(s) each\n",
+ buffer, num_planes);
+
+ return buffer;
+}
+
+/*
+ * __vb2_free_mem() - release all video buffer memory for a given queue
+ */
+static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
+{
+ unsigned int buffer;
+ struct vb2_buffer *vb;
+
+ for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+ ++buffer) {
+ vb = q->bufs[buffer];
+ if (!vb)
+ continue;
+
+ /* Free MMAP buffers or release USERPTR buffers */
+ if (q->memory == VB2_MEMORY_MMAP)
+ __vb2_buf_mem_free(vb);
+ else if (q->memory == VB2_MEMORY_DMABUF)
+ __vb2_buf_dmabuf_put(vb);
+ else
+ __vb2_buf_userptr_put(vb);
+ }
+}
+
+/*
+ * __vb2_queue_free() - free buffers at the end of the queue - video memory and
+ * related information, if no buffers are left return the queue to an
+ * uninitialized state. Might be called even if the queue has already been freed.
+ */
+static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
+{
+ unsigned int buffer;
+
+ /*
+ * Sanity check: when preparing a buffer the queue lock is released for
+ * a short while (see __buf_prepare for the details), which would allow
+ * a race with a reqbufs which can call this function. Removing the
+ * buffers from underneath __buf_prepare is obviously a bad idea, so we
+ * check if any of the buffers is in the state PREPARING, and if so we
+ * just return -EAGAIN.
+ */
+ for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+ ++buffer) {
+ if (q->bufs[buffer] == NULL)
+ continue;
+ if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) {
+ dprintk(1, "preparing buffers, cannot free\n");
+ return -EAGAIN;
+ }
+ }
+
+ /* Call driver-provided cleanup function for each buffer, if provided */
+ for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+ ++buffer) {
+ struct vb2_buffer *vb = q->bufs[buffer];
+
+ if (vb && vb->planes[0].mem_priv)
+ call_void_vb_qop(vb, buf_cleanup, vb);
+ }
+
+ /* Release video buffer memory */
+ __vb2_free_mem(q, buffers);
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ /*
+ * Check that all the calls were balanced during the life-time of this
+ * queue. If not (or if the debug level is 1 or up), then dump the
+ * counters to the kernel log.
+ */
+ if (q->num_buffers) {
+ bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
+ q->cnt_wait_prepare != q->cnt_wait_finish;
+
+ if (unbalanced || debug) {
+ pr_info("vb2: counters for queue %p:%s\n", q,
+ unbalanced ? " UNBALANCED!" : "");
+ pr_info("vb2: setup: %u start_streaming: %u stop_streaming: %u\n",
+ q->cnt_queue_setup, q->cnt_start_streaming,
+ q->cnt_stop_streaming);
+ pr_info("vb2: wait_prepare: %u wait_finish: %u\n",
+ q->cnt_wait_prepare, q->cnt_wait_finish);
+ }
+ q->cnt_queue_setup = 0;
+ q->cnt_wait_prepare = 0;
+ q->cnt_wait_finish = 0;
+ q->cnt_start_streaming = 0;
+ q->cnt_stop_streaming = 0;
+ }
+ for (buffer = 0; buffer < q->num_buffers; ++buffer) {
+ struct vb2_buffer *vb = q->bufs[buffer];
+ bool unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put ||
+ vb->cnt_mem_prepare != vb->cnt_mem_finish ||
+ vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr ||
+ vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf ||
+ vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf ||
+ vb->cnt_buf_queue != vb->cnt_buf_done ||
+ vb->cnt_buf_prepare != vb->cnt_buf_finish ||
+ vb->cnt_buf_init != vb->cnt_buf_cleanup;
+
+ if (unbalanced || debug) {
+ pr_info("vb2: counters for queue %p, buffer %d:%s\n",
+ q, buffer, unbalanced ? " UNBALANCED!" : "");
+ pr_info("vb2: buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n",
+ vb->cnt_buf_init, vb->cnt_buf_cleanup,
+ vb->cnt_buf_prepare, vb->cnt_buf_finish);
+ pr_info("vb2: buf_queue: %u buf_done: %u\n",
+ vb->cnt_buf_queue, vb->cnt_buf_done);
+ pr_info("vb2: alloc: %u put: %u prepare: %u finish: %u mmap: %u\n",
+ vb->cnt_mem_alloc, vb->cnt_mem_put,
+ vb->cnt_mem_prepare, vb->cnt_mem_finish,
+ vb->cnt_mem_mmap);
+ pr_info("vb2: get_userptr: %u put_userptr: %u\n",
+ vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr);
+ pr_info("vb2: attach_dmabuf: %u detach_dmabuf: %u map_dmabuf: %u unmap_dmabuf: %u\n",
+ vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf,
+ vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf);
+ pr_info("vb2: get_dmabuf: %u num_users: %u vaddr: %u cookie: %u\n",
+ vb->cnt_mem_get_dmabuf,
+ vb->cnt_mem_num_users,
+ vb->cnt_mem_vaddr,
+ vb->cnt_mem_cookie);
+ }
+ }
+#endif
+
+ /* Free videobuf buffers */
+ for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+ ++buffer) {
+ kfree(q->bufs[buffer]);
+ q->bufs[buffer] = NULL;
+ }
+
+ q->num_buffers -= buffers;
+ if (!q->num_buffers) {
+ q->memory = 0;
+ INIT_LIST_HEAD(&q->queued_list);
+ }
+ return 0;
+}
+
+bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
+{
+ unsigned int plane;
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ void *mem_priv = vb->planes[plane].mem_priv;
+ /*
+ * If num_users() has not been provided, call_memop
+ * will return 0, apparently nobody cares about this
+ * case anyway. If num_users() returns more than 1,
+ * we are not the only user of the plane's memory.
+ */
+ if (mem_priv && call_memop(vb, num_users, mem_priv) > 1)
+ return true;
+ }
+ return false;
+}
+EXPORT_SYMBOL(vb2_buffer_in_use);
+
+/*
+ * __buffers_in_use() - return true if any buffers on the queue are in use and
+ * the queue cannot be freed (by the means of REQBUFS(0)) call
+ */
+static bool __buffers_in_use(struct vb2_queue *q)
+{
+ unsigned int buffer;
+ for (buffer = 0; buffer < q->num_buffers; ++buffer) {
+ if (vb2_buffer_in_use(q, q->bufs[buffer]))
+ return true;
+ }
+ return false;
+}
+
+void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb)
+{
+ call_void_bufop(q, fill_user_buffer, q->bufs[index], pb);
+}
+EXPORT_SYMBOL_GPL(vb2_core_querybuf);
+
+/*
+ * __verify_userptr_ops() - verify that all memory operations required for
+ * USERPTR queue type have been provided
+ */
+static int __verify_userptr_ops(struct vb2_queue *q)
+{
+ if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
+ !q->mem_ops->put_userptr)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * __verify_mmap_ops() - verify that all memory operations required for
+ * MMAP queue type have been provided
+ */
+static int __verify_mmap_ops(struct vb2_queue *q)
+{
+ if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
+ !q->mem_ops->put || !q->mem_ops->mmap)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * __verify_dmabuf_ops() - verify that all memory operations required for
+ * DMABUF queue type have been provided
+ */
+static int __verify_dmabuf_ops(struct vb2_queue *q)
+{
+ if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
+ !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf ||
+ !q->mem_ops->unmap_dmabuf)
+ return -EINVAL;
+
+ return 0;
+}
+
+int vb2_verify_memory_type(struct vb2_queue *q,
+ enum vb2_memory memory, unsigned int type)
+{
+ if (memory != VB2_MEMORY_MMAP && memory != VB2_MEMORY_USERPTR &&
+ memory != VB2_MEMORY_DMABUF) {
+ dprintk(1, "unsupported memory type\n");
+ return -EINVAL;
+ }
+
+ if (type != q->type) {
+ dprintk(1, "requested type is incorrect\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Make sure all the required memory ops for given memory type
+ * are available.
+ */
+ if (memory == VB2_MEMORY_MMAP && __verify_mmap_ops(q)) {
+ dprintk(1, "MMAP for current setup unsupported\n");
+ return -EINVAL;
+ }
+
+ if (memory == VB2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
+ dprintk(1, "USERPTR for current setup unsupported\n");
+ return -EINVAL;
+ }
+
+ if (memory == VB2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
+ dprintk(1, "DMABUF for current setup unsupported\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Place the busy tests at the end: -EBUSY can be ignored when
+ * create_bufs is called with count == 0, but count == 0 should still
+ * do the memory and type validation.
+ */
+ if (vb2_fileio_is_active(q)) {
+ dprintk(1, "file io in progress\n");
+ return -EBUSY;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(vb2_verify_memory_type);
+
+int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
+ unsigned int *count)
+{
+ unsigned int num_buffers, allocated_buffers, num_planes = 0;
+ unsigned plane_sizes[VB2_MAX_PLANES] = { };
+ int ret;
+
+ if (q->streaming) {
+ dprintk(1, "streaming active\n");
+ return -EBUSY;
+ }
+
+ if (*count == 0 || q->num_buffers != 0 || q->memory != memory) {
+ /*
+ * We already have buffers allocated, so first check if they
+ * are not in use and can be freed.
+ */
+ mutex_lock(&q->mmap_lock);
+ if (q->memory == VB2_MEMORY_MMAP && __buffers_in_use(q)) {
+ mutex_unlock(&q->mmap_lock);
+ dprintk(1, "memory in use, cannot free\n");
+ return -EBUSY;
+ }
+
+ /*
+ * Call queue_cancel to clean up any buffers in the PREPARED or
+ * QUEUED state which is possible if buffers were prepared or
+ * queued without ever calling STREAMON.
+ */
+ __vb2_queue_cancel(q);
+ ret = __vb2_queue_free(q, q->num_buffers);
+ mutex_unlock(&q->mmap_lock);
+ if (ret)
+ return ret;
+
+ /*
+ * In case of REQBUFS(0) return immediately without calling
+ * driver's queue_setup() callback and allocating resources.
+ */
+ if (*count == 0)
+ return 0;
+ }
+
+ /*
+ * Make sure the requested values and current defaults are sane.
+ */
+ num_buffers = min_t(unsigned int, *count, VB2_MAX_FRAME);
+ num_buffers = max_t(unsigned int, num_buffers, q->min_buffers_needed);
+ memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
+ q->memory = memory;
+
+ /*
+ * Ask the driver how many buffers and planes per buffer it requires.
+ * Driver also sets the size and allocator context for each plane.
+ */
+ ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes,
+ plane_sizes, q->alloc_devs);
+ if (ret)
+ return ret;
+
+ /* Finally, allocate buffers and video memory */
+ allocated_buffers =
+ __vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes);
+ if (allocated_buffers == 0) {
+ dprintk(1, "memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * There is no point in continuing if we can't allocate the minimum
+ * number of buffers needed by this vb2_queue.
+ */
+ if (allocated_buffers < q->min_buffers_needed)
+ ret = -ENOMEM;
+
+ /*
+ * Check if driver can handle the allocated number of buffers.
+ */
+ if (!ret && allocated_buffers < num_buffers) {
+ num_buffers = allocated_buffers;
+ /*
+ * num_planes is set by the previous queue_setup(), but since it
+ * signals to queue_setup() whether it is called from create_bufs()
+ * vs reqbufs() we zero it here to signal that queue_setup() is
+ * called for the reqbufs() case.
+ */
+ num_planes = 0;
+
+ ret = call_qop(q, queue_setup, q, &num_buffers,
+ &num_planes, plane_sizes, q->alloc_devs);
+
+ if (!ret && allocated_buffers < num_buffers)
+ ret = -ENOMEM;
+
+ /*
+ * Either the driver has accepted a smaller number of buffers,
+ * or .queue_setup() returned an error
+ */
+ }
+
+ mutex_lock(&q->mmap_lock);
+ q->num_buffers = allocated_buffers;
+
+ if (ret < 0) {
+ /*
+ * Note: __vb2_queue_free() will subtract 'allocated_buffers'
+ * from q->num_buffers.
+ */
+ __vb2_queue_free(q, allocated_buffers);
+ mutex_unlock(&q->mmap_lock);
+ return ret;
+ }
+ mutex_unlock(&q->mmap_lock);
+
+ /*
+ * Return the number of successfully allocated buffers
+ * to the userspace.
+ */
+ *count = allocated_buffers;
+ q->waiting_for_buffers = !q->is_output;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_core_reqbufs);
+
+int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
+ unsigned int *count, unsigned requested_planes,
+ const unsigned requested_sizes[])
+{
+ unsigned int num_planes = 0, num_buffers, allocated_buffers;
+ unsigned plane_sizes[VB2_MAX_PLANES] = { };
+ int ret;
+
+ if (q->num_buffers == VB2_MAX_FRAME) {
+ dprintk(1, "maximum number of buffers already allocated\n");
+ return -ENOBUFS;
+ }
+
+ if (!q->num_buffers) {
+ memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
+ q->memory = memory;
+ q->waiting_for_buffers = !q->is_output;
+ }
+
+ num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers);
+
+ if (requested_planes && requested_sizes) {
+ num_planes = requested_planes;
+ memcpy(plane_sizes, requested_sizes, sizeof(plane_sizes));
+ }
+
+ /*
+ * Ask the driver, whether the requested number of buffers, planes per
+ * buffer and their sizes are acceptable
+ */
+ ret = call_qop(q, queue_setup, q, &num_buffers,
+ &num_planes, plane_sizes, q->alloc_devs);
+ if (ret)
+ return ret;
+
+ /* Finally, allocate buffers and video memory */
+ allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers,
+ num_planes, plane_sizes);
+ if (allocated_buffers == 0) {
+ dprintk(1, "memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Check if driver can handle the so far allocated number of buffers.
+ */
+ if (allocated_buffers < num_buffers) {
+ num_buffers = allocated_buffers;
+
+ /*
+ * q->num_buffers contains the total number of buffers, that the
+ * queue driver has set up
+ */
+ ret = call_qop(q, queue_setup, q, &num_buffers,
+ &num_planes, plane_sizes, q->alloc_devs);
+
+ if (!ret && allocated_buffers < num_buffers)
+ ret = -ENOMEM;
+
+ /*
+ * Either the driver has accepted a smaller number of buffers,
+ * or .queue_setup() returned an error
+ */
+ }
+
+ mutex_lock(&q->mmap_lock);
+ q->num_buffers += allocated_buffers;
+
+ if (ret < 0) {
+ /*
+ * Note: __vb2_queue_free() will subtract 'allocated_buffers'
+ * from q->num_buffers.
+ */
+ __vb2_queue_free(q, allocated_buffers);
+ mutex_unlock(&q->mmap_lock);
+ return -ENOMEM;
+ }
+ mutex_unlock(&q->mmap_lock);
+
+ /*
+ * Return the number of successfully allocated buffers
+ * to the userspace.
+ */
+ *count = allocated_buffers;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_core_create_bufs);
+
+void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
+{
+ if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
+ return NULL;
+
+ return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
+
+}
+EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
+
+void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
+{
+ if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
+ return NULL;
+
+ return call_ptr_memop(vb, cookie, vb->planes[plane_no].mem_priv);
+}
+EXPORT_SYMBOL_GPL(vb2_plane_cookie);
+
+void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ unsigned long flags;
+ unsigned int plane;
+
+ if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE))
+ return;
+
+ if (WARN_ON(state != VB2_BUF_STATE_DONE &&
+ state != VB2_BUF_STATE_ERROR &&
+ state != VB2_BUF_STATE_QUEUED &&
+ state != VB2_BUF_STATE_REQUEUEING))
+ state = VB2_BUF_STATE_ERROR;
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ /*
+ * Although this is not a callback, it still does have to balance
+ * with the buf_queue op. So update this counter manually.
+ */
+ vb->cnt_buf_done++;
+#endif
+ dprintk(4, "done processing on buffer %d, state: %d\n",
+ vb->index, state);
+
+ /* sync buffers */
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ call_void_memop(vb, finish, vb->planes[plane].mem_priv);
+
+ spin_lock_irqsave(&q->done_lock, flags);
+ if (state == VB2_BUF_STATE_QUEUED ||
+ state == VB2_BUF_STATE_REQUEUEING) {
+ vb->state = VB2_BUF_STATE_QUEUED;
+ } else {
+ /* Add the buffer to the done buffers list */
+ list_add_tail(&vb->done_entry, &q->done_list);
+ vb->state = state;
+ }
+ atomic_dec(&q->owned_by_drv_count);
+ spin_unlock_irqrestore(&q->done_lock, flags);
+
+ trace_vb2_buf_done(q, vb);
+
+ switch (state) {
+ case VB2_BUF_STATE_QUEUED:
+ return;
+ case VB2_BUF_STATE_REQUEUEING:
+ if (q->start_streaming_called)
+ __enqueue_in_driver(vb);
+ return;
+ default:
+ /* Inform any processes that may be waiting for buffers */
+ wake_up(&q->done_wq);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(vb2_buffer_done);
+
+void vb2_discard_done(struct vb2_queue *q)
+{
+ struct vb2_buffer *vb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->done_lock, flags);
+ list_for_each_entry(vb, &q->done_list, done_entry)
+ vb->state = VB2_BUF_STATE_ERROR;
+ spin_unlock_irqrestore(&q->done_lock, flags);
+}
+EXPORT_SYMBOL_GPL(vb2_discard_done);
+
+/*
+ * __prepare_mmap() - prepare an MMAP buffer
+ */
+static int __prepare_mmap(struct vb2_buffer *vb, const void *pb)
+{
+ int ret = 0;
+
+ if (pb)
+ ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
+ vb, pb, vb->planes);
+ return ret ? ret : call_vb_qop(vb, buf_prepare, vb);
+}
+
+/*
+ * __prepare_userptr