/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

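/*
 * One of these is allocated per plane and tracks its backing memory,
 * whether that is vmalloc()ed kernel memory (MMAP), pinned user pages
 * (USERPTR) or an attached DMABUF.
 */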
struct vb2_vmalloc_buf {
	void *vaddr;
	struct frame_vector *vec;
	enum dma_data_direction dma_dir;
	unsigned long size;
	refcount_t refcount;
	struct vb2_vmarea_handler handler;
	struct dma_buf *dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

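/*
 * MMAP support: back the plane with vmalloc_user() memory and wire up the
 * vm_area handler so that userspace mappings hold a reference on the buffer.
 */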
static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
			       unsigned long size, enum dma_data_direction dma_dir,
			       gfp_t gfp_flags)
{
	struct vb2_vmalloc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->size = size;
	buf->vaddr = vmalloc_user(buf->size);
	buf->dma_dir = dma_dir;
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_vmalloc_put;
	buf->handler.arg = buf;

	if (!buf->vaddr) {
		pr_debug("vmalloc of size %ld failed\n", buf->size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	refcount_set(&buf->refcount, 1);
	return buf;
}

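/* Drop one reference; the vmalloc area is freed once the last user is gone. */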
static void vb2_vmalloc_put(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (refcount_dec_and_test(&buf->refcount)) {
		vfree(buf->vaddr);
		kfree(buf);
	}
}

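/*
 * USERPTR support: pin the user pages behind vaddr and map them into kernel
 * space with vm_map_ram(). Ranges without struct pages (raw PFNs) are only
 * accepted when physically contiguous and are ioremapped instead.
 */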
static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
				     unsigned long size,
				     enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;
	struct frame_vector *vec;
	int n_pages, offset, i;
	int ret = -ENOMEM;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dma_dir = dma_dir;
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
					       dma_dir == DMA_BIDIRECTIONAL);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_pfnvec_create;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	if (frame_vector_to_pages(vec) < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * We cannot get page pointers for these pfns. Check memory is
		 * physically contiguous and use direct mapping.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i - 1] + 1 != nums[i])
				goto fail_map;
		buf->vaddr = (__force void *)
			ioremap_nocache(__pfn_to_phys(nums[0]), size + offset);
	} else {
		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
					PAGE_KERNEL);
	}

	if (!buf->vaddr)
		goto fail_map;
	buf->vaddr += offset;
	return buf;

fail_map:
	vb2_destroy_framevec(vec);
fail_pfnvec_create:
	kfree(buf);

	return ERR_PTR(ret);
}

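/*
 * Tear down a USERPTR mapping: unmap the kernel mapping, mark the pages dirty
 * if the device may have written to them, and release the pinned frame vector.
 */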
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;
	struct page **pages;
	unsigned int n_pages;

	if (!buf->vec->is_pfns) {
		n_pages = frame_vector_count(buf->vec);
		pages = frame_vector_pages(buf->vec);
		if (vaddr)
			vm_unmap_ram((void *)vaddr, n_pages);
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < n_pages; i++)
				set_page_dirty_lock(pages[i]);
	} else {
		iounmap((__force void __iomem *)buf->vaddr);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_vmalloc_vaddr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (!buf->vaddr) {
		pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
		return NULL;
	}

	return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

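/*
 * Map an MMAP buffer into userspace; vb2_common_vm_ops takes a reference
 * for the lifetime of the mapping.
 */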
static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No memory to map\n");
		return -EINVAL;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret) {
		pr_err("Remapping vmalloc memory, error: %d\n", ret);
		return ret;
	}

	/*
	 * Make sure that vm_areas for 2 buffers won't be merged together
	 */
	vma->vm_flags |= VM_DONTEXPAND;

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_vmalloc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

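/*
 * Exporter attach: build a scatterlist with one entry per page of the
 * vmalloc area. It is only mapped for the importing device later, in
 * vb2_vmalloc_dmabuf_ops_map().
 */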
static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_vmalloc_attachment *attach;
	struct vb2_vmalloc_buf *buf = dbuf->priv;
	int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
	struct sg_table *sgt;
	struct scatterlist *sg;
	void *vaddr = buf->vaddr;
	int ret;
	int i;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return ret;
	}
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *page = vmalloc_to_page(vaddr);

		if (!page) {
			sg_free_table(sgt);
			kfree(attach);
			return -ENOMEM;
		}
		sg_set_page(sg, page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;
	return 0;
}

static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

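/*
 * Map the attachment's scatterlist for the importer. A previous mapping is
 * reused if the DMA direction is unchanged, otherwise it is unmapped first.
 */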
static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_vmalloc_get_dmabuf */
	vb2_vmalloc_put(dbuf->priv);
}

static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
	.attach = vb2_vmalloc_dmabuf_ops_attach,
	.detach = vb2_vmalloc_dmabuf_ops_detach,
	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
	.map = vb2_vmalloc_dmabuf_ops_kmap,
	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
	.release = vb2_vmalloc_dmabuf_ops_release,
};

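/*
 * Export an MMAP buffer as a DMABUF; the exported dma_buf holds its own
 * reference on the vb2 buffer, dropped in the release op.
 */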
static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_vmalloc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->vaddr))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}
#endif /* CONFIG_HAS_DMA */

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

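/*
 * Importer side: obtain and drop a kernel mapping of an attached dma_buf
 * with dma_buf_vmap()/dma_buf_vunmap().
 */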
static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	buf->vaddr = dma_buf_vmap(buf->dbuf);

	return buf->vaddr ? 0 : -EFAULT;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	dma_buf_vunmap(buf->dbuf, buf->vaddr);
	buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	if (buf->vaddr)
		dma_buf_vunmap(buf->dbuf, buf->vaddr);

	kfree(buf);
}

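/*
 * Importer attach: only record the dma_buf and requested size here; the
 * actual vmap is done later in vb2_vmalloc_map_dmabuf().
 */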
static void *vb2_vmalloc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dbuf = dbuf;
	buf->dma_dir = dma_dir;
	buf->size = size;

	return buf;
}

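/* The vb2_mem_ops table drivers point their vb2_queue's mem_ops at to use this allocator. */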
const struct vb2_mem_ops vb2_vmalloc_memops = {
	.alloc = vb2_vmalloc_alloc,
	.put = vb2_vmalloc_put,
	.get_userptr = vb2_vmalloc_get_userptr,
	.put_userptr = vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
	.get_dmabuf = vb2_vmalloc_get_dmabuf,
#endif
	.map_dmabuf = vb2_vmalloc_map_dmabuf,
	.unmap_dmabuf = vb2_vmalloc_unmap_dmabuf,
	.attach_dmabuf = vb2_vmalloc_attach_dmabuf,
	.detach_dmabuf = vb2_vmalloc_detach_dmabuf,
	.vaddr = vb2_vmalloc_vaddr,
	.mmap = vb2_vmalloc_mmap,
	.num_users = vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");