// SPDX-License-Identifier: GPL-2.0
/*
 * ION Memory Allocator CMA heap exporter
 *
 * Copyright (C) Linaro 2012
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/cma.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>

#include "ion.h"

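/*
 * Each CMA area on the system is exported as its own ION heap. The wrapper
 * embeds the generic struct ion_heap and remembers which CMA area backs it.
 */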
struct ion_cma_heap {
	struct ion_heap heap;
	struct cma *cma;
};

#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)

/* ION CMA heap operations */
static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
			    unsigned long len,
			    unsigned long flags)
{
	struct ion_cma_heap *cma_heap = to_cma_heap(heap);
	struct sg_table *table;
	struct page *pages;
	unsigned long size = PAGE_ALIGN(len);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	unsigned long align = get_order(size);
	int ret;

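	/*
	 * cma_alloc() takes its alignment as a page order. Cap it at
	 * CONFIG_CMA_ALIGNMENT so a large request cannot demand more
	 * alignment than the CMA area was configured to provide.
	 */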
	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

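	/*
	 * The trailing 'false' is cma_alloc()'s no_warn flag, left clear so
	 * an allocation failure still logs the usual CMA diagnostics.
	 */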
	pages = cma_alloc(cma_heap->cma, nr_pages, align, false);
	if (!pages)
		return -ENOMEM;

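	/*
	 * CMA hands back non-zeroed pages, so clear them before the buffer
	 * can be mapped. Highmem pages have no permanent kernel mapping and
	 * must be zeroed one page at a time under kmap_atomic(); lowmem
	 * pages can be cleared with a single memset() through the linear
	 * mapping.
	 */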
	if (PageHighMem(pages)) {
		unsigned long nr_clear_pages = nr_pages;
		struct page *page = pages;

		while (nr_clear_pages > 0) {
			void *vaddr = kmap_atomic(page);

			memset(vaddr, 0, PAGE_SIZE);
			kunmap_atomic(vaddr);
			page++;
			nr_clear_pages--;
		}
	} else {
		memset(page_address(pages), 0, size);
	}

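	/*
	 * A CMA allocation is physically contiguous, so a single scatterlist
	 * entry describes the whole buffer.
	 */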
	table = kmalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		goto err;

	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret)
		goto free_mem;

	sg_set_page(table->sgl, pages, size, 0);

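	/*
	 * Stash the raw page pointer in priv_virt so ion_cma_free() can hand
	 * exactly this allocation back to cma_release().
	 */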
	buffer->priv_virt = pages;
	buffer->sg_table = table;
	return 0;

free_mem:
	kfree(table);
err:
	cma_release(cma_heap->cma, pages, nr_pages);
	return -ENOMEM;
}

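/*
 * Undo ion_cma_allocate(): return the pages to their CMA area and tear down
 * the single-entry scatterlist that described them.
 */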
static void ion_cma_free(struct ion_buffer *buffer)
{
	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
	struct page *pages = buffer->priv_virt;
	unsigned long nr_pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;

	/* release memory */
	cma_release(cma_heap->cma, pages, nr_pages);
	/* release sg table */
	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}

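/*
 * Userspace and kernel mappings are delegated to the generic ion_heap
 * helpers, which walk buffer->sg_table; for this heap that is a single
 * physically contiguous chunk.
 */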
static struct ion_heap_ops ion_cma_ops = {
	.allocate = ion_cma_allocate,
	.free = ion_cma_free,
	.map_user = ion_heap_map_user,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
};

static struct ion_heap *__ion_cma_heap_create(struct cma *cma)
{
	struct ion_cma_heap *cma_heap;

	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
	if (!cma_heap)
		return ERR_PTR(-ENOMEM);

	cma_heap->heap.ops = &ion_cma_ops;
	cma_heap->cma = cma;
	cma_heap->heap.type = ION_HEAP_TYPE_DMA;
	return &cma_heap->heap;
}

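/*
 * Called once per CMA area by cma_for_each_area(): wrap the area in an ION
 * heap and register it with the ION core under the area's own name.
 */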
static int __ion_add_cma_heaps(struct cma *cma, void *data)
{
	struct ion_heap *heap;

	heap = __ion_cma_heap_create(cma);
	if (IS_ERR(heap))
		return PTR_ERR(heap);

	heap->name = cma_get_name(cma);

	ion_device_add_heap(heap);
	return 0;
}

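/*
 * CMA areas are carved out of memblock early in boot, long before initcalls
 * run, so by device_initcall() time every area is ready to be exported as a
 * heap.
 */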
static int ion_add_cma_heaps(void)
{
	cma_for_each_area(__ion_add_cma_heaps, NULL);
	return 0;
}
device_initcall(ion_add_cma_heaps);