/*
 * drivers/staging/android/ion/ion_cma_heap.c
 *
 * Copyright (C) Linaro 2012
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/cma.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>

#include "ion.h"
33 #define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
35 /* ION CMA heap operations functions */
36 static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
40 struct ion_cma_heap *cma_heap = to_cma_heap(heap);
41 struct sg_table *table;
43 unsigned long size = PAGE_ALIGN(len);
44 unsigned long nr_pages = size >> PAGE_SHIFT;
45 unsigned long align = get_order(size);
48 if (align > CONFIG_CMA_ALIGNMENT)
49 align = CONFIG_CMA_ALIGNMENT;
51 pages = cma_alloc(cma_heap->cma, nr_pages, align, GFP_KERNEL);
55 if (PageHighMem(pages)) {
56 unsigned long nr_clear_pages = nr_pages;
57 struct page *page = pages;
59 while (nr_clear_pages > 0) {
60 void *vaddr = kmap_atomic(page);
62 memset(vaddr, 0, PAGE_SIZE);
68 memset(page_address(pages), 0, size);
71 table = kmalloc(sizeof(*table), GFP_KERNEL);
75 ret = sg_alloc_table(table, 1, GFP_KERNEL);
79 sg_set_page(table->sgl, pages, size, 0);
81 buffer->priv_virt = pages;
82 buffer->sg_table = table;
88 cma_release(cma_heap->cma, pages, nr_pages);
92 static void ion_cma_free(struct ion_buffer *buffer)
94 struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
95 struct page *pages = buffer->priv_virt;
96 unsigned long nr_pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
99 cma_release(cma_heap->cma, pages, nr_pages);
100 /* release sg table */
101 sg_free_table(buffer->sg_table);
102 kfree(buffer->sg_table);
105 static struct ion_heap_ops ion_cma_ops = {
106 .allocate = ion_cma_allocate,
107 .free = ion_cma_free,
108 .map_user = ion_heap_map_user,
109 .map_kernel = ion_heap_map_kernel,
110 .unmap_kernel = ion_heap_unmap_kernel,
113 static struct ion_heap *__ion_cma_heap_create(struct cma *cma)
115 struct ion_cma_heap *cma_heap;
117 cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
120 return ERR_PTR(-ENOMEM);
122 cma_heap->heap.ops = &ion_cma_ops;
124 * get device from private heaps data, later it will be
125 * used to make the link with reserved CMA memory
128 cma_heap->heap.type = ION_HEAP_TYPE_DMA;
129 return &cma_heap->heap;
132 static int __ion_add_cma_heaps(struct cma *cma, void *data)
134 struct ion_heap *heap;
136 heap = __ion_cma_heap_create(cma);
138 return PTR_ERR(heap);
140 heap->name = cma_get_name(cma);
142 ion_device_add_heap(heap);
146 static int ion_add_cma_heaps(void)
148 cma_for_each_area(__ion_add_cma_heaps, NULL);
151 device_initcall(ion_add_cma_heaps);