GNU Linux-libre 4.14.266-gnu1
[releases.git] / drivers/staging/android/ion/ion_cma_heap.c
/*
 * drivers/staging/android/ion/ion_cma_heap.c
 *
 * Copyright (C) Linaro 2012
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/cma.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>

#include "ion.h"

/* An ION heap that hands out buffers from a CMA (Contiguous Memory Allocator) area */
struct ion_cma_heap {
        struct ion_heap heap;
        struct cma *cma;
};

#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)

/* ION CMA heap operations */
static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
                            unsigned long len,
                            unsigned long flags)
{
        struct ion_cma_heap *cma_heap = to_cma_heap(heap);
        struct sg_table *table;
        struct page *pages;
        unsigned long size = PAGE_ALIGN(len);
        unsigned long nr_pages = size >> PAGE_SHIFT;
        unsigned long align = get_order(size);
        int ret;

        /* cma_alloc() cannot honour alignments above CONFIG_CMA_ALIGNMENT */
        if (align > CONFIG_CMA_ALIGNMENT)
                align = CONFIG_CMA_ALIGNMENT;

        pages = cma_alloc(cma_heap->cma, nr_pages, align, GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        /*
         * Zero the buffer before handing it out. Highmem pages have no
         * permanent kernel mapping, so clear them one page at a time
         * through a temporary kmap_atomic() mapping; lowmem pages can be
         * cleared in one pass through the linear mapping.
         */
        if (PageHighMem(pages)) {
                unsigned long nr_clear_pages = nr_pages;
                struct page *page = pages;

                while (nr_clear_pages > 0) {
                        void *vaddr = kmap_atomic(page);

                        memset(vaddr, 0, PAGE_SIZE);
                        kunmap_atomic(vaddr);
                        page++;
                        nr_clear_pages--;
                }
        } else {
                memset(page_address(pages), 0, size);
        }

        table = kmalloc(sizeof(*table), GFP_KERNEL);
        if (!table)
                goto err;

        /* The allocation is physically contiguous, so one entry suffices */
        ret = sg_alloc_table(table, 1, GFP_KERNEL);
        if (ret)
                goto free_mem;

        sg_set_page(table->sgl, pages, size, 0);

        buffer->priv_virt = pages;
        buffer->sg_table = table;
        return 0;

free_mem:
        kfree(table);
err:
        cma_release(cma_heap->cma, pages, nr_pages);
        return -ENOMEM;
}
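
/*
 * Example (hypothetical, not part of the original file): a driver that
 * receives a buffer from this heap can map the single-entry table for
 * device DMA with the standard scatterlist API. "dev" and "buffer" are
 * assumed to come from the caller's context:
 *
 *        struct sg_table *table = buffer->sg_table;
 *        int nents = dma_map_sg(dev, table->sgl, table->nents,
 *                               DMA_BIDIRECTIONAL);
 *
 *        if (nents <= 0)
 *                return -ENOMEM;
 *        // ... program the device with sg_dma_address(table->sgl) ...
 *        dma_unmap_sg(dev, table->sgl, table->nents, DMA_BIDIRECTIONAL);
 */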

static void ion_cma_free(struct ion_buffer *buffer)
{
        struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
        struct page *pages = buffer->priv_virt;
        unsigned long nr_pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;

        /* release memory */
        cma_release(cma_heap->cma, pages, nr_pages);
        /* release sg table */
        sg_free_table(buffer->sg_table);
        kfree(buffer->sg_table);
}

static struct ion_heap_ops ion_cma_ops = {
        .allocate = ion_cma_allocate,
        .free = ion_cma_free,
        .map_user = ion_heap_map_user,
        .map_kernel = ion_heap_map_kernel,
        .unmap_kernel = ion_heap_unmap_kernel,
};
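
/*
 * The map_user/map_kernel/unmap_kernel callbacks are the generic helpers
 * from ion_heap.c; they operate purely on buffer->sg_table, so they work
 * for any heap that fills in a valid table. As a simplified sketch (an
 * illustration of the idea, not the helpers' actual code), kernel-mapping
 * a buffer from this heap, whose pages are consecutive, could look like
 * this (VM_MAP and vmap() are from <linux/vmalloc.h>):
 *
 *        struct page *first = buffer->priv_virt;
 *        unsigned long npages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
 *        struct page **pages = kmalloc_array(npages, sizeof(*pages),
 *                                            GFP_KERNEL);
 *        void *vaddr;
 *        unsigned long i;
 *
 *        if (!pages)
 *                return ERR_PTR(-ENOMEM);
 *        for (i = 0; i < npages; i++)
 *                pages[i] = first + i;
 *        vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
 *        kfree(pages);
 */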

static struct ion_heap *__ion_cma_heap_create(struct cma *cma)
{
        struct ion_cma_heap *cma_heap;

        cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);

        if (!cma_heap)
                return ERR_PTR(-ENOMEM);

        cma_heap->heap.ops = &ion_cma_ops;
        /*
         * Record the CMA area this heap allocates from; ion_cma_allocate()
         * hands it to cma_alloc().
         */
        cma_heap->cma = cma;
        cma_heap->heap.type = ION_HEAP_TYPE_DMA;
        return &cma_heap->heap;
}

static int __ion_add_cma_heaps(struct cma *cma, void *data)
{
        struct ion_heap *heap;

        heap = __ion_cma_heap_create(cma);
        if (IS_ERR(heap))
                return PTR_ERR(heap);

        heap->name = cma_get_name(cma);

        ion_device_add_heap(heap);
        return 0;
}

/* Register one ION heap for each CMA area declared on this system */
static int ion_add_cma_heaps(void)
{
        cma_for_each_area(__ion_add_cma_heaps, NULL);
        return 0;
}
device_initcall(ion_add_cma_heaps);
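
/*
 * Example (hypothetical, userspace): assuming the uapi definitions from
 * this tree (drivers/staging/android/uapi/ion.h), a client allocates from
 * one of the CMA heaps registered above by opening /dev/ion and issuing
 * ION_IOC_ALLOC with that heap's id set in heap_id_mask. "cma_heap_id"
 * below is a placeholder for an id discovered via ION_IOC_HEAP_QUERY. On
 * success, alloc.fd holds a dma-buf file descriptor that can be mmap()ed
 * or passed to a driver:
 *
 *        struct ion_allocation_data alloc = {
 *                .len = 4096,
 *                .heap_id_mask = 1 << cma_heap_id,
 *                .flags = 0,
 *        };
 *        int ion_fd = open("/dev/ion", O_RDWR);
 *
 *        if (ion_fd < 0 || ioctl(ion_fd, ION_IOC_ALLOC, &alloc) < 0)
 *                return -1;
 *        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                       alloc.fd, 0);
 */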