GNU Linux-libre 4.19.286-gnu1
[releases.git] / drivers / staging / android / ion / ion_cma_heap.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/staging/android/ion/ion_cma_heap.c
4  *
5  * Copyright (C) Linaro 2012
6  * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
7  */
8
9 #include <linux/device.h>
10 #include <linux/slab.h>
11 #include <linux/errno.h>
12 #include <linux/err.h>
13 #include <linux/cma.h>
14 #include <linux/scatterlist.h>
15 #include <linux/highmem.h>
16
17 #include "ion.h"
18
/*
 * struct ion_cma_heap - an ION heap backed by a single CMA area
 * @heap:	embedded generic ION heap; registered with the ION core
 * @cma:	the CMA area all of this heap's buffers are carved from
 */
struct ion_cma_heap {
	struct ion_heap heap;
	struct cma *cma;
};

/* Recover the containing ion_cma_heap from its embedded struct ion_heap. */
#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
26 /* ION CMA heap operations functions */
27 static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
28                             unsigned long len,
29                             unsigned long flags)
30 {
31         struct ion_cma_heap *cma_heap = to_cma_heap(heap);
32         struct sg_table *table;
33         struct page *pages;
34         unsigned long size = PAGE_ALIGN(len);
35         unsigned long nr_pages = size >> PAGE_SHIFT;
36         unsigned long align = get_order(size);
37         int ret;
38
39         if (align > CONFIG_CMA_ALIGNMENT)
40                 align = CONFIG_CMA_ALIGNMENT;
41
42         pages = cma_alloc(cma_heap->cma, nr_pages, align, false);
43         if (!pages)
44                 return -ENOMEM;
45
46         if (PageHighMem(pages)) {
47                 unsigned long nr_clear_pages = nr_pages;
48                 struct page *page = pages;
49
50                 while (nr_clear_pages > 0) {
51                         void *vaddr = kmap_atomic(page);
52
53                         memset(vaddr, 0, PAGE_SIZE);
54                         kunmap_atomic(vaddr);
55                         page++;
56                         nr_clear_pages--;
57                 }
58         } else {
59                 memset(page_address(pages), 0, size);
60         }
61
62         table = kmalloc(sizeof(*table), GFP_KERNEL);
63         if (!table)
64                 goto err;
65
66         ret = sg_alloc_table(table, 1, GFP_KERNEL);
67         if (ret)
68                 goto free_mem;
69
70         sg_set_page(table->sgl, pages, size, 0);
71
72         buffer->priv_virt = pages;
73         buffer->sg_table = table;
74         return 0;
75
76 free_mem:
77         kfree(table);
78 err:
79         cma_release(cma_heap->cma, pages, nr_pages);
80         return -ENOMEM;
81 }
82
83 static void ion_cma_free(struct ion_buffer *buffer)
84 {
85         struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
86         struct page *pages = buffer->priv_virt;
87         unsigned long nr_pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
88
89         /* release memory */
90         cma_release(cma_heap->cma, pages, nr_pages);
91         /* release sg table */
92         sg_free_table(buffer->sg_table);
93         kfree(buffer->sg_table);
94 }
95
/*
 * Heap callbacks: CMA-specific allocate/free plus the generic ION
 * helpers for mapping buffers into user and kernel address space.
 */
static struct ion_heap_ops ion_cma_ops = {
	.allocate = ion_cma_allocate,
	.free = ion_cma_free,
	.map_user = ion_heap_map_user,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
};
103
104 static struct ion_heap *__ion_cma_heap_create(struct cma *cma)
105 {
106         struct ion_cma_heap *cma_heap;
107
108         cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
109
110         if (!cma_heap)
111                 return ERR_PTR(-ENOMEM);
112
113         cma_heap->heap.ops = &ion_cma_ops;
114         /*
115          * get device from private heaps data, later it will be
116          * used to make the link with reserved CMA memory
117          */
118         cma_heap->cma = cma;
119         cma_heap->heap.type = ION_HEAP_TYPE_DMA;
120         return &cma_heap->heap;
121 }
122
123 static int __ion_add_cma_heaps(struct cma *cma, void *data)
124 {
125         struct ion_heap *heap;
126
127         heap = __ion_cma_heap_create(cma);
128         if (IS_ERR(heap))
129                 return PTR_ERR(heap);
130
131         heap->name = cma_get_name(cma);
132
133         ion_device_add_heap(heap);
134         return 0;
135 }
136
/*
 * Register one ION heap per CMA area present in the system.  Run at
 * device_initcall time; always reports success (per-area failures are
 * not propagated by cma_for_each_area()'s iteration here).
 */
static int ion_add_cma_heaps(void)
{
	cma_for_each_area(__ion_add_cma_heaps, NULL);
	return 0;
}
device_initcall(ion_add_cma_heaps);