GNU Linux-libre 4.19.286-gnu1
drivers/staging/android/ion/ion_system_heap.c
// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"

#define NUM_ORDERS ARRAY_SIZE(orders)

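/*
 * Allocation policy: high-order allocations are purely opportunistic.
 * They never warn, never retry and never enter direct reclaim, so a
 * failure simply drops the allocator down to the next order. Order-0
 * allocations may block and reclaim. With a 4 KiB PAGE_SIZE, the pool
 * orders {8, 4, 0} correspond to 1 MiB, 64 KiB and 4 KiB chunks.
 */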
static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
                                     __GFP_NORETRY) & ~__GFP_RECLAIM;
static gfp_t low_order_gfp_flags  = GFP_HIGHUSER | __GFP_ZERO;
static const unsigned int orders[] = {8, 4, 0};

static int order_to_index(unsigned int order)
{
        int i;

        for (i = 0; i < NUM_ORDERS; i++)
                if (order == orders[i])
                        return i;
        BUG();
        return -1;
}

static inline unsigned int order_to_size(int order)
{
        return PAGE_SIZE << order;
}

struct ion_system_heap {
        struct ion_heap heap;
        struct ion_page_pool *pools[NUM_ORDERS];
};

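/*
 * Allocations go through the per-order pool so that recently freed pages
 * can be reused without hitting the page allocator; the pool is expected
 * to fall back to the system when it has nothing cached.
 */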
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
                                      struct ion_buffer *buffer,
                                      unsigned long order)
{
        struct ion_page_pool *pool = heap->pools[order_to_index(order)];

        return ion_page_pool_alloc(pool);
}

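/*
 * Frees normally recycle the page into the matching pool. A free done on
 * behalf of the shrinker must not repopulate the pools it is trying to
 * drain, so those pages go straight back to the system instead. The order
 * is recovered with compound_order(), which relies on the pool handing
 * out compound pages.
 */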
static void free_buffer_page(struct ion_system_heap *heap,
                             struct ion_buffer *buffer, struct page *page)
{
        struct ion_page_pool *pool;
        unsigned int order = compound_order(page);

        /* shrinker-initiated free: return the page to the system */
        if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) {
                __free_pages(page, order);
                return;
        }

        pool = heap->pools[order_to_index(order)];

        ion_page_pool_free(pool, page);
}

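/*
 * Walk the orders from largest to smallest and return the first chunk the
 * pools can supply, skipping any order that is larger than the remaining
 * size or above the caller's @max_order cap.
 */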
static struct page *alloc_largest_available(struct ion_system_heap *heap,
                                            struct ion_buffer *buffer,
                                            unsigned long size,
                                            unsigned int max_order)
{
        struct page *page;
        int i;

        for (i = 0; i < NUM_ORDERS; i++) {
                if (size < order_to_size(orders[i]))
                        continue;
                if (max_order < orders[i])
                        continue;

                page = alloc_buffer_page(heap, buffer, orders[i]);
                if (!page)
                        continue;

                return page;
        }

        return NULL;
}

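/*
 * Build the buffer from the largest chunks available. After each chunk,
 * max_order ratchets down to that chunk's order, so failed high-order
 * attempts are not repeated for every remaining chunk. Requests larger
 * than half of system RAM are rejected up front, presumably as a cheap
 * guard against hopeless allocations.
 */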
static int ion_system_heap_allocate(struct ion_heap *heap,
                                    struct ion_buffer *buffer,
                                    unsigned long size,
                                    unsigned long flags)
{
        struct ion_system_heap *sys_heap = container_of(heap,
                                                        struct ion_system_heap,
                                                        heap);
        struct sg_table *table;
        struct scatterlist *sg;
        struct list_head pages;
        struct page *page, *tmp_page;
        int i = 0;
        unsigned long size_remaining = PAGE_ALIGN(size);
        unsigned int max_order = orders[0];

        if (size / PAGE_SIZE > totalram_pages / 2)
                return -ENOMEM;

        INIT_LIST_HEAD(&pages);
        while (size_remaining > 0) {
                page = alloc_largest_available(sys_heap, buffer, size_remaining,
                                               max_order);
                if (!page)
                        goto free_pages;
                list_add_tail(&page->lru, &pages);
                size_remaining -= PAGE_SIZE << compound_order(page);
                max_order = compound_order(page);
                i++;
        }
        table = kmalloc(sizeof(*table), GFP_KERNEL);
        if (!table)
                goto free_pages;

        if (sg_alloc_table(table, i, GFP_KERNEL))
                goto free_table;

        sg = table->sgl;
        list_for_each_entry_safe(page, tmp_page, &pages, lru) {
                sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
                sg = sg_next(sg);
                list_del(&page->lru);
        }

        buffer->sg_table = table;
        return 0;

free_table:
        kfree(table);
free_pages:
        list_for_each_entry_safe(page, tmp_page, &pages, lru)
                free_buffer_page(sys_heap, buffer, page);
        return -ENOMEM;
}

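/*
 * Zeroing on free keeps stale client data from leaking to whoever next
 * receives these pages from a pool. Shrinker-initiated frees skip it,
 * since those pages go back to the system rather than into a pool.
 */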
static void ion_system_heap_free(struct ion_buffer *buffer)
{
        struct ion_system_heap *sys_heap = container_of(buffer->heap,
                                                        struct ion_system_heap,
                                                        heap);
        struct sg_table *table = buffer->sg_table;
        struct scatterlist *sg;
        int i;

        /* zero the buffer before its pages go back into the page pools */
        if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
                ion_heap_buffer_zero(buffer);

        for_each_sg(table->sgl, sg, table->nents, i)
                free_buffer_page(sys_heap, buffer, sg_page(sg));
        sg_free_table(table);
        kfree(table);
}

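/*
 * Shrinker callback. A nr_to_scan of zero is a query: every pool is asked
 * how much it could free and the counts are summed. Otherwise the pools
 * are drained in order until the scan target has been met.
 */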
static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
                                  int nr_to_scan)
{
        struct ion_page_pool *pool;
        struct ion_system_heap *sys_heap;
        int nr_total = 0;
        int i, nr_freed;
        int only_scan = 0;

        sys_heap = container_of(heap, struct ion_system_heap, heap);

        if (!nr_to_scan)
                only_scan = 1;

        for (i = 0; i < NUM_ORDERS; i++) {
                pool = sys_heap->pools[i];

                if (only_scan) {
                        nr_total += ion_page_pool_shrink(pool,
                                                         gfp_mask,
                                                         nr_to_scan);

                } else {
                        nr_freed = ion_page_pool_shrink(pool,
                                                        gfp_mask,
                                                        nr_to_scan);
                        nr_to_scan -= nr_freed;
                        nr_total += nr_freed;
                        if (nr_to_scan <= 0)
                                break;
                }
        }
        return nr_total;
}

static struct ion_heap_ops system_heap_ops = {
        .allocate = ion_system_heap_allocate,
        .free = ion_system_heap_free,
        .map_kernel = ion_heap_map_kernel,
        .unmap_kernel = ion_heap_unmap_kernel,
        .map_user = ion_heap_map_user,
        .shrink = ion_system_heap_shrink,
};

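/*
 * debugfs helper: for each pool, report how many highmem and lowmem pages
 * are cached and the total bytes they represent.
 */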
static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
                                      void *unused)
{
        struct ion_system_heap *sys_heap = container_of(heap,
                                                        struct ion_system_heap,
                                                        heap);
        int i;
        struct ion_page_pool *pool;

        for (i = 0; i < NUM_ORDERS; i++) {
                pool = sys_heap->pools[i];

                seq_printf(s, "%d order %u highmem pages %lu total\n",
                           pool->high_count, pool->order,
                           (PAGE_SIZE << pool->order) * pool->high_count);
                seq_printf(s, "%d order %u lowmem pages %lu total\n",
                           pool->low_count, pool->order,
                           (PAGE_SIZE << pool->order) * pool->low_count);
        }

        return 0;
}

static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
{
        int i;

        for (i = 0; i < NUM_ORDERS; i++)
                if (pools[i])
                        ion_page_pool_destroy(pools[i]);
}

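/*
 * Create one pool per order. Orders above 4 get the opportunistic gfp
 * flags. On partial failure every pool created so far is torn down again;
 * the NULL check in ion_system_heap_destroy_pools() makes this safe
 * because the caller passes a zeroed pools array.
 */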
static int ion_system_heap_create_pools(struct ion_page_pool **pools)
{
        int i;

        for (i = 0; i < NUM_ORDERS; i++) {
                struct ion_page_pool *pool;
                gfp_t gfp_flags = low_order_gfp_flags;

                if (orders[i] > 4)
                        gfp_flags = high_order_gfp_flags;

                pool = ion_page_pool_create(gfp_flags, orders[i]);
                if (!pool)
                        goto err_create_pool;
                pools[i] = pool;
        }
        return 0;

err_create_pool:
        ion_system_heap_destroy_pools(pools);
        return -ENOMEM;
}

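/*
 * ION_HEAP_FLAG_DEFER_FREE asks the ion core to defer actual buffer
 * teardown to its background freeing thread, so releasing a buffer does
 * not block the caller on the zeroing done in ion_system_heap_free().
 */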
static struct ion_heap *__ion_system_heap_create(void)
{
        struct ion_system_heap *heap;

        heap = kzalloc(sizeof(*heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);
        heap->heap.ops = &system_heap_ops;
        heap->heap.type = ION_HEAP_TYPE_SYSTEM;
        heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;

        if (ion_system_heap_create_pools(heap->pools))
                goto free_heap;

        heap->heap.debug_show = ion_system_heap_debug_show;
        return &heap->heap;

free_heap:
        kfree(heap);
        return ERR_PTR(-ENOMEM);
}

static int ion_system_heap_create(void)
{
        struct ion_heap *heap;

        heap = __ion_system_heap_create();
        if (IS_ERR(heap))
                return PTR_ERR(heap);
        heap->name = "ion_system_heap";

        ion_device_add_heap(heap);
        return 0;
}
device_initcall(ion_system_heap_create);

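/*
 * The contiguous variant below allocates one physically contiguous run.
 * split_page() converts the high-order allocation into independent
 * order-0 pages, which lets the tail beyond PAGE_ALIGN(len) be handed
 * back immediately and the rest be freed page by page later.
 */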
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
                                           struct ion_buffer *buffer,
                                           unsigned long len,
                                           unsigned long flags)
{
        int order = get_order(len);
        struct page *page;
        struct sg_table *table;
        unsigned long i;
        int ret;

        page = alloc_pages(low_order_gfp_flags | __GFP_NOWARN, order);
        if (!page)
                return -ENOMEM;

        split_page(page, order);

        len = PAGE_ALIGN(len);
        for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
                __free_page(page + i);

        table = kmalloc(sizeof(*table), GFP_KERNEL);
        if (!table) {
                ret = -ENOMEM;
                goto free_pages;
        }

        ret = sg_alloc_table(table, 1, GFP_KERNEL);
        if (ret)
                goto free_table;

        sg_set_page(table->sgl, page, len, 0);

        buffer->sg_table = table;

        return 0;

free_table:
        kfree(table);
free_pages:
        for (i = 0; i < len >> PAGE_SHIFT; i++)
                __free_page(page + i);

        return ret;
}

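/*
 * Because the allocation path split the original high-order page, the
 * buffer is freed here one order-0 page at a time rather than with a
 * single __free_pages() call.
 */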
static void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
        struct sg_table *table = buffer->sg_table;
        struct page *page = sg_page(table->sgl);
        unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
        unsigned long i;

        for (i = 0; i < pages; i++)
                __free_page(page + i);
        sg_free_table(table);
        kfree(table);
}

static struct ion_heap_ops kmalloc_ops = {
        .allocate = ion_system_contig_heap_allocate,
        .free = ion_system_contig_heap_free,
        .map_kernel = ion_heap_map_kernel,
        .unmap_kernel = ion_heap_unmap_kernel,
        .map_user = ion_heap_map_user,
};

static struct ion_heap *__ion_system_contig_heap_create(void)
{
        struct ion_heap *heap;

        heap = kzalloc(sizeof(*heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);
        heap->ops = &kmalloc_ops;
        heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
        heap->name = "ion_system_contig_heap";
        return heap;
}

static int ion_system_contig_heap_create(void)
{
        struct ion_heap *heap;

        heap = __ion_system_contig_heap_create();
        if (IS_ERR(heap))
                return PTR_ERR(heap);

        ion_device_add_heap(heap);
        return 0;
}
device_initcall(ion_system_contig_heap_create);