/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

#define NUM_ORDERS ARRAY_SIZE(orders)

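/*
 * Allocation orders tried for each buffer, largest first: with 4 KiB
 * pages these are 1 MiB, 64 KiB and 4 KiB chunks.  High-order attempts
 * must fail fast and quietly so we can fall back to a smaller order:
 * no direct reclaim, no retries, no allocation-failure warnings.
 */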
static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
				     __GFP_NORETRY) & ~__GFP_RECLAIM;
static gfp_t low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO);
static const unsigned int orders[] = {8, 4, 0};

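/* Map an order to its slot in orders[]; the order must be supported. */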
static int order_to_index(unsigned int order)
{
	int i;

	for (i = 0; i < NUM_ORDERS; i++)
		if (order == orders[i])
			return i;
	BUG();
	return -1;
}

static inline unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

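/*
 * The system heap keeps one page pool per order, split by cache
 * attribute: uncached pages are synced for the device once when first
 * allocated, so they must not be mixed with cached pages.
 */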
struct ion_system_heap {
	struct ion_heap heap;
	struct ion_page_pool *uncached_pools[NUM_ORDERS];
	struct ion_page_pool *cached_pools[NUM_ORDERS];
};

/**
 * Pages coming from a page pool have already been zeroed.  Cached
 * buffers still need a cache clean here; uncached buffers have been
 * non-cached ever since they were allocated, so no sync is needed
 * for them.
 */
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order)
{
	bool cached = ion_buffer_cached(buffer);
	struct ion_page_pool *pool;
	struct page *page;

	if (!cached)
		pool = heap->uncached_pools[order_to_index(order)];
	else
		pool = heap->cached_pools[order_to_index(order)];

	page = ion_page_pool_alloc(pool);

	if (cached)
		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
					  DMA_BIDIRECTIONAL);
	return page;
}

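/*
 * Return a page to the matching pool, unless the shrinker is draining
 * the buffer, in which case the page goes straight back to the system.
 */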
static void free_buffer_page(struct ion_system_heap *heap,
			     struct ion_buffer *buffer, struct page *page)
{
	struct ion_page_pool *pool;
	unsigned int order = compound_order(page);
	bool cached = ion_buffer_cached(buffer);

	/* go to system */
	if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) {
		__free_pages(page, order);
		return;
	}

	if (!cached)
		pool = heap->uncached_pools[order_to_index(order)];
	else
		pool = heap->cached_pools[order_to_index(order)];

	ion_page_pool_free(pool, page);
}

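/*
 * Try each supported order from largest to smallest, skipping any that
 * would overshoot the remaining size or exceed max_order, and return
 * the first page that can actually be allocated.
 */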
static struct page *alloc_largest_available(struct ion_system_heap *heap,
					    struct ion_buffer *buffer,
					    unsigned long size,
					    unsigned int max_order)
{
	struct page *page;
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_buffer_page(heap, buffer, orders[i]);
		if (!page)
			continue;

		return page;
	}

	return NULL;
}

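/*
 * Build the buffer from the largest chunks available.  max_order is
 * lowered to the order of each page actually obtained, so an order
 * that has already failed is never retried and the page list stays
 * sorted from largest to smallest.
 */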
static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table;
	struct scatterlist *sg;
	struct list_head pages;
	struct page *page, *tmp_page;
	int i = 0;
	unsigned long size_remaining = PAGE_ALIGN(size);
	unsigned int max_order = orders[0];

	if (align > PAGE_SIZE)
		return -EINVAL;

	if (size / PAGE_SIZE > totalram_pages / 2)
		return -ENOMEM;

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		page = alloc_largest_available(sys_heap, buffer, size_remaining,
					       max_order);
		if (!page)
			goto free_pages;
		list_add_tail(&page->lru, &pages);
		size_remaining -= PAGE_SIZE << compound_order(page);
		max_order = compound_order(page);
		i++;
	}
	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto free_pages;

	if (sg_alloc_table(table, i, GFP_KERNEL))
		goto free_table;

	sg = table->sgl;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
		sg = sg_next(sg);
		list_del(&page->lru);
	}

	buffer->sg_table = table;
	return 0;

free_table:
	kfree(table);
free_pages:
	list_for_each_entry_safe(page, tmp_page, &pages, lru)
		free_buffer_page(sys_heap, buffer, page);
	return -ENOMEM;
}

static void ion_system_heap_free(struct ion_buffer *buffer)
{
	struct ion_system_heap *sys_heap = container_of(buffer->heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table = buffer->sg_table;
	struct scatterlist *sg;
	int i;

	/* zero the buffer before returning it to the page pool */
	if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
		ion_heap_buffer_zero(buffer);

	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg));
	sg_free_table(table);
	kfree(table);
}

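/*
 * Shrinker callback: with nr_to_scan == 0 just report how many pages
 * the pools could give back; otherwise drain the per-order pools,
 * uncached before cached, until nr_to_scan pages have been freed.
 */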
static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
				  int nr_to_scan)
{
	struct ion_page_pool *uncached_pool;
	struct ion_page_pool *cached_pool;
	struct ion_system_heap *sys_heap;
	int nr_total = 0;
	int i, nr_freed;
	int only_scan = 0;

	sys_heap = container_of(heap, struct ion_system_heap, heap);

	if (!nr_to_scan)
		only_scan = 1;

	for (i = 0; i < NUM_ORDERS; i++) {
		uncached_pool = sys_heap->uncached_pools[i];
		cached_pool = sys_heap->cached_pools[i];

		if (only_scan) {
			nr_total += ion_page_pool_shrink(uncached_pool,
							 gfp_mask,
							 nr_to_scan);

			nr_total += ion_page_pool_shrink(cached_pool,
							 gfp_mask,
							 nr_to_scan);
		} else {
			nr_freed = ion_page_pool_shrink(uncached_pool,
							gfp_mask,
							nr_to_scan);
			nr_to_scan -= nr_freed;
			nr_total += nr_freed;
			if (nr_to_scan <= 0)
				break;
			nr_freed = ion_page_pool_shrink(cached_pool,
							gfp_mask,
							nr_to_scan);
			nr_to_scan -= nr_freed;
			nr_total += nr_freed;
			if (nr_to_scan <= 0)
				break;
		}
	}
	return nr_total;
}

static struct ion_heap_ops system_heap_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
	.shrink = ion_system_heap_shrink,
};

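/* debugfs hook: dump per-order pool occupancy, uncached then cached. */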
static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
				      void *unused)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;
	struct ion_page_pool *pool;

	for (i = 0; i < NUM_ORDERS; i++) {
		pool = sys_heap->uncached_pools[i];

		seq_printf(s, "%d order %u highmem pages uncached %lu total\n",
			   pool->high_count, pool->order,
			   (PAGE_SIZE << pool->order) * pool->high_count);
		seq_printf(s, "%d order %u lowmem pages uncached %lu total\n",
			   pool->low_count, pool->order,
			   (PAGE_SIZE << pool->order) * pool->low_count);
	}

	for (i = 0; i < NUM_ORDERS; i++) {
		pool = sys_heap->cached_pools[i];

		seq_printf(s, "%d order %u highmem pages cached %lu total\n",
			   pool->high_count, pool->order,
			   (PAGE_SIZE << pool->order) * pool->high_count);
		seq_printf(s, "%d order %u lowmem pages cached %lu total\n",
			   pool->low_count, pool->order,
			   (PAGE_SIZE << pool->order) * pool->low_count);
	}
	return 0;
}

static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
{
	int i;

	for (i = 0; i < NUM_ORDERS; i++)
		if (pools[i])
			ion_page_pool_destroy(pools[i]);
}

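/*
 * Create one pool per supported order.  Only orders above 4 use the
 * fail-fast high-order gfp flags; lower orders may reclaim normally.
 */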
static int ion_system_heap_create_pools(struct ion_page_pool **pools,
					bool cached)
{
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		struct ion_page_pool *pool;
		gfp_t gfp_flags = low_order_gfp_flags;

		if (orders[i] > 4)
			gfp_flags = high_order_gfp_flags;

		pool = ion_page_pool_create(gfp_flags, orders[i], cached);
		if (!pool)
			goto err_create_pool;
		pools[i] = pool;
	}
	return 0;

err_create_pool:
	ion_system_heap_destroy_pools(pools);
	return -ENOMEM;
}

struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
	struct ion_system_heap *heap;

	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->heap.ops = &system_heap_ops;
	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
	heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;

	if (ion_system_heap_create_pools(heap->uncached_pools, false))
		goto free_heap;

	if (ion_system_heap_create_pools(heap->cached_pools, true))
		goto destroy_uncached_pools;

	heap->heap.debug_show = ion_system_heap_debug_show;
	return &heap->heap;

destroy_uncached_pools:
	ion_system_heap_destroy_pools(heap->uncached_pools);

free_heap:
	kfree(heap);
	return ERR_PTR(-ENOMEM);
}

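/*
 * Minimal usage sketch, not part of this file: a platform driver that
 * owns an ion_device (the "idev" below is hypothetical) would register
 * the heap roughly like this:
 *
 *	struct ion_heap *heap = ion_system_heap_create(NULL);
 *
 *	if (IS_ERR(heap))
 *		return PTR_ERR(heap);
 *	heap->name = "system";
 *	ion_device_add_heap(idev, heap);
 */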
void ion_system_heap_destroy(struct ion_heap *heap)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		ion_page_pool_destroy(sys_heap->uncached_pools[i]);
		ion_page_pool_destroy(sys_heap->cached_pools[i]);
	}
	kfree(sys_heap);
}

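/*
 * Allocate one physically contiguous run of pages, split it so the
 * unused tail pages beyond the page-aligned length can be handed back
 * to the system, and describe the rest with a single-entry sg_table.
 */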
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	int order = get_order(len);
	struct page *page;
	struct sg_table *table;
	unsigned long i;
	int ret;

	if (align > (PAGE_SIZE << order))
		return -EINVAL;

	page = alloc_pages(low_order_gfp_flags, order);
	if (!page)
		return -ENOMEM;

	split_page(page, order);

	len = PAGE_ALIGN(len);
	for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
		__free_page(page + i);

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto free_pages;
	}

	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret)
		goto free_table;

	sg_set_page(table->sgl, page, len, 0);

	buffer->sg_table = table;

	ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);

	return 0;

free_table:
	kfree(table);
free_pages:
	for (i = 0; i < len >> PAGE_SHIFT; i++)
		__free_page(page + i);

	return ret;
}

static void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->sg_table;
	struct page *page = sg_page(table->sgl);
	unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
	unsigned long i;

	for (i = 0; i < pages; i++)
		__free_page(page + i);
	sg_free_table(table);
	kfree(table);
}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}