// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 */
#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "ion.h"
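/*
 * The system heap serves allocations from a set of per-order page pools.
 * With 4 KiB pages, orders {8, 4, 0} correspond to 1 MiB, 64 KiB and
 * 4 KiB chunks.  Order-8 attempts use GFP flags that forbid direct
 * reclaim and suppress failure warnings, so the allocator falls back to
 * smaller orders quickly instead of stalling; order-4 and order-0
 * attempts use plain GFP_HIGHUSER | __GFP_ZERO and may enter reclaim.
 */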
#define NUM_ORDERS ARRAY_SIZE(orders)

static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
				     __GFP_NORETRY) & ~__GFP_RECLAIM;
static gfp_t low_order_gfp_flags  = GFP_HIGHUSER | __GFP_ZERO;
static const unsigned int orders[] = {8, 4, 0};
static int order_to_index(unsigned int order)
{
	int i;

	for (i = 0; i < NUM_ORDERS; i++)
		if (order == orders[i])
			return i;
	BUG();
	return -1;
}
static inline unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}
struct ion_system_heap {
	struct ion_heap heap;
	struct ion_page_pool *pools[NUM_ORDERS];
};
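/*
 * alloc_buffer_page()/free_buffer_page() route page traffic through the
 * per-order pools, so pages released by one buffer can be recycled by a
 * later allocation without a round trip through the buddy allocator.
 */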
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order)
{
	struct ion_page_pool *pool = heap->pools[order_to_index(order)];

	return ion_page_pool_alloc(pool);
}
static void free_buffer_page(struct ion_system_heap *heap,
			     struct ion_buffer *buffer, struct page *page)
{
	struct ion_page_pool *pool;
	unsigned int order = compound_order(page);

	/* pages freed on behalf of the shrinker go straight to the system */
	if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) {
		__free_pages(page, order);
		return;
	}

	pool = heap->pools[order_to_index(order)];

	ion_page_pool_free(pool, page);
}
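/*
 * Walk the orders largest-first and return the biggest chunk that both
 * fits in the remaining size and does not exceed max_order.  Because the
 * caller lowers max_order to the order of the last successful
 * allocation, an order that has already failed is never retried within
 * the same buffer.
 */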
static struct page *alloc_largest_available(struct ion_system_heap *heap,
					    struct ion_buffer *buffer,
					    unsigned long size,
					    unsigned int max_order)
{
	struct page *page;
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_buffer_page(heap, buffer, orders[i]);
		if (!page)
			continue;

		return page;
	}

	return NULL;
}
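/*
 * Build the buffer as a list of variable-order chunks, then flatten the
 * list into an sg_table.  Requests larger than half of system RAM are
 * rejected up front rather than being allowed to deplete memory chunk by
 * chunk and fail anyway.
 */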
static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size,
				    unsigned long flags)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table;
	struct scatterlist *sg;
	struct list_head pages;
	struct page *page, *tmp_page;
	int i = 0;
	unsigned long size_remaining = PAGE_ALIGN(size);
	unsigned int max_order = orders[0];

	if (size / PAGE_SIZE > totalram_pages / 2)
		return -ENOMEM;

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		page = alloc_largest_available(sys_heap, buffer, size_remaining,
					       max_order);
		if (!page)
			goto free_pages;
		list_add_tail(&page->lru, &pages);
		size_remaining -= PAGE_SIZE << compound_order(page);
		max_order = compound_order(page);
		i++;
	}
	table = kmalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		goto free_pages;

	if (sg_alloc_table(table, i, GFP_KERNEL))
		goto free_table;

	sg = table->sgl;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
		sg = sg_next(sg);
		list_del(&page->lru);
	}

	buffer->sg_table = table;
	return 0;

free_table:
	kfree(table);
free_pages:
	list_for_each_entry_safe(page, tmp_page, &pages, lru)
		free_buffer_page(sys_heap, buffer, page);
	return -ENOMEM;
}
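/*
 * Pages returned to a pool may be handed to another client later, so the
 * buffer is zeroed first.  Pages freed on behalf of the shrinker skip
 * the zeroing and bypass the pools entirely: free_buffer_page() returns
 * them straight to the system.
 */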
static void ion_system_heap_free(struct ion_buffer *buffer)
{
	struct ion_system_heap *sys_heap = container_of(buffer->heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table = buffer->sg_table;
	struct scatterlist *sg;
	int i;

	/* zero the buffer before returning it to the page pool */
	if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
		ion_heap_buffer_zero(buffer);

	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg));
	sg_free_table(table);
	kfree(table);
}
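/*
 * Shrinker callback.  A zero nr_to_scan is a query: report how many
 * pages the pools could release without actually freeing anything.
 * Otherwise drain the pools in order until nr_to_scan pages have been
 * freed or every pool is empty.
 */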
static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
				  int nr_to_scan)
{
	struct ion_page_pool *pool;
	struct ion_system_heap *sys_heap;
	int nr_total = 0;
	int i, nr_freed;
	int only_scan = 0;

	sys_heap = container_of(heap, struct ion_system_heap, heap);

	if (!nr_to_scan)
		only_scan = 1;

	for (i = 0; i < NUM_ORDERS; i++) {
		pool = sys_heap->pools[i];

		if (only_scan) {
			nr_total += ion_page_pool_shrink(pool,
							 gfp_mask,
							 nr_to_scan);
		} else {
			nr_freed = ion_page_pool_shrink(pool,
							gfp_mask,
							nr_to_scan);
			nr_to_scan -= nr_freed;
			nr_total += nr_freed;
			if (nr_to_scan <= 0)
				break;
		}
	}
	return nr_total;
}
static struct ion_heap_ops system_heap_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
	.shrink = ion_system_heap_shrink,
};
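/*
 * debugfs hook: for each pool, print how many pages are cached and how
 * many bytes they pin, split into highmem and lowmem counts.
 */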
static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
				      void *unused)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;
	struct ion_page_pool *pool;

	for (i = 0; i < NUM_ORDERS; i++) {
		pool = sys_heap->pools[i];

		seq_printf(s, "%d order %u highmem pages %lu total\n",
			   pool->high_count, pool->order,
			   (PAGE_SIZE << pool->order) * pool->high_count);
		seq_printf(s, "%d order %u lowmem pages %lu total\n",
			   pool->low_count, pool->order,
			   (PAGE_SIZE << pool->order) * pool->low_count);
	}
	return 0;
}
static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
{
	int i;

	for (i = 0; i < NUM_ORDERS; i++)
		if (pools[i])
			ion_page_pool_destroy(pools[i]);
}
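/*
 * One pool per entry in orders[].  Pools above order 4 get the
 * no-reclaim GFP flags; failure to create any pool tears down the ones
 * created so far.
 */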
static int ion_system_heap_create_pools(struct ion_page_pool **pools)
{
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		struct ion_page_pool *pool;
		gfp_t gfp_flags = low_order_gfp_flags;

		if (orders[i] > 4)
			gfp_flags = high_order_gfp_flags;

		pool = ion_page_pool_create(gfp_flags, orders[i]);
		if (!pool)
			goto err_create_pool;
		pools[i] = pool;
	}

	return 0;

err_create_pool:
	ion_system_heap_destroy_pools(pools);
	return -ENOMEM;
}
static struct ion_heap *__ion_system_heap_create(void)
{
	struct ion_system_heap *heap;

	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->heap.ops = &system_heap_ops;
	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
	heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;

	if (ion_system_heap_create_pools(heap->pools))
		goto free_heap;

	heap->heap.debug_show = ion_system_heap_debug_show;
	return &heap->heap;

free_heap:
	kfree(heap);
	return ERR_PTR(-ENOMEM);
}
static int ion_system_heap_create(void)
{
	struct ion_heap *heap;

	heap = __ion_system_heap_create();
	if (IS_ERR(heap))
		return PTR_ERR(heap);
	heap->name = "ion_system_heap";

	ion_device_add_heap(heap);

	return 0;
}
device_initcall(ion_system_heap_create);
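/*
 * The contig heap variant below satisfies each allocation with a single
 * physically contiguous run of pages.  It over-allocates to the next
 * power-of-two order, then uses split_page() so the unused tail pages
 * can be returned to the system individually.
 */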
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long flags)
{
	int order = get_order(len);
	struct page *page;
	struct sg_table *table;
	unsigned long i;
	int ret;

	page = alloc_pages(low_order_gfp_flags | __GFP_NOWARN, order);
	if (!page)
		return -ENOMEM;

	split_page(page, order);

	len = PAGE_ALIGN(len);
	for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
		__free_page(page + i);

	table = kmalloc(sizeof(*table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto free_pages;
	}

	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret)
		goto free_table;

	sg_set_page(table->sgl, page, len, 0);

	buffer->sg_table = table;

	return 0;

free_table:
	kfree(table);
free_pages:
	for (i = 0; i < len >> PAGE_SHIFT; i++)
		__free_page(page + i);

	return ret;
}
static void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->sg_table;
	struct page *page = sg_page(table->sgl);
	unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
	unsigned long i;

	for (i = 0; i < pages; i++)
		__free_page(page + i);
	sg_free_table(table);
	kfree(table);
}
static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
};
static struct ion_heap *__ion_system_contig_heap_create(void)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	heap->name = "ion_system_contig_heap";
	return heap;
}
static int ion_system_contig_heap_create(void)
{
	struct ion_heap *heap;

	heap = __ion_system_contig_heap_create();
	if (IS_ERR(heap))
		return PTR_ERR(heap);

	ion_device_add_heap(heap);

	return 0;
}
device_initcall(ion_system_contig_heap_create);