GNU Linux-libre 4.19.286-gnu1
[releases.git] / drivers / gpu / drm / i915 / selftests / huge_gem_object.c
1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24
25 #include "huge_gem_object.h"
26
27 static void huge_free_pages(struct drm_i915_gem_object *obj,
28                             struct sg_table *pages)
29 {
30         unsigned long nreal = obj->scratch / PAGE_SIZE;
31         struct scatterlist *sg;
32
33         for (sg = pages->sgl; sg && nreal--; sg = __sg_next(sg))
34                 __free_page(sg_page(sg));
35
36         sg_free_table(pages);
37         kfree(pages);
38 }
39
/*
 * Allocate backing store for a huge object: only nreal distinct pages
 * are allocated, and the remainder of the sg table is filled by cycling
 * through those real pages again, so the object appears to the GTT as
 * obj->base.size bytes while consuming only obj->scratch bytes of memory.
 *
 * Returns 0 on success, -ENOMEM on allocation or prepare failure.
 */
static int huge_get_pages(struct drm_i915_gem_object *obj)
{
/* Scoped to this function; fail fast rather than stall in reclaim. */
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
	/* Number of real, distinct backing pages (obj->scratch is phys_size). */
	const unsigned long nreal = obj->scratch / PAGE_SIZE;
	/* Number of pages the object exposes to the GTT. */
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct scatterlist *sg, *src, *end;
	struct sg_table *pages;
	unsigned long n;

	pages = kmalloc(sizeof(*pages), GFP);
	if (!pages)
		return -ENOMEM;

	if (sg_alloc_table(pages, npages, GFP)) {
		kfree(pages);
		return -ENOMEM;
	}

	/* Populate the first nreal entries with freshly allocated pages. */
	sg = pages->sgl;
	for (n = 0; n < nreal; n++) {
		struct page *page;

		page = alloc_page(GFP | __GFP_HIGHMEM);
		if (!page) {
			/*
			 * Terminate the table at the first unfilled entry;
			 * note this entry carries no page, which the free
			 * path must tolerate.
			 */
			sg_mark_end(sg);
			goto err;
		}

		sg_set_page(sg, page, PAGE_SIZE, 0);
		sg = __sg_next(sg);
	}
	/*
	 * Fill the tail by repeating the real pages in order, wrapping at
	 * 'end' (the first tail entry). These entries alias the head, so
	 * teardown must free only the first nreal pages.
	 */
	if (nreal < npages) {
		for (end = sg, src = pages->sgl; sg; sg = __sg_next(sg)) {
			sg_set_page(sg, sg_page(src), PAGE_SIZE, 0);
			src = __sg_next(src);
			if (src == end)
				src = pages->sgl;
		}
	}

	if (i915_gem_gtt_prepare_pages(obj, pages))
		goto err;

	__i915_gem_object_set_pages(obj, pages, PAGE_SIZE);

	return 0;

err:
	huge_free_pages(obj, pages);

	return -ENOMEM;
#undef GFP
}
93
/*
 * Release the backing store built by huge_get_pages(): undo the GTT/DMA
 * mapping first, then free the (deduplicated) pages. With the pages gone
 * there is nothing left to write back, so drop the dirty flag.
 */
static void huge_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	huge_free_pages(obj, pages);

	obj->mm.dirty = false;
}
102
/*
 * Object ops for the fake "huge" object: normal struct-page backed,
 * shrinkable object whose get/put hooks provide fewer physical pages
 * than the advertised object size (see huge_get_pages()).
 */
static const struct drm_i915_gem_object_ops huge_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = huge_get_pages,
	.put_pages = huge_put_pages,
};
109
110 struct drm_i915_gem_object *
111 huge_gem_object(struct drm_i915_private *i915,
112                 phys_addr_t phys_size,
113                 dma_addr_t dma_size)
114 {
115         struct drm_i915_gem_object *obj;
116         unsigned int cache_level;
117
118         GEM_BUG_ON(!phys_size || phys_size > dma_size);
119         GEM_BUG_ON(!IS_ALIGNED(phys_size, PAGE_SIZE));
120         GEM_BUG_ON(!IS_ALIGNED(dma_size, I915_GTT_PAGE_SIZE));
121
122         if (overflows_type(dma_size, obj->base.size))
123                 return ERR_PTR(-E2BIG);
124
125         obj = i915_gem_object_alloc(i915);
126         if (!obj)
127                 return ERR_PTR(-ENOMEM);
128
129         drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
130         i915_gem_object_init(obj, &huge_ops);
131
132         obj->read_domains = I915_GEM_DOMAIN_CPU;
133         obj->write_domain = I915_GEM_DOMAIN_CPU;
134         cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
135         i915_gem_object_set_cache_coherency(obj, cache_level);
136         obj->scratch = phys_size;
137
138         return obj;
139 }