drivers/gpu/drm/etnaviv/etnaviv_mmu.c (GNU Linux-libre 4.14.266-gnu1)

/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_iommu.h"
#include "etnaviv_mmu.h"

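/* GPU MMU fault handler: log the faulting address and access flags. */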
static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev,
                unsigned long iova, int flags, void *arg)
{
        DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
        return 0;
}

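/*
 * Map a scatter/gather table into the GPU address space at @iova with the
 * given protection flags, unwinding any partial mappings on failure.
 */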
int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
                struct sg_table *sgt, unsigned len, int prot)
{
        struct iommu_domain *domain = iommu->domain;
        struct scatterlist *sg;
        unsigned int da = iova;
        unsigned int i, j;
        int ret;

        if (!domain || !sgt)
                return -EINVAL;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                u32 pa = sg_dma_address(sg) - sg->offset;
                size_t bytes = sg_dma_len(sg) + sg->offset;

                VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

                ret = iommu_map(domain, da, pa, bytes, prot);
                if (ret)
                        goto fail;

                da += bytes;
        }

        return 0;

fail:
        da = iova;

        for_each_sg(sgt->sgl, sg, i, j) {
                size_t bytes = sg_dma_len(sg) + sg->offset;

                iommu_unmap(domain, da, bytes);
                da += bytes;
        }
        return ret;
}

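/* Unmap a previously mapped scatter/gather table from the GPU address space. */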
int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
                struct sg_table *sgt, unsigned len)
{
        struct iommu_domain *domain = iommu->domain;
        struct scatterlist *sg;
        unsigned int da = iova;
        int i;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes = sg_dma_len(sg) + sg->offset;
                size_t unmapped;

                unmapped = iommu_unmap(domain, da, bytes);
                if (unmapped < bytes)
                        return unmapped;

                VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

                BUG_ON(!PAGE_ALIGNED(bytes));

                da += bytes;
        }

        return 0;
}

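/*
 * Unmap the object behind @mapping and release its range in the address
 * space manager. Called with mmu->lock held.
 */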
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
        struct etnaviv_vram_mapping *mapping)
{
        struct etnaviv_gem_object *etnaviv_obj = mapping->object;

        etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
                            etnaviv_obj->sgt, etnaviv_obj->base.size);
        drm_mm_remove_node(&mapping->vram_node);
}

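/*
 * Find a free range of GPU virtual addresses for @size bytes: first try
 * above last_iova, then retry from the start of the address space, and
 * finally evict unpinned mappings until the allocation fits.
 */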
static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
                                   struct drm_mm_node *node, size_t size)
{
        struct etnaviv_vram_mapping *free = NULL;
        enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
        int ret;

        lockdep_assert_held(&mmu->lock);

        while (1) {
                struct etnaviv_vram_mapping *m, *n;
                struct drm_mm_scan scan;
                struct list_head list;
                bool found;

                ret = drm_mm_insert_node_in_range(&mmu->mm, node,
                                                  size, 0, 0,
                                                  mmu->last_iova, U64_MAX,
                                                  mode);
                if (ret != -ENOSPC)
                        break;

                /*
                 * If we did not search from the start of the MMU region,
                 * try again in case there are free slots.
                 */
                if (mmu->last_iova) {
                        mmu->last_iova = 0;
                        mmu->flush_seq++;
                        continue;
                }

                /* Try to retire some entries */
                drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);

                found = false;
                INIT_LIST_HEAD(&list);
                list_for_each_entry(free, &mmu->mappings, mmu_node) {
                        /* If this vram node has not been used, skip this. */
                        if (!free->vram_node.mm)
                                continue;

                        /*
                         * If the iova is pinned, then it's in-use,
                         * so we must keep its mapping.
                         */
                        if (free->use)
                                continue;

                        list_add(&free->scan_node, &list);
                        if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
                                found = true;
                                break;
                        }
                }

                if (!found) {
                        /* Nothing found, clean up and fail */
                        list_for_each_entry_safe(m, n, &list, scan_node)
                                BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
                        break;
                }

                /*
                 * drm_mm does not allow any other operations while
                 * scanning, so we have to remove all blocks first.
                 * If drm_mm_scan_remove_block() returns false, we
                 * can leave the block pinned.
                 */
                list_for_each_entry_safe(m, n, &list, scan_node)
                        if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
                                list_del_init(&m->scan_node);

                /*
                 * Unmap the blocks which need to be reaped from the MMU.
                 * Clear the mmu pointer to prevent the mapping_get finding
                 * this mapping.
                 */
                list_for_each_entry_safe(m, n, &list, scan_node) {
                        etnaviv_iommu_remove_mapping(mmu, m);
                        m->mmu = NULL;
                        list_del_init(&m->mmu_node);
                        list_del_init(&m->scan_node);
                }

                mode = DRM_MM_INSERT_EVICT;

                /*
                 * We removed enough mappings so that the new allocation will
                 * succeed, retry the allocation one more time.
                 */
        }

        return ret;
}

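/*
 * Map a GEM object into the GPU address space. On a v1 MMU, a single entry
 * (contiguous) scatterlist that fits within 2GiB of the memory base is used
 * directly, without creating a page table mapping.
 */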
int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
        struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
        struct etnaviv_vram_mapping *mapping)
{
        struct sg_table *sgt = etnaviv_obj->sgt;
        struct drm_mm_node *node;
        int ret;

        lockdep_assert_held(&etnaviv_obj->lock);

        mutex_lock(&mmu->lock);

        /* v1 MMU can optimize single entry (contiguous) scatterlists */
        if (mmu->version == ETNAVIV_IOMMU_V1 &&
            sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
                u32 iova;

                iova = sg_dma_address(sgt->sgl) - memory_base;
                if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
                        mapping->iova = iova;
                        list_add_tail(&mapping->mmu_node, &mmu->mappings);
                        mutex_unlock(&mmu->lock);
                        return 0;
                }
        }

        node = &mapping->vram_node;

        ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
        if (ret < 0) {
                mutex_unlock(&mmu->lock);
                return ret;
        }

        mmu->last_iova = node->start + etnaviv_obj->base.size;
        mapping->iova = node->start;
        ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
                                IOMMU_READ | IOMMU_WRITE);

        if (ret < 0) {
                drm_mm_remove_node(node);
                mutex_unlock(&mmu->lock);
                return ret;
        }

        list_add_tail(&mapping->mmu_node, &mmu->mappings);
        mmu->flush_seq++;
        mutex_unlock(&mmu->lock);

        return ret;
}

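/* Tear down a GEM object mapping and drop it from the MMU's mapping list. */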
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
        struct etnaviv_vram_mapping *mapping)
{
        WARN_ON(mapping->use);

        mutex_lock(&mmu->lock);

        /* If the vram node is on the mm, unmap and remove the node */
        if (mapping->vram_node.mm == &mmu->mm)
                etnaviv_iommu_remove_mapping(mmu, mapping);

        list_del(&mapping->mmu_node);
        mmu->flush_seq++;
        mutex_unlock(&mmu->lock);
}

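/* Free the MMU context: address space manager, IOMMU domain and the context itself. */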
void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
{
        drm_mm_takedown(&mmu->mm);
        iommu_domain_free(mmu->domain);
        kfree(mmu);
}

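/*
 * Allocate and initialise an MMU context for @gpu: pick the v1 or v2
 * implementation based on the GPU feature bits, set up the drm_mm range
 * manager over the domain aperture and install the fault handler.
 */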
struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
{
        enum etnaviv_iommu_version version;
        struct etnaviv_iommu *mmu;

        mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
        if (!mmu)
                return ERR_PTR(-ENOMEM);

        if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) {
                mmu->domain = etnaviv_iommuv1_domain_alloc(gpu);
                version = ETNAVIV_IOMMU_V1;
        } else {
                mmu->domain = etnaviv_iommuv2_domain_alloc(gpu);
                version = ETNAVIV_IOMMU_V2;
        }

        if (!mmu->domain) {
                dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
                kfree(mmu);
                return ERR_PTR(-ENOMEM);
        }

        mmu->gpu = gpu;
        mmu->version = version;
        mutex_init(&mmu->lock);
        INIT_LIST_HEAD(&mmu->mappings);

        drm_mm_init(&mmu->mm, mmu->domain->geometry.aperture_start,
                    mmu->domain->geometry.aperture_end -
                    mmu->domain->geometry.aperture_start + 1);

        iommu_set_fault_handler(mmu->domain, etnaviv_fault_handler, gpu->dev);

        return mmu;
}

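/* Reprogram the MMU hardware state, dispatching to the version specific implementation. */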
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
{
        if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
                etnaviv_iommuv1_restore(gpu);
        else
                etnaviv_iommuv2_restore(gpu);
}

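/*
 * Return a GPU virtual address for a suballocated buffer: v1 uses the
 * physical offset from the memory base directly, v2 allocates an IOVA
 * range and maps it read-only.
 */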
int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
                                  struct drm_mm_node *vram_node, size_t size,
                                  u32 *iova)
{
        struct etnaviv_iommu *mmu = gpu->mmu;

        if (mmu->version == ETNAVIV_IOMMU_V1) {
                *iova = paddr - gpu->memory_base;
                return 0;
        } else {
                int ret;

                mutex_lock(&mmu->lock);
                ret = etnaviv_iommu_find_iova(mmu, vram_node, size);
                if (ret < 0) {
                        mutex_unlock(&mmu->lock);
                        return ret;
                }
                ret = iommu_map(mmu->domain, vram_node->start, paddr, size,
                                IOMMU_READ);
                if (ret < 0) {
                        drm_mm_remove_node(vram_node);
                        mutex_unlock(&mmu->lock);
                        return ret;
                }
                mmu->last_iova = vram_node->start + size;
                mmu->flush_seq++;
                mutex_unlock(&mmu->lock);

                *iova = (u32)vram_node->start;
                return 0;
        }
}

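/* Release a suballocation mapping; only v2 has an actual IOVA range to undo. */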
void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
                                   struct drm_mm_node *vram_node, size_t size,
                                   u32 iova)
{
        struct etnaviv_iommu *mmu = gpu->mmu;

        if (mmu->version == ETNAVIV_IOMMU_V2) {
                mutex_lock(&mmu->lock);
                iommu_unmap(mmu->domain, iova, size);
                drm_mm_remove_node(vram_node);
                mutex_unlock(&mmu->lock);
        }
}

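/* Size of the MMU state dump provided by the version specific implementation. */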
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
{
        struct etnaviv_iommu_ops *ops;

        ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);

        return ops->dump_size(iommu->domain);
}

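/* Dump the MMU state into @buf; used by the GPU state dump code. */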
void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
{
        struct etnaviv_iommu_ops *ops;

        ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);

        ops->dump(iommu->domain, buf);
}