GNU Linux-libre 4.9.309-gnu1
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "common.xml.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_iommu.h"
#include "etnaviv_mmu.h"

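/* Fault handler installed on the GPU IOMMU domain: just log the faulting address and flags. */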
static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev,
                unsigned long iova, int flags, void *arg)
{
        DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
        return 0;
}

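/*
 * Map a scatter/gather table into the GPU address space at @iova,
 * unwinding any partially created mapping on failure.
 */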
int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
                struct sg_table *sgt, unsigned len, int prot)
{
        struct iommu_domain *domain = iommu->domain;
        struct scatterlist *sg;
        unsigned int da = iova;
        unsigned int i, j;
        int ret;

        if (!domain || !sgt)
                return -EINVAL;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                u32 pa = sg_dma_address(sg) - sg->offset;
                size_t bytes = sg_dma_len(sg) + sg->offset;

                VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

                ret = iommu_map(domain, da, pa, bytes, prot);
                if (ret)
                        goto fail;

                da += bytes;
        }

        return 0;

fail:
        da = iova;

        for_each_sg(sgt->sgl, sg, i, j) {
                size_t bytes = sg_dma_len(sg) + sg->offset;

                iommu_unmap(domain, da, bytes);
                da += bytes;
        }
        return ret;
}

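/* Unmap a scatter/gather table previously mapped at @iova. */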
int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
                struct sg_table *sgt, unsigned len)
{
        struct iommu_domain *domain = iommu->domain;
        struct scatterlist *sg;
        unsigned int da = iova;
        int i;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes = sg_dma_len(sg) + sg->offset;
                size_t unmapped;

                unmapped = iommu_unmap(domain, da, bytes);
                if (unmapped < bytes)
                        return unmapped;

                VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

                BUG_ON(!PAGE_ALIGNED(bytes));

                da += bytes;
        }

        return 0;
}

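/*
 * Tear down a VRAM mapping: unmap the object's pages from the MMU and
 * release its drm_mm node.
 */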
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
        struct etnaviv_vram_mapping *mapping)
{
        struct etnaviv_gem_object *etnaviv_obj = mapping->object;

        etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
                            etnaviv_obj->sgt, etnaviv_obj->base.size);
        drm_mm_remove_node(&mapping->vram_node);
}

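/*
 * Find a free IOVA range of @size bytes, evicting idle mappings when the
 * address space is exhausted.  Called with mmu->lock held.
 */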
static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
                                   struct drm_mm_node *node, size_t size)
{
        struct etnaviv_vram_mapping *free = NULL;
        int ret;

        lockdep_assert_held(&mmu->lock);

        while (1) {
                struct etnaviv_vram_mapping *m, *n;
                struct list_head list;
                bool found;

                /*
                 * XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick
                 * drm_mm into giving out a low IOVA after address space
                 * rollover. This needs a proper fix.
                 */
                ret = drm_mm_insert_node_in_range(&mmu->mm, node,
                        size, 0, mmu->last_iova, ~0UL,
                        mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW);

                if (ret != -ENOSPC)
                        break;

                /*
                 * If we did not search from the start of the MMU region,
                 * try again in case there are free slots.
                 */
                if (mmu->last_iova) {
                        mmu->last_iova = 0;
                        mmu->flush_seq++;
                        continue;
                }

                /* Try to retire some entries */
                drm_mm_init_scan(&mmu->mm, size, 0, 0);

                found = false;
                INIT_LIST_HEAD(&list);
                list_for_each_entry(free, &mmu->mappings, mmu_node) {
                        /* If this vram node has not been used, skip this. */
                        if (!free->vram_node.mm)
                                continue;

                        /*
                         * If the iova is pinned, then it's in-use,
                         * so we must keep its mapping.
                         */
                        if (free->use)
                                continue;

                        list_add(&free->scan_node, &list);
                        if (drm_mm_scan_add_block(&free->vram_node)) {
                                found = true;
                                break;
                        }
                }

                if (!found) {
                        /* Nothing found, clean up and fail */
                        list_for_each_entry_safe(m, n, &list, scan_node)
                                BUG_ON(drm_mm_scan_remove_block(&m->vram_node));
                        break;
                }

                /*
                 * drm_mm does not allow any other operations while
                 * scanning, so we have to remove all blocks first.
                 * If drm_mm_scan_remove_block() returns false, we
                 * can leave the block pinned.
                 */
                list_for_each_entry_safe(m, n, &list, scan_node)
                        if (!drm_mm_scan_remove_block(&m->vram_node))
                                list_del_init(&m->scan_node);

                /*
                 * Unmap the blocks which need to be reaped from the MMU.
                 * Clear the mmu pointer to prevent mapping_get from finding
                 * this mapping.
                 */
                list_for_each_entry_safe(m, n, &list, scan_node) {
                        etnaviv_iommu_remove_mapping(mmu, m);
                        m->mmu = NULL;
                        list_del_init(&m->mmu_node);
                        list_del_init(&m->scan_node);
                }

                /*
                 * We removed enough mappings so that the new allocation will
                 * succeed.  Ensure that the MMU will be flushed before the
                 * associated commit requesting this mapping, and retry the
                 * allocation one more time.
                 */
                mmu->flush_seq++;
        }

        return ret;
}

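/*
 * Map a GEM object into the GPU address space and add the mapping to the
 * MMU's list of active mappings.
 */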
int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
        struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
        struct etnaviv_vram_mapping *mapping)
{
        struct sg_table *sgt = etnaviv_obj->sgt;
        struct drm_mm_node *node;
        int ret;

        lockdep_assert_held(&etnaviv_obj->lock);

        mutex_lock(&mmu->lock);

        /* v1 MMU can optimize single entry (contiguous) scatterlists */
        if (mmu->version == ETNAVIV_IOMMU_V1 &&
            sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
                u32 iova;

                iova = sg_dma_address(sgt->sgl) - memory_base;
                if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
                        mapping->iova = iova;
                        list_add_tail(&mapping->mmu_node, &mmu->mappings);
                        mutex_unlock(&mmu->lock);
                        return 0;
                }
        }

        node = &mapping->vram_node;

        ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
        if (ret < 0) {
                mutex_unlock(&mmu->lock);
                return ret;
        }

        mmu->last_iova = node->start + etnaviv_obj->base.size;
        mapping->iova = node->start;
        ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
                                IOMMU_READ | IOMMU_WRITE);

        if (ret < 0) {
                drm_mm_remove_node(node);
                mutex_unlock(&mmu->lock);
                return ret;
        }

        list_add_tail(&mapping->mmu_node, &mmu->mappings);
        mutex_unlock(&mmu->lock);

        return ret;
}

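/* Remove a GEM object's mapping from the GPU address space. */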
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
        struct etnaviv_vram_mapping *mapping)
{
        WARN_ON(mapping->use);

        mutex_lock(&mmu->lock);

        /* If the vram node is on the mm, unmap and remove the node */
        if (mapping->vram_node.mm == &mmu->mm)
                etnaviv_iommu_remove_mapping(mmu, mapping);

        list_del(&mapping->mmu_node);
        mutex_unlock(&mmu->lock);
}

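/* Free the MMU context: address space manager, IOMMU domain and the context itself. */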
void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
{
        drm_mm_takedown(&mmu->mm);
        iommu_domain_free(mmu->domain);
        kfree(mmu);
}

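/*
 * Allocate an MMU context for @gpu, selecting the v1 or v2 IOMMU
 * implementation based on the reported hardware features.
 */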
struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
{
        enum etnaviv_iommu_version version;
        struct etnaviv_iommu *mmu;

        mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
        if (!mmu)
                return ERR_PTR(-ENOMEM);

        if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) {
                mmu->domain = etnaviv_iommuv1_domain_alloc(gpu);
                version = ETNAVIV_IOMMU_V1;
        } else {
                mmu->domain = etnaviv_iommuv2_domain_alloc(gpu);
                version = ETNAVIV_IOMMU_V2;
        }

        if (!mmu->domain) {
                dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
                kfree(mmu);
                return ERR_PTR(-ENOMEM);
        }

        mmu->gpu = gpu;
        mmu->version = version;
        mutex_init(&mmu->lock);
        INIT_LIST_HEAD(&mmu->mappings);

        drm_mm_init(&mmu->mm, mmu->domain->geometry.aperture_start,
                    mmu->domain->geometry.aperture_end -
                    mmu->domain->geometry.aperture_start + 1);

        iommu_set_fault_handler(mmu->domain, etnaviv_fault_handler, gpu->dev);

        return mmu;
}

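/* Reprogram the hardware MMU state via the version-specific restore path. */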
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
{
        if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
                etnaviv_iommuv1_restore(gpu);
        else
                etnaviv_iommuv2_restore(gpu);
}

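/*
 * Return the GPU address of a command buffer: a plain offset from memory_base
 * on MMUv1, or a mapping in the IOVA space (created on first use) on MMUv2.
 */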
u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
                                struct etnaviv_cmdbuf *buf)
{
        struct etnaviv_iommu *mmu = gpu->mmu;

        if (mmu->version == ETNAVIV_IOMMU_V1) {
                return buf->paddr - gpu->memory_base;
        } else {
                int ret;

                if (buf->vram_node.allocated)
                        return (u32)buf->vram_node.start;

                mutex_lock(&mmu->lock);
                ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node,
                                              buf->size + SZ_64K);
                if (ret < 0) {
                        mutex_unlock(&mmu->lock);
                        return 0;
                }
                ret = iommu_map(mmu->domain, buf->vram_node.start, buf->paddr,
                                buf->size, IOMMU_READ);
                if (ret < 0) {
                        drm_mm_remove_node(&buf->vram_node);
                        mutex_unlock(&mmu->lock);
                        return 0;
                }
                /*
                 * At least on GC3000 the FE MMU doesn't properly flush old TLB
                 * entries. Make sure to space the command buffers out in a way
                 * that the FE MMU prefetch won't load invalid entries.
                 */
                mmu->last_iova = buf->vram_node.start + buf->size + SZ_64K;
                mmu->flush_seq++;
                mutex_unlock(&mmu->lock);

                return (u32)buf->vram_node.start;
        }
}

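/* Release a command buffer's MMUv2 mapping, if one was created. */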
void etnaviv_iommu_put_cmdbuf_va(struct etnaviv_gpu *gpu,
                                 struct etnaviv_cmdbuf *buf)
{
        struct etnaviv_iommu *mmu = gpu->mmu;

        if (mmu->version == ETNAVIV_IOMMU_V2 && buf->vram_node.allocated) {
                mutex_lock(&mmu->lock);
                iommu_unmap(mmu->domain, buf->vram_node.start, buf->size);
                drm_mm_remove_node(&buf->vram_node);
                mutex_unlock(&mmu->lock);
        }
}

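/* Size in bytes of the MMU page table dump. */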
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
{
        struct etnaviv_iommu_ops *ops;

        ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);

        return ops->dump_size(iommu->domain);
}

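/* Dump the MMU page tables into @buf. */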
void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
{
        struct etnaviv_iommu_ops *ops;

        ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);

        ops->dump(iommu->domain, buf);
}