/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_iommu.h"
#include "etnaviv_mmu.h"
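
/*
 * Fault callback registered with the IOMMU core in etnaviv_iommu_new();
 * it only logs the faulting address and access flags for debugging.
 */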
static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev,
        unsigned long iova, int flags, void *arg)
{
        DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
        return 0;
}
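
/*
 * Map each entry of a scatterlist into the GPU address space, starting at
 * @iova. On failure, the entries mapped so far are unmapped again so no
 * partial mapping is left behind.
 */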
int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
                struct sg_table *sgt, unsigned len, int prot)
{
        struct iommu_domain *domain = iommu->domain;
        struct scatterlist *sg;
        unsigned int da = iova;
        unsigned int i, j;
        int ret;

        if (!domain || !sgt)
                return -EINVAL;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                u32 pa = sg_dma_address(sg) - sg->offset;
                size_t bytes = sg_dma_len(sg) + sg->offset;

                VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

                ret = iommu_map(domain, da, pa, bytes, prot);
                if (ret)
                        goto fail;

                da += bytes;
        }

        return 0;

fail:
        da = iova;

        /* Undo the mappings created so far (entries 0..i-1). */
        for_each_sg(sgt->sgl, sg, i, j) {
                size_t bytes = sg_dma_len(sg) + sg->offset;

                iommu_unmap(domain, da, bytes);
                da += bytes;
        }
        return ret;
}
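
/*
 * Remove a scatterlist's entries from the GPU address space. A short
 * unmap of an entry aborts the walk and is reported to the caller.
 */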
int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
                struct sg_table *sgt, unsigned len)
{
        struct iommu_domain *domain = iommu->domain;
        struct scatterlist *sg;
        unsigned int da = iova;
        int i;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes = sg_dma_len(sg) + sg->offset;
                size_t unmapped;

                unmapped = iommu_unmap(domain, da, bytes);
                if (unmapped < bytes)
                        return unmapped;

                VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

                BUG_ON(!PAGE_ALIGNED(bytes));

                da += bytes;
        }

        return 0;
}
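
/* Unmap a buffer object and give its range back to the drm_mm manager. */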
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
        struct etnaviv_vram_mapping *mapping)
{
        struct etnaviv_gem_object *etnaviv_obj = mapping->object;

        etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
                            etnaviv_obj->sgt, etnaviv_obj->base.size);
        drm_mm_remove_node(&mapping->vram_node);
}
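
/*
 * Find a free hole of @size bytes in the GPU address space, preferring
 * addresses above mmu->last_iova. If the space is fragmented or full,
 * unpinned mappings are evicted with a drm_mm eviction scan and the
 * insertion is retried.
 */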
static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
                                   struct drm_mm_node *node, size_t size)
{
        struct etnaviv_vram_mapping *free = NULL;
        enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
        int ret;

        lockdep_assert_held(&mmu->lock);

        while (1) {
                struct etnaviv_vram_mapping *m, *n;
                struct drm_mm_scan scan;
                struct list_head list;
                bool found;

                ret = drm_mm_insert_node_in_range(&mmu->mm, node,
                                                  size, 0, 0,
                                                  mmu->last_iova, U64_MAX,
                                                  mode);
                if (ret != -ENOSPC)
                        break;

                /*
                 * If we did not search from the start of the MMU region,
                 * try again in case there are free slots.
                 */
                if (mmu->last_iova) {
                        mmu->last_iova = 0;
                        mmu->need_flush = true;
                        continue;
                }

                /* Try to retire some entries */
                drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);

                found = false;
                INIT_LIST_HEAD(&list);
                list_for_each_entry(free, &mmu->mappings, mmu_node) {
                        /* If this vram node has not been used, skip this. */
                        if (!free->vram_node.mm)
                                continue;

                        /*
                         * If the iova is pinned, then it's in-use,
                         * so we must keep its mapping.
                         */
                        if (free->use)
                                continue;

                        list_add(&free->scan_node, &list);
                        if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
                                found = true;
                                break;
                        }
                }

                if (!found) {
                        /* Nothing found, clean up and fail */
                        list_for_each_entry_safe(m, n, &list, scan_node)
                                BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
                        break;
                }

                /*
                 * drm_mm does not allow any other operations while
                 * scanning, so we have to remove all blocks first.
                 * If drm_mm_scan_remove_block() returns false, we
                 * can leave the block pinned.
                 */
                list_for_each_entry_safe(m, n, &list, scan_node)
                        if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
                                list_del_init(&m->scan_node);

                /*
                 * Unmap the blocks which need to be reaped from the MMU.
                 * Clear the mmu pointer to prevent the mapping_get finding
                 * this mapping.
                 */
                list_for_each_entry_safe(m, n, &list, scan_node) {
                        etnaviv_iommu_remove_mapping(mmu, m);
                        m->mmu = NULL;
                        list_del_init(&m->mmu_node);
                        list_del_init(&m->scan_node);
                }

                mode = DRM_MM_INSERT_EVICT;

                /*
                 * We removed enough mappings so that the new allocation will
                 * succeed, retry the allocation one more time.
                 */
        }

        return ret;
}
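
/*
 * Map a GEM object into the GPU address space. On v1 MMUs a contiguous
 * buffer that already sits inside the 2 GiB linear window is used
 * directly, without creating page table entries.
 */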
int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
        struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
        struct etnaviv_vram_mapping *mapping)
{
        struct sg_table *sgt = etnaviv_obj->sgt;
        struct drm_mm_node *node;
        int ret;

        lockdep_assert_held(&etnaviv_obj->lock);

        mutex_lock(&mmu->lock);

        /* v1 MMU can optimize single entry (contiguous) scatterlists */
        if (mmu->version == ETNAVIV_IOMMU_V1 &&
            sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
                u32 iova;

                iova = sg_dma_address(sgt->sgl) - memory_base;
                if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
                        mapping->iova = iova;
                        list_add_tail(&mapping->mmu_node, &mmu->mappings);
                        mutex_unlock(&mmu->lock);
                        return 0;
                }
        }

        node = &mapping->vram_node;

        ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
        if (ret < 0) {
                mutex_unlock(&mmu->lock);
                return ret;
        }

        mmu->last_iova = node->start + etnaviv_obj->base.size;
        mapping->iova = node->start;
        ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
                                IOMMU_READ | IOMMU_WRITE);

        if (ret < 0) {
                drm_mm_remove_node(node);
                mutex_unlock(&mmu->lock);
                return ret;
        }

        list_add_tail(&mapping->mmu_node, &mmu->mappings);
        mutex_unlock(&mmu->lock);

        return ret;
}
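
/*
 * Remove a GEM object's mapping; callers must ensure the mapping is no
 * longer in use, see the WARN_ON below.
 */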
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
        struct etnaviv_vram_mapping *mapping)
{
        WARN_ON(mapping->use);

        mutex_lock(&mmu->lock);

        /* If the vram node is on the mm, unmap and remove the node */
        if (mapping->vram_node.mm == &mmu->mm)
                etnaviv_iommu_remove_mapping(mmu, mapping);

        list_del(&mapping->mmu_node);
        mutex_unlock(&mmu->lock);
}
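
/* Release the address space manager, the IOMMU domain and the context. */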
void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
{
        drm_mm_takedown(&mmu->mm);
        iommu_domain_free(mmu->domain);
        kfree(mmu);
}
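
/*
 * Allocate an MMU context for @gpu. The page table format (v1 or v2) is
 * chosen from the GPU's minor feature bits, and the usable address range
 * is taken from the domain's aperture geometry.
 */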
struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
{
        enum etnaviv_iommu_version version;
        struct etnaviv_iommu *mmu;

        mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
        if (!mmu)
                return ERR_PTR(-ENOMEM);

        if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) {
                mmu->domain = etnaviv_iommuv1_domain_alloc(gpu);
                version = ETNAVIV_IOMMU_V1;
        } else {
                mmu->domain = etnaviv_iommuv2_domain_alloc(gpu);
                version = ETNAVIV_IOMMU_V2;
        }

        if (!mmu->domain) {
                dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
                kfree(mmu);
                return ERR_PTR(-ENOMEM);
        }

        mmu->gpu = gpu;
        mmu->version = version;
        mutex_init(&mmu->lock);
        INIT_LIST_HEAD(&mmu->mappings);

        drm_mm_init(&mmu->mm, mmu->domain->geometry.aperture_start,
                    mmu->domain->geometry.aperture_end -
                      mmu->domain->geometry.aperture_start + 1);

        iommu_set_fault_handler(mmu->domain, etnaviv_fault_handler, gpu->dev);

        return mmu;
}
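
/* Reprogram the page tables into the hardware, e.g. after a GPU reset. */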
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
{
        if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
                etnaviv_iommuv1_restore(gpu);
        else
                etnaviv_iommuv2_restore(gpu);
}
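
/*
 * Map a kernel suballocated buffer (such as the command buffer region)
 * and return its GPU-visible address. v1 MMUs address such buffers
 * through the linear window, so no page table entry is needed.
 */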
int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
                                  struct drm_mm_node *vram_node, size_t size,
                                  u32 *iova)
{
        struct etnaviv_iommu *mmu = gpu->mmu;

        if (mmu->version == ETNAVIV_IOMMU_V1) {
                *iova = paddr - gpu->memory_base;
                return 0;
        } else {
                int ret;

                mutex_lock(&mmu->lock);
                ret = etnaviv_iommu_find_iova(mmu, vram_node, size);
                if (ret < 0) {
                        mutex_unlock(&mmu->lock);
                        return ret;
                }
                ret = iommu_map(mmu->domain, vram_node->start, paddr, size,
                                IOMMU_READ);
                if (ret < 0) {
                        drm_mm_remove_node(vram_node);
                        mutex_unlock(&mmu->lock);
                        return ret;
                }
                mmu->last_iova = vram_node->start + size;
                gpu->mmu->need_flush = true;
                mutex_unlock(&mmu->lock);

                *iova = (u32)vram_node->start;
                return 0;
        }
}
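
/*
 * Release a suballocation mapping again; only v2 created a real page
 * table entry, so only v2 has anything to undo.
 */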
void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
                                   struct drm_mm_node *vram_node, size_t size,
                                   u32 iova)
{
        struct etnaviv_iommu *mmu = gpu->mmu;

        if (mmu->version == ETNAVIV_IOMMU_V2) {
                mutex_lock(&mmu->lock);
                iommu_unmap(mmu->domain, iova, size);
                drm_mm_remove_node(vram_node);
                mutex_unlock(&mmu->lock);
        }
}
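
/*
 * Dump helpers: recover the etnaviv-specific ops from the generic
 * iommu_domain to size and write out a snapshot of the page tables,
 * e.g. for a GPU state dump.
 */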
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
{
        struct etnaviv_iommu_ops *ops;

        ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);

        return ops->dump_size(iommu->domain);
}

void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
{
        struct etnaviv_iommu_ops *ops;

        ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);

        ops->dump(iommu->domain, buf);
}