GNU Linux-libre 4.14.266-gnu1
drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
/*
 * Copyright (C) 2016 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_iommu.h"
#include "state.xml.h"
#include "state_hi.xml.h"

#define MMUv2_PTE_PRESENT               BIT(0)
#define MMUv2_PTE_EXCEPTION             BIT(1)
#define MMUv2_PTE_WRITEABLE             BIT(2)

#define MMUv2_MTLB_MASK                 0xffc00000
#define MMUv2_MTLB_SHIFT                22
#define MMUv2_STLB_MASK                 0x003ff000
#define MMUv2_STLB_SHIFT                12

#define MMUv2_MAX_STLB_ENTRIES          1024

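/*
 * Address layout, as implied by the masks above: a 32-bit GPU virtual
 * address splits into iova[31:22] (MTLB index, 1024 entries),
 * iova[21:12] (STLB index, 1024 entries per STLB page) and
 * iova[11:0] (offset into the 4K page), covering 4 GiB in total.
 */
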
struct etnaviv_iommuv2_domain {
        struct iommu_domain domain;
        struct device *dev;
        void *bad_page_cpu;
        dma_addr_t bad_page_dma;
        /* M(aster) TLB aka first level pagetable */
        u32 *mtlb_cpu;
        dma_addr_t mtlb_dma;
        /* S(lave) TLB aka second level pagetable */
        u32 *stlb_cpu[MMUv2_MAX_STLB_ENTRIES];
        dma_addr_t stlb_dma[MMUv2_MAX_STLB_ENTRIES];
};

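/*
 * Sizing note: etnaviv_iommuv2_init() below pre-allocates all 1024
 * STLBs, so each domain pins 4 KiB for the MTLB plus 1024 * 4 KiB =
 * 4 MiB of STLB pages.
 */
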
static struct etnaviv_iommuv2_domain *
to_etnaviv_domain(struct iommu_domain *domain)
{
        return container_of(domain, struct etnaviv_iommuv2_domain, domain);
}

static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
                               phys_addr_t paddr, size_t size, int prot)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(domain);
        int mtlb_entry, stlb_entry;
        u32 entry = (u32)paddr | MMUv2_PTE_PRESENT;

        if (size != SZ_4K)
                return -EINVAL;

        if (prot & IOMMU_WRITE)
                entry |= MMUv2_PTE_WRITEABLE;

        mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
        stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

        etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = entry;

        return 0;
}

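/*
 * Worked example for etnaviv_iommuv2_map(): iova 0x12345000 gives
 * mtlb_entry = (0x12345000 & 0xffc00000) >> 22 = 0x48 and
 * stlb_entry = (0x12345000 & 0x003ff000) >> 12 = 0x345, so the PTE
 * is written to stlb_cpu[0x48][0x345].
 */
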
static size_t etnaviv_iommuv2_unmap(struct iommu_domain *domain,
        unsigned long iova, size_t size)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(domain);
        int mtlb_entry, stlb_entry;

        /*
         * The return type is size_t (number of bytes unmapped);
         * returning a negative errno here would be interpreted as a
         * huge size, so signal failure with 0 instead.
         */
        if (size != SZ_4K)
                return 0;

        mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
        stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

        etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = MMUv2_PTE_EXCEPTION;

        return SZ_4K;
}

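/*
 * Note that unmap re-arms the PTE with MMUv2_PTE_EXCEPTION rather
 * than clearing it to zero, matching the pre-fill done in
 * etnaviv_iommuv2_init(): a stale GPU access to the region is flagged
 * as an MMU exception instead of silently walking a dead mapping.
 */
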
static phys_addr_t etnaviv_iommuv2_iova_to_phys(struct iommu_domain *domain,
        dma_addr_t iova)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(domain);
        int mtlb_entry, stlb_entry;

        mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
        stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

        return etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] & ~(SZ_4K - 1);
}

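/*
 * Masking with ~(SZ_4K - 1) strips the PTE flag bits (present,
 * exception, writeable) to recover the physical page address; the
 * in-page offset of the iova is not added back here.
 */
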
static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
{
        u32 *p;
        int ret, i, j;

        /* allocate scratch page */
        etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
                                                  SZ_4K,
                                                  &etnaviv_domain->bad_page_dma,
                                                  GFP_KERNEL);
        if (!etnaviv_domain->bad_page_cpu) {
                ret = -ENOMEM;
                goto fail_mem;
        }
        p = etnaviv_domain->bad_page_cpu;
        for (i = 0; i < SZ_4K / 4; i++)
                *p++ = 0xdead55aa;

        etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->dev,
                                                  SZ_4K,
                                                  &etnaviv_domain->mtlb_dma,
                                                  GFP_KERNEL);
        if (!etnaviv_domain->mtlb_cpu) {
                ret = -ENOMEM;
                goto fail_mem;
        }

        /* pre-populate STLB pages (may want to switch to on-demand later) */
        for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
                etnaviv_domain->stlb_cpu[i] =
                                dma_alloc_coherent(etnaviv_domain->dev,
                                                   SZ_4K,
                                                   &etnaviv_domain->stlb_dma[i],
                                                   GFP_KERNEL);
                if (!etnaviv_domain->stlb_cpu[i]) {
                        ret = -ENOMEM;
                        goto fail_mem;
                }
                p = etnaviv_domain->stlb_cpu[i];
                for (j = 0; j < SZ_4K / 4; j++)
                        *p++ = MMUv2_PTE_EXCEPTION;

                etnaviv_domain->mtlb_cpu[i] = etnaviv_domain->stlb_dma[i] |
                                              MMUv2_PTE_PRESENT;
        }

        return 0;

fail_mem:
        if (etnaviv_domain->bad_page_cpu)
                dma_free_coherent(etnaviv_domain->dev, SZ_4K,
                                  etnaviv_domain->bad_page_cpu,
                                  etnaviv_domain->bad_page_dma);

        if (etnaviv_domain->mtlb_cpu)
                dma_free_coherent(etnaviv_domain->dev, SZ_4K,
                                  etnaviv_domain->mtlb_cpu,
                                  etnaviv_domain->mtlb_dma);

        for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
                if (etnaviv_domain->stlb_cpu[i])
                        dma_free_coherent(etnaviv_domain->dev, SZ_4K,
                                          etnaviv_domain->stlb_cpu[i],
                                          etnaviv_domain->stlb_dma[i]);
        }

        return ret;
}

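/*
 * After a successful init every MTLB slot points at a valid STLB page
 * whose entries are all MMUv2_PTE_EXCEPTION, so any access to a
 * not-yet-mapped address faults deterministically instead of walking
 * uninitialized memory. The scratch ("bad") page filled with
 * 0xdead55aa is passed alongside the MTLB address to the hardware in
 * etnaviv_iommuv2_restore() below.
 */
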
static void etnaviv_iommuv2_domain_free(struct iommu_domain *domain)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(domain);
        int i;

        dma_free_coherent(etnaviv_domain->dev, SZ_4K,
                          etnaviv_domain->bad_page_cpu,
                          etnaviv_domain->bad_page_dma);

        dma_free_coherent(etnaviv_domain->dev, SZ_4K,
                          etnaviv_domain->mtlb_cpu,
                          etnaviv_domain->mtlb_dma);

        for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
                if (etnaviv_domain->stlb_cpu[i])
                        dma_free_coherent(etnaviv_domain->dev, SZ_4K,
                                          etnaviv_domain->stlb_cpu[i],
                                          etnaviv_domain->stlb_dma[i]);
        }

        vfree(etnaviv_domain);
}

static size_t etnaviv_iommuv2_dump_size(struct iommu_domain *domain)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(domain);
        size_t dump_size = SZ_4K;
        int i;

        for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
                if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
                        dump_size += SZ_4K;

        return dump_size;
}

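/*
 * The size computed here (one 4K page for the MTLB plus one 4K page
 * per present STLB) must match the layout that
 * etnaviv_iommuv2_dump() below writes into the buffer.
 */
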
static void etnaviv_iommuv2_dump(struct iommu_domain *domain, void *buf)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(domain);
        int i;

        memcpy(buf, etnaviv_domain->mtlb_cpu, SZ_4K);
        buf += SZ_4K;
        /*
         * Only advance the buffer for STLBs that are actually copied,
         * so the amount written matches etnaviv_iommuv2_dump_size().
         */
        for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
                if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT) {
                        memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
                        buf += SZ_4K;
                }
}

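/*
 * With the fully pre-populated tables from etnaviv_iommuv2_init()
 * every MTLB entry is present, so today the dump is always
 * 4 KiB + 4 MiB; skipping absent STLBs only starts to matter if they
 * are ever allocated on demand.
 */
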
static const struct etnaviv_iommu_ops etnaviv_iommu_ops = {
        .ops = {
                .domain_free = etnaviv_iommuv2_domain_free,
                .map = etnaviv_iommuv2_map,
                .unmap = etnaviv_iommuv2_unmap,
                .iova_to_phys = etnaviv_iommuv2_iova_to_phys,
                .pgsize_bitmap = SZ_4K,
        },
        .dump_size = etnaviv_iommuv2_dump_size,
        .dump = etnaviv_iommuv2_dump,
};

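/*
 * pgsize_bitmap advertises only SZ_4K: the map/unmap callbacks above
 * reject any other size, and the iommu core splits larger requests
 * into 4K operations before calling into the driver.
 */
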
void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(gpu->mmu->domain);
        u16 prefetch;

        /* If the MMU is already enabled the state is still there. */
        if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
                return;

        prefetch = etnaviv_buffer_config_mmuv2(gpu,
                                (u32)etnaviv_domain->mtlb_dma,
                                (u32)etnaviv_domain->bad_page_dma);
        etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(gpu->buffer),
                             prefetch);
        etnaviv_gpu_wait_idle(gpu, 100);

        gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
}
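
/*
 * The MMUv2 configuration (the MTLB base and the bad-page address) is
 * programmed through the FE command stream built by
 * etnaviv_buffer_config_mmuv2() rather than by direct register
 * writes; only the final enable bit is set here via
 * VIVS_MMUv2_CONTROL.
 */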

struct iommu_domain *etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain;
        int ret;

        etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
        if (!etnaviv_domain)
                return NULL;

        etnaviv_domain->dev = gpu->dev;

        etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
        etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
        etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
        etnaviv_domain->domain.geometry.aperture_start = 0;
        etnaviv_domain->domain.geometry.aperture_end = ~0UL & ~(SZ_4K - 1);

        ret = etnaviv_iommuv2_init(etnaviv_domain);
        if (ret)
                goto out_free;

        return &etnaviv_domain->domain;

out_free:
        vfree(etnaviv_domain);
        return NULL;
}