drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c (GNU Linux-libre 4.9.337-gnu1)
/*
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <core/tegra.h>
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
#include "priv.h"

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif

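/*
 * Bring the GPU rail and clocks up and take the unit out of reset.  The
 * sequence is: enable the VDD regulator, enable the core/ref/pwr clocks,
 * then assert reset, remove the 3D power-gate clamp and deassert reset,
 * with short settle delays between the steps.
 */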
static int
nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
{
        int ret;

        ret = regulator_enable(tdev->vdd);
        if (ret)
                goto err_power;

        ret = clk_prepare_enable(tdev->clk);
        if (ret)
                goto err_clk;
        if (tdev->clk_ref) {
                ret = clk_prepare_enable(tdev->clk_ref);
                if (ret)
                        goto err_clk_ref;
        }
        ret = clk_prepare_enable(tdev->clk_pwr);
        if (ret)
                goto err_clk_pwr;
        clk_set_rate(tdev->clk_pwr, 204000000);
        udelay(10);

        reset_control_assert(tdev->rst);
        udelay(10);

        ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
        if (ret)
                goto err_clamp;
        udelay(10);

        reset_control_deassert(tdev->rst);
        udelay(10);

        return 0;

err_clamp:
        clk_disable_unprepare(tdev->clk_pwr);
err_clk_pwr:
        if (tdev->clk_ref)
                clk_disable_unprepare(tdev->clk_ref);
err_clk_ref:
        clk_disable_unprepare(tdev->clk);
err_clk:
        regulator_disable(tdev->vdd);
err_power:
        return ret;
}

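/*
 * Undo nvkm_device_tegra_power_up(): put the unit back in reset, stop the
 * clocks in reverse order and cut the VDD rail.
 */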
static int
nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
{
        reset_control_assert(tdev->rst);
        udelay(10);

        clk_disable_unprepare(tdev->clk_pwr);
        if (tdev->clk_ref)
                clk_disable_unprepare(tdev->clk_ref);
        clk_disable_unprepare(tdev->clk);
        udelay(10);

        return regulator_disable(tdev->vdd);
}

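/*
 * Set up an explicitly managed IOMMU domain for the GPU, if one is
 * available: detach any ARM DMA-API mapping first, allocate a domain on
 * the platform bus, pick a page shift compatible with PAGE_SIZE, attach
 * the device and initialize an allocator covering the GPU-addressable
 * range (1 << iommu_bit bytes).  On any failure the device falls back to
 * operating without an IOMMU domain.
 */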
static void
nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
        struct device *dev = &tdev->pdev->dev;
        unsigned long pgsize_bitmap;
        int ret;

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
        if (dev->archdata.mapping) {
                struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

                arm_iommu_detach_device(dev);
                arm_iommu_release_mapping(mapping);
        }
#endif

        if (!tdev->func->iommu_bit)
                return;

        mutex_init(&tdev->iommu.mutex);

        if (iommu_present(&platform_bus_type)) {
                tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
                if (!tdev->iommu.domain)
                        goto error;

                /*
                 * An IOMMU is only usable if it supports page sizes smaller
                 * than or equal to the system's PAGE_SIZE, with a preference
                 * if both are equal.
                 */
                pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
                if (pgsize_bitmap & PAGE_SIZE) {
                        tdev->iommu.pgshift = PAGE_SHIFT;
                } else {
                        tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
                        if (tdev->iommu.pgshift == 0) {
                                dev_warn(dev, "unsupported IOMMU page size\n");
                                goto free_domain;
                        }
                        tdev->iommu.pgshift -= 1;
                }

                ret = iommu_attach_device(tdev->iommu.domain, dev);
                if (ret)
                        goto free_domain;

                ret = nvkm_mm_init(&tdev->iommu.mm, 0,
                                   (1ULL << tdev->func->iommu_bit) >>
                                   tdev->iommu.pgshift, 1);
                if (ret)
                        goto detach_device;
        }

        return;

detach_device:
        iommu_detach_device(tdev->iommu.domain, dev);

free_domain:
        iommu_domain_free(tdev->iommu.domain);

error:
        tdev->iommu.domain = NULL;
        tdev->iommu.pgshift = 0;
        dev_err(dev, "cannot initialize IOMMU MM\n");
#endif
}

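/*
 * Tear down the IOMMU state created by nvkm_device_tegra_probe_iommu().
 * A NULL domain means the probe either failed or was skipped, so there is
 * nothing to undo.
 */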
static void
nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
        if (tdev->iommu.domain) {
                nvkm_mm_fini(&tdev->iommu.mm);
                iommu_detach_device(tdev->iommu.domain, tdev->device.dev);
                iommu_domain_free(tdev->iommu.domain);
        }
#endif
}

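/* Upcast from the embedded generic device to its Tegra container. */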
static struct nvkm_device_tegra *
nvkm_device_tegra(struct nvkm_device *device)
{
        return container_of(device, struct nvkm_device_tegra, device);
}

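/*
 * nvkm addresses resources by PCI-style BAR index; on Tegra these map
 * directly onto the platform device's MEM resources.
 */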
static struct resource *
nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
{
        struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
        return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
}

static resource_size_t
nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
{
        struct resource *res = nvkm_device_tegra_resource(device, bar);
        return res ? res->start : 0;
}

static resource_size_t
nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
{
        struct resource *res = nvkm_device_tegra_resource(device, bar);
        return res ? resource_size(res) : 0;
}

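/*
 * Interrupt handler: disarm interrupt delivery, dispatch pending
 * interrupts through the MC subdev, then rearm.  Returning IRQ_NONE when
 * nothing was pending matters because the line is requested as shared.
 */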
static irqreturn_t
nvkm_device_tegra_intr(int irq, void *arg)
{
        struct nvkm_device_tegra *tdev = arg;
        struct nvkm_device *device = &tdev->device;
        bool handled = false;
        nvkm_mc_intr_unarm(device);
        nvkm_mc_intr(device, &handled);
        nvkm_mc_intr_rearm(device);
        return handled ? IRQ_HANDLED : IRQ_NONE;
}

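/* Release the interrupt line on fini (including suspend); init re-requests it. */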
static void
nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend)
{
        struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
        if (tdev->irq) {
                free_irq(tdev->irq, tdev);
                tdev->irq = 0;
        }
}

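/* Look up and request the GPU's "stall" interrupt line. */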
static int
nvkm_device_tegra_init(struct nvkm_device *device)
{
        struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
        int irq, ret;

        irq = platform_get_irq_byname(tdev->pdev, "stall");
        if (irq < 0)
                return irq;

        ret = request_irq(irq, nvkm_device_tegra_intr,
                          IRQF_SHARED, "nvkm", tdev);
        if (ret)
                return ret;

        tdev->irq = irq;
        return 0;
}

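/*
 * Destructor: power the GPU down, detach the IOMMU, and return the
 * containing structure so the caller frees the correct pointer.
 */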
static void *
nvkm_device_tegra_dtor(struct nvkm_device *device)
{
        struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
        nvkm_device_tegra_power_down(tdev);
        nvkm_device_tegra_remove_iommu(tdev);
        return tdev;
}

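/*
 * Hooks wiring the Tegra platform implementation into the generic
 * nvkm_device.  cpu_coherent is false: the GPU is not coherent with the
 * CPU caches on these SoCs, so mappings need explicit cache maintenance.
 */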
static const struct nvkm_device_func
nvkm_device_tegra_func = {
        .tegra = nvkm_device_tegra,
        .dtor = nvkm_device_tegra_dtor,
        .init = nvkm_device_tegra_init,
        .fini = nvkm_device_tegra_fini,
        .resource_addr = nvkm_device_tegra_resource_addr,
        .resource_size = nvkm_device_tegra_resource_size,
        .cpu_coherent = false,
};

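/*
 * Constructor, called from the nouveau platform driver's probe path:
 * acquire the regulator, reset and clock resources, set the DMA mask from
 * the chip's IOMMU address width, probe the IOMMU, power the GPU up and
 * finally build the generic nvkm_device on top.
 *
 * Illustrative call from a platform driver probe (a sketch only;
 * "my_tegra_func" is a hypothetical per-SoC nvkm_device_tegra_func):
 *
 *      struct nvkm_device *device;
 *      int ret = nvkm_device_tegra_new(&my_tegra_func, pdev, config, debug,
 *                                      true, true, ~0ULL, &device);
 */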
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
                      struct platform_device *pdev,
                      const char *cfg, const char *dbg,
                      bool detect, bool mmio, u64 subdev_mask,
                      struct nvkm_device **pdevice)
{
        struct nvkm_device_tegra *tdev;
        int ret;

        if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
                return -ENOMEM;

        tdev->func = func;
        tdev->pdev = pdev;

        tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
        if (IS_ERR(tdev->vdd)) {
                ret = PTR_ERR(tdev->vdd);
                goto free;
        }

        tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
        if (IS_ERR(tdev->rst)) {
                ret = PTR_ERR(tdev->rst);
                goto free;
        }

        tdev->clk = devm_clk_get(&pdev->dev, "gpu");
        if (IS_ERR(tdev->clk)) {
                ret = PTR_ERR(tdev->clk);
                goto free;
        }

        if (func->require_ref_clk)
                tdev->clk_ref = devm_clk_get(&pdev->dev, "ref");
        if (IS_ERR(tdev->clk_ref)) {
                ret = PTR_ERR(tdev->clk_ref);
                goto free;
        }

        tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
        if (IS_ERR(tdev->clk_pwr)) {
                ret = PTR_ERR(tdev->clk_pwr);
                goto free;
        }

        /*
         * The IOMMU bit defines the upper limit of the GPU-addressable
         * space.  This will be refined in nouveau_ttm_init(), but we need
         * to do it early for instmem to behave properly.
         */
        ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(tdev->func->iommu_bit));
        if (ret)
                goto free;

        nvkm_device_tegra_probe_iommu(tdev);

        ret = nvkm_device_tegra_power_up(tdev);
        if (ret)
                goto remove;

        tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
        tdev->gpu_speedo_id = tegra_sku_info.gpu_speedo_id;
        ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
                               NVKM_DEVICE_TEGRA, pdev->id, NULL,
                               cfg, dbg, detect, mmio, subdev_mask,
                               &tdev->device);
        if (ret)
                goto powerdown;

        *pdevice = &tdev->device;

        return 0;

powerdown:
        nvkm_device_tegra_power_down(tdev);
remove:
        nvkm_device_tegra_remove_iommu(tdev);
free:
        kfree(tdev);
        return ret;
}
#else
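/* Stub for kernels built without CONFIG_NOUVEAU_PLATFORM_DRIVER. */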
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
                      struct platform_device *pdev,
                      const char *cfg, const char *dbg,
                      bool detect, bool mmio, u64 subdev_mask,
                      struct nvkm_device **pdevice)
{
        return -ENOSYS;
}
#endif