GNU Linux-libre 4.19.264-gnu1
drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nv50.h"
#include "head.h"
#include "ior.h"
#include "channv50.h"
#include "rootnv50.h"

#include <core/gpuobj.h>
#include <subdev/timer.h>

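/* Window channel enumeration: returns the number of window channels the
 * display core exposes (bits 25:20 of 0x610074) and fills *pmask with the
 * bitmask of windows that are actually present (read from 0x610064).
 */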
static int
gv100_disp_wndw_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
        struct nvkm_device *device = disp->engine.subdev.device;
        *pmask = nvkm_rd32(device, 0x610064);
        return (nvkm_rd32(device, 0x610074) & 0x03f00000) >> 20;
}

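/* Supervisor worker, run from the disp workqueue.  Reads the per-head
 * supervisor status from 0x6107ac + (head * 4), dispatches whichever of
 * the three supervisor stages was requested (disp->super bits 2:0), then
 * clears the per-head status and acknowledges the interrupt by writing
 * 0x80000000 to 0x6107a8.
 */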
static void
gv100_disp_super(struct work_struct *work)
{
        struct nv50_disp *disp =
                container_of(work, struct nv50_disp, supervisor);
        struct nvkm_subdev *subdev = &disp->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        struct nvkm_head *head;
        u32 stat = nvkm_rd32(device, 0x6107a8);
        u32 mask[4];

        nvkm_debug(subdev, "supervisor %d: %08x\n", ffs(disp->super), stat);
        list_for_each_entry(head, &disp->base.head, head) {
                mask[head->id] = nvkm_rd32(device, 0x6107ac + (head->id * 4));
                HEAD_DBG(head, "%08x", mask[head->id]);
        }

        if (disp->super & 0x00000001) {
                nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
                nv50_disp_super_1(disp);
                list_for_each_entry(head, &disp->base.head, head) {
                        if (!(mask[head->id] & 0x00001000))
                                continue;
                        nv50_disp_super_1_0(disp, head);
                }
        } else
        if (disp->super & 0x00000002) {
                list_for_each_entry(head, &disp->base.head, head) {
                        if (!(mask[head->id] & 0x00001000))
                                continue;
                        nv50_disp_super_2_0(disp, head);
                }
                nvkm_outp_route(&disp->base);
                list_for_each_entry(head, &disp->base.head, head) {
                        if (!(mask[head->id] & 0x00010000))
                                continue;
                        nv50_disp_super_2_1(disp, head);
                }
                list_for_each_entry(head, &disp->base.head, head) {
                        if (!(mask[head->id] & 0x00001000))
                                continue;
                        nv50_disp_super_2_2(disp, head);
                }
        } else
        if (disp->super & 0x00000004) {
                list_for_each_entry(head, &disp->base.head, head) {
                        if (!(mask[head->id] & 0x00001000))
                                continue;
                        nv50_disp_super_3_0(disp, head);
                }
        }

        list_for_each_entry(head, &disp->base.head, head)
                nvkm_wr32(device, 0x6107ac + (head->id * 4), 0x00000000);
        nvkm_wr32(device, 0x6107a8, 0x80000000);
}

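/* Decode and report a channel exception.  The status word at
 * 0x611020 + (chid * 12) carries an error type and the failing method
 * offset (bits 11:0, in units of four bytes); 0x611024 and 0x611028 hold
 * the associated data and error code.  For exceptions on method 0x0200
 * the channel's method state is dumped, and the exception is then
 * acknowledged by writing 0x90000000 back to the status register.
 */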
static void
gv100_disp_exception(struct nv50_disp *disp, int chid)
{
        struct nvkm_subdev *subdev = &disp->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 stat = nvkm_rd32(device, 0x611020 + (chid * 12));
        u32 type = (stat & 0x00007000) >> 12;
        u32 mthd = (stat & 0x00000fff) << 2;
        u32 data = nvkm_rd32(device, 0x611024 + (chid * 12));
        u32 code = nvkm_rd32(device, 0x611028 + (chid * 12));

        nvkm_error(subdev, "chid %d %08x [type %d mthd %04x] "
                           "data %08x code %08x\n",
                   chid, stat, type, mthd, data, code);

        if (chid < ARRAY_SIZE(disp->chan) && disp->chan[chid]) {
                switch (mthd) {
                case 0x0200:
                        nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
                        break;
                default:
                        break;
                }
        }

        nvkm_wr32(device, 0x611020 + (chid * 12), 0x90000000);
}

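/* CTRL_DISP interrupt (0x611c30): supervisor requests (bits 2:0) are
 * handed off to the supervisor worker, bit 3 is believed to be
 * VBIOS_RELEASE (see the TODO below), and bit 8 signals channel AWAKEN
 * events, which are forwarded to clients as channel uevents.
 */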
static void
gv100_disp_intr_ctrl_disp(struct nv50_disp *disp)
{
        struct nvkm_subdev *subdev = &disp->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 stat = nvkm_rd32(device, 0x611c30);

        if (stat & 0x00000007) {
                disp->super = (stat & 0x00000007);
                queue_work(disp->wq, &disp->supervisor);
                nvkm_wr32(device, 0x611860, disp->super);
                stat &= ~0x00000007;
        }

        /*TODO: I would guess this is VBIOS_RELEASE, however, NFI how to
         *      ACK it, nor does RM appear to bother.
         */
        if (stat & 0x00000008)
                stat &= ~0x00000008;

        if (stat & 0x00000100) {
                unsigned long wndws = nvkm_rd32(device, 0x611858);
                unsigned long other = nvkm_rd32(device, 0x61185c);
                int wndw;

                nvkm_wr32(device, 0x611858, wndws);
                nvkm_wr32(device, 0x61185c, other);

                /* AWAKEN_OTHER_CORE. */
                if (other & 0x00000001)
                        nv50_disp_chan_uevent_send(disp, 0);

                /* AWAKEN_WIN_CH(n). */
                for_each_set_bit(wndw, &wndws, disp->wndw.nr) {
                        nv50_disp_chan_uevent_send(disp, 1 + wndw);
                }
        }

        if (stat)
                nvkm_warn(subdev, "ctrl %08x\n", stat);
}

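/* EXC_OTHER exceptions (0x611854): bit 0 is the core channel (chid 0),
 * bits 23:16 are the per-head cursor channels (chid 73 + head).
 */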
static void
gv100_disp_intr_exc_other(struct nv50_disp *disp)
{
        struct nvkm_subdev *subdev = &disp->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 stat = nvkm_rd32(device, 0x611854);
        unsigned long mask;
        int head;

        if (stat & 0x00000001) {
                nvkm_wr32(device, 0x611854, 0x00000001);
                gv100_disp_exception(disp, 0);
                stat &= ~0x00000001;
        }

        if ((mask = (stat & 0x00ff0000) >> 16)) {
                for_each_set_bit(head, &mask, disp->wndw.nr) {
                        nvkm_wr32(device, 0x611854, 0x00010000 << head);
                        gv100_disp_exception(disp, 73 + head);
                        stat &= ~(0x00010000 << head);
                }
        }

        if (stat) {
                nvkm_warn(subdev, "exception %08x\n", stat);
                nvkm_wr32(device, 0x611854, stat);
        }
}

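/* EXC_WINIM exceptions (0x611850): one bit per window immediate channel
 * (chid 33 + wndw).
 */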
static void
gv100_disp_intr_exc_winim(struct nv50_disp *disp)
{
        struct nvkm_subdev *subdev = &disp->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        unsigned long stat = nvkm_rd32(device, 0x611850);
        int wndw;

        for_each_set_bit(wndw, &stat, disp->wndw.nr) {
                nvkm_wr32(device, 0x611850, BIT(wndw));
                gv100_disp_exception(disp, 33 + wndw);
                stat &= ~BIT(wndw);
        }

        if (stat) {
                nvkm_warn(subdev, "wimm %08x\n", (u32)stat);
                nvkm_wr32(device, 0x611850, stat);
        }
}

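/* EXC_WIN exceptions (0x61184c): one bit per window channel
 * (chid 1 + wndw).
 */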
static void
gv100_disp_intr_exc_win(struct nv50_disp *disp)
{
        struct nvkm_subdev *subdev = &disp->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        unsigned long stat = nvkm_rd32(device, 0x61184c);
        int wndw;

        for_each_set_bit(wndw, &stat, disp->wndw.nr) {
                nvkm_wr32(device, 0x61184c, BIT(wndw));
                gv100_disp_exception(disp, 1 + wndw);
                stat &= ~BIT(wndw);
        }

        if (stat) {
                nvkm_warn(subdev, "wndw %08x\n", (u32)stat);
                nvkm_wr32(device, 0x61184c, stat);
        }
}

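/* HEAD_TIMING interrupts (0x611800 + head * 4): LAST_DATA and LOADV
 * (bits 1:0) are simply acknowledged; VBLANK (bit 2) is forwarded to the
 * common vblank handling before being acknowledged.
 */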
static void
gv100_disp_intr_head_timing(struct nv50_disp *disp, int head)
{
        struct nvkm_subdev *subdev = &disp->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 stat = nvkm_rd32(device, 0x611800 + (head * 0x04));

        /* LAST_DATA, LOADV. */
        if (stat & 0x00000003) {
                nvkm_wr32(device, 0x611800 + (head * 0x04), stat & 0x00000003);
                stat &= ~0x00000003;
        }

        if (stat & 0x00000004) {
                nvkm_disp_vblank(&disp->base, head);
                nvkm_wr32(device, 0x611800 + (head * 0x04), 0x00000004);
                stat &= ~0x00000004;
        }

        if (stat) {
                nvkm_warn(subdev, "head %08x\n", stat);
                nvkm_wr32(device, 0x611800 + (head * 0x04), stat);
        }
}

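/* Top-level display interrupt dispatch.  0x611ec0 reports which interrupt
 * groups are pending: per-head timing (bits 7:0), window, window-immediate
 * and other channel exceptions (bits 9, 10 and 11), and CTRL_DISP (bit 12).
 */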
static void
gv100_disp_intr(struct nv50_disp *disp)
{
        struct nvkm_subdev *subdev = &disp->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 stat = nvkm_rd32(device, 0x611ec0);
        unsigned long mask;
        int head;

        if ((mask = (stat & 0x000000ff))) {
                for_each_set_bit(head, &mask, 8) {
                        gv100_disp_intr_head_timing(disp, head);
                        stat &= ~BIT(head);
                }
        }

        if (stat & 0x00000200) {
                gv100_disp_intr_exc_win(disp);
                stat &= ~0x00000200;
        }

        if (stat & 0x00000400) {
                gv100_disp_intr_exc_winim(disp);
                stat &= ~0x00000400;
        }

        if (stat & 0x00000800) {
                gv100_disp_intr_exc_other(disp);
                stat &= ~0x00000800;
        }

        if (stat & 0x00001000) {
                gv100_disp_intr_ctrl_disp(disp);
                stat &= ~0x00001000;
        }

        if (stat)
                nvkm_warn(subdev, "intr %08x\n", stat);
}

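/* Teardown: mask all CTRL_DISP interrupts by clearing the enable register
 * written during init.
 */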
static void
gv100_disp_fini(struct nv50_disp *disp)
{
        struct nvkm_device *device = disp->base.engine.subdev.device;
        nvkm_wr32(device, 0x611db0, 0x00000000);
}

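/* One-time hardware setup: claim ownership of the display engine, copy the
 * lockpin/SOR/head/window/IHUB capability registers into the 0x640xxx
 * range, point the hardware at the display's instance memory, and unmask
 * the interrupts handled above.
 */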
static int
gv100_disp_init(struct nv50_disp *disp)
{
        struct nvkm_device *device = disp->base.engine.subdev.device;
        struct nvkm_head *head;
        int i, j;
        u32 tmp;

        /* Claim ownership of display. */
        if (nvkm_rd32(device, 0x6254e8) & 0x00000002) {
                nvkm_mask(device, 0x6254e8, 0x00000001, 0x00000000);
                if (nvkm_msec(device, 2000,
                        if (!(nvkm_rd32(device, 0x6254e8) & 0x00000002))
                                break;
                ) < 0)
                        return -EBUSY;
        }

        /* Lock pin capabilities. */
        tmp = nvkm_rd32(device, 0x610068);
        nvkm_wr32(device, 0x640008, tmp);

        /* SOR capabilities. */
        for (i = 0; i < disp->sor.nr; i++) {
                tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
                nvkm_mask(device, 0x640000, 0x00000100 << i, 0x00000100 << i);
                nvkm_wr32(device, 0x640144 + (i * 0x08), tmp);
        }

        /* Head capabilities. */
        list_for_each_entry(head, &disp->base.head, head) {
                const int id = head->id;

                /* RG. */
                tmp = nvkm_rd32(device, 0x616300 + (id * 0x800));
                nvkm_wr32(device, 0x640048 + (id * 0x020), tmp);

                /* POSTCOMP. */
                for (j = 0; j < 6 * 4; j += 4) {
                        tmp = nvkm_rd32(device, 0x616100 + (id * 0x800) + j);
                        nvkm_wr32(device, 0x640030 + (id * 0x20) + j, tmp);
                }
        }

        /* Window capabilities. */
        for (i = 0; i < disp->wndw.nr; i++) {
                nvkm_mask(device, 0x640004, 1 << i, 1 << i);
                for (j = 0; j < 6 * 4; j += 4) {
                        tmp = nvkm_rd32(device, 0x630050 + (i * 0x800) + j);
                        nvkm_wr32(device, 0x6401e4 + (i * 0x20) + j, tmp);
                }
        }

        /* IHUB capabilities. */
        for (i = 0; i < 4; i++) {
                tmp = nvkm_rd32(device, 0x62e000 + (i * 0x04));
                nvkm_wr32(device, 0x640010 + (i * 0x04), tmp);
        }

        nvkm_mask(device, 0x610078, 0x00000001, 0x00000001);

        /* Setup instance memory. */
        switch (nvkm_memory_target(disp->inst->memory)) {
        case NVKM_MEM_TARGET_VRAM: tmp = 0x00000001; break;
        case NVKM_MEM_TARGET_NCOH: tmp = 0x00000002; break;
        case NVKM_MEM_TARGET_HOST: tmp = 0x00000003; break;
        default:
                break;
        }
        nvkm_wr32(device, 0x610010, 0x00000008 | tmp);
        nvkm_wr32(device, 0x610014, disp->inst->addr >> 16);

        /* CTRL_DISP: AWAKEN, ERROR, SUPERVISOR[1-3]. */
        nvkm_wr32(device, 0x611cf0, 0x00000187); /* MSK. */
        nvkm_wr32(device, 0x611db0, 0x00000187); /* EN. */

        /* EXC_OTHER: CURSn, CORE. */
        nvkm_wr32(device, 0x611cec, disp->head.mask << 16 |
                                    0x00000001); /* MSK. */
        nvkm_wr32(device, 0x611dac, 0x00000000); /* EN. */

        /* EXC_WINIM. */
        nvkm_wr32(device, 0x611ce8, disp->wndw.mask); /* MSK. */
        nvkm_wr32(device, 0x611da8, 0x00000000); /* EN. */

        /* EXC_WIN. */
        nvkm_wr32(device, 0x611ce4, disp->wndw.mask); /* MSK. */
        nvkm_wr32(device, 0x611da4, 0x00000000); /* EN. */

        /* HEAD_TIMING(n): VBLANK. */
        list_for_each_entry(head, &disp->base.head, head) {
                const u32 hoff = head->id * 4;
                nvkm_wr32(device, 0x611cc0 + hoff, 0x00000004); /* MSK. */
                nvkm_wr32(device, 0x611d80 + hoff, 0x00000000); /* EN. */
        }

        /* OR. */
        nvkm_wr32(device, 0x611cf4, 0x00000000); /* MSK. */
        nvkm_wr32(device, 0x611db4, 0x00000000); /* EN. */
        return 0;
}

static const struct nv50_disp_func
gv100_disp = {
        .init = gv100_disp_init,
        .fini = gv100_disp_fini,
        .intr = gv100_disp_intr,
        .uevent = &gv100_disp_chan_uevent,
        .super = gv100_disp_super,
        .root = &gv100_disp_root_oclass,
        .wndw = { .cnt = gv100_disp_wndw_cnt },
        .head = { .cnt = gv100_head_cnt, .new = gv100_head_new },
        .sor = { .cnt = gv100_sor_cnt, .new = gv100_sor_new },
        .ramht_size = 0x2000,
};

int
gv100_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
{
        return nv50_disp_new_(&gv100_disp, device, index, pdisp);
}