GNU Linux-libre 4.19.264-gnu1
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv50.h"
#include "head.h"
#include "ior.h"
#include "channv50.h"
#include "rootnv50.h"

#include <core/ramht.h>
#include <subdev/timer.h>

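/* Supervisor handler, run from the disp workqueue once gf119_disp_intr()
 * has latched a supervisor stage into disp->super.  Per-head state is read
 * from 0x6101d4 up front, the matching nv50_disp_super_*() stage helpers
 * are run for the heads that flagged work, and the per-head registers plus
 * 0x6101d0 are then written back, which appears to let the hardware
 * proceed to the next supervisor stage.
 */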
void
gf119_disp_super(struct work_struct *work)
{
        struct nv50_disp *disp =
                container_of(work, struct nv50_disp, supervisor);
        struct nvkm_subdev *subdev = &disp->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        struct nvkm_head *head;
        u32 mask[4];

        nvkm_debug(subdev, "supervisor %d\n", ffs(disp->super));
        list_for_each_entry(head, &disp->base.head, head) {
                mask[head->id] = nvkm_rd32(device, 0x6101d4 + (head->id * 0x800));
                HEAD_DBG(head, "%08x", mask[head->id]);
        }

        if (disp->super & 0x00000001) {
                nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
                nv50_disp_super_1(disp);
                list_for_each_entry(head, &disp->base.head, head) {
                        if (!(mask[head->id] & 0x00001000))
                                continue;
                        nv50_disp_super_1_0(disp, head);
                }
        } else
        if (disp->super & 0x00000002) {
                list_for_each_entry(head, &disp->base.head, head) {
                        if (!(mask[head->id] & 0x00001000))
                                continue;
                        nv50_disp_super_2_0(disp, head);
                }
                nvkm_outp_route(&disp->base);
                list_for_each_entry(head, &disp->base.head, head) {
                        if (!(mask[head->id] & 0x00010000))
                                continue;
                        nv50_disp_super_2_1(disp, head);
                }
                list_for_each_entry(head, &disp->base.head, head) {
                        if (!(mask[head->id] & 0x00001000))
                                continue;
                        nv50_disp_super_2_2(disp, head);
                }
        } else
        if (disp->super & 0x00000004) {
                list_for_each_entry(head, &disp->base.head, head) {
                        if (!(mask[head->id] & 0x00001000))
                                continue;
                        nv50_disp_super_3_0(disp, head);
                }
        }

        list_for_each_entry(head, &disp->base.head, head)
                nvkm_wr32(device, 0x6101d4 + (head->id * 0x800), 0x00000000);
        nvkm_wr32(device, 0x6101d0, 0x80000000);
}

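/* Channel exception handler, reached via disp->func->intr_error.  The
 * offending method/data are read from the per-channel error slot at
 * 0x6101f0 and logged; an error on method 0x0080 additionally dumps the
 * channel's current method state at error level.  The exception is then
 * acked in 0x61009c and the error slot reset.
 */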
void
gf119_disp_intr_error(struct nv50_disp *disp, int chid)
{
        struct nvkm_subdev *subdev = &disp->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 mthd = nvkm_rd32(device, 0x6101f0 + (chid * 12));
        u32 data = nvkm_rd32(device, 0x6101f4 + (chid * 12));
        u32 unkn = nvkm_rd32(device, 0x6101f8 + (chid * 12));

        nvkm_error(subdev, "chid %d mthd %04x data %08x %08x %08x\n",
                   chid, (mthd & 0x0000ffc), data, mthd, unkn);

        if (chid < ARRAY_SIZE(disp->chan)) {
                switch (mthd & 0xffc) {
                case 0x0080:
                        nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
                        break;
                default:
                        break;
                }
        }

        nvkm_wr32(device, 0x61009c, (1 << chid));
        nvkm_wr32(device, 0x6101f0 + (chid * 12), 0x90000000);
}

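/* Top-level display interrupt handler.  0x610088 reports the pending
 * sources: bit 0 signals channel completion events (forwarded as uevents),
 * bit 1 signals channel exceptions, bit 20 carries supervisor requests
 * (handed off to the workqueue above), and bits 24+ are per-head
 * interrupts, of which only vblank is handled here.
 */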
void
gf119_disp_intr(struct nv50_disp *disp)
{
        struct nvkm_subdev *subdev = &disp->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        struct nvkm_head *head;
        u32 intr = nvkm_rd32(device, 0x610088);

        if (intr & 0x00000001) {
                u32 stat = nvkm_rd32(device, 0x61008c);
                while (stat) {
                        int chid = __ffs(stat); stat &= ~(1 << chid);
                        nv50_disp_chan_uevent_send(disp, chid);
                        nvkm_wr32(device, 0x61008c, 1 << chid);
                }
                intr &= ~0x00000001;
        }

        if (intr & 0x00000002) {
                u32 stat = nvkm_rd32(device, 0x61009c);
                int chid = ffs(stat) - 1;
                if (chid >= 0)
                        disp->func->intr_error(disp, chid);
                intr &= ~0x00000002;
        }

        if (intr & 0x00100000) {
                u32 stat = nvkm_rd32(device, 0x6100ac);
                if (stat & 0x00000007) {
                        disp->super = (stat & 0x00000007);
                        queue_work(disp->wq, &disp->supervisor);
                        nvkm_wr32(device, 0x6100ac, disp->super);
                        stat &= ~0x00000007;
                }

                if (stat) {
                        nvkm_warn(subdev, "intr24 %08x\n", stat);
                        nvkm_wr32(device, 0x6100ac, stat);
                }

                intr &= ~0x00100000;
        }

        list_for_each_entry(head, &disp->base.head, head) {
                const u32 hoff = head->id * 0x800;
                u32 mask = 0x01000000 << head->id;
                if (mask & intr) {
                        u32 stat = nvkm_rd32(device, 0x6100bc + hoff);
                        if (stat & 0x00000001)
                                nvkm_disp_vblank(&disp->base, head->id);
                        nvkm_mask(device, 0x6100bc + hoff, 0, 0);
                        nvkm_rd32(device, 0x6100c0 + hoff);
                }
        }
}

void
gf119_disp_fini(struct nv50_disp *disp)
{
        struct nvkm_device *device = disp->base.engine.subdev.device;
        /* disable all interrupts */
        nvkm_wr32(device, 0x6100b0, 0x00000000);
}

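/* One-time hardware setup: mirror the CRTC/DAC/SOR capability registers
 * (apparently for EVO's benefit, per the comment below), take the display
 * back from the VBIOS if it still owns it, point the hardware at the
 * instance memory holding the hash table and objects, and unmask only the
 * interrupt sources handled above.
 */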
int
gf119_disp_init(struct nv50_disp *disp)
{
        struct nvkm_device *device = disp->base.engine.subdev.device;
        struct nvkm_head *head;
        u32 tmp;
        int i;

        /* The below segments of code copying values from one register to
         * another appear to inform EVO of the display capabilities or
         * something similar.
         */

        /* ... CRTC caps */
        list_for_each_entry(head, &disp->base.head, head) {
                const u32 hoff = head->id * 0x800;
                tmp = nvkm_rd32(device, 0x616104 + hoff);
                nvkm_wr32(device, 0x6101b4 + hoff, tmp);
                tmp = nvkm_rd32(device, 0x616108 + hoff);
                nvkm_wr32(device, 0x6101b8 + hoff, tmp);
                tmp = nvkm_rd32(device, 0x61610c + hoff);
                nvkm_wr32(device, 0x6101bc + hoff, tmp);
        }

        /* ... DAC caps */
        for (i = 0; i < disp->dac.nr; i++) {
                tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
                nvkm_wr32(device, 0x6101c0 + (i * 0x800), tmp);
        }

        /* ... SOR caps */
        for (i = 0; i < disp->sor.nr; i++) {
                tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
                nvkm_wr32(device, 0x6301c4 + (i * 0x800), tmp);
        }

        /* steal display away from vbios, or something like that */
        if (nvkm_rd32(device, 0x6100ac) & 0x00000100) {
                nvkm_wr32(device, 0x6100ac, 0x00000100);
                nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
                if (nvkm_msec(device, 2000,
                        if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
                                break;
                ) < 0)
                        return -EBUSY;
        }

        /* point at display engine memory area (hash table, objects) */
        nvkm_wr32(device, 0x610010, (disp->inst->addr >> 8) | 9);

        /* enable supervisor interrupts, disable everything else */
        nvkm_wr32(device, 0x610090, 0x00000000);
        nvkm_wr32(device, 0x6100a0, 0x00000000);
        nvkm_wr32(device, 0x6100b0, 0x00000307);

        /* disable underflow reporting, preventing an intermittent issue
         * on some gk104 boards where the production vbios left this
         * setting enabled by default.
         *
         * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
         */
        list_for_each_entry(head, &disp->base.head, head) {
                const u32 hoff = head->id * 0x800;
                nvkm_mask(device, 0x616308 + hoff, 0x00000111, 0x00000010);
        }

        return 0;
}

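/* GF119 implementation hooks; the common nv50 display code calls back
 * through this table for everything chipset-specific.
 */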
static const struct nv50_disp_func
gf119_disp = {
        .init = gf119_disp_init,
        .fini = gf119_disp_fini,
        .intr = gf119_disp_intr,
        .intr_error = gf119_disp_intr_error,
        .uevent = &gf119_disp_chan_uevent,
        .super = gf119_disp_super,
        .root = &gf119_disp_root_oclass,
        .head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
        .dac = { .cnt = gf119_dac_cnt, .new = gf119_dac_new },
        .sor = { .cnt = gf119_sor_cnt, .new = gf119_sor_new },
};

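/* Constructor referenced by the per-chipset tables in
 * nvkm/engine/device/base.c.  Roughly (a sketch from memory of that file,
 * not verbatim), the GF119 entry looks like:
 *
 *     static const struct nvkm_device_chip
 *     nvd9_chipset = {
 *             .name = "GF119",
 *             ...
 *             .disp = gf119_disp_new,
 *             ...
 *     };
 */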
int
gf119_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
{
        return nv50_disp_new_(&gf119_disp, device, index, pdisp);
}