/* GNU Linux-libre 4.4.284-gnu1: drivers/gpu/drm/amd/amdgpu/dce_v8_0.c */
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "cikd.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "gca/gfx_7_2_enum.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 crtc_offsets[6] =
{
        CRTC0_REGISTER_OFFSET,
        CRTC1_REGISTER_OFFSET,
        CRTC2_REGISTER_OFFSET,
        CRTC3_REGISTER_OFFSET,
        CRTC4_REGISTER_OFFSET,
        CRTC5_REGISTER_OFFSET
};

static const uint32_t dig_offsets[] = {
        CRTC0_REGISTER_OFFSET,
        CRTC1_REGISTER_OFFSET,
        CRTC2_REGISTER_OFFSET,
        CRTC3_REGISTER_OFFSET,
        CRTC4_REGISTER_OFFSET,
        CRTC5_REGISTER_OFFSET,
        (0x13830 - 0x7030) >> 2,
};

static const struct {
        uint32_t        reg;
        uint32_t        vblank;
        uint32_t        vline;
        uint32_t        hpd;

} interrupt_status_offsets[6] = { {
        .reg = mmDISP_INTERRUPT_STATUS,
        .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
        .reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
        .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
        .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
        .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
        .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
        .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
        .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
        .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
        .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
        .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };

static const uint32_t hpd_int_control_offsets[6] = {
        mmDC_HPD1_INT_CONTROL,
        mmDC_HPD2_INT_CONTROL,
        mmDC_HPD3_INT_CONTROL,
        mmDC_HPD4_INT_CONTROL,
        mmDC_HPD5_INT_CONTROL,
        mmDC_HPD6_INT_CONTROL,
};

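/*
 * The Azalia (audio) endpoint registers are indirectly addressed: write the
 * register index to AZALIA_F0_CODEC_ENDPOINT_INDEX, then read or write
 * AZALIA_F0_CODEC_ENDPOINT_DATA.  The index/data pair is shared, so both
 * accessors hold audio_endpt_idx_lock across the two-step sequence.
 */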
static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
                                     u32 block_offset, u32 reg)
{
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
        WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
        r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
        spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);

        return r;
}

static void dce_v8_0_audio_endpt_wreg(struct amdgpu_device *adev,
                                      u32 block_offset, u32 reg, u32 v)
{
        unsigned long flags;

        spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
        WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
        WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
        spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}

static bool dce_v8_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
{
        if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
                        CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
                return true;
        else
                return false;
}

static bool dce_v8_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
{
        u32 pos1, pos2;

        pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
        pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

        if (pos1 != pos2)
                return true;
        else
                return false;
}

/**
 * dce_v8_0_vblank_wait - vblank wait asic callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (evergreen+).
 */
static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
{
        unsigned i = 0;

        if (crtc >= adev->mode_info.num_crtc)
                return;

        if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
                return;

        /* depending on when we hit vblank, we may be close to active; if so,
         * wait for another frame.
         */
        while (dce_v8_0_is_in_vblank(adev, crtc)) {
                if (i++ % 100 == 0) {
                        if (!dce_v8_0_is_counter_moving(adev, crtc))
                                break;
                }
        }

        while (!dce_v8_0_is_in_vblank(adev, crtc)) {
                if (i++ % 100 == 0) {
                        if (!dce_v8_0_is_counter_moving(adev, crtc))
                                break;
                }
        }
}

static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else
                return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
        unsigned i;

        /* Enable pflip interrupts */
        for (i = 0; i < adev->mode_info.num_crtc; i++)
                amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}

static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
        unsigned i;

        /* Disable pflip interrupts */
        for (i = 0; i < adev->mode_info.num_crtc; i++)
                amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}

/**
 * dce_v8_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Triggers the actual pageflip by updating the primary
 * surface base address.
 */
static void dce_v8_0_page_flip(struct amdgpu_device *adev,
                              int crtc_id, u64 crtc_base)
{
        struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];

        /* update the primary scanout addresses */
        WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
               upper_32_bits(crtc_base));
        /* writing to the low address triggers the update */
        WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
               lower_32_bits(crtc_base));
        /* post the write */
        RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}

static int dce_v8_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                        u32 *vbl, u32 *position)
{
        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;

        *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
        *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

        return 0;
}

/**
 * dce_v8_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
                               enum amdgpu_hpd_id hpd)
{
        bool connected = false;

        switch (hpd) {
        case AMDGPU_HPD_1:
                if (RREG32(mmDC_HPD1_INT_STATUS) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
                        connected = true;
                break;
        case AMDGPU_HPD_2:
                if (RREG32(mmDC_HPD2_INT_STATUS) & DC_HPD2_INT_STATUS__DC_HPD2_SENSE_MASK)
                        connected = true;
                break;
        case AMDGPU_HPD_3:
                if (RREG32(mmDC_HPD3_INT_STATUS) & DC_HPD3_INT_STATUS__DC_HPD3_SENSE_MASK)
                        connected = true;
                break;
        case AMDGPU_HPD_4:
                if (RREG32(mmDC_HPD4_INT_STATUS) & DC_HPD4_INT_STATUS__DC_HPD4_SENSE_MASK)
                        connected = true;
                break;
        case AMDGPU_HPD_5:
                if (RREG32(mmDC_HPD5_INT_STATUS) & DC_HPD5_INT_STATUS__DC_HPD5_SENSE_MASK)
                        connected = true;
                break;
        case AMDGPU_HPD_6:
                if (RREG32(mmDC_HPD6_INT_STATUS) & DC_HPD6_INT_STATUS__DC_HPD6_SENSE_MASK)
                        connected = true;
                break;
        default:
                break;
        }

        return connected;
}

/**
 * dce_v8_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
                                      enum amdgpu_hpd_id hpd)
{
        u32 tmp;
        bool connected = dce_v8_0_hpd_sense(adev, hpd);

        switch (hpd) {
        case AMDGPU_HPD_1:
                tmp = RREG32(mmDC_HPD1_INT_CONTROL);
                if (connected)
                        tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
                else
                        tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
                WREG32(mmDC_HPD1_INT_CONTROL, tmp);
                break;
        case AMDGPU_HPD_2:
                tmp = RREG32(mmDC_HPD2_INT_CONTROL);
                if (connected)
                        tmp &= ~DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
                else
                        tmp |= DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
                WREG32(mmDC_HPD2_INT_CONTROL, tmp);
                break;
        case AMDGPU_HPD_3:
                tmp = RREG32(mmDC_HPD3_INT_CONTROL);
                if (connected)
                        tmp &= ~DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
                else
                        tmp |= DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
                WREG32(mmDC_HPD3_INT_CONTROL, tmp);
                break;
        case AMDGPU_HPD_4:
                tmp = RREG32(mmDC_HPD4_INT_CONTROL);
                if (connected)
                        tmp &= ~DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
                else
                        tmp |= DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
                WREG32(mmDC_HPD4_INT_CONTROL, tmp);
                break;
        case AMDGPU_HPD_5:
                tmp = RREG32(mmDC_HPD5_INT_CONTROL);
                if (connected)
                        tmp &= ~DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
                else
                        tmp |= DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
                WREG32(mmDC_HPD5_INT_CONTROL, tmp);
                break;
        case AMDGPU_HPD_6:
                tmp = RREG32(mmDC_HPD6_INT_CONTROL);
                if (connected)
                        tmp &= ~DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
                else
                        tmp |= DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
                WREG32(mmDC_HPD6_INT_CONTROL, tmp);
                break;
        default:
                break;
        }
}

/**
 * dce_v8_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
{
        struct drm_device *dev = adev->ddev;
        struct drm_connector *connector;
        u32 tmp = (0x9c4 << DC_HPD1_CONTROL__DC_HPD1_CONNECTION_TIMER__SHIFT) |
                (0xfa << DC_HPD1_CONTROL__DC_HPD1_RX_INT_TIMER__SHIFT) |
                DC_HPD1_CONTROL__DC_HPD1_EN_MASK;

        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

                switch (amdgpu_connector->hpd.hpd) {
                case AMDGPU_HPD_1:
                        WREG32(mmDC_HPD1_CONTROL, tmp);
                        break;
                case AMDGPU_HPD_2:
                        WREG32(mmDC_HPD2_CONTROL, tmp);
                        break;
                case AMDGPU_HPD_3:
                        WREG32(mmDC_HPD3_CONTROL, tmp);
                        break;
                case AMDGPU_HPD_4:
                        WREG32(mmDC_HPD4_CONTROL, tmp);
                        break;
                case AMDGPU_HPD_5:
                        WREG32(mmDC_HPD5_CONTROL, tmp);
                        break;
                case AMDGPU_HPD_6:
                        WREG32(mmDC_HPD6_CONTROL, tmp);
                        break;
                default:
                        break;
                }

                if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
                    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
                        /* Don't try to enable hpd on eDP or LVDS: it can
                         * break the aux dp channel on iMacs, and leaving it
                         * off helps (but does not completely fix)
                         * https://bugzilla.redhat.com/show_bug.cgi?id=726143
                         * and also avoids interrupt storms during dpms.
                         */
                        u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;

                        switch (amdgpu_connector->hpd.hpd) {
                        case AMDGPU_HPD_1:
                                dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
                                break;
                        case AMDGPU_HPD_2:
                                dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
                                break;
                        case AMDGPU_HPD_3:
                                dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
                                break;
                        case AMDGPU_HPD_4:
                                dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
                                break;
                        case AMDGPU_HPD_5:
                                dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
                                break;
                        case AMDGPU_HPD_6:
                                dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
                                break;
                        default:
                                continue;
                        }

                        dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
                        dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
                        WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
                        continue;
                }

                dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
                amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
        }
}

/**
 * dce_v8_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
{
        struct drm_device *dev = adev->ddev;
        struct drm_connector *connector;

        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

                switch (amdgpu_connector->hpd.hpd) {
                case AMDGPU_HPD_1:
                        WREG32(mmDC_HPD1_CONTROL, 0);
                        break;
                case AMDGPU_HPD_2:
                        WREG32(mmDC_HPD2_CONTROL, 0);
                        break;
                case AMDGPU_HPD_3:
                        WREG32(mmDC_HPD3_CONTROL, 0);
                        break;
                case AMDGPU_HPD_4:
                        WREG32(mmDC_HPD4_CONTROL, 0);
                        break;
                case AMDGPU_HPD_5:
                        WREG32(mmDC_HPD5_CONTROL, 0);
                        break;
                case AMDGPU_HPD_6:
                        WREG32(mmDC_HPD6_CONTROL, 0);
                        break;
                default:
                        break;
                }
                amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
        }
}

static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
        return mmDC_GPIO_HPD_A;
}

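/*
 * A CRTC is treated as hung when its HV counter stops advancing: sample the
 * counter once for every enabled CRTC, then re-check up to 10 times at 100us
 * intervals, clearing a CRTC's bit as soon as its counter changes.
 */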
static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev)
{
        u32 crtc_hung = 0;
        u32 crtc_status[6];
        u32 i, j, tmp;

        for (i = 0; i < adev->mode_info.num_crtc; i++) {
                if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
                        crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
                        crtc_hung |= (1 << i);
                }
        }

        for (j = 0; j < 10; j++) {
                for (i = 0; i < adev->mode_info.num_crtc; i++) {
                        if (crtc_hung & (1 << i)) {
                                tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
                                if (tmp != crtc_status[i])
                                        crtc_hung &= ~(1 << i);
                        }
                }
                if (crtc_hung == 0)
                        return false;
                udelay(100);
        }

        return true;
}

static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev,
                                    struct amdgpu_mode_mc_save *save)
{
        u32 crtc_enabled, tmp;
        int i;

        save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
        save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);

        /* disable VGA render */
        tmp = RREG32(mmVGA_RENDER_CONTROL);
        tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
        WREG32(mmVGA_RENDER_CONTROL, tmp);

        /* blank the display controllers */
        for (i = 0; i < adev->mode_info.num_crtc; i++) {
                crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
                                             CRTC_CONTROL, CRTC_MASTER_EN);
                if (crtc_enabled) {
#if 0
                        u32 frame_count;
                        int j;

                        save->crtc_enabled[i] = true;
                        tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
                        if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
                                amdgpu_display_vblank_wait(adev, i);
                                WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
                                tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
                                WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
                                WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
                        }
                        /* wait for the next frame */
                        frame_count = amdgpu_display_vblank_get_counter(adev, i);
                        for (j = 0; j < adev->usec_timeout; j++) {
                                if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
                                        break;
                                udelay(1);
                        }
                        tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
                        if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) {
                                tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
                                WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
                        }
                        tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
                        if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) {
                                tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1);
                                WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
                        }
#else
                        /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
                        WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
                        tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
                        tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
                        WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
                        WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
                        save->crtc_enabled[i] = false;
                        /* ***** */
#endif
                } else {
                        save->crtc_enabled[i] = false;
                }
        }
}

static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev,
                                      struct amdgpu_mode_mc_save *save)
{
        u32 tmp, frame_count;
        int i, j;

        /* update crtc base addresses */
        for (i = 0; i < adev->mode_info.num_crtc; i++) {
                WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
                       upper_32_bits(adev->mc.vram_start));
                WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
                       upper_32_bits(adev->mc.vram_start));
                WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
                       (u32)adev->mc.vram_start);
                WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
                       (u32)adev->mc.vram_start);

                if (save->crtc_enabled[i]) {
                        tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
                        if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) {
                                tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3);
                                WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
                        }
                        tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
                        if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) {
                                tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
                                WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
                        }
                        tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
                        if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) {
                                tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0);
                                WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
                        }
                        for (j = 0; j < adev->usec_timeout; j++) {
                                tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
                                if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0)
                                        break;
                                udelay(1);
                        }
                        tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
                        tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
                        WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
                        WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
                        WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
                        /* wait for the next frame */
                        frame_count = amdgpu_display_vblank_get_counter(adev, i);
                        for (j = 0; j < adev->usec_timeout; j++) {
                                if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
                                        break;
                                udelay(1);
                        }
                }
        }

        WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
        WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));

        /* Unlock vga access */
        WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
        mdelay(1);
        WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
}

static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
                                          bool render)
{
        u32 tmp;

        /* Lockout access through VGA aperture */
        tmp = RREG32(mmVGA_HDP_CONTROL);
        if (render)
                tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
        else
                tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
        WREG32(mmVGA_HDP_CONTROL, tmp);

        /* disable VGA render */
        tmp = RREG32(mmVGA_RENDER_CONTROL);
        if (render)
                tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
        else
                tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
        WREG32(mmVGA_RENDER_CONTROL, tmp);
}

static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
{
        struct drm_device *dev = encoder->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
        struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
        int bpc = 0;
        u32 tmp = 0;
        enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

        if (connector) {
                struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
                bpc = amdgpu_connector_get_monitor_bpc(connector);
                dither = amdgpu_connector->dither;
        }

        /* LVDS/eDP FMT is set up by atom */
        if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
                return;

        /* not needed for analog */
        if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
            (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
                return;

        if (bpc == 0)
                return;

        switch (bpc) {
        case 6:
                if (dither == AMDGPU_FMT_DITHER_ENABLE)
                        /* XXX sort out optimal dither settings */
                        tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
                                FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
                                FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
                                (0 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
                else
                        tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
                                (0 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
                break;
        case 8:
                if (dither == AMDGPU_FMT_DITHER_ENABLE)
                        /* XXX sort out optimal dither settings */
                        tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
                                FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
                                FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
                                FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
                                (1 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
                else
                        tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
                                (1 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
                break;
        case 10:
                if (dither == AMDGPU_FMT_DITHER_ENABLE)
                        /* XXX sort out optimal dither settings */
                        tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
                                FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
                                FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
                                FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
                                (2 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
                else
                        tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
                                (2 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
                break;
        default:
                /* not needed */
                break;
        }

        WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}


/* display watermark setup */
/**
 * dce_v8_0_line_buffer_adjust - Set up the line buffer
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @mode: the current display mode on the selected display
 * controller
 *
 * Set up the line buffer allocation for
 * the selected display controller (CIK).
 * Returns the line buffer size in pixels.
 */
static u32 dce_v8_0_line_buffer_adjust(struct amdgpu_device *adev,
                                       struct amdgpu_crtc *amdgpu_crtc,
                                       struct drm_display_mode *mode)
{
        u32 tmp, buffer_alloc, i;
        u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
        /*
         * Line Buffer Setup
         * There are 6 line buffers, one for each display controller.
         * There are 3 partitions per LB. Select the number of partitions
         * to enable based on the display width.  For display widths larger
         * than 4096, you need to use 2 display controllers and combine
         * them using the stereo blender.
         */
        if (amdgpu_crtc->base.enabled && mode) {
                if (mode->crtc_hdisplay < 1920) {
                        tmp = 1;
                        buffer_alloc = 2;
                } else if (mode->crtc_hdisplay < 2560) {
                        tmp = 2;
                        buffer_alloc = 2;
                } else if (mode->crtc_hdisplay < 4096) {
                        tmp = 0;
                        buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
                } else {
                        DRM_DEBUG_KMS("Mode too big for LB!\n");
                        tmp = 0;
                        buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
                }
        } else {
                tmp = 1;
                buffer_alloc = 0;
        }

        WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset,
              (tmp << LB_MEMORY_CTRL__LB_MEMORY_CONFIG__SHIFT) |
              (0x6B0 << LB_MEMORY_CTRL__LB_MEMORY_SIZE__SHIFT));

        WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
               (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
        for (i = 0; i < adev->usec_timeout; i++) {
                if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
                    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
                        break;
                udelay(1);
        }

        if (amdgpu_crtc->base.enabled && mode) {
                switch (tmp) {
                case 0:
                default:
                        return 4096 * 2;
                case 1:
                        return 1920 * 2;
                case 2:
                        return 2560 * 2;
                }
        }

        /* controller not enabled, so no lb used */
        return 0;
}
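/*
 * Worked example for the bucketing above: a 2560-wide mode falls in the
 * "< 4096" bucket, so LB_MEMORY_CONFIG is programmed with 0 (the widest
 * setting) and the function reports 4096 * 2 pixels of line buffer to the
 * caller.
 */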

/**
 * cik_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
{
        u32 tmp = RREG32(mmMC_SHARED_CHMAP);

        switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
        case 0:
        default:
                return 1;
        case 1:
                return 2;
        case 2:
                return 4;
        case 3:
                return 8;
        case 4:
                return 3;
        case 5:
                return 6;
        case 6:
                return 10;
        case 7:
                return 12;
        case 8:
                return 16;
        }
}

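/*
 * The watermark math below uses the drm fixed20_12 type (20.12 fixed point,
 * see include/drm/drm_fixed.h): dfixed_const() converts an integer,
 * dfixed_mul()/dfixed_div() multiply and divide, and dfixed_trunc()
 * truncates back to an integer.
 */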
struct dce8_wm_params {
        u32 dram_channels; /* number of dram channels */
        u32 yclk;          /* bandwidth per dram data pin in kHz */
        u32 sclk;          /* engine clock in kHz */
        u32 disp_clk;      /* display clock in kHz */
        u32 src_width;     /* viewport width */
        u32 active_time;   /* active display time in ns */
        u32 blank_time;    /* blank time in ns */
        bool interlaced;   /* mode is interlaced */
        fixed20_12 vsc;    /* vertical scale ratio */
        u32 num_heads;     /* number of active crtcs */
        u32 bytes_per_pixel; /* bytes per pixel display + overlay */
        u32 lb_size;       /* line buffer allocated to pipe */
        u32 vtaps;         /* vertical scaler taps */
};

/**
 * dce_v8_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v8_0_dram_bandwidth(struct dce8_wm_params *wm)
{
        /* Calculate raw DRAM Bandwidth */
        fixed20_12 dram_efficiency; /* 0.7 */
        fixed20_12 yclk, dram_channels, bandwidth;
        fixed20_12 a;

        a.full = dfixed_const(1000);
        yclk.full = dfixed_const(wm->yclk);
        yclk.full = dfixed_div(yclk, a);
        dram_channels.full = dfixed_const(wm->dram_channels * 4);
        a.full = dfixed_const(10);
        dram_efficiency.full = dfixed_const(7);
        dram_efficiency.full = dfixed_div(dram_efficiency, a);
        bandwidth.full = dfixed_mul(dram_channels, yclk);
        bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

        return dfixed_trunc(bandwidth);
}
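/*
 * Closed form of the above: bandwidth = (yclk / 1000) * (dram_channels * 4
 * bytes) * 0.7.  E.g. a 1000000 kHz effective yclk on 2 channels gives
 * 1000 * 8 * 0.7 = 5600 MBytes/s of raw dram bandwidth.
 */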

/**
 * dce_v8_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce_v8_0_dram_bandwidth_for_display(struct dce8_wm_params *wm)
{
        /* Calculate DRAM Bandwidth and the part allocated to display. */
        fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
        fixed20_12 yclk, dram_channels, bandwidth;
        fixed20_12 a;

        a.full = dfixed_const(1000);
        yclk.full = dfixed_const(wm->yclk);
        yclk.full = dfixed_div(yclk, a);
        dram_channels.full = dfixed_const(wm->dram_channels * 4);
        a.full = dfixed_const(10);
        disp_dram_allocation.full = dfixed_const(3); /* XXX worst-case value 0.3 */
        disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
        bandwidth.full = dfixed_mul(dram_channels, yclk);
        bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

        return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce_v8_0_data_return_bandwidth(struct dce8_wm_params *wm)
{
        /* Calculate the display Data return Bandwidth */
        fixed20_12 return_efficiency; /* 0.8 */
        fixed20_12 sclk, bandwidth;
        fixed20_12 a;

        a.full = dfixed_const(1000);
        sclk.full = dfixed_const(wm->sclk);
        sclk.full = dfixed_div(sclk, a);
        a.full = dfixed_const(10);
        return_efficiency.full = dfixed_const(8);
        return_efficiency.full = dfixed_div(return_efficiency, a);
        a.full = dfixed_const(32);
        bandwidth.full = dfixed_mul(a, sclk);
        bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

        return dfixed_trunc(bandwidth);
}
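/* Closed form of the above: bandwidth = (sclk / 1000) * 32 bytes * 0.8. */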

/**
 * dce_v8_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce_v8_0_dmif_request_bandwidth(struct dce8_wm_params *wm)
{
        /* Calculate the DMIF Request Bandwidth */
        fixed20_12 disp_clk_request_efficiency; /* 0.8 */
        fixed20_12 disp_clk, bandwidth;
        fixed20_12 a, b;

        a.full = dfixed_const(1000);
        disp_clk.full = dfixed_const(wm->disp_clk);
        disp_clk.full = dfixed_div(disp_clk, a);
        a.full = dfixed_const(32);
        b.full = dfixed_mul(a, disp_clk);

        a.full = dfixed_const(10);
        disp_clk_request_efficiency.full = dfixed_const(8);
        disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

        bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

        return dfixed_trunc(bandwidth);
}
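/* Same shape as the data return path: bandwidth = (disp_clk / 1000) * 32 bytes * 0.8. */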

/**
 * dce_v8_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce_v8_0_available_bandwidth(struct dce8_wm_params *wm)
{
        /* Calculate the available bandwidth: the display can use this much
         * bandwidth temporarily, but not on average.
         */
        u32 dram_bandwidth = dce_v8_0_dram_bandwidth(wm);
        u32 data_return_bandwidth = dce_v8_0_data_return_bandwidth(wm);
        u32 dmif_req_bandwidth = dce_v8_0_dmif_request_bandwidth(wm);

        return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

/**
 * dce_v8_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce_v8_0_average_bandwidth(struct dce8_wm_params *wm)
{
        /* Calculate the display mode Average Bandwidth
         * DisplayMode should contain the source and destination dimensions,
         * timing, etc.
         */
        fixed20_12 bpp;
        fixed20_12 line_time;
        fixed20_12 src_width;
        fixed20_12 bandwidth;
        fixed20_12 a;

        a.full = dfixed_const(1000);
        line_time.full = dfixed_const(wm->active_time + wm->blank_time);
        line_time.full = dfixed_div(line_time, a);
        bpp.full = dfixed_const(wm->bytes_per_pixel);
        src_width.full = dfixed_const(wm->src_width);
        bandwidth.full = dfixed_mul(src_width, bpp);
        bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
        bandwidth.full = dfixed_div(bandwidth, line_time);

        return dfixed_trunc(bandwidth);
}
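/*
 * Closed form of the above: bandwidth = src_width * bytes_per_pixel * vsc /
 * line_time, where line_time = (active_time + blank_time) / 1000, i.e. the
 * average rate at which the mode consumes data per scanned line.
 */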

/**
 * dce_v8_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
{
        /* First calculate the latency in ns */
        u32 mc_latency = 2000; /* 2000 ns. */
        u32 available_bandwidth = dce_v8_0_available_bandwidth(wm);
        u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
        u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
        u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
        u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
                (wm->num_heads * cursor_line_pair_return_time);
        u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
        u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
        u32 tmp, dmif_size = 12288;
        fixed20_12 a, b, c;

        if (wm->num_heads == 0)
                return 0;

        a.full = dfixed_const(2);
        b.full = dfixed_const(1);
        if ((wm->vsc.full > a.full) ||
            ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
            (wm->vtaps >= 5) ||
            ((wm->vsc.full >= a.full) && wm->interlaced))
                max_src_lines_per_dst_line = 4;
        else
                max_src_lines_per_dst_line = 2;

        a.full = dfixed_const(available_bandwidth);
        b.full = dfixed_const(wm->num_heads);
        a.full = dfixed_div(a, b);
        tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
        tmp = min(dfixed_trunc(a), tmp);

        lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);

        a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
        b.full = dfixed_const(1000);
        c.full = dfixed_const(lb_fill_bw);
        b.full = dfixed_div(c, b);
        a.full = dfixed_div(a, b);
        line_fill_time = dfixed_trunc(a);

        if (line_fill_time < wm->active_time)
                return latency;
        else
                return latency + (line_fill_time - wm->active_time);

}
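/*
 * Summary of the above: the latency is the fixed mc_latency (2000 ns) plus
 * the time the other heads spend returning chunk and cursor data plus the
 * dc pipe latency; if the line buffer cannot be refilled within one active
 * line (line_fill_time >= active_time), the shortfall is added on top.
 */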

/**
 * dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
{
        if (dce_v8_0_average_bandwidth(wm) <=
            (dce_v8_0_dram_bandwidth_for_display(wm) / wm->num_heads))
                return true;
        else
                return false;
}

/**
 * dce_v8_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
{
        if (dce_v8_0_average_bandwidth(wm) <=
            (dce_v8_0_available_bandwidth(wm) / wm->num_heads))
                return true;
        else
                return false;
}

/**
 * dce_v8_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_check_latency_hiding(struct dce8_wm_params *wm)
{
        u32 lb_partitions = wm->lb_size / wm->src_width;
        u32 line_time = wm->active_time + wm->blank_time;
        u32 latency_tolerant_lines;
        u32 latency_hiding;
        fixed20_12 a;

        a.full = dfixed_const(1);
        if (wm->vsc.full > a.full)
                latency_tolerant_lines = 1;
        else {
                if (lb_partitions <= (wm->vtaps + 1))
                        latency_tolerant_lines = 1;
                else
                        latency_tolerant_lines = 2;
        }

        latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

        if (dce_v8_0_latency_watermark(wm) <= latency_hiding)
                return true;
        else
                return false;
}
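/*
 * Latency hiding is the time the line buffer can bridge: one or two
 * latency-tolerant lines (depending on scaling and vtaps) of line_time plus
 * the blank time.  The mode fits if the latency watermark computed above
 * does not exceed that budget.
 */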

/**
 * dce_v8_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (CIK).
 */
static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
                                        struct amdgpu_crtc *amdgpu_crtc,
                                        u32 lb_size, u32 num_heads)
{
        struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
        struct dce8_wm_params wm_low, wm_high;
        u32 active_time;
        u32 line_time = 0;
        u32 latency_watermark_a = 0, latency_watermark_b = 0;
        u32 tmp, wm_mask, lb_vblank_lead_lines = 0;

        if (amdgpu_crtc->base.enabled && num_heads && mode) {
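                /* mode->clock is in kHz, so active_time and line_time come
                 * out in ns; line_time is capped at 65535, presumably to fit
                 * the watermark register field it is written into below.
                 */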
1216                 active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
1217                 line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
1218
1219                 /* watermark for high clocks */
1220                 if (adev->pm.dpm_enabled) {
1221                         wm_high.yclk =
1222                                 amdgpu_dpm_get_mclk(adev, false) * 10;
1223                         wm_high.sclk =
1224                                 amdgpu_dpm_get_sclk(adev, false) * 10;
1225                 } else {
1226                         wm_high.yclk = adev->pm.current_mclk * 10;
1227                         wm_high.sclk = adev->pm.current_sclk * 10;
1228                 }
1229
1230                 wm_high.disp_clk = mode->clock;
1231                 wm_high.src_width = mode->crtc_hdisplay;
1232                 wm_high.active_time = active_time;
1233                 wm_high.blank_time = line_time - wm_high.active_time;
1234                 wm_high.interlaced = false;
1235                 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1236                         wm_high.interlaced = true;
1237                 wm_high.vsc = amdgpu_crtc->vsc;
1238                 wm_high.vtaps = 1;
1239                 if (amdgpu_crtc->rmx_type != RMX_OFF)
1240                         wm_high.vtaps = 2;
1241                 wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
1242                 wm_high.lb_size = lb_size;
1243                 wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
1244                 wm_high.num_heads = num_heads;
1245
1246                 /* set for high clocks */
1247                 latency_watermark_a = min(dce_v8_0_latency_watermark(&wm_high), (u32)65535);
1248
1249                 /* possibly force display priority to high */
1250                 /* should really do this at mode validation time... */
1251                 if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
1252                     !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
1253                     !dce_v8_0_check_latency_hiding(&wm_high) ||
1254                     (adev->mode_info.disp_priority == 2)) {
1255                         DRM_DEBUG_KMS("force priority to high\n");
1256                 }
1257
1258                 /* watermark for low clocks */
1259                 if (adev->pm.dpm_enabled) {
1260                         wm_low.yclk =
1261                                 amdgpu_dpm_get_mclk(adev, true) * 10;
1262                         wm_low.sclk =
1263                                 amdgpu_dpm_get_sclk(adev, true) * 10;
1264                 } else {
1265                         wm_low.yclk = adev->pm.current_mclk * 10;
1266                         wm_low.sclk = adev->pm.current_sclk * 10;
1267                 }
1268
1269                 wm_low.disp_clk = mode->clock;
1270                 wm_low.src_width = mode->crtc_hdisplay;
1271                 wm_low.active_time = active_time;
1272                 wm_low.blank_time = line_time - wm_low.active_time;
1273                 wm_low.interlaced = false;
1274                 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1275                         wm_low.interlaced = true;
1276                 wm_low.vsc = amdgpu_crtc->vsc;
1277                 wm_low.vtaps = 1;
1278                 if (amdgpu_crtc->rmx_type != RMX_OFF)
1279                         wm_low.vtaps = 2;
1280                 wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
1281                 wm_low.lb_size = lb_size;
1282                 wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
1283                 wm_low.num_heads = num_heads;
1284
1285                 /* set for low clocks */
1286                 latency_watermark_b = min(dce_v8_0_latency_watermark(&wm_low), (u32)65535);
1287
1288                 /* possibly force display priority to high */
1289                 /* should really do this at mode validation time... */
1290                 if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
1291                     !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
1292                     !dce_v8_0_check_latency_hiding(&wm_low) ||
1293                     (adev->mode_info.disp_priority == 2)) {
1294                         DRM_DEBUG_KMS("force priority to high\n");
1295                 }
1296                 lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
1297         }
1298
1299         /* select wm A */
1300         wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
1301         tmp = wm_mask;
1302         tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1303         tmp |= (1 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1304         WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1305         WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1306                ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
1307                 (line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1308         /* select wm B */
1309         tmp = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
1310         tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1311         tmp |= (2 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1312         WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1313         WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1314                ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
1315                 (line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1316         /* restore original selection */
1317         WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);
1318
1319         /* save values for DPM */
1320         amdgpu_crtc->line_time = line_time;
1321         amdgpu_crtc->wm_high = latency_watermark_a;
1322         amdgpu_crtc->wm_low = latency_watermark_b;
1323         /* Save number of lines the linebuffer leads before the scanout */
1324         amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
1325 }
1326
1327 /**
1328  * dce_v8_0_bandwidth_update - program display watermarks
1329  *
1330  * @adev: amdgpu_device pointer
1331  *
1332  * Calculate and program the display watermarks and line
1333  * buffer allocation (CIK).
1334  */
1335 static void dce_v8_0_bandwidth_update(struct amdgpu_device *adev)
1336 {
1337         struct drm_display_mode *mode = NULL;
1338         u32 num_heads = 0, lb_size;
1339         int i;
1340
1341         amdgpu_update_display_priority(adev);
1342
1343         for (i = 0; i < adev->mode_info.num_crtc; i++) {
1344                 if (adev->mode_info.crtcs[i]->base.enabled)
1345                         num_heads++;
1346         }
1347         for (i = 0; i < adev->mode_info.num_crtc; i++) {
1348                 mode = &adev->mode_info.crtcs[i]->base.mode;
1349                 lb_size = dce_v8_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
1350                 dce_v8_0_program_watermarks(adev, adev->mode_info.crtcs[i],
1351                                             lb_size, num_heads);
1352         }
1353 }
1354
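/**
 * dce_v8_0_audio_get_connected_pins - refresh audio pin connectivity
 *
 * @adev: amdgpu_device pointer
 *
 * Reads the PORT_CONNECTIVITY field of each audio endpoint's configuration
 * default; per the HD Audio spec a value of 1 means "no physical
 * connection", so such pins are marked disconnected.
 */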
1355 static void dce_v8_0_audio_get_connected_pins(struct amdgpu_device *adev)
1356 {
1357         int i;
1358         u32 offset, tmp;
1359
1360         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1361                 offset = adev->mode_info.audio.pin[i].offset;
1362                 tmp = RREG32_AUDIO_ENDPT(offset,
1363                                          ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1364                 if (((tmp &
1365                 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
1366                 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
1367                         adev->mode_info.audio.pin[i].connected = false;
1368                 else
1369                         adev->mode_info.audio.pin[i].connected = true;
1370         }
1371 }
1372
1373 static struct amdgpu_audio_pin *dce_v8_0_audio_get_pin(struct amdgpu_device *adev)
1374 {
1375         int i;
1376
1377         dce_v8_0_audio_get_connected_pins(adev);
1378
1379         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1380                 if (adev->mode_info.audio.pin[i].connected)
1381                         return &adev->mode_info.audio.pin[i];
1382         }
1383         DRM_ERROR("No connected audio pins found!\n");
1384         return NULL;
1385 }
1386
1387 static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
1388 {
1389         struct amdgpu_device *adev = encoder->dev->dev_private;
1390         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1391         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1392         u32 offset;
1393
1394         if (!dig || !dig->afmt || !dig->afmt->pin)
1395                 return;
1396
1397         offset = dig->afmt->offset;
1398
1399         WREG32(mmAFMT_AUDIO_SRC_CONTROL + offset,
1400                (dig->afmt->pin->id << AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT));
1401 }
1402
1403 static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
1404                                                 struct drm_display_mode *mode)
1405 {
1406         struct amdgpu_device *adev = encoder->dev->dev_private;
1407         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1408         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1409         struct drm_connector *connector;
1410         struct amdgpu_connector *amdgpu_connector = NULL;
1411         u32 tmp = 0, offset;
1412
1413         if (!dig || !dig->afmt || !dig->afmt->pin)
1414                 return;
1415
1416         offset = dig->afmt->pin->offset;
1417
1418         list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1419                 if (connector->encoder == encoder) {
1420                         amdgpu_connector = to_amdgpu_connector(connector);
1421                         break;
1422                 }
1423         }
1424
1425         if (!amdgpu_connector) {
1426                 DRM_ERROR("Couldn't find encoder's connector\n");
1427                 return;
1428         }
1429
1430         if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
1431                 if (connector->latency_present[1])
1432                         tmp =
1433                         (connector->video_latency[1] <<
1434                          AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1435                         (connector->audio_latency[1] <<
1436                          AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1437                 else
1438                         tmp =
1439                         (0 <<
1440                          AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1441                         (0 <<
1442                          AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1443         } else {
1444                 if (connector->latency_present[0])
1445                         tmp =
1446                         (connector->video_latency[0] <<
1447                          AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1448                         (connector->audio_latency[0] <<
1449                          AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1450                 else
1451                         tmp =
1452                         (0 <<
1453                          AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1454                         (0 <<
1455                          AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1456
1457         }
1458         WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
1459 }
1460
1461 static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
1462 {
1463         struct amdgpu_device *adev = encoder->dev->dev_private;
1464         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1465         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1466         struct drm_connector *connector;
1467         struct amdgpu_connector *amdgpu_connector = NULL;
1468         u32 offset, tmp;
1469         u8 *sadb = NULL;
1470         int sad_count;
1471
1472         if (!dig || !dig->afmt || !dig->afmt->pin)
1473                 return;
1474
1475         offset = dig->afmt->pin->offset;
1476
1477         list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1478                 if (connector->encoder == encoder) {
1479                         amdgpu_connector = to_amdgpu_connector(connector);
1480                         break;
1481                 }
1482         }
1483
1484         if (!amdgpu_connector) {
1485                 DRM_ERROR("Couldn't find encoder's connector\n");
1486                 return;
1487         }
1488
1489         sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
1490         if (sad_count < 0) {
1491                 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
1492                 sad_count = 0;
1493         }
1494
1495         /* program the speaker allocation */
1496         tmp = RREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
1497         tmp &= ~(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK |
1498                 AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK);
1499         /* set HDMI mode */
1500         tmp |= AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK;
1501         if (sad_count)
1502                 tmp |= (sadb[0] << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT);
1503         else
1504                 tmp |= (5 << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT); /* stereo */
1505         WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
1506
1507         kfree(sadb);
1508 }
1509
1510 static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
1511 {
1512         struct amdgpu_device *adev = encoder->dev->dev_private;
1513         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1514         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1515         u32 offset;
1516         struct drm_connector *connector;
1517         struct amdgpu_connector *amdgpu_connector = NULL;
1518         struct cea_sad *sads;
1519         int i, sad_count;
1520
1521         static const u16 eld_reg_to_type[][2] = {
1522                 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
1523                 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
1524                 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
1525                 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
1526                 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
1527                 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
1528                 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
1529                 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
1530                 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
1531                 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
1532                 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
1533                 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
1534         };
1535
1536         if (!dig || !dig->afmt || !dig->afmt->pin)
1537                 return;
1538
1539         offset = dig->afmt->pin->offset;
1540
1541         list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1542                 if (connector->encoder == encoder) {
1543                         amdgpu_connector = to_amdgpu_connector(connector);
1544                         break;
1545                 }
1546         }
1547
1548         if (!amdgpu_connector) {
1549                 DRM_ERROR("Couldn't find encoder's connector\n");
1550                 return;
1551         }
1552
1553         sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
1554         if (sad_count <= 0) {
1555                 DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
1556                 return;
1557         }
1558         BUG_ON(!sads);
1559
1560         for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
1561                 u32 value = 0;
1562                 u8 stereo_freqs = 0;
1563                 int max_channels = -1;
1564                 int j;
1565
1566                 for (j = 0; j < sad_count; j++) {
1567                         struct cea_sad *sad = &sads[j];
1568
1569                         if (sad->format == eld_reg_to_type[i][1]) {
1570                                 if (sad->channels > max_channels) {
1571                                         value = (sad->channels <<
1572                                                  AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
1573                                                 (sad->byte2 <<
1574                                                  AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
1575                                                 (sad->freq <<
1576                                                  AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
1577                                         max_channels = sad->channels;
1578                                 }
1579
1580                                 if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
1581                                         stereo_freqs |= sad->freq;
1582                                 else
1583                                         break;
1584                         }
1585                 }
1586
1587                 value |= (stereo_freqs <<
1588                         AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);
1589
1590                 WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
1591         }
1592
1593         kfree(sads);
1594 }
1595
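/* Toggle the AUDIO_ENABLED bit in the pin's hot plug control register;
 * the rest of the endpoint setup is done per encoder by the afmt code. */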
1596 static void dce_v8_0_audio_enable(struct amdgpu_device *adev,
1597                                   struct amdgpu_audio_pin *pin,
1598                                   bool enable)
1599 {
1600         if (!pin)
1601                 return;
1602
1603         WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
1604                 enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
1605 }
1606
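/* Register blocks of the seven AZALIA audio endpoints, expressed as offsets
 * from the first endpoint at 0x1780 and accessed through the
 * RREG32/WREG32_AUDIO_ENDPT indirect accessors. */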
1607 static const u32 pin_offsets[7] =
1608 {
1609         (0x1780 - 0x1780),
1610         (0x1786 - 0x1780),
1611         (0x178c - 0x1780),
1612         (0x1792 - 0x1780),
1613         (0x1798 - 0x1780),
1614         (0x179d - 0x1780),
1615         (0x17a4 - 0x1780),
1616 };
1617
1618 static int dce_v8_0_audio_init(struct amdgpu_device *adev)
1619 {
1620         int i;
1621
1622         if (!amdgpu_audio)
1623                 return 0;
1624
1625         adev->mode_info.audio.enabled = true;
1626
1627         if (adev->asic_type == CHIP_KAVERI) /* KV: 4 streams, 7 endpoints */
1628                 adev->mode_info.audio.num_pins = 7;
1629         else if ((adev->asic_type == CHIP_KABINI) ||
1630                  (adev->asic_type == CHIP_MULLINS)) /* KB/ML: 2 streams, 3 endpoints */
1631                 adev->mode_info.audio.num_pins = 3;
1632         else if ((adev->asic_type == CHIP_BONAIRE) ||
1633                  (adev->asic_type == CHIP_HAWAII))/* BN/HW: 6 streams, 7 endpoints */
1634                 adev->mode_info.audio.num_pins = 7;
1635         else
1636                 adev->mode_info.audio.num_pins = 3;
1637
1638         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1639                 adev->mode_info.audio.pin[i].channels = -1;
1640                 adev->mode_info.audio.pin[i].rate = -1;
1641                 adev->mode_info.audio.pin[i].bits_per_sample = -1;
1642                 adev->mode_info.audio.pin[i].status_bits = 0;
1643                 adev->mode_info.audio.pin[i].category_code = 0;
1644                 adev->mode_info.audio.pin[i].connected = false;
1645                 adev->mode_info.audio.pin[i].offset = pin_offsets[i];
1646                 adev->mode_info.audio.pin[i].id = i;
1647                 /* disable audio.  it will be set up later */
1648                 /* XXX remove once we switch to ip funcs */
1649                 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1650         }
1651
1652         return 0;
1653 }
1654
1655 static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
1656 {
1657         int i;
1658
1659         if (!adev->mode_info.audio.enabled)
1660                 return;
1661
1662         for (i = 0; i < adev->mode_info.audio.num_pins; i++)
1663                 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1664
1665         adev->mode_info.audio.enabled = false;
1666 }
1667
1668 /*
1669  * update the N and CTS parameters for a given pixel clock rate
1670  */
1671 static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
1672 {
1673         struct drm_device *dev = encoder->dev;
1674         struct amdgpu_device *adev = dev->dev_private;
1675         struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
1676         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1677         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1678         uint32_t offset = dig->afmt->offset;
1679
1680         WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT));
1681         WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz);
1682
1683         WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
1684         WREG32(mmHDMI_ACR_44_1 + offset, acr.n_44_1khz);
1685
1686         WREG32(mmHDMI_ACR_48_0 + offset, (acr.cts_48khz << HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT));
1687         WREG32(mmHDMI_ACR_48_1 + offset, acr.n_48khz);
1688 }
1689
1690 /*
1691  * build a HDMI Video Info Frame
1692  */
1693 static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
1694                                                void *buffer, size_t size)
1695 {
1696         struct drm_device *dev = encoder->dev;
1697         struct amdgpu_device *adev = dev->dev_private;
1698         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1699         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1700         uint32_t offset = dig->afmt->offset;
1701         uint8_t *frame = buffer + 3;
1702         uint8_t *header = buffer;
1703
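        /* hdmi_avi_infoframe_pack() emits a 3 byte header followed by the
         * checksum byte and 13 bytes of payload, so frame[0x0] below is the
         * checksum and header[1] the infoframe version.
         */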
1704         WREG32(mmAFMT_AVI_INFO0 + offset,
1705                 frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
1706         WREG32(mmAFMT_AVI_INFO1 + offset,
1707                 frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
1708         WREG32(mmAFMT_AVI_INFO2 + offset,
1709                 frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
1710         WREG32(mmAFMT_AVI_INFO3 + offset,
1711                 frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
1712 }
1713
1714 static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1715 {
1716         struct drm_device *dev = encoder->dev;
1717         struct amdgpu_device *adev = dev->dev_private;
1718         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1719         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1720         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1721         u32 dto_phase = 24 * 1000;
1722         u32 dto_modulo = clock;
1723
1724         if (!dig || !dig->afmt)
1725                 return;
1726
1727         /* XXX two dtos; generally use dto0 for hdmi */
1728         /* Express [24 MHz / target pixel clock] as an exact rational
1729          * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE is the
1730          * numerator and DCCG_AUDIO_DTOx_MODULE is the denominator.
1731          */
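        /* Worked example: a 148.50 MHz pixel clock (mode->clock == 148500,
         * in kHz) yields PHASE = 24 * 1000 = 24000 and MODULE = 148500, so
         * the DTO scales the pixel clock by 24000/148500 to recover the
         * 24 MHz audio reference.
         */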
1732         WREG32(mmDCCG_AUDIO_DTO_SOURCE, (amdgpu_crtc->crtc_id << DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT));
1733         WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
1734         WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
1735 }
1736
1737 /*
1738  * update the info frames with the data from the current display mode
1739  */
1740 static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
1741                                   struct drm_display_mode *mode)
1742 {
1743         struct drm_device *dev = encoder->dev;
1744         struct amdgpu_device *adev = dev->dev_private;
1745         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1746         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1747         struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
1748         u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
1749         struct hdmi_avi_infoframe frame;
1750         uint32_t offset, val;
1751         ssize_t err;
1752         int bpc = 8;
1753
1754         if (!dig || !dig->afmt)
1755                 return;
1756
1757         /* Silently exit if the AFMT block is not enabled */
1758         if (!dig->afmt->enabled)
1759                 return;
1760         offset = dig->afmt->offset;
1761
1762         /* hdmi deep color mode general control packets setup, if bpc > 8 */
1763         if (encoder->crtc) {
1764                 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1765                 bpc = amdgpu_crtc->bpc;
1766         }
1767
1768         /* disable audio prior to setting up hw */
1769         dig->afmt->pin = dce_v8_0_audio_get_pin(adev);
1770         dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
1771
1772         dce_v8_0_audio_set_dto(encoder, mode->clock);
1773
1774         WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
1775                HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK); /* send null packets when required */
1776
1777         WREG32(mmAFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
1778
1779         val = RREG32(mmHDMI_CONTROL + offset);
1780         val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1781         val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK;
1782
1783         switch (bpc) {
1784         case 0:
1785         case 6:
1786         case 8:
1787         case 16:
1788         default:
1789                 DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
1790                           connector->name, bpc);
1791                 break;
1792         case 10:
1793                 val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1794                 val |= 1 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
1795                 DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
1796                           connector->name);
1797                 break;
1798         case 12:
1799                 val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1800                 val |= 2 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
1801                 DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
1802                           connector->name);
1803                 break;
1804         }
1805
1806         WREG32(mmHDMI_CONTROL + offset, val);
1807
1808         WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
1809                HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK | /* send null packets when required */
1810                HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK | /* send general control packets */
1811                HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK); /* send general control packets every frame */
1812
1813         WREG32(mmHDMI_INFOFRAME_CONTROL0 + offset,
1814                HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK | /* enable audio info frames (frames won't be set until audio is enabled) */
1815                HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK); /* required for audio info values to be updated */
1816
1817         WREG32(mmAFMT_INFOFRAME_CONTROL0 + offset,
1818                AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK); /* required for audio info values to be updated */
1819
1820         WREG32(mmHDMI_INFOFRAME_CONTROL1 + offset,
1821                (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT)); /* anything other than 0 */
1822
1823         WREG32(mmHDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
1824
1825         WREG32(mmHDMI_AUDIO_PACKET_CONTROL + offset,
1826                (1 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT) | /* set the default audio delay */
1827                (3 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT)); /* should be sufficient for all audio modes and small enough for all hblanks */
1828
1829         WREG32(mmAFMT_AUDIO_PACKET_CONTROL + offset,
1830                AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK); /* allow 60958 channel status fields to be updated */
1831
1832         /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
1833
1834         if (bpc > 8)
1835                 WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
1836                        HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
1837         else
1838                 WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
1839                        HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK | /* select SW CTS value */
1840                        HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
1841
1842         dce_v8_0_afmt_update_ACR(encoder, mode->clock);
1843
1844         WREG32(mmAFMT_60958_0 + offset,
1845                (1 << AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT));
1846
1847         WREG32(mmAFMT_60958_1 + offset,
1848                (2 << AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT));
1849
1850         WREG32(mmAFMT_60958_2 + offset,
1851                (3 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT) |
1852                (4 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT) |
1853                (5 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT) |
1854                (6 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT) |
1855                (7 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT) |
1856                (8 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT));
1857
1858         dce_v8_0_audio_write_speaker_allocation(encoder);
1859
1861         WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + offset,
1862                (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
1863
1864         dce_v8_0_afmt_audio_select_pin(encoder);
1865         dce_v8_0_audio_write_sad_regs(encoder);
1866         dce_v8_0_audio_write_latency_fields(encoder, mode);
1867
1868         err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
1869         if (err < 0) {
1870                 DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
1871                 return;
1872         }
1873
1874         err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
1875         if (err < 0) {
1876                 DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
1877                 return;
1878         }
1879
1880         dce_v8_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
1881
1882         WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset,
1883                   HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */
1884                   HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for avi info values to be updated */
1885
1886         WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset,
1887                  (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */
1888                  ~HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE_MASK);
1889
1890         WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset,
1891                   AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */
1892
1893         /* it's unknown what these bits do exactly, but they're indeed quite useful for debugging */
1894         WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
1895         WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
1896         WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001);
1897         WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001);
1898
1899         /* enable audio after setting up hw */
1900         dce_v8_0_audio_enable(adev, dig->afmt->pin, true);
1901 }
1902
1903 static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1904 {
1905         struct drm_device *dev = encoder->dev;
1906         struct amdgpu_device *adev = dev->dev_private;
1907         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1908         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1909
1910         if (!dig || !dig->afmt)
1911                 return;
1912
1913         /* Silently skip redundant enable/disable requests */
1914         if (enable && dig->afmt->enabled)
1915                 return;
1916         if (!enable && !dig->afmt->enabled)
1917                 return;
1918
1919         if (!enable && dig->afmt->pin) {
1920                 dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
1921                 dig->afmt->pin = NULL;
1922         }
1923
1924         dig->afmt->enabled = enable;
1925
1926         DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1927                   enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1928 }
1929
1930 static void dce_v8_0_afmt_init(struct amdgpu_device *adev)
1931 {
1932         int i;
1933
1934         for (i = 0; i < adev->mode_info.num_dig; i++)
1935                 adev->mode_info.afmt[i] = NULL;
1936
1937         /* DCE8 has audio blocks tied to DIG encoders */
1938         for (i = 0; i < adev->mode_info.num_dig; i++) {
1939                 adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1940                 if (adev->mode_info.afmt[i]) {
1941                         adev->mode_info.afmt[i]->offset = dig_offsets[i];
1942                         adev->mode_info.afmt[i]->id = i;
1943                 }
1944         }
1945 }
1946
1947 static void dce_v8_0_afmt_fini(struct amdgpu_device *adev)
1948 {
1949         int i;
1950
1951         for (i = 0; i < adev->mode_info.num_dig; i++) {
1952                 kfree(adev->mode_info.afmt[i]);
1953                 adev->mode_info.afmt[i] = NULL;
1954         }
1955 }
1956
1957 static const u32 vga_control_regs[6] =
1958 {
1959         mmD1VGA_CONTROL,
1960         mmD2VGA_CONTROL,
1961         mmD3VGA_CONTROL,
1962         mmD4VGA_CONTROL,
1963         mmD5VGA_CONTROL,
1964         mmD6VGA_CONTROL,
1965 };
1966
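/* Bit 0 of the DxVGA_CONTROL registers gates legacy VGA output for a
 * display controller; only that bit is touched here. */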
1967 static void dce_v8_0_vga_enable(struct drm_crtc *crtc, bool enable)
1968 {
1969         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1970         struct drm_device *dev = crtc->dev;
1971         struct amdgpu_device *adev = dev->dev_private;
1972         u32 vga_control;
1973
1974         vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1975         if (enable)
1976                 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
1977         else
1978                 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
1979 }
1980
1981 static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable)
1982 {
1983         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1984         struct drm_device *dev = crtc->dev;
1985         struct amdgpu_device *adev = dev->dev_private;
1986
1987         if (enable)
1988                 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
1989         else
1990                 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
1991 }
1992
1993 static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
1994                                      struct drm_framebuffer *fb,
1995                                      int x, int y, int atomic)
1996 {
1997         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1998         struct drm_device *dev = crtc->dev;
1999         struct amdgpu_device *adev = dev->dev_private;
2000         struct amdgpu_framebuffer *amdgpu_fb;
2001         struct drm_framebuffer *target_fb;
2002         struct drm_gem_object *obj;
2003         struct amdgpu_bo *rbo;
2004         uint64_t fb_location, tiling_flags;
2005         uint32_t fb_format, fb_pitch_pixels;
2006         u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2007         u32 pipe_config;
2008         u32 tmp, viewport_w, viewport_h;
2009         int r;
2010         bool bypass_lut = false;
2011
2012         /* no fb bound */
2013         if (!atomic && !crtc->primary->fb) {
2014                 DRM_DEBUG_KMS("No FB bound\n");
2015                 return 0;
2016         }
2017
2018         if (atomic) {
2019                 amdgpu_fb = to_amdgpu_framebuffer(fb);
2020                 target_fb = fb;
2021         } else {
2023                 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
2024                 target_fb = crtc->primary->fb;
2025         }
2026
2027         /* If atomic, assume fb object is pinned & idle & fenced and
2028          * just update base pointers
2029          */
2030         obj = amdgpu_fb->obj;
2031         rbo = gem_to_amdgpu_bo(obj);
2032         r = amdgpu_bo_reserve(rbo, false);
2033         if (unlikely(r != 0))
2034                 return r;
2035
2036         if (atomic)
2037                 fb_location = amdgpu_bo_gpu_offset(rbo);
2038         else {
2039                 r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
2040                 if (unlikely(r != 0)) {
2041                         amdgpu_bo_unreserve(rbo);
2042                         return -EINVAL;
2043                 }
2044         }
2045
2046         amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
2047         amdgpu_bo_unreserve(rbo);
2048
2049         pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
2050
2051         switch (target_fb->pixel_format) {
2052         case DRM_FORMAT_C8:
2053                 fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2054                              (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2055                 break;
2056         case DRM_FORMAT_XRGB4444:
2057         case DRM_FORMAT_ARGB4444:
2058                 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2059                              (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2060 #ifdef __BIG_ENDIAN
2061                 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2062 #endif
2063                 break;
2064         case DRM_FORMAT_XRGB1555:
2065         case DRM_FORMAT_ARGB1555:
2066                 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2067                              (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2068 #ifdef __BIG_ENDIAN
2069                 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2070 #endif
2071                 break;
2072         case DRM_FORMAT_BGRX5551:
2073         case DRM_FORMAT_BGRA5551:
2074                 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2075                              (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2076 #ifdef __BIG_ENDIAN
2077                 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2078 #endif
2079                 break;
2080         case DRM_FORMAT_RGB565:
2081                 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2082                              (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2083 #ifdef __BIG_ENDIAN
2084                 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2085 #endif
2086                 break;
2087         case DRM_FORMAT_XRGB8888:
2088         case DRM_FORMAT_ARGB8888:
2089                 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2090                              (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2091 #ifdef __BIG_ENDIAN
2092                 fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2093 #endif
2094                 break;
2095         case DRM_FORMAT_XRGB2101010:
2096         case DRM_FORMAT_ARGB2101010:
2097                 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2098                              (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2099 #ifdef __BIG_ENDIAN
2100                 fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2101 #endif
2102                 /* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
2103                 bypass_lut = true;
2104                 break;
2105         case DRM_FORMAT_BGRX1010102:
2106         case DRM_FORMAT_BGRA1010102:
2107                 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2108                              (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2109 #ifdef __BIG_ENDIAN
2110                 fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2111 #endif
2112                 /* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
2113                 bypass_lut = true;
2114                 break;
2115         default:
2116                 DRM_ERROR("Unsupported screen format %s\n",
2117                           drm_get_format_name(target_fb->pixel_format));
2118                 return -EINVAL;
2119         }
2120
2121         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
2122                 unsigned bankw, bankh, mtaspect, tile_split, num_banks;
2123
2124                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
2125                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
2126                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
2127                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
2128                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
2129
2130                 fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
2131                 fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
2132                 fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT);
2133                 fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT);
2134                 fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT);
2135                 fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT);
2136                 fb_format |= (DISPLAY_MICRO_TILING << GRPH_CONTROL__GRPH_MICRO_TILE_MODE__SHIFT);
2137         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
2138                 fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
2139         }
2140
2141         fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT);
2142
2143         dce_v8_0_vga_enable(crtc, false);
2144
2145         WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2146                upper_32_bits(fb_location));
2147         WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2148                upper_32_bits(fb_location));
2149         WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2150                (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
2151         WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2152                (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
2153         WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
2154         WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
2155
2156         /*
2157          * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
2158          * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
2159          * retain the full precision throughout the pipeline.
2160          */
2161         WREG32_P(mmGRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
2162                  (bypass_lut ? LUT_10BIT_BYPASS_EN : 0),
2163                  ~LUT_10BIT_BYPASS_EN);
2164
2165         if (bypass_lut)
2166                 DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
2167
2168         WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
2169         WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
2170         WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
2171         WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
2172         WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
2173         WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
2174
2175         fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
2176         WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
2177
2178         dce_v8_0_grph_enable(crtc, true);
2179
2180         WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
2181                target_fb->height);
2182
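        /* the hardware appears to want the viewport start aligned: x to a
         * multiple of 4 pixels and y to a multiple of 2 lines */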
2183         x &= ~3;
2184         y &= ~1;
2185         WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
2186                (x << 16) | y);
2187         viewport_w = crtc->mode.hdisplay;
2188         viewport_h = (crtc->mode.vdisplay + 1) & ~1;
2189         WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
2190                (viewport_w << 16) | viewport_h);
2191
2192         /* pageflip setup */
2193         /* make sure flip is at vb rather than hb */
2194         tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
2195         tmp &= ~GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK;
2196         WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2197
2198         /* set pageflip to happen only at start of vblank interval (front porch) */
2199         WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
2200
2201         if (!atomic && fb && fb != crtc->primary->fb) {
2202                 amdgpu_fb = to_amdgpu_framebuffer(fb);
2203                 rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2204                 r = amdgpu_bo_reserve(rbo, false);
2205                 if (unlikely(r != 0))
2206                         return r;
2207                 amdgpu_bo_unpin(rbo);
2208                 amdgpu_bo_unreserve(rbo);
2209         }
2210
2211         /* Bytes per pixel may have changed */
2212         dce_v8_0_bandwidth_update(adev);
2213
2214         return 0;
2215 }
2216
2217 static void dce_v8_0_set_interleave(struct drm_crtc *crtc,
2218                                     struct drm_display_mode *mode)
2219 {
2220         struct drm_device *dev = crtc->dev;
2221         struct amdgpu_device *adev = dev->dev_private;
2222         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2223
2224         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2225                 WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset,
2226                        LB_DATA_FORMAT__INTERLEAVE_EN_MASK);
2227         else
2228                 WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
2229 }
2230
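/* Load the legacy 256 entry, 10 bits per component LUT for this crtc and put
 * the remaining color blocks (input/output CSC, prescale, (de)gamma, gamut
 * remap) into bypass. */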
2231 static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc)
2232 {
2233         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2234         struct drm_device *dev = crtc->dev;
2235         struct amdgpu_device *adev = dev->dev_private;
2236         int i;
2237
2238         DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2239
2240         WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2241                ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
2242                 (INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
2243         WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
2244                PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
2245         WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
2246                PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
2247         WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2248                ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
2249                 (INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
2250
2251         WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2252
2253         WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2254         WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2255         WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2256
2257         WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2258         WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2259         WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2260
2261         WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2262         WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2263
2264         WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
2265         for (i = 0; i < 256; i++) {
2266                 WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2267                        (amdgpu_crtc->lut_r[i] << 20) |
2268                        (amdgpu_crtc->lut_g[i] << 10) |
2269                        (amdgpu_crtc->lut_b[i] << 0));
2270         }
2271
2272         WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2273                ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
2274                 (DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
2275                 (DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
2276         WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
2277                ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
2278                 (GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
2279         WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2280                ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
2281                 (REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
2282         WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2283                ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
2284                 (OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
2285         /* XXX match this to the depth of the crtc fmt block, move to modeset? */
2286         WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
2287         /* XXX this only needs to be programmed once per crtc at startup,
2288          * not sure where the best place for it is
2289          */
2290         WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset,
2291                ALPHA_CONTROL__CURSOR_ALPHA_BLND_ENA_MASK);
2292 }
2293
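/* Each UNIPHY block drives two links, each mapped to its own DIG backend:
 * UNIPHY A/B -> DIG0/1, UNIPHY1 A/B -> DIG2/3, UNIPHY2 A/B -> DIG4/5,
 * UNIPHY3 -> DIG6. */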
2294 static int dce_v8_0_pick_dig_encoder(struct drm_encoder *encoder)
2295 {
2296         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2297         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2298
2299         switch (amdgpu_encoder->encoder_id) {
2300         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2301                 if (dig->linkb)
2302                         return 1;
2303                 else
2304                         return 0;
2306         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2307                 if (dig->linkb)
2308                         return 3;
2309                 else
2310                         return 2;
2312         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2313                 if (dig->linkb)
2314                         return 5;
2315                 else
2316                         return 4;
2318         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2319                 return 6;
2321         default:
2322                 DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2323                 return 0;
2324         }
2325 }
2326
2327 /**
2328  * dce_v8_0_pick_pll - Allocate a PPLL for use by the crtc.
2329  *
2330  * @crtc: drm crtc
2331  *
2332  * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
2333  * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
2334  * monitors a dedicated PPLL must be used.  If a particular board has
2335  * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2336  * as there is no need to program the PLL itself.  If we are not able to
2337  * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2338  * avoid messing up an existing monitor.
2339  *
2340  * Asic specific PLL information
2341  *
2342  * DCE 8.x
2343  * KB/KV
2344  * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
2345  * CI
2346  * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
2347  *
2348  */
2349 static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc)
2350 {
2351         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2352         struct drm_device *dev = crtc->dev;
2353         struct amdgpu_device *adev = dev->dev_private;
2354         u32 pll_in_use;
2355         int pll;
2356
2357         if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2358                 if (adev->clock.dp_extclk)
2359                         /* skip PPLL programming if using ext clock */
2360                         return ATOM_PPLL_INVALID;
2361                 else {
2362                         /* use the same PPLL for all DP monitors */
2363                         pll = amdgpu_pll_get_shared_dp_ppll(crtc);
2364                         if (pll != ATOM_PPLL_INVALID)
2365                                 return pll;
2366                 }
2367         } else {
2368                 /* use the same PPLL for all monitors with the same clock */
2369                 pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2370                 if (pll != ATOM_PPLL_INVALID)
2371                         return pll;
2372         }
2373         /* otherwise, pick one of the plls */
2374         if ((adev->asic_type == CHIP_KABINI) ||
2375             (adev->asic_type == CHIP_MULLINS)) {
2376                 /* KB/ML has PPLL1 and PPLL2 */
2377                 pll_in_use = amdgpu_pll_get_use_mask(crtc);
2378                 if (!(pll_in_use & (1 << ATOM_PPLL2)))
2379                         return ATOM_PPLL2;
2380                 if (!(pll_in_use & (1 << ATOM_PPLL1)))
2381                         return ATOM_PPLL1;
2382                 DRM_ERROR("unable to allocate a PPLL\n");
2383                 return ATOM_PPLL_INVALID;
2384         } else {
2385                 /* CI/KV has PPLL0, PPLL1, and PPLL2 */
2386                 pll_in_use = amdgpu_pll_get_use_mask(crtc);
2387                 if (!(pll_in_use & (1 << ATOM_PPLL2)))
2388                         return ATOM_PPLL2;
2389                 if (!(pll_in_use & (1 << ATOM_PPLL1)))
2390                         return ATOM_PPLL1;
2391                 if (!(pll_in_use & (1 << ATOM_PPLL0)))
2392                         return ATOM_PPLL0;
2393                 DRM_ERROR("unable to allocate a PPLL\n");
2394                 return ATOM_PPLL_INVALID;
2395         }
2396         return ATOM_PPLL_INVALID;
2397 }
2398
2399 static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2400 {
2401         struct amdgpu_device *adev = crtc->dev->dev_private;
2402         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2403         uint32_t cur_lock;
2404
2405         cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2406         if (lock)
2407                 cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2408         else
2409                 cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2410         WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2411 }
2412
2413 static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
2414 {
2415         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2416         struct amdgpu_device *adev = crtc->dev->dev_private;
2417
2418         WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2419                    (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2420                    (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2421 }
2422
2423 static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
2424 {
2425         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2426         struct amdgpu_device *adev = crtc->dev->dev_private;
2427
2428         WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2429                upper_32_bits(amdgpu_crtc->cursor_addr));
2430         WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2431                lower_32_bits(amdgpu_crtc->cursor_addr));
2432
2433         WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2434                    CUR_CONTROL__CURSOR_EN_MASK |
2435                    (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2436                    (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2437 }
2438
2439 static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
2440                                        int x, int y)
2441 {
2442         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2443         struct amdgpu_device *adev = crtc->dev->dev_private;
2444         int xorigin = 0, yorigin = 0;
2445
2446         /* avivo cursors are offset into the total surface */
2447         x += crtc->x;
2448         y += crtc->y;
2449         DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2450
2451         if (x < 0) {
2452                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2453                 x = 0;
2454         }
2455         if (y < 0) {
2456                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2457                 y = 0;
2458         }
2459
2460         WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2461         WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2462         WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2463                ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2464
2465         amdgpu_crtc->cursor_x = x;
2466         amdgpu_crtc->cursor_y = y;
2467
2468         return 0;
2469 }
2470
2471 static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
2472                                      int x, int y)
2473 {
2474         int ret;
2475
2476         dce_v8_0_lock_cursor(crtc, true);
2477         ret = dce_v8_0_cursor_move_locked(crtc, x, y);
2478         dce_v8_0_lock_cursor(crtc, false);
2479
2480         return ret;
2481 }
2482
2483 static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
2484                                      struct drm_file *file_priv,
2485                                      uint32_t handle,
2486                                      uint32_t width,
2487                                      uint32_t height,
2488                                      int32_t hot_x,
2489                                      int32_t hot_y)
2490 {
2491         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2492         struct drm_gem_object *obj;
2493         struct amdgpu_bo *aobj;
2494         int ret;
2495
2496         if (!handle) {
2497                 /* turn off cursor */
2498                 dce_v8_0_hide_cursor(crtc);
2499                 obj = NULL;
2500                 goto unpin;
2501         }
2502
2503         if ((width > amdgpu_crtc->max_cursor_width) ||
2504             (height > amdgpu_crtc->max_cursor_height)) {
2505                 DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2506                 return -EINVAL;
2507         }
2508
2509         obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
2510         if (!obj) {
2511                 DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2512                 return -ENOENT;
2513         }
2514
2515         aobj = gem_to_amdgpu_bo(obj);
2516         ret = amdgpu_bo_reserve(aobj, false);
2517         if (ret != 0) {
2518                 drm_gem_object_unreference_unlocked(obj);
2519                 return ret;
2520         }
2521
2522         ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
2523         amdgpu_bo_unreserve(aobj);
2524         if (ret) {
2525                 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2526                 drm_gem_object_unreference_unlocked(obj);
2527                 return ret;
2528         }
2529
2530         amdgpu_crtc->cursor_width = width;
2531         amdgpu_crtc->cursor_height = height;
2532
2533         dce_v8_0_lock_cursor(crtc, true);
2534
2535         if (hot_x != amdgpu_crtc->cursor_hot_x ||
2536             hot_y != amdgpu_crtc->cursor_hot_y) {
2537                 int x, y;
2538
2539                 x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2540                 y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2541
2542                 dce_v8_0_cursor_move_locked(crtc, x, y);
2543
2544                 amdgpu_crtc->cursor_hot_x = hot_x;
2545                 amdgpu_crtc->cursor_hot_y = hot_y;
2546         }
2547
2548         dce_v8_0_show_cursor(crtc);
2549         dce_v8_0_lock_cursor(crtc, false);
2550
2551 unpin:
2552         if (amdgpu_crtc->cursor_bo) {
2553                 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2554                 ret = amdgpu_bo_reserve(aobj, false);
2555                 if (likely(ret == 0)) {
2556                         amdgpu_bo_unpin(aobj);
2557                         amdgpu_bo_unreserve(aobj);
2558                 }
2559                 drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
2560         }
2561
2562         amdgpu_crtc->cursor_bo = obj;
2563         return 0;
2564 }
2565
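/* re-program cursor position and visibility after a mode set */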
2566 static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
2567 {
2568         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2569
2570         if (amdgpu_crtc->cursor_bo) {
2571                 dce_v8_0_lock_cursor(crtc, true);
2572
2573                 dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2574                                             amdgpu_crtc->cursor_y);
2575
2576                 dce_v8_0_show_cursor(crtc);
2577
2578                 dce_v8_0_lock_cursor(crtc, false);
2579         }
2580 }
2581
2582 static void dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2583                                     u16 *blue, uint32_t start, uint32_t size)
2584 {
2585         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2586         int end = (start + size > 256) ? 256 : start + size, i;
2587
2588         /* userspace palettes are always correct as is */
2589         for (i = start; i < end; i++) {
2590                 amdgpu_crtc->lut_r[i] = red[i] >> 6;
2591                 amdgpu_crtc->lut_g[i] = green[i] >> 6;
2592                 amdgpu_crtc->lut_b[i] = blue[i] >> 6;
2593         }
2594         dce_v8_0_crtc_load_lut(crtc);
2595 }
2596
2597 static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
2598 {
2599         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2600
2601         drm_crtc_cleanup(crtc);
2602         destroy_workqueue(amdgpu_crtc->pflip_queue);
2603         kfree(amdgpu_crtc);
2604 }
2605
2606 static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
2607         .cursor_set2 = dce_v8_0_crtc_cursor_set2,
2608         .cursor_move = dce_v8_0_crtc_cursor_move,
2609         .gamma_set = dce_v8_0_crtc_gamma_set,
2610         .set_config = amdgpu_crtc_set_config,
2611         .destroy = dce_v8_0_crtc_destroy,
2612         .page_flip = amdgpu_crtc_page_flip,
2613 };
2614
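/*
 * dce_v8_0_crtc_dpms - crtc power management
 *
 * DPMS_ON unblanks the crtc via atombios, re-arms the vblank and
 * pageflip interrupt state and reloads the LUT; the other modes blank
 * and then disable the crtc.  PM clocks are recomputed either way.
 */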
2615 static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2616 {
2617         struct drm_device *dev = crtc->dev;
2618         struct amdgpu_device *adev = dev->dev_private;
2619         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2620         unsigned type;
2621
2622         switch (mode) {
2623         case DRM_MODE_DPMS_ON:
2624                 amdgpu_crtc->enabled = true;
2625                 amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2626                 dce_v8_0_vga_enable(crtc, true);
2627                 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2628                 dce_v8_0_vga_enable(crtc, false);
2629                 /* Make sure VBLANK and PFLIP interrupts are still enabled */
2630                 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2631                 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2632                 amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2633                 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
2634                 dce_v8_0_crtc_load_lut(crtc);
2635                 break;
2636         case DRM_MODE_DPMS_STANDBY:
2637         case DRM_MODE_DPMS_SUSPEND:
2638         case DRM_MODE_DPMS_OFF:
2639                 drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
2640                 if (amdgpu_crtc->enabled) {
2641                         dce_v8_0_vga_enable(crtc, true);
2642                         amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2643                         dce_v8_0_vga_enable(crtc, false);
2644                 }
2645                 amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2646                 amdgpu_crtc->enabled = false;
2647                 break;
2648         }
2649         /* adjust pm to dpms */
2650         amdgpu_pm_compute_clocks(adev);
2651 }
2652
2653 static void dce_v8_0_crtc_prepare(struct drm_crtc *crtc)
2654 {
2655         /* disable crtc pair power gating before programming */
2656         amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2657         amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2658         dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2659 }
2660
2661 static void dce_v8_0_crtc_commit(struct drm_crtc *crtc)
2662 {
2663         dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2664         amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2665 }
2666
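/*
 * dce_v8_0_crtc_disable - fully tear down a crtc
 *
 * Unpins the current scanout buffer, power gates the crtc and then
 * disables its PLL, unless another enabled crtc still shares the
 * same PLL.
 */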
2667 static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
2668 {
2669         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2670         struct drm_device *dev = crtc->dev;
2671         struct amdgpu_device *adev = dev->dev_private;
2672         struct amdgpu_atom_ss ss;
2673         int i;
2674
2675         dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2676         if (crtc->primary->fb) {
2677                 int r;
2678                 struct amdgpu_framebuffer *amdgpu_fb;
2679                 struct amdgpu_bo *rbo;
2680
2681                 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
2682                 rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2683                 r = amdgpu_bo_reserve(rbo, false);
2684                 if (unlikely(r))
2685                         DRM_ERROR("failed to reserve rbo before unpin\n");
2686                 else {
2687                         amdgpu_bo_unpin(rbo);
2688                         amdgpu_bo_unreserve(rbo);
2689                 }
2690         }
2691         /* disable the GRPH */
2692         dce_v8_0_grph_enable(crtc, false);
2693
2694         amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2695
2696         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2697                 if (adev->mode_info.crtcs[i] &&
2698                     adev->mode_info.crtcs[i]->enabled &&
2699                     i != amdgpu_crtc->crtc_id &&
2700                     amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2701                         /* another enabled crtc is using this pll,
2702                          * don't turn off the pll
2703                          */
2704                         goto done;
2705                 }
2706         }
2707
2708         switch (amdgpu_crtc->pll_id) {
2709         case ATOM_PPLL1:
2710         case ATOM_PPLL2:
2711                 /* disable the ppll */
2712                 amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2713                                           0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2714                 break;
2715         case ATOM_PPLL0:
2716                 /* disable the ppll */
2717                 if ((adev->asic_type == CHIP_KAVERI) ||
2718                     (adev->asic_type == CHIP_BONAIRE) ||
2719                     (adev->asic_type == CHIP_HAWAII))
2720                         amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2721                                                   0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2722                 break;
2723         default:
2724                 break;
2725         }
2726 done:
2727         amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2728         amdgpu_crtc->adjusted_clock = 0;
2729         amdgpu_crtc->encoder = NULL;
2730         amdgpu_crtc->connector = NULL;
2731 }
2732
2733 static int dce_v8_0_crtc_mode_set(struct drm_crtc *crtc,
2734                                   struct drm_display_mode *mode,
2735                                   struct drm_display_mode *adjusted_mode,
2736                                   int x, int y, struct drm_framebuffer *old_fb)
2737 {
2738         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2739
2740         if (!amdgpu_crtc->adjusted_clock)
2741                 return -EINVAL;
2742
2743         amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2744         amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2745         dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2746         amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2747         amdgpu_atombios_crtc_scaler_setup(crtc);
2748         dce_v8_0_cursor_reset(crtc);
2749         /* update the hw version for dpm */
2750         amdgpu_crtc->hw_mode = *adjusted_mode;
2751
2752         return 0;
2753 }
2754
2755 static bool dce_v8_0_crtc_mode_fixup(struct drm_crtc *crtc,
2756                                      const struct drm_display_mode *mode,
2757                                      struct drm_display_mode *adjusted_mode)
2758 {
2759         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2760         struct drm_device *dev = crtc->dev;
2761         struct drm_encoder *encoder;
2762
2763         /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2764         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2765                 if (encoder->crtc == crtc) {
2766                         amdgpu_crtc->encoder = encoder;
2767                         amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2768                         break;
2769                 }
2770         }
2771         if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2772                 amdgpu_crtc->encoder = NULL;
2773                 amdgpu_crtc->connector = NULL;
2774                 return false;
2775         }
2776         if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2777                 return false;
2778         if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2779                 return false;
2780         /* pick pll */
2781         amdgpu_crtc->pll_id = dce_v8_0_pick_pll(crtc);
2782         /* if we can't get a PPLL for a non-DP encoder, fail */
2783         if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2784             !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2785                 return false;
2786
2787         return true;
2788 }
2789
2790 static int dce_v8_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2791                                   struct drm_framebuffer *old_fb)
2792 {
2793         return dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2794 }
2795
2796 static int dce_v8_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2797                                          struct drm_framebuffer *fb,
2798                                          int x, int y, enum mode_set_atomic state)
2799 {
2800         return dce_v8_0_crtc_do_set_base(crtc, fb, x, y, 1);
2801 }
2802
2803 static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = {
2804         .dpms = dce_v8_0_crtc_dpms,
2805         .mode_fixup = dce_v8_0_crtc_mode_fixup,
2806         .mode_set = dce_v8_0_crtc_mode_set,
2807         .mode_set_base = dce_v8_0_crtc_set_base,
2808         .mode_set_base_atomic = dce_v8_0_crtc_set_base_atomic,
2809         .prepare = dce_v8_0_crtc_prepare,
2810         .commit = dce_v8_0_crtc_commit,
2811         .load_lut = dce_v8_0_crtc_load_lut,
2812         .disable = dce_v8_0_crtc_disable,
2813 };
2814
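/*
 * dce_v8_0_crtc_init - allocate and register one crtc
 *
 * Sets up the gamma size, cursor limits, per-crtc register offset and
 * an identity LUT (the 8-bit ramp shifted into the 10-bit range).
 */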
2815 static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
2816 {
2817         struct amdgpu_crtc *amdgpu_crtc;
2818         int i;
2819
2820         amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2821                               (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2822         if (amdgpu_crtc == NULL)
2823                 return -ENOMEM;
2824
2825         drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);
2826
2827         drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2828         amdgpu_crtc->crtc_id = index;
2829         amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue");
2830         adev->mode_info.crtcs[index] = amdgpu_crtc;
2831
2832         amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
2833         amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
2834         adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2835         adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2836
2837         for (i = 0; i < 256; i++) {
2838                 amdgpu_crtc->lut_r[i] = i << 2;
2839                 amdgpu_crtc->lut_g[i] = i << 2;
2840                 amdgpu_crtc->lut_b[i] = i << 2;
2841         }
2842
2843         amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2844
2845         amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2846         amdgpu_crtc->adjusted_clock = 0;
2847         amdgpu_crtc->encoder = NULL;
2848         amdgpu_crtc->connector = NULL;
2849         drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v8_0_crtc_helper_funcs);
2850
2851         return 0;
2852 }
2853
2854 static int dce_v8_0_early_init(void *handle)
2855 {
2856         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2857
2858         adev->audio_endpt_rreg = &dce_v8_0_audio_endpt_rreg;
2859         adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg;
2860
2861         dce_v8_0_set_display_funcs(adev);
2862         dce_v8_0_set_irq_funcs(adev);
2863
2864         switch (adev->asic_type) {
2865         case CHIP_BONAIRE:
2866         case CHIP_HAWAII:
2867                 adev->mode_info.num_crtc = 6;
2868                 adev->mode_info.num_hpd = 6;
2869                 adev->mode_info.num_dig = 6;
2870                 break;
2871         case CHIP_KAVERI:
2872                 adev->mode_info.num_crtc = 4;
2873                 adev->mode_info.num_hpd = 6;
2874                 adev->mode_info.num_dig = 7;
2875                 break;
2876         case CHIP_KABINI:
2877         case CHIP_MULLINS:
2878                 adev->mode_info.num_crtc = 2;
2879                 adev->mode_info.num_hpd = 6;
2880                 adev->mode_info.num_dig = 6; /* ? */
2881                 break;
2882         default:
2883                 /* FIXME: not supported yet */
2884                 return -EINVAL;
2885         }
2886
2887         return 0;
2888 }
2889
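/*
 * dce_v8_0_sw_init - software state setup
 *
 * Registers the crtc, pageflip and HPD interrupt sources, initializes
 * the DRM mode config, allocates the crtcs and reads the connector
 * setup from the vbios object tables before bringing up audio/afmt.
 */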
2890 static int dce_v8_0_sw_init(void *handle)
2891 {
2892         int r, i;
2893         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2894
2895         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2896                 r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
2897                 if (r)
2898                         return r;
2899         }
2900
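        /* pageflip interrupt sources come in pairs per crtc:
         * src_ids 8, 10, ... 18 map to crtc 0..5
         * (see the decode in dce_v8_0_pageflip_irq)
         */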
2901         for (i = 8; i < 20; i += 2) {
2902                 r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
2903                 if (r)
2904                         return r;
2905         }
2906
2907         /* HPD hotplug */
2908         r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
2909         if (r)
2910                 return r;
2911
2912         adev->mode_info.mode_config_initialized = true;
2913
2914         adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
2915
2916         adev->ddev->mode_config.max_width = 16384;
2917         adev->ddev->mode_config.max_height = 16384;
2918
2919         adev->ddev->mode_config.preferred_depth = 24;
2920         adev->ddev->mode_config.prefer_shadow = 1;
2921
2922         adev->ddev->mode_config.fb_base = adev->mc.aper_base;
2923
2924         r = amdgpu_modeset_create_props(adev);
2925         if (r)
2926                 return r;
2927
2931         /* allocate crtcs */
2932         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2933                 r = dce_v8_0_crtc_init(adev, i);
2934                 if (r)
2935                         return r;
2936         }
2937
2938         if (amdgpu_atombios_get_connector_info_from_object_table(adev))
2939                 amdgpu_print_display_setup(adev->ddev);
2940         else
2941                 return -EINVAL;
2942
2943         /* setup afmt */
2944         dce_v8_0_afmt_init(adev);
2945
2946         r = dce_v8_0_audio_init(adev);
2947         if (r)
2948                 return r;
2949
2950         drm_kms_helper_poll_init(adev->ddev);
2951
2952         return r;
2953 }
2954
2955 static int dce_v8_0_sw_fini(void *handle)
2956 {
2957         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2958
2959         kfree(adev->mode_info.bios_hardcoded_edid);
2960
2961         drm_kms_helper_poll_fini(adev->ddev);
2962
2963         dce_v8_0_audio_fini(adev);
2964
2965         dce_v8_0_afmt_fini(adev);
2966
2967         drm_mode_config_cleanup(adev->ddev);
2968         adev->mode_info.mode_config_initialized = false;
2969
2970         return 0;
2971 }
2972
2973 static int dce_v8_0_hw_init(void *handle)
2974 {
2975         int i;
2976         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2977
2978         /* init dig PHYs, disp eng pll */
2979         amdgpu_atombios_encoder_init_dig(adev);
2980         amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2981
2982         /* initialize hpd */
2983         dce_v8_0_hpd_init(adev);
2984
2985         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2986                 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2987         }
2988
2989         dce_v8_0_pageflip_interrupt_init(adev);
2990
2991         return 0;
2992 }
2993
2994 static int dce_v8_0_hw_fini(void *handle)
2995 {
2996         int i;
2997         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2998
2999         dce_v8_0_hpd_fini(adev);
3000
3001         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
3002                 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
3003         }
3004
3005         dce_v8_0_pageflip_interrupt_fini(adev);
3006
3007         return 0;
3008 }
3009
3010 static int dce_v8_0_suspend(void *handle)
3011 {
3012         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3013
3014         amdgpu_atombios_scratch_regs_save(adev);
3015
3016         return dce_v8_0_hw_fini(handle);
3017 }
3018
3019 static int dce_v8_0_resume(void *handle)
3020 {
3021         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3022         int ret;
3023
3024         ret = dce_v8_0_hw_init(handle);
3025
3026         amdgpu_atombios_scratch_regs_restore(adev);
3027
3028         /* turn on the backlight */
3029         if (adev->mode_info.bl_encoder) {
3030                 u8 bl_level = amdgpu_display_backlight_get_level(adev,
3031                                                                   adev->mode_info.bl_encoder);
3032                 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
3033                                                     bl_level);
3034         }
3035
3036         return ret;
3037 }
3038
3039 static bool dce_v8_0_is_idle(void *handle)
3040 {
3041         return true;
3042 }
3043
3044 static int dce_v8_0_wait_for_idle(void *handle)
3045 {
3046         return 0;
3047 }
3048
3049 static void dce_v8_0_print_status(void *handle)
3050 {
3051         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3052
3053         dev_info(adev->dev, "DCE 8.x registers\n");
3054         /* XXX todo */
3055 }
3056
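/*
 * dce_v8_0_soft_reset - reset the display block via SRBM
 *
 * If the display controller looks hung, pulse the DC bit in
 * SRBM_SOFT_RESET, reading the register back after each write to make
 * sure it has landed and delaying briefly so things settle.
 */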
3057 static int dce_v8_0_soft_reset(void *handle)
3058 {
3059         u32 srbm_soft_reset = 0, tmp;
3060         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3061
3062         if (dce_v8_0_is_display_hung(adev))
3063                 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
3064
3065         if (srbm_soft_reset) {
3066                 dce_v8_0_print_status((void *)adev);
3067
3068                 tmp = RREG32(mmSRBM_SOFT_RESET);
3069                 tmp |= srbm_soft_reset;
3070                 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
3071                 WREG32(mmSRBM_SOFT_RESET, tmp);
3072                 tmp = RREG32(mmSRBM_SOFT_RESET);
3073
3074                 udelay(50);
3075
3076                 tmp &= ~srbm_soft_reset;
3077                 WREG32(mmSRBM_SOFT_RESET, tmp);
3078                 tmp = RREG32(mmSRBM_SOFT_RESET);
3079
3080                 /* Wait a little for things to settle down */
3081                 udelay(50);
3082                 dce_v8_0_print_status((void *)adev);
3083         }
3084         return 0;
3085 }
3086
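/* enable/disable the vblank interrupt in a crtc's line buffer block */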
3087 static void dce_v8_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
3088                                                      int crtc,
3089                                                      enum amdgpu_interrupt_state state)
3090 {
3091         u32 reg_block, lb_interrupt_mask;
3092
3093         if (crtc >= adev->mode_info.num_crtc) {
3094                 DRM_DEBUG("invalid crtc %d\n", crtc);
3095                 return;
3096         }
3097
3098         switch (crtc) {
3099         case 0:
3100                 reg_block = CRTC0_REGISTER_OFFSET;
3101                 break;
3102         case 1:
3103                 reg_block = CRTC1_REGISTER_OFFSET;
3104                 break;
3105         case 2:
3106                 reg_block = CRTC2_REGISTER_OFFSET;
3107                 break;
3108         case 3:
3109                 reg_block = CRTC3_REGISTER_OFFSET;
3110                 break;
3111         case 4:
3112                 reg_block = CRTC4_REGISTER_OFFSET;
3113                 break;
3114         case 5:
3115                 reg_block = CRTC5_REGISTER_OFFSET;
3116                 break;
3117         default:
3118                 DRM_DEBUG("invalid crtc %d\n", crtc);
3119                 return;
3120         }
3121
3122         switch (state) {
3123         case AMDGPU_IRQ_STATE_DISABLE:
3124                 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
3125                 lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
3126                 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
3127                 break;
3128         case AMDGPU_IRQ_STATE_ENABLE:
3129                 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
3130                 lb_interrupt_mask |= LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
3131                 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
3132                 break;
3133         default:
3134                 break;
3135         }
3136 }
3137
3138 static void dce_v8_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
3139                                                     int crtc,
3140                                                     enum amdgpu_interrupt_state state)
3141 {
3142         u32 reg_block, lb_interrupt_mask;
3143
3144         if (crtc >= adev->mode_info.num_crtc) {
3145                 DRM_DEBUG("invalid crtc %d\n", crtc);
3146                 return;
3147         }
3148
3149         switch (crtc) {
3150         case 0:
3151                 reg_block = CRTC0_REGISTER_OFFSET;
3152                 break;
3153         case 1:
3154                 reg_block = CRTC1_REGISTER_OFFSET;
3155                 break;
3156         case 2:
3157                 reg_block = CRTC2_REGISTER_OFFSET;
3158                 break;
3159         case 3:
3160                 reg_block = CRTC3_REGISTER_OFFSET;
3161                 break;
3162         case 4:
3163                 reg_block = CRTC4_REGISTER_OFFSET;
3164                 break;
3165         case 5:
3166                 reg_block = CRTC5_REGISTER_OFFSET;
3167                 break;
3168         default:
3169                 DRM_DEBUG("invalid crtc %d\n", crtc);
3170                 return;
3171         }
3172
3173         switch (state) {
3174         case AMDGPU_IRQ_STATE_DISABLE:
3175                 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
3176                 lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
3177                 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
3178                 break;
3179         case AMDGPU_IRQ_STATE_ENABLE:
3180                 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
3181                 lb_interrupt_mask |= LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
3182                 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
3183                 break;
3184         default:
3185                 break;
3186         }
3187 }
3188
3189 static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
3190                                             struct amdgpu_irq_src *src,
3191                                             unsigned type,
3192                                             enum amdgpu_interrupt_state state)
3193 {
3194         u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
3195
3196         switch (type) {
3197         case AMDGPU_HPD_1:
3198                 dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
3199                 break;
3200         case AMDGPU_HPD_2:
3201                 dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
3202                 break;
3203         case AMDGPU_HPD_3:
3204                 dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
3205                 break;
3206         case AMDGPU_HPD_4:
3207                 dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
3208                 break;
3209         case AMDGPU_HPD_5:
3210                 dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
3211                 break;
3212         case AMDGPU_HPD_6:
3213                 dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
3214                 break;
3215         default:
3216                 DRM_DEBUG("invalid hpd %d\n", type);
3217                 return 0;
3218         }
3219
3220         switch (state) {
3221         case AMDGPU_IRQ_STATE_DISABLE:
3222                 dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
3223                 dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
3224                 WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
3225                 break;
3226         case AMDGPU_IRQ_STATE_ENABLE:
3227                 dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
3228                 dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
3229                 WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
3230                 break;
3231         default:
3232                 break;
3233         }
3234
3235         return 0;
3236 }
3237
3238 static int dce_v8_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
3239                                              struct amdgpu_irq_src *src,
3240                                              unsigned type,
3241                                              enum amdgpu_interrupt_state state)
3242 {
3243         switch (type) {
3244         case AMDGPU_CRTC_IRQ_VBLANK1:
3245                 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 0, state);
3246                 break;
3247         case AMDGPU_CRTC_IRQ_VBLANK2:
3248                 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 1, state);
3249                 break;
3250         case AMDGPU_CRTC_IRQ_VBLANK3:
3251                 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 2, state);
3252                 break;
3253         case AMDGPU_CRTC_IRQ_VBLANK4:
3254                 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 3, state);
3255                 break;
3256         case AMDGPU_CRTC_IRQ_VBLANK5:
3257                 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 4, state);
3258                 break;
3259         case AMDGPU_CRTC_IRQ_VBLANK6:
3260                 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 5, state);
3261                 break;
3262         case AMDGPU_CRTC_IRQ_VLINE1:
3263                 dce_v8_0_set_crtc_vline_interrupt_state(adev, 0, state);
3264                 break;
3265         case AMDGPU_CRTC_IRQ_VLINE2:
3266                 dce_v8_0_set_crtc_vline_interrupt_state(adev, 1, state);
3267                 break;
3268         case AMDGPU_CRTC_IRQ_VLINE3:
3269                 dce_v8_0_set_crtc_vline_interrupt_state(adev, 2, state);
3270                 break;
3271         case AMDGPU_CRTC_IRQ_VLINE4:
3272                 dce_v8_0_set_crtc_vline_interrupt_state(adev, 3, state);
3273                 break;
3274         case AMDGPU_CRTC_IRQ_VLINE5:
3275                 dce_v8_0_set_crtc_vline_interrupt_state(adev, 4, state);
3276                 break;
3277         case AMDGPU_CRTC_IRQ_VLINE6:
3278                 dce_v8_0_set_crtc_vline_interrupt_state(adev, 5, state);
3279                 break;
3280         default:
3281                 break;
3282         }
3283         return 0;
3284 }
3285
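/*
 * dce_v8_0_crtc_irq - crtc interrupt handler
 *
 * src_data 0 is a vblank event and src_data 1 a vline event; both are
 * acked in the LB status registers, and vblanks are forwarded to DRM
 * when the interrupt is enabled.
 */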
3286 static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
3287                              struct amdgpu_irq_src *source,
3288                              struct amdgpu_iv_entry *entry)
3289 {
3290         unsigned crtc = entry->src_id - 1;
3291         uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3292         unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
3293
3294         switch (entry->src_data) {
3295         case 0: /* vblank */
3296                 if (disp_int & interrupt_status_offsets[crtc].vblank)
3297                         WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK);
3298                 else
3299                         DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3300
3301                 if (amdgpu_irq_enabled(adev, source, irq_type)) {
3302                         drm_handle_vblank(adev->ddev, crtc);
3303                 }
3304                 DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3305
3306                 break;
3307         case 1: /* vline */
3308                 if (disp_int & interrupt_status_offsets[crtc].vline)
3309                         WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK);
3310                 else
3311                         DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3312
3313                 DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3314
3315                 break;
3316         default:
3317                 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
3318                 break;
3319         }
3320
3321         return 0;
3322 }
3323
3324 static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
3325                                                  struct amdgpu_irq_src *src,
3326                                                  unsigned type,
3327                                                  enum amdgpu_interrupt_state state)
3328 {
3329         u32 reg;
3330
3331         if (type >= adev->mode_info.num_crtc) {
3332                 DRM_ERROR("invalid pageflip crtc %d\n", type);
3333                 return -EINVAL;
3334         }
3335
3336         reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3337         if (state == AMDGPU_IRQ_STATE_DISABLE)
3338                 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3339                        reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3340         else
3341                 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3342                        reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3343
3344         return 0;
3345 }
3346
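/*
 * dce_v8_0_pageflip_irq - pageflip completion handler
 *
 * src_ids 8, 10, ... 18 decode to crtc 0..5, e.g. src_id 10 gives
 * crtc_id = (10 - 8) >> 1 = 1.  Acks the GRPH pflip status, sends the
 * completion event to userspace and queues the unpin work.
 */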
3347 static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
3348                                 struct amdgpu_irq_src *source,
3349                                 struct amdgpu_iv_entry *entry)
3350 {
3351         unsigned long flags;
3352         unsigned crtc_id;
3353         struct amdgpu_crtc *amdgpu_crtc;
3354         struct amdgpu_flip_work *works;
3355
3356         crtc_id = (entry->src_id - 8) >> 1;
3357
3358         if (crtc_id >= adev->mode_info.num_crtc) {
3359                 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3360                 return -EINVAL;
3361         }
3362         amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3363
3364         if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3365             GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3366                 WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3367                        GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3368
3369         /* the IRQ can fire during init, before the crtc is fully set up */
3370         if (amdgpu_crtc == NULL)
3371                 return 0;
3372
3373         spin_lock_irqsave(&adev->ddev->event_lock, flags);
3374         works = amdgpu_crtc->pflip_works;
3375         if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
3376                 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3377                                                 "AMDGPU_FLIP_SUBMITTED(%d)\n",
3378                                                 amdgpu_crtc->pflip_status,
3379                                                 AMDGPU_FLIP_SUBMITTED);
3380                 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3381                 return 0;
3382         }
3383
3384         /* page flip completed. clean up */
3385         amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3386         amdgpu_crtc->pflip_works = NULL;
3387
3388         /* wake up userspace */
3389         if (works->event)
3390                 drm_send_vblank_event(adev->ddev, crtc_id, works->event);
3391
3392         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3393
3394         drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
3395         queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
3396
3397         return 0;
3398 }
3399
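/*
 * dce_v8_0_hpd_irq - hotplug detect interrupt handler
 *
 * Acks the asserted HPD interrupt and schedules the deferred hotplug
 * work.
 */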
3400 static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
3401                             struct amdgpu_irq_src *source,
3402                             struct amdgpu_iv_entry *entry)
3403 {
3404         uint32_t disp_int, mask, int_control, tmp;
3405         unsigned hpd;
3406
3407         if (entry->src_data >= adev->mode_info.num_hpd) {
3408                 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
3409                 return 0;
3410         }
3411
3412         hpd = entry->src_data;
3413         disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3414         mask = interrupt_status_offsets[hpd].hpd;
3415         int_control = hpd_int_control_offsets[hpd];
3416
3417         if (disp_int & mask) {
3418                 tmp = RREG32(int_control);
3419                 tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
3420                 WREG32(int_control, tmp);
3421                 schedule_work(&adev->hotplug_work);
3422                 DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3423         }
3424
3425         return 0;
3427 }
3428
3429 static int dce_v8_0_set_clockgating_state(void *handle,
3430                                           enum amd_clockgating_state state)
3431 {
3432         return 0;
3433 }
3434
3435 static int dce_v8_0_set_powergating_state(void *handle,
3436                                           enum amd_powergating_state state)
3437 {
3438         return 0;
3439 }
3440
3441 const struct amd_ip_funcs dce_v8_0_ip_funcs = {
3442         .early_init = dce_v8_0_early_init,
3443         .late_init = NULL,
3444         .sw_init = dce_v8_0_sw_init,
3445         .sw_fini = dce_v8_0_sw_fini,
3446         .hw_init = dce_v8_0_hw_init,
3447         .hw_fini = dce_v8_0_hw_fini,
3448         .suspend = dce_v8_0_suspend,
3449         .resume = dce_v8_0_resume,
3450         .is_idle = dce_v8_0_is_idle,
3451         .wait_for_idle = dce_v8_0_wait_for_idle,
3452         .soft_reset = dce_v8_0_soft_reset,
3453         .print_status = dce_v8_0_print_status,
3454         .set_clockgating_state = dce_v8_0_set_clockgating_state,
3455         .set_powergating_state = dce_v8_0_set_powergating_state,
3456 };
3457
3458 static void
3459 dce_v8_0_encoder_mode_set(struct drm_encoder *encoder,
3460                           struct drm_display_mode *mode,
3461                           struct drm_display_mode *adjusted_mode)
3462 {
3463         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3464
3465         amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3466
3467         /* need to call this here rather than in prepare() since we need some crtc info */
3468         amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3469
3470         /* the scaler setup clears this on some chips */
3471         dce_v8_0_set_interleave(encoder->crtc, mode);
3472
3473         if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
3474                 dce_v8_0_afmt_enable(encoder, true);
3475                 dce_v8_0_afmt_setmode(encoder, adjusted_mode);
3476         }
3477 }
3478
3479 static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder)
3480 {
3481         struct amdgpu_device *adev = encoder->dev->dev_private;
3482         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3483         struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
3484
3485         if ((amdgpu_encoder->active_device &
3486              (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
3487             (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
3488              ENCODER_OBJECT_ID_NONE)) {
3489                 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
3490                 if (dig) {
3491                         dig->dig_encoder = dce_v8_0_pick_dig_encoder(encoder);
3492                         if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
3493                                 dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
3494                 }
3495         }
3496
3497         amdgpu_atombios_scratch_regs_lock(adev, true);
3498
3499         if (connector) {
3500                 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
3501
3502                 /* select the clock/data port if it uses a router */
3503                 if (amdgpu_connector->router.cd_valid)
3504                         amdgpu_i2c_router_select_cd_port(amdgpu_connector);
3505
3506                 /* turn eDP panel on for mode set */
3507                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3508                         amdgpu_atombios_encoder_set_edp_panel_power(connector,
3509                                                              ATOM_TRANSMITTER_ACTION_POWER_ON);
3510         }
3511
3512         /* this is needed for the pll/ss setup to work correctly in some cases */
3513         amdgpu_atombios_encoder_set_crtc_source(encoder);
3514         /* set up the FMT blocks */
3515         dce_v8_0_program_fmt(encoder);
3516 }
3517
3518 static void dce_v8_0_encoder_commit(struct drm_encoder *encoder)
3519 {
3520         struct drm_device *dev = encoder->dev;
3521         struct amdgpu_device *adev = dev->dev_private;
3522
3523         /* need to call this here as we need the crtc set up */
3524         amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3525         amdgpu_atombios_scratch_regs_lock(adev, false);
3526 }
3527
3528 static void dce_v8_0_encoder_disable(struct drm_encoder *encoder)
3529 {
3530         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3531         struct amdgpu_encoder_atom_dig *dig;
3532
3533         amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3534
3535         if (amdgpu_atombios_encoder_is_digital(encoder)) {
3536                 if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
3537                         dce_v8_0_afmt_enable(encoder, false);
3538                 dig = amdgpu_encoder->enc_priv;
3539                 dig->dig_encoder = -1;
3540         }
3541         amdgpu_encoder->active_device = 0;
3542 }
3543
3544 /* these are handled by the primary encoders */
3545 static void dce_v8_0_ext_prepare(struct drm_encoder *encoder)
3546 {
3547
3548 }
3549
3550 static void dce_v8_0_ext_commit(struct drm_encoder *encoder)
3551 {
3552
3553 }
3554
3555 static void
3556 dce_v8_0_ext_mode_set(struct drm_encoder *encoder,
3557                       struct drm_display_mode *mode,
3558                       struct drm_display_mode *adjusted_mode)
3559 {
3560
3561 }
3562
3563 static void dce_v8_0_ext_disable(struct drm_encoder *encoder)
3564 {
3565
3566 }
3567
3568 static void
3569 dce_v8_0_ext_dpms(struct drm_encoder *encoder, int mode)
3570 {
3571
3572 }
3573
3574 static bool dce_v8_0_ext_mode_fixup(struct drm_encoder *encoder,
3575                                     const struct drm_display_mode *mode,
3576                                     struct drm_display_mode *adjusted_mode)
3577 {
3578         return true;
3579 }
3580
3581 static const struct drm_encoder_helper_funcs dce_v8_0_ext_helper_funcs = {
3582         .dpms = dce_v8_0_ext_dpms,
3583         .mode_fixup = dce_v8_0_ext_mode_fixup,
3584         .prepare = dce_v8_0_ext_prepare,
3585         .mode_set = dce_v8_0_ext_mode_set,
3586         .commit = dce_v8_0_ext_commit,
3587         .disable = dce_v8_0_ext_disable,
3588         /* no detect for TMDS/LVDS yet */
3589 };
3590
3591 static const struct drm_encoder_helper_funcs dce_v8_0_dig_helper_funcs = {
3592         .dpms = amdgpu_atombios_encoder_dpms,
3593         .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3594         .prepare = dce_v8_0_encoder_prepare,
3595         .mode_set = dce_v8_0_encoder_mode_set,
3596         .commit = dce_v8_0_encoder_commit,
3597         .disable = dce_v8_0_encoder_disable,
3598         .detect = amdgpu_atombios_encoder_dig_detect,
3599 };
3600
3601 static const struct drm_encoder_helper_funcs dce_v8_0_dac_helper_funcs = {
3602         .dpms = amdgpu_atombios_encoder_dpms,
3603         .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3604         .prepare = dce_v8_0_encoder_prepare,
3605         .mode_set = dce_v8_0_encoder_mode_set,
3606         .commit = dce_v8_0_encoder_commit,
3607         .detect = amdgpu_atombios_encoder_dac_detect,
3608 };
3609
3610 static void dce_v8_0_encoder_destroy(struct drm_encoder *encoder)
3611 {
3612         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3613         if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3614                 amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3615         kfree(amdgpu_encoder->enc_priv);
3616         drm_encoder_cleanup(encoder);
3617         kfree(amdgpu_encoder);
3618 }
3619
3620 static const struct drm_encoder_funcs dce_v8_0_encoder_funcs = {
3621         .destroy = dce_v8_0_encoder_destroy,
3622 };
3623
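/*
 * dce_v8_0_encoder_add - register an encoder from the vbios tables
 *
 * An encoder that was already added just has its supported-device mask
 * extended; otherwise a new amdgpu_encoder is allocated and hooked up
 * to the DAC, DIG or external helper funcs based on its object id.
 */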
3624 static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
3625                                  uint32_t encoder_enum,
3626                                  uint32_t supported_device,
3627                                  u16 caps)
3628 {
3629         struct drm_device *dev = adev->ddev;
3630         struct drm_encoder *encoder;
3631         struct amdgpu_encoder *amdgpu_encoder;
3632
3633         /* see if we already added it */
3634         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3635                 amdgpu_encoder = to_amdgpu_encoder(encoder);
3636                 if (amdgpu_encoder->encoder_enum == encoder_enum) {
3637                         amdgpu_encoder->devices |= supported_device;
3638                         return;
3639                 }
3641         }
3642
3643         /* add a new one */
3644         amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3645         if (!amdgpu_encoder)
3646                 return;
3647
3648         encoder = &amdgpu_encoder->base;
3649         switch (adev->mode_info.num_crtc) {
3650         case 1:
3651                 encoder->possible_crtcs = 0x1;
3652                 break;
3653         case 2:
3654         default:
3655                 encoder->possible_crtcs = 0x3;
3656                 break;
3657         case 4:
3658                 encoder->possible_crtcs = 0xf;
3659                 break;
3660         case 6:
3661                 encoder->possible_crtcs = 0x3f;
3662                 break;
3663         }
3664
3665         amdgpu_encoder->enc_priv = NULL;
3666
3667         amdgpu_encoder->encoder_enum = encoder_enum;
3668         amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3669         amdgpu_encoder->devices = supported_device;
3670         amdgpu_encoder->rmx_type = RMX_OFF;
3671         amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3672         amdgpu_encoder->is_ext_encoder = false;
3673         amdgpu_encoder->caps = caps;
3674
3675         switch (amdgpu_encoder->encoder_id) {
3676         case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3677         case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3678                 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3679                                  DRM_MODE_ENCODER_DAC);
3680                 drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs);
3681                 break;
3682         case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3683         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3684         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3685         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3686         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3687                 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3688                         amdgpu_encoder->rmx_type = RMX_FULL;
3689                         drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3690                                          DRM_MODE_ENCODER_LVDS);
3691                         amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3692                 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3693                         drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3694                                          DRM_MODE_ENCODER_DAC);
3695                         amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3696                 } else {
3697                         drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3698                                          DRM_MODE_ENCODER_TMDS);
3699                         amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3700                 }
3701                 drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs);
3702                 break;
3703         case ENCODER_OBJECT_ID_SI170B:
3704         case ENCODER_OBJECT_ID_CH7303:
3705         case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3706         case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3707         case ENCODER_OBJECT_ID_TITFP513:
3708         case ENCODER_OBJECT_ID_VT1623:
3709         case ENCODER_OBJECT_ID_HDMI_SI1930:
3710         case ENCODER_OBJECT_ID_TRAVIS:
3711         case ENCODER_OBJECT_ID_NUTMEG:
3712                 /* these are handled by the primary encoders */
3713                 amdgpu_encoder->is_ext_encoder = true;
3714                 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3715                         drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3716                                          DRM_MODE_ENCODER_LVDS);
3717                 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3718                         drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3719                                          DRM_MODE_ENCODER_DAC);
3720                 else
3721                         drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3722                                          DRM_MODE_ENCODER_TMDS);
3723                 drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs);
3724                 break;
3725         }
3726 }
3727
3728 static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
3729         .set_vga_render_state = &dce_v8_0_set_vga_render_state,
3730         .bandwidth_update = &dce_v8_0_bandwidth_update,
3731         .vblank_get_counter = &dce_v8_0_vblank_get_counter,
3732         .vblank_wait = &dce_v8_0_vblank_wait,
3733         .is_display_hung = &dce_v8_0_is_display_hung,
3734         .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3735         .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3736         .hpd_sense = &dce_v8_0_hpd_sense,
3737         .hpd_set_polarity = &dce_v8_0_hpd_set_polarity,
3738         .hpd_get_gpio_reg = &dce_v8_0_hpd_get_gpio_reg,
3739         .page_flip = &dce_v8_0_page_flip,
3740         .page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos,
3741         .add_encoder = &dce_v8_0_encoder_add,
3742         .add_connector = &amdgpu_connector_add,
3743         .stop_mc_access = &dce_v8_0_stop_mc_access,
3744         .resume_mc_access = &dce_v8_0_resume_mc_access,
3745 };
3746
3747 static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
3748 {
3749         if (adev->mode_info.funcs == NULL)
3750                 adev->mode_info.funcs = &dce_v8_0_display_funcs;
3751 }
3752
3753 static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
3754         .set = dce_v8_0_set_crtc_interrupt_state,
3755         .process = dce_v8_0_crtc_irq,
3756 };
3757
3758 static const struct amdgpu_irq_src_funcs dce_v8_0_pageflip_irq_funcs = {
3759         .set = dce_v8_0_set_pageflip_interrupt_state,
3760         .process = dce_v8_0_pageflip_irq,
3761 };
3762
3763 static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = {
3764         .set = dce_v8_0_set_hpd_interrupt_state,
3765         .process = dce_v8_0_hpd_irq,
3766 };
3767
3768 static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
3769 {
3770         adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
3771         adev->crtc_irq.funcs = &dce_v8_0_crtc_irq_funcs;
3772
3773         adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
3774         adev->pageflip_irq.funcs = &dce_v8_0_pageflip_irq_funcs;
3775
3776         adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
3777         adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
3778 }