GNU Linux-libre 4.4.288-gnu1 — drivers/gpu/drm/radeon/radeon_uvd.c
/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "radeon.h"
#include "r600d.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT_MS     1000

/* Firmware Names */
#define FIRMWARE_R600           "/*(DEBLOBBED)*/"
#define FIRMWARE_RS780          "/*(DEBLOBBED)*/"
#define FIRMWARE_RV770          "/*(DEBLOBBED)*/"
#define FIRMWARE_RV710          "/*(DEBLOBBED)*/"
#define FIRMWARE_CYPRESS        "/*(DEBLOBBED)*/"
#define FIRMWARE_SUMO           "/*(DEBLOBBED)*/"
#define FIRMWARE_TAHITI         "/*(DEBLOBBED)*/"
#define FIRMWARE_BONAIRE        "/*(DEBLOBBED)*/"

/*(DEBLOBBED)*/

static void radeon_uvd_idle_work_handler(struct work_struct *work);

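/**
 * radeon_uvd_init - UVD driver init
 *
 * @rdev: radeon_device pointer
 *
 * Request the UVD firmware (via reject_firmware() in this deblobbed tree),
 * allocate, pin and map the VCPU buffer object in VRAM and clear the
 * handle bookkeeping. Returns 0 on success, negative error code on failure.
 */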
int radeon_uvd_init(struct radeon_device *rdev)
{
        unsigned long bo_size;
        const char *fw_name;
        int i, r;

        INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);

        switch (rdev->family) {
        case CHIP_RV610:
        case CHIP_RV630:
        case CHIP_RV670:
        case CHIP_RV620:
        case CHIP_RV635:
                fw_name = FIRMWARE_R600;
                break;

        case CHIP_RS780:
        case CHIP_RS880:
                fw_name = FIRMWARE_RS780;
                break;

        case CHIP_RV770:
                fw_name = FIRMWARE_RV770;
                break;

        case CHIP_RV710:
        case CHIP_RV730:
        case CHIP_RV740:
                fw_name = FIRMWARE_RV710;
                break;

        case CHIP_CYPRESS:
        case CHIP_HEMLOCK:
        case CHIP_JUNIPER:
        case CHIP_REDWOOD:
        case CHIP_CEDAR:
                fw_name = FIRMWARE_CYPRESS;
                break;

        case CHIP_SUMO:
        case CHIP_SUMO2:
        case CHIP_PALM:
        case CHIP_CAYMAN:
        case CHIP_BARTS:
        case CHIP_TURKS:
        case CHIP_CAICOS:
                fw_name = FIRMWARE_SUMO;
                break;

        case CHIP_TAHITI:
        case CHIP_VERDE:
        case CHIP_PITCAIRN:
        case CHIP_ARUBA:
        case CHIP_OLAND:
                fw_name = FIRMWARE_TAHITI;
                break;

        case CHIP_BONAIRE:
        case CHIP_KABINI:
        case CHIP_KAVERI:
        case CHIP_HAWAII:
        case CHIP_MULLINS:
                fw_name = FIRMWARE_BONAIRE;
                break;

        default:
                return -EINVAL;
        }

        r = reject_firmware(&rdev->uvd_fw, fw_name, rdev->dev);
        if (r) {
                dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
                        fw_name);
                return r;
        }

        bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
                  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE +
                  RADEON_GPU_PAGE_SIZE;
        r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
                             RADEON_GEM_DOMAIN_VRAM, 0, NULL,
                             NULL, &rdev->uvd.vcpu_bo);
        if (r) {
                dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
                return r;
        }

        r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
        if (r) {
                radeon_bo_unref(&rdev->uvd.vcpu_bo);
                dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
                return r;
        }

        r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
                          &rdev->uvd.gpu_addr);
        if (r) {
                radeon_bo_unreserve(rdev->uvd.vcpu_bo);
                radeon_bo_unref(&rdev->uvd.vcpu_bo);
                dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
                return r;
        }

        r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
        if (r) {
                dev_err(rdev->dev, "(%d) UVD map failed\n", r);
                return r;
        }

        radeon_bo_unreserve(rdev->uvd.vcpu_bo);

        for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
                atomic_set(&rdev->uvd.handles[i], 0);
                rdev->uvd.filp[i] = NULL;
                rdev->uvd.img_size[i] = 0;
        }

        return 0;
}

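/**
 * radeon_uvd_fini - UVD driver tear down
 *
 * @rdev: radeon_device pointer
 *
 * Unmap, unpin and free the UVD VCPU buffer object, tear down the UVD ring
 * and release the firmware.
 */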
void radeon_uvd_fini(struct radeon_device *rdev)
{
        int r;

        if (rdev->uvd.vcpu_bo == NULL)
                return;

        r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
        if (!r) {
                radeon_bo_kunmap(rdev->uvd.vcpu_bo);
                radeon_bo_unpin(rdev->uvd.vcpu_bo);
                radeon_bo_unreserve(rdev->uvd.vcpu_bo);
        }

        radeon_bo_unref(&rdev->uvd.vcpu_bo);

        radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]);

        release_firmware(rdev->uvd_fw);
}

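/**
 * radeon_uvd_suspend - destroy all open UVD streams on suspend
 *
 * @rdev: radeon_device pointer
 *
 * Send a destroy message for every handle that is still open and wait for
 * the resulting fence, so the VCPU is idle before the device is suspended.
 */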
int radeon_uvd_suspend(struct radeon_device *rdev)
{
        int i, r;

        if (rdev->uvd.vcpu_bo == NULL)
                return 0;

        for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
                uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
                if (handle != 0) {
                        struct radeon_fence *fence;

                        radeon_uvd_note_usage(rdev);

                        r = radeon_uvd_get_destroy_msg(rdev,
                                R600_RING_TYPE_UVD_INDEX, handle, &fence);
                        if (r) {
                                DRM_ERROR("Error destroying UVD (%d)!\n", r);
                                continue;
                        }

                        radeon_fence_wait(fence, false);
                        radeon_fence_unref(&fence);

                        rdev->uvd.filp[i] = NULL;
                        atomic_set(&rdev->uvd.handles[i], 0);
                }
        }

        return 0;
}

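/**
 * radeon_uvd_resume - re-upload the UVD firmware on resume
 *
 * @rdev: radeon_device pointer
 *
 * Copy the firmware image back into the VCPU buffer object and clear the
 * remaining stack/heap area.
 */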
int radeon_uvd_resume(struct radeon_device *rdev)
{
        unsigned size;
        void *ptr;

        if (rdev->uvd.vcpu_bo == NULL)
                return -EINVAL;

        memcpy_toio((void __iomem *)rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);

        size = radeon_bo_size(rdev->uvd.vcpu_bo);
        size -= rdev->uvd_fw->size;

        ptr = rdev->uvd.cpu_addr;
        ptr += rdev->uvd_fw->size;

        memset_io((void __iomem *)ptr, 0, size);

        return 0;
}

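/**
 * radeon_uvd_force_into_uvd_segment - restrict a BO to the UVD segments
 *
 * @rbo: buffer object to patch
 * @allowed_domains: domains the caller allows for this BO
 *
 * Limit all placements to the first 256MB, since UVD can only address
 * buffers inside a 256MB segment. Unless the BO is forced into VRAM, a
 * second 256MB segment is added as an alternative placement.
 */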
void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
                                       uint32_t allowed_domains)
{
        int i;

        for (i = 0; i < rbo->placement.num_placement; ++i) {
                rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
                rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
        }

        /* If it must be in VRAM it must be in the first segment as well */
        if (allowed_domains == RADEON_GEM_DOMAIN_VRAM)
                return;

        /* abort if we already have more than one placement */
        if (rbo->placement.num_placement > 1)
                return;

        /* add another 256MB segment */
        rbo->placements[1] = rbo->placements[0];
        rbo->placements[1].fpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
        rbo->placements[1].lpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
        rbo->placement.num_placement++;
        rbo->placement.num_busy_placement++;
}

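/**
 * radeon_uvd_free_handles - destroy all streams owned by a file
 *
 * @rdev: radeon_device pointer
 * @filp: DRM file the handles belong to
 *
 * Send destroy messages for all open handles belonging to @filp and clear
 * the bookkeeping entries.
 */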
void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
{
        int i, r;
        for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
                uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
                if (handle != 0 && rdev->uvd.filp[i] == filp) {
                        struct radeon_fence *fence;

                        radeon_uvd_note_usage(rdev);

                        r = radeon_uvd_get_destroy_msg(rdev,
                                R600_RING_TYPE_UVD_INDEX, handle, &fence);
                        if (r) {
                                DRM_ERROR("Error destroying UVD (%d)!\n", r);
                                continue;
                        }

                        radeon_fence_wait(fence, false);
                        radeon_fence_unref(&fence);

                        rdev->uvd.filp[i] = NULL;
                        atomic_set(&rdev->uvd.handles[i], 0);
                }
        }
}

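/**
 * radeon_uvd_cs_msg_decode - check a UVD decode message
 *
 * @msg: pointer to the mapped message
 * @buf_sizes: minimum buffer sizes, filled in from the message
 *
 * Calculate the minimum DPB and image sizes for the codec described in the
 * message and check them against the values the application supplied.
 */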
static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
{
        unsigned stream_type = msg[4];
        unsigned width = msg[6];
        unsigned height = msg[7];
        unsigned dpb_size = msg[9];
        unsigned pitch = msg[28];

        unsigned width_in_mb = width / 16;
        unsigned height_in_mb = ALIGN(height / 16, 2);

        unsigned image_size, tmp, min_dpb_size;

        image_size = width * height;
        image_size += image_size / 2;
        image_size = ALIGN(image_size, 1024);

        switch (stream_type) {
        case 0: /* H264 */

                /* reference picture buffer */
                min_dpb_size = image_size * 17;

                /* macroblock context buffer */
                min_dpb_size += width_in_mb * height_in_mb * 17 * 192;

                /* IT surface buffer */
                min_dpb_size += width_in_mb * height_in_mb * 32;
                break;

        case 1: /* VC1 */

                /* reference picture buffer */
                min_dpb_size = image_size * 3;

                /* CONTEXT_BUFFER */
                min_dpb_size += width_in_mb * height_in_mb * 128;

                /* IT surface buffer */
                min_dpb_size += width_in_mb * 64;

                /* DB surface buffer */
                min_dpb_size += width_in_mb * 128;

                /* BP */
                tmp = max(width_in_mb, height_in_mb);
                min_dpb_size += ALIGN(tmp * 7 * 16, 64);
                break;

        case 3: /* MPEG2 */

                /* reference picture buffer */
                min_dpb_size = image_size * 3;
                break;

        case 4: /* MPEG4 */

                /* reference picture buffer */
                min_dpb_size = image_size * 3;

                /* CM */
                min_dpb_size += width_in_mb * height_in_mb * 64;

                /* IT surface buffer */
                min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
                break;

        default:
                DRM_ERROR("UVD codec not handled %d!\n", stream_type);
                return -EINVAL;
        }

        if (width > pitch) {
                DRM_ERROR("Invalid UVD decoding target pitch!\n");
                return -EINVAL;
        }

        if (dpb_size < min_dpb_size) {
                DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
                          dpb_size, min_dpb_size);
                return -EINVAL;
        }

        buf_sizes[0x1] = dpb_size;
        buf_sizes[0x2] = image_size;
        return 0;
}

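/**
 * radeon_uvd_validate_codec - check if a stream type is supported
 *
 * @p: parser context
 * @stream_type: stream type from the UVD message
 *
 * H264 and VC1 are always supported; MPEG2 and MPEG4 only on UVD 3 and
 * later (CHIP_PALM and newer).
 */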
static int radeon_uvd_validate_codec(struct radeon_cs_parser *p,
                                     unsigned stream_type)
{
        switch (stream_type) {
        case 0: /* H264 */
        case 1: /* VC1 */
                /* always supported */
                return 0;

        case 3: /* MPEG2 */
        case 4: /* MPEG4 */
                /* only since UVD 3 */
                if (p->rdev->family >= CHIP_PALM)
                        return 0;

                /* fall through */
        default:
                DRM_ERROR("UVD codec not supported by hardware %d!\n",
                          stream_type);
                return -EINVAL;
        }
}

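/**
 * radeon_uvd_cs_msg - handle a UVD message during command stream parsing
 *
 * @p: parser context
 * @bo: buffer object containing the message
 * @offset: offset of the message inside the BO
 * @buf_sizes: minimum buffer sizes
 *
 * Wait for the BO to be idle, map it and dispatch on the message type:
 * allocate a handle for create messages, validate the handle and buffer
 * sizes for decode messages, and free the handle for destroy messages.
 */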
static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
                             unsigned offset, unsigned buf_sizes[])
{
        int32_t *msg, msg_type, handle;
        unsigned img_size = 0;
        struct fence *f;
        void *ptr;

        int i, r;

        if (offset & 0x3F) {
                DRM_ERROR("UVD messages must be 64 byte aligned!\n");
                return -EINVAL;
        }

        f = reservation_object_get_excl(bo->tbo.resv);
        if (f) {
                r = radeon_fence_wait((struct radeon_fence *)f, false);
                if (r) {
                        DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
                        return r;
                }
        }

        r = radeon_bo_kmap(bo, &ptr);
        if (r) {
                DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
                return r;
        }

        msg = ptr + offset;

        msg_type = msg[1];
        handle = msg[2];

        if (handle == 0) {
                DRM_ERROR("Invalid UVD handle!\n");
                return -EINVAL;
        }

        switch (msg_type) {
        case 0:
                /* it's a create msg, calc image size (width * height) */
                img_size = msg[7] * msg[8];

                r = radeon_uvd_validate_codec(p, msg[4]);
                radeon_bo_kunmap(bo);
                if (r)
                        return r;

                /* try to alloc a new handle */
                for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
                        if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
                                DRM_ERROR("Handle 0x%x already in use!\n", handle);
                                return -EINVAL;
                        }

                        if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
                                p->rdev->uvd.filp[i] = p->filp;
                                p->rdev->uvd.img_size[i] = img_size;
                                return 0;
                        }
                }

                DRM_ERROR("No more free UVD handles!\n");
                return -EINVAL;

        case 1:
                /* it's a decode msg, validate codec and calc buffer sizes */
                r = radeon_uvd_validate_codec(p, msg[4]);
                if (!r)
                        r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
                radeon_bo_kunmap(bo);
                if (r)
                        return r;

                /* validate the handle */
                for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
                        if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
                                if (p->rdev->uvd.filp[i] != p->filp) {
                                        DRM_ERROR("UVD handle collision detected!\n");
                                        return -EINVAL;
                                }
                                return 0;
                        }
                }

                DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
                return -ENOENT;

        case 2:
                /* it's a destroy msg, free the handle */
                for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
                        atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
                radeon_bo_kunmap(bo);
                return 0;

        default:

                DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
                return -EINVAL;
        }

        BUG();
        return -EINVAL;
}

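/**
 * radeon_uvd_cs_reloc - patch a relocation in a UVD command stream
 *
 * @p: parser context
 * @data0: IB index of the lower 32 bits of the address
 * @data1: IB index of the upper 32 bits of the address
 * @buf_sizes: minimum buffer sizes
 * @has_msg_cmd: set once a message command has been seen in this IB
 *
 * Resolve the relocation, write the GPU address into the IB and check that
 * the referenced buffer is large enough and stays within a 256MB segment.
 */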
static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
                               int data0, int data1,
                               unsigned buf_sizes[], bool *has_msg_cmd)
{
        struct radeon_cs_chunk *relocs_chunk;
        struct radeon_bo_list *reloc;
        unsigned idx, cmd, offset;
        uint64_t start, end;
        int r;

        relocs_chunk = p->chunk_relocs;
        offset = radeon_get_ib_value(p, data0);
        idx = radeon_get_ib_value(p, data1);
        if (idx >= relocs_chunk->length_dw) {
                DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
                          idx, relocs_chunk->length_dw);
                return -EINVAL;
        }

        reloc = &p->relocs[(idx / 4)];
        start = reloc->gpu_offset;
        end = start + radeon_bo_size(reloc->robj);
        start += offset;

        p->ib.ptr[data0] = start & 0xFFFFFFFF;
        p->ib.ptr[data1] = start >> 32;

        cmd = radeon_get_ib_value(p, p->idx) >> 1;

        if (cmd < 0x4) {
                if (end <= start) {
                        DRM_ERROR("invalid reloc offset %X!\n", offset);
                        return -EINVAL;
                }
                if ((end - start) < buf_sizes[cmd]) {
                        DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
                                  (unsigned)(end - start), buf_sizes[cmd]);
                        return -EINVAL;
                }

        } else if (cmd != 0x100) {
                DRM_ERROR("invalid UVD command %X!\n", cmd);
                return -EINVAL;
        }

        if ((start >> 28) != ((end - 1) >> 28)) {
                DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
                          start, end);
                return -EINVAL;
        }

        /* TODO: is this still necessary on NI+ ? */
        if ((cmd == 0 || cmd == 0x3) &&
            (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
                DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
                          start, end);
                return -EINVAL;
        }

        if (cmd == 0) {
                if (*has_msg_cmd) {
                        DRM_ERROR("More than one message in a UVD-IB!\n");
                        return -EINVAL;
                }
                *has_msg_cmd = true;
                r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
                if (r)
                        return r;
        } else if (!*has_msg_cmd) {
                DRM_ERROR("Message needed before other commands are sent!\n");
                return -EINVAL;
        }

        return 0;
}

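/**
 * radeon_uvd_cs_reg - parse a register write packet in a UVD IB
 *
 * @p: parser context
 * @pkt: type 0 packet to parse
 * @data0: IB index of the last UVD_GPCOM_VCPU_DATA0 value
 * @data1: IB index of the last UVD_GPCOM_VCPU_DATA1 value
 * @buf_sizes: minimum buffer sizes
 * @has_msg_cmd: set once a message command has been seen in this IB
 *
 * Only the UVD_GPCOM_VCPU_* and UVD_ENGINE_CNTL registers are allowed;
 * a write to UVD_GPCOM_VCPU_CMD triggers relocation handling.
 */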
static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
                             struct radeon_cs_packet *pkt,
                             int *data0, int *data1,
                             unsigned buf_sizes[],
                             bool *has_msg_cmd)
{
        int i, r;

        p->idx++;
        for (i = 0; i <= pkt->count; ++i) {
                switch (pkt->reg + i*4) {
                case UVD_GPCOM_VCPU_DATA0:
                        *data0 = p->idx;
                        break;
                case UVD_GPCOM_VCPU_DATA1:
                        *data1 = p->idx;
                        break;
                case UVD_GPCOM_VCPU_CMD:
                        r = radeon_uvd_cs_reloc(p, *data0, *data1,
                                                buf_sizes, has_msg_cmd);
                        if (r)
                                return r;
                        break;
                case UVD_ENGINE_CNTL:
                        break;
                default:
                        DRM_ERROR("Invalid reg 0x%X!\n",
                                  pkt->reg + i*4);
                        return -EINVAL;
                }
                p->idx++;
        }
        return 0;
}

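/**
 * radeon_uvd_cs_parse - validate a UVD command stream
 *
 * @p: parser context
 *
 * Walk all packets of the IB, validate the register writes and make sure
 * exactly one message command is present.
 */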
int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
{
        struct radeon_cs_packet pkt;
        int r, data0 = 0, data1 = 0;

        /* does the IB have a msg command */
        bool has_msg_cmd = false;

        /* minimum buffer sizes */
        unsigned buf_sizes[] = {
                [0x00000000]    =       2048,
                [0x00000001]    =       32 * 1024 * 1024,
                [0x00000002]    =       2048 * 1152 * 3,
                [0x00000003]    =       2048,
        };

        if (p->chunk_ib->length_dw % 16) {
                DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
                          p->chunk_ib->length_dw);
                return -EINVAL;
        }

        if (p->chunk_relocs == NULL) {
                DRM_ERROR("No relocation chunk !\n");
                return -EINVAL;
        }

        do {
                r = radeon_cs_packet_parse(p, &pkt, p->idx);
                if (r)
                        return r;
                switch (pkt.type) {
                case RADEON_PACKET_TYPE0:
                        r = radeon_uvd_cs_reg(p, &pkt, &data0, &data1,
                                              buf_sizes, &has_msg_cmd);
                        if (r)
                                return r;
                        break;
                case RADEON_PACKET_TYPE2:
                        p->idx += pkt.count + 2;
                        break;
                default:
                        DRM_ERROR("Unknown packet type %d !\n", pkt.type);
                        return -EINVAL;
                }
        } while (p->idx < p->chunk_ib->length_dw);

        if (!has_msg_cmd) {
                DRM_ERROR("UVD-IBs need a msg command!\n");
                return -EINVAL;
        }

        return 0;
}

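/**
 * radeon_uvd_send_msg - submit a UVD message to the ring
 *
 * @rdev: radeon_device pointer
 * @ring: ring index to use
 * @addr: GPU address of the message
 * @fence: optionally returns the fence of the submitted IB
 *
 * Build a small IB that points the VCPU at the message and schedule it.
 */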
static int radeon_uvd_send_msg(struct radeon_device *rdev,
                               int ring, uint64_t addr,
                               struct radeon_fence **fence)
{
        struct radeon_ib ib;
        int i, r;

        r = radeon_ib_get(rdev, ring, &ib, NULL, 64);
        if (r)
                return r;

        ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
        ib.ptr[1] = addr;
        ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0);
        ib.ptr[3] = addr >> 32;
        ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0);
        ib.ptr[5] = 0;
        for (i = 6; i < 16; ++i)
                ib.ptr[i] = PACKET2(0);
        ib.length_dw = 16;

        r = radeon_ib_schedule(rdev, &ib, NULL, false);

        if (fence)
                *fence = radeon_fence_ref(ib.fence);

        radeon_ib_free(rdev, &ib);
        return r;
}

/* multiple fence commands without any stream commands in between can
   crash the vcpu so just try to emit a dummy create/destroy msg to
   avoid this */
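/**
 * radeon_uvd_get_create_msg - generate a dummy UVD create message
 *
 * @rdev: radeon_device pointer
 * @ring: ring index to use
 * @handle: stream handle to create
 * @fence: optionally returns the fence for the message
 *
 * Write a minimal create message into the last page of the VCPU BO and
 * submit it.
 */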
int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
                              uint32_t handle, struct radeon_fence **fence)
{
        /* we use the last page of the vcpu bo for the UVD message */
        uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
                RADEON_GPU_PAGE_SIZE;

        uint32_t *msg = rdev->uvd.cpu_addr + offs;
        uint64_t addr = rdev->uvd.gpu_addr + offs;

        int r, i;

        r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
        if (r)
                return r;

        /* stitch together a UVD create msg */
        msg[0] = cpu_to_le32(0x00000de4);
        msg[1] = cpu_to_le32(0x00000000);
        msg[2] = cpu_to_le32(handle);
        msg[3] = cpu_to_le32(0x00000000);
        msg[4] = cpu_to_le32(0x00000000);
        msg[5] = cpu_to_le32(0x00000000);
        msg[6] = cpu_to_le32(0x00000000);
        msg[7] = cpu_to_le32(0x00000780);
        msg[8] = cpu_to_le32(0x00000440);
        msg[9] = cpu_to_le32(0x00000000);
        msg[10] = cpu_to_le32(0x01b37000);
        for (i = 11; i < 1024; ++i)
                msg[i] = cpu_to_le32(0x0);

        r = radeon_uvd_send_msg(rdev, ring, addr, fence);
        radeon_bo_unreserve(rdev->uvd.vcpu_bo);
        return r;
}

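/**
 * radeon_uvd_get_destroy_msg - generate a UVD destroy message
 *
 * @rdev: radeon_device pointer
 * @ring: ring index to use
 * @handle: stream handle to destroy
 * @fence: optionally returns the fence for the message
 *
 * Write a destroy message for @handle into the last page of the VCPU BO
 * and submit it.
 */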
int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
                               uint32_t handle, struct radeon_fence **fence)
{
        /* we use the last page of the vcpu bo for the UVD message */
        uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
                RADEON_GPU_PAGE_SIZE;

        uint32_t *msg = rdev->uvd.cpu_addr + offs;
        uint64_t addr = rdev->uvd.gpu_addr + offs;

        int r, i;

        r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
        if (r)
                return r;

        /* stitch together a UVD destroy msg */
        msg[0] = cpu_to_le32(0x00000de4);
        msg[1] = cpu_to_le32(0x00000002);
        msg[2] = cpu_to_le32(handle);
        msg[3] = cpu_to_le32(0x00000000);
        for (i = 4; i < 1024; ++i)
                msg[i] = cpu_to_le32(0x0);

        r = radeon_uvd_send_msg(rdev, ring, addr, fence);
        radeon_bo_unreserve(rdev->uvd.vcpu_bo);
        return r;
}

/**
 * radeon_uvd_count_handles - count number of open streams
 *
 * @rdev: radeon_device pointer
 * @sd: number of SD streams
 * @hd: number of HD streams
 *
 * Count the number of open SD/HD streams as a hint for power management
 */
static void radeon_uvd_count_handles(struct radeon_device *rdev,
                                     unsigned *sd, unsigned *hd)
{
        unsigned i;

        *sd = 0;
        *hd = 0;

        for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
                if (!atomic_read(&rdev->uvd.handles[i]))
                        continue;

                if (rdev->uvd.img_size[i] >= 720*576)
                        ++(*hd);
                else
                        ++(*sd);
        }
}

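/**
 * radeon_uvd_idle_work_handler - delayed work to power down UVD
 *
 * @work: the delayed work item
 *
 * If no UVD fences are pending any more, drop the UVD clocks (or let DPM
 * power the block down); otherwise re-arm the delayed work.
 */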
static void radeon_uvd_idle_work_handler(struct work_struct *work)
{
        struct radeon_device *rdev =
                container_of(work, struct radeon_device, uvd.idle_work.work);

        if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) {
                if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
                        radeon_uvd_count_handles(rdev, &rdev->pm.dpm.sd,
                                                 &rdev->pm.dpm.hd);
                        radeon_dpm_enable_uvd(rdev, false);
                } else {
                        radeon_set_uvd_clocks(rdev, 0, 0);
                }
        } else {
                schedule_delayed_work(&rdev->uvd.idle_work,
                                      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
        }
}

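/**
 * radeon_uvd_note_usage - mark the UVD block as busy
 *
 * @rdev: radeon_device pointer
 *
 * Called before UVD is used; cancels the idle work, re-arms it and raises
 * the UVD clocks (or notifies DPM) when the block was previously idle.
 */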
void radeon_uvd_note_usage(struct radeon_device *rdev)
{
        bool streams_changed = false;
        bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
        set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
                                            msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));

        if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
                unsigned hd = 0, sd = 0;
                radeon_uvd_count_handles(rdev, &sd, &hd);
                if ((rdev->pm.dpm.sd != sd) ||
                    (rdev->pm.dpm.hd != hd)) {
                        rdev->pm.dpm.sd = sd;
                        rdev->pm.dpm.hd = hd;
                        /* disable this for now */
                        /*streams_changed = true;*/
                }
        }

        if (set_clocks || streams_changed) {
                if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
                        radeon_dpm_enable_uvd(rdev, true);
                } else {
                        radeon_set_uvd_clocks(rdev, 53300, 40000);
                }
        }
}

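/**
 * radeon_uvd_calc_upll_post_div - pick a post divider for one clock
 *
 * @vco_freq: VCO frequency to divide down
 * @target_freq: wanted output frequency
 * @pd_min: post divider minimum
 * @pd_even: post divider must be even above this value
 *
 * Choose the smallest post divider that keeps the resulting frequency at
 * or below @target_freq. Purely illustrative example with made-up numbers:
 * for vco_freq=122400, target_freq=40000, pd_min=2 and pd_even=8 the
 * initial divider 3 still gives 40800 > 40000, so 4 is returned.
 */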
static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq,
                                              unsigned target_freq,
                                              unsigned pd_min,
                                              unsigned pd_even)
{
        unsigned post_div = vco_freq / target_freq;

        /* adjust to post divider minimum value */
        if (post_div < pd_min)
                post_div = pd_min;

        /* we always need a frequency less than or equal to the target */
        if ((vco_freq / post_div) > target_freq)
                post_div += 1;

        /* post dividers above a certain value must be even */
        if (post_div > pd_even && post_div % 2)
                post_div += 1;

        return post_div;
}

/**
 * radeon_uvd_calc_upll_dividers - calc UPLL clock dividers
 *
 * @rdev: radeon_device pointer
 * @vclk: wanted VCLK
 * @dclk: wanted DCLK
 * @vco_min: minimum VCO frequency
 * @vco_max: maximum VCO frequency
 * @fb_factor: factor to multiply vco freq with
 * @fb_mask: limit and bitmask for feedback divider
 * @pd_min: post divider minimum
 * @pd_max: post divider maximum
 * @pd_even: post divider must be even above this value
 * @optimal_fb_div: resulting feedback divider
 * @optimal_vclk_div: resulting vclk post divider
 * @optimal_dclk_div: resulting dclk post divider
 *
 * Calculate dividers for the UVD UPLL (R6xx-SI, except APUs).
 * Returns zero on success, -EINVAL on error.
 */
int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
                                  unsigned vclk, unsigned dclk,
                                  unsigned vco_min, unsigned vco_max,
                                  unsigned fb_factor, unsigned fb_mask,
                                  unsigned pd_min, unsigned pd_max,
                                  unsigned pd_even,
                                  unsigned *optimal_fb_div,
                                  unsigned *optimal_vclk_div,
                                  unsigned *optimal_dclk_div)
{
        unsigned vco_freq, ref_freq = rdev->clock.spll.reference_freq;

        /* start off with something large */
        unsigned optimal_score = ~0;

        /* loop through vco from low to high */
        vco_min = max(max(vco_min, vclk), dclk);
        for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) {

                uint64_t fb_div = (uint64_t)vco_freq * fb_factor;
                unsigned vclk_div, dclk_div, score;

                do_div(fb_div, ref_freq);

                /* fb div out of range? */
                if (fb_div > fb_mask)
                        break; /* it can only get worse */

                fb_div &= fb_mask;

                /* calc vclk divider with current vco freq */
                vclk_div = radeon_uvd_calc_upll_post_div(vco_freq, vclk,
                                                         pd_min, pd_even);
                if (vclk_div > pd_max)
                        break; /* vco is too big, it has to stop */

                /* calc dclk divider with current vco freq */
                dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
                                                         pd_min, pd_even);
                if (dclk_div > pd_max)
                        break; /* vco is too big, it has to stop */

                /* calc score with current vco freq */
                score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div);

                /* determine if this vco setting is better than current optimal settings */
                if (score < optimal_score) {
                        *optimal_fb_div = fb_div;
                        *optimal_vclk_div = vclk_div;
                        *optimal_dclk_div = dclk_div;
                        optimal_score = score;
                        if (optimal_score == 0)
                                break; /* it can't get better than this */
                }
        }

        /* did we find a valid setup? */
        if (optimal_score == ~0)
                return -EINVAL;

        return 0;
}

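/**
 * radeon_uvd_send_upll_ctlreq - issue a UPLL control request
 *
 * @rdev: radeon_device pointer
 * @cg_upll_func_cntl: CG_UPLL_FUNC_CNTL register offset
 *
 * Pulse UPLL_CTLREQ and poll for CTLACK/CTLACK2, giving the PLL up to
 * roughly a second to acknowledge the new settings. Returns -ETIMEDOUT if
 * the acknowledge never arrives.
 */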
int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
                                unsigned cg_upll_func_cntl)
{
        unsigned i;

        /* make sure UPLL_CTLREQ is deasserted */
        WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);

        mdelay(10);

        /* assert UPLL_CTLREQ */
        WREG32_P(cg_upll_func_cntl, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);

        /* wait for CTLACK and CTLACK2 to get asserted */
        for (i = 0; i < 100; ++i) {
                uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
                if ((RREG32(cg_upll_func_cntl) & mask) == mask)
                        break;
                mdelay(10);
        }

        /* deassert UPLL_CTLREQ */
        WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);

        if (i == 100) {
                DRM_ERROR("Timeout setting UVD clocks!\n");
                return -ETIMEDOUT;
        }

        return 0;
}