/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "cikd.h"
#include "cik.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
	SDMA0_REGISTER_OFFSET,
	SDMA1_REGISTER_OFFSET
};
static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
static int cik_sdma_soft_reset(void *handle);

/*(DEBLOBBED)*/

u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines.  These engines are used for compute
 * and gfx.  There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP.  sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things.  It also has support for tiling/detiling of
 * buffers.
 */
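
/*
 * Editor's note: per the SDMA packet definitions in cikd.h, each sDMA
 * packet begins with a single header dword assembled by SDMA_PACKET():
 * roughly bits 7:0 hold the opcode, bits 15:8 the sub-opcode and bits
 * 31:16 opcode-specific "extra" bits.  The linear write used throughout
 * this file, for example, is a header of
 * SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)
 * followed by the destination address (lo/hi), a dword count and the
 * payload dwords.
 */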
/**
 * cik_sdma_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int cik_sdma_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err = 0, i;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	case CHIP_KAVERI:
		chip_name = "kaveri";
		break;
	case CHIP_KABINI:
		chip_name = "kabini";
		break;
	case CHIP_MULLINS:
		chip_name = "mullins";
		break;
	default:
		BUG();
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
		err = reject_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
	}
out:
	if (err) {
		printk(KERN_ERR
		       "cik_sdma: Failed to load firmware \"%s\"\n",
		       fw_name);
		for (i = 0; i < adev->sdma.num_instances; i++) {
			release_firmware(adev->sdma.instance[i].fw);
			adev->sdma.instance[i].fw = NULL;
		}
	}
	return err;
}
/**
 * cik_sdma_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (CIK+).
 */
static uint32_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
{
	u32 rptr;

	rptr = ring->adev->wb.wb[ring->rptr_offs];

	return (rptr & 0x3fffc) >> 2;
}
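
/*
 * Editor's note: the rptr/wptr accessors here and below translate
 * between the hardware view (a dword-aligned byte offset into the ring)
 * and the driver view (a dword index).  The 0x3fffc mask keeps the
 * dword-aligned byte offset, and the >> 2 / << 2 shifts convert between
 * bytes and dwords.
 */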
/**
 * cik_sdma_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (CIK+).
 */
static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

	return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
}
/**
 * cik_sdma_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (CIK+).
 */
static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
}
static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
	int i;

	for (i = 0; i < count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			amdgpu_ring_write(ring, ring->nop |
					  SDMA_NOP_COUNT(count - 1));
		else
			amdgpu_ring_write(ring, ring->nop);
}
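
/*
 * Editor's note: on firmware that supports it (feature_version >= 20,
 * see cik_sdma_load_microcode()), a single NOP packet carries the
 * number of padding dwords that follow in SDMA_NOP_COUNT(), so a burst
 * of NOPs costs the engine one packet fetch instead of 'count' fetches.
 */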
/**
 * cik_sdma_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (CIK).
 */
static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib)
{
	u32 extra_bits = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
	u32 next_rptr = ring->wptr + 5;

	while ((next_rptr & 7) != 4)
		next_rptr++;
	next_rptr += 4;

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
	amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
	amdgpu_ring_write(ring, 1); /* number of DWs to follow */
	amdgpu_ring_write(ring, next_rptr);

	/* IB packet must end on a 8 DW boundary */
	cik_sdma_ring_insert_nop(ring, (12 - (ring->wptr & 7)) % 8);

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
	amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
	amdgpu_ring_write(ring, ib->length_dw);
}
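
/*
 * Editor's note on the alignment math above: the INDIRECT_BUFFER packet
 * is 4 dwords and, per the comment in the code, must end on an 8-dword
 * boundary, so NOPs are inserted until the write pointer sits at
 * (8n + 4) dwords; the 4-dword IB packet then ends exactly on the
 * boundary.  (12 - (wptr & 7)) % 8 is the NOP count that achieves this.
 */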
/**
 * cik_sdma_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
	u32 ref_and_mask;

	if (ring == &ring->adev->sdma.instance[0].ring)
		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
	else
		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
	amdgpu_ring_write(ring, ref_and_mask); /* reference */
	amdgpu_ring_write(ring, ref_and_mask); /* mask */
	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}
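
/*
 * Editor's note: POLL_REG_MEM with EXTRA_OP(1), as used above, is the
 * write-then-poll form: the engine writes to GPU_HDP_FLUSH_REQ and then
 * polls GPU_HDP_FLUSH_DONE until (value & mask) == reference, i.e.
 * until this engine's HDP flush is acknowledged, retrying at the
 * programmed interval.
 */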
/**
 * cik_sdma_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: GPU address the fence value is written to
 * @seq: sequence number to write
 * @flags: fence flags (e.g. AMDGPU_FENCE_FLAG_64BIT)
 *
 * Add a DMA fence packet to the ring to write the fence seq number
 * and a DMA trap packet to generate an interrupt if needed (CIK).
 */
static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* generate an interrupt */
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
}
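
/*
 * Editor's note: with AMDGPU_FENCE_FLAG_64BIT the 64-bit sequence
 * number is emitted as two 32-bit FENCE packets (low dword at addr,
 * high dword at addr + 4).  The trailing TRAP packet raises the SDMA
 * trap interrupt that cik_sdma_process_trap_irq() services.
 */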
/**
 * cik_sdma_ring_emit_semaphore - emit a semaphore on the dma ring
 *
 * @ring: amdgpu_ring structure holding ring information
 * @semaphore: amdgpu semaphore object
 * @emit_wait: wait or signal semaphore
 *
 * Add a DMA semaphore packet to the ring to wait on or signal
 * other rings (CIK).
 */
static bool cik_sdma_ring_emit_semaphore(struct amdgpu_ring *ring,
					 struct amdgpu_semaphore *semaphore,
					 bool emit_wait)
{
	u64 addr = semaphore->gpu_addr;
	u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
	amdgpu_ring_write(ring, addr & 0xfffffff8);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);

	return true;
}
/**
 * cik_sdma_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (CIK).
 */
static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
	u32 rb_cntl;
	int i;

	if ((adev->mman.buffer_funcs_ring == sdma0) ||
	    (adev->mman.buffer_funcs_ring == sdma1))
		amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
	}
	sdma0->ready = false;
	sdma1->ready = false;
}
/**
 * cik_sdma_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (CIK).
 */
static void cik_sdma_rlc_stop(struct amdgpu_device *adev)
{
	/* XXX todo */
}
/**
 * cik_sdma_enable - halt or unhalt the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (CIK).
 */
static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
{
	u32 me_cntl;
	int i;

	if (!enable) {
		cik_sdma_gfx_stop(adev);
		cik_sdma_rlc_stop(adev);
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		me_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
		if (enable)
			me_cntl &= ~SDMA0_F32_CNTL__HALT_MASK;
		else
			me_cntl |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], me_cntl);
	}
}
/**
 * cik_sdma_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 wb_offset;
	int i, j, r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		wb_offset = (ring->rptr_offs * 4);

		mutex_lock(&adev->srbm_mutex);
		for (j = 0; j < 16; j++) {
			cik_srbm_select(adev, 0, 0, 0, j);
			/* SDMA GFX */
			WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
			WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
			/* XXX SDMA RLC - todo */
		}
		cik_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		WREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
		WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK |
			SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK;
#endif
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);

		/* set the wb address whether it's enabled or not */
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
		       ((adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));

		rb_cntl |= SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK;

		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);

		ring->wptr = 0;
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);

		/* enable DMA RB */
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i],
		       rb_cntl | SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK);

		ib_cntl = SDMA0_GFX_IB_CNTL__IB_ENABLE_MASK;
#ifdef __BIG_ENDIAN
		ib_cntl |= SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK;
#endif
		/* enable DMA IBs */
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);

		ring->ready = true;

		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->ready = false;
			return r;
		}

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
	}

	return 0;
}
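
/*
 * Editor's note: the ring size is programmed into RB_CNTL as
 * log2(dwords), shifted past the RB_ENABLE bit (hence rb_bufsz << 1),
 * and once RPTR_WRITEBACK_ENABLE is set the hardware mirrors the read
 * pointer into the writeback slot programmed via RPTR_ADDR_LO/HI,
 * which is where cik_sdma_ring_get_rptr() reads it from.
 */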
/**
 * cik_sdma_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_rlc_resume(struct amdgpu_device *adev)
{
	/* XXX todo */
	return 0;
}
/**
 * cik_sdma_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int cik_sdma_load_microcode(struct amdgpu_device *adev)
{
	const struct sdma_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	u32 fw_size;
	int i, j;

	/* halt the MEs */
	cik_sdma_enable(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (!adev->sdma.instance[i].fw)
			return -EINVAL;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		amdgpu_ucode_print_sdma_hdr(&hdr->header);
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
		if (adev->sdma.instance[i].feature_version >= 20)
			adev->sdma.instance[i].burst_nop = true;
		fw_data = (const __le32 *)
			(adev->sdma.instance[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
		for (j = 0; j < fw_size; j++)
			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
	}

	return 0;
}
/**
 * cik_sdma_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_start(struct amdgpu_device *adev)
{
	int r;

	r = cik_sdma_load_microcode(adev);
	if (r)
		return r;

	/* unhalt the MEs */
	cik_sdma_enable(adev, true);

	/* start the gfx rings and rlc compute queues */
	r = cik_sdma_gfx_resume(adev);
	if (r)
		return r;
	r = cik_sdma_rlc_resume(adev);
	if (r)
		return r;

	return 0;
}
/**
 * cik_sdma_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_lock(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
		amdgpu_wb_free(adev, index);
		return r;
	}
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, 1); /* number of DWs to follow */
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_unlock_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	amdgpu_wb_free(adev, index);

	return r;
}
/**
 * cik_sdma_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (CIK).
 * Returns 0 on success, error on failure.
 */
static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct fence *f = NULL;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp = 0;
	u64 gpu_addr;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(ring, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		goto err0;
	}

	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = 1;
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;
	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
						 AMDGPU_FENCE_OWNER_UNDEFINED,
						 &f);
	if (r)
		goto err1;

	r = fence_wait(f, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
		goto err1;
	}
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < adev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
			 ring->idx, i);
		goto err1;
	} else {
		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}

err1:
	fence_put(f);
	amdgpu_ib_free(adev, &ib);
err0:
	amdgpu_wb_free(adev, index);
	return r;
}
/**
 * cik_sdma_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (CIK).
 */
static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
				 uint64_t pe, uint64_t src,
				 unsigned count)
{
	while (count) {
		unsigned bytes = count * 8;
		if (bytes > 0x1FFFF8)
			bytes = 0x1FFFF8;

		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
			SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
		ib->ptr[ib->length_dw++] = bytes;
		ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
		ib->ptr[ib->length_dw++] = lower_32_bits(src);
		ib->ptr[ib->length_dw++] = upper_32_bits(src);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);

		pe += bytes;
		src += bytes;
		count -= bytes / 8;
	}
}
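
/*
 * Editor's note: a single COPY LINEAR packet moves at most 0x1FFFF8
 * bytes (the byte count clipped to a multiple of 8 so whole 64-bit PTEs
 * are copied), which is why larger updates are split across packets by
 * the loop above.
 */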
/**
 * cik_sdma_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update PTEs by writing them manually using sDMA (CIK).
 */
static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib,
				  uint64_t pe,
				  uint64_t addr, unsigned count,
				  uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		/* for non-physically contiguous pages (system) */
		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
			SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
		ib->ptr[ib->length_dw++] = pe;
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = ndw;
		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
			if (flags & AMDGPU_PTE_SYSTEM) {
				value = amdgpu_vm_map_gart(ib->ring->adev, addr);
				value &= 0xFFFFFFFFFFFFF000ULL;
			} else if (flags & AMDGPU_PTE_VALID) {
				value = addr;
			} else {
				value = 0;
			}
			addr += incr;
			value |= flags;
			ib->ptr[ib->length_dw++] = value;
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
		}
	}
}
/**
 * cik_sdma_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (CIK).
 */
static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib,
				    uint64_t pe,
				    uint64_t addr, unsigned count,
				    uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count;
		if (ndw > 0x7FFFF)
			ndw = 0x7FFFF;

		if (flags & AMDGPU_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
		ib->ptr[ib->length_dw++] = pe; /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = flags; /* mask */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = ndw; /* number of entries */

		pe += ndw * 8;
		addr += ndw * incr;
		count -= ndw;
	}
}
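
/*
 * Editor's note: unlike the write path above, GENERATE_PTE_PDE lets the
 * engine synthesize up to 0x7FFFF entries from one 10-dword packet,
 * stepping the address by 'incr' per entry and applying 'flags', so the
 * IB size stays constant regardless of 'count'.
 */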
/**
 * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
 *
 * @ib: indirect buffer to fill with padding
 *
 */
static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
	u32 pad_count;
	int i;

	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
	for (i = 0; i < pad_count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			ib->ptr[ib->length_dw++] =
					SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0) |
					SDMA_NOP_COUNT(pad_count - 1);
		else
			ib->ptr[ib->length_dw++] =
					SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
}
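
/*
 * Editor's note: IBs are padded to a multiple of 8 dwords, presumably
 * matching the 8-dword alignment requirement seen in
 * cik_sdma_ring_emit_ib(); burst NOP firmware again collapses the
 * padding into a single packet.
 */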
/**
 * cik_sdma_ring_emit_vm_flush - cik vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vm_id: VM context id to flush
 * @pd_addr: page directory base address for the context
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (CIK).
 */
static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vm_id, uint64_t pd_addr)
{
	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
			  SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	if (vm_id < 8) {
		amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
	} else {
		amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
	}
	amdgpu_ring_write(ring, pd_addr >> 12);

	/* flush TLB */
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 1 << vm_id);

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0); /* reference */
	amdgpu_ring_write(ring, 0); /* mask */
	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}
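
/*
 * Editor's note: VM contexts 0-7 and 8-15 keep their page table base
 * addresses in two separate register banks, hence the vm_id < 8 split
 * above.  The closing POLL_REG_MEM uses compare function 0 ("always"),
 * so it acts as an ordering point that reads VM_INVALIDATE_REQUEST back
 * once rather than waiting on a particular value.
 */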
static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_MGCG)) {
		WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
		WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
	} else {
		orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET);
		data |= 0xff000000;
		if (data != orig)
			WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, data);

		orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET);
		data |= 0xff000000;
		if (data != orig)
			WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, data);
	}
}
static void cik_enable_sdma_mgls(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_LS)) {
		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
		data |= 0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);

		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
		data |= 0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
	} else {
		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
		data &= ~0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);

		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
		data &= ~0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
	}
}
static int cik_sdma_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->sdma.num_instances = SDMA_MAX_INSTANCE;

	cik_sdma_set_ring_funcs(adev);
	cik_sdma_set_irq_funcs(adev);
	cik_sdma_set_buffer_funcs(adev);
	cik_sdma_set_vm_pte_funcs(adev);

	return 0;
}
static int cik_sdma_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r, i;

	r = cik_sdma_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load sdma firmware!\n");
		return r;
	}

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 256 * 1024,
				     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
				     &adev->sdma.trap_irq,
				     (i == 0) ?
				     AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
				     AMDGPU_RING_TYPE_SDMA);
		if (r)
			return r;
	}

	return r;
}
static int cik_sdma_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	return 0;
}
static int cik_sdma_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = cik_sdma_start(adev);
	if (r)
		return r;

	return r;
}
static int cik_sdma_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cik_sdma_enable(adev, false);

	return 0;
}
static int cik_sdma_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return cik_sdma_hw_fini(adev);
}
static int cik_sdma_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cik_sdma_soft_reset(handle);

	return cik_sdma_hw_init(adev);
}
static bool cik_sdma_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
		   SRBM_STATUS2__SDMA1_BUSY_MASK))
		return false;

	return true;
}
static int cik_sdma_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
				SRBM_STATUS2__SDMA1_BUSY_MASK);

		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
static void cik_sdma_print_status(void *handle)
{
	int i, j;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "CIK SDMA registers\n");
	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
		 RREG32(mmSRBM_STATUS2));
	for (i = 0; i < adev->sdma.num_instances; i++) {
		dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
			 i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_ME_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_SEM_INCOMPLETE_TIMER_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_IB_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_WPTR=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
		mutex_lock(&adev->srbm_mutex);
		for (j = 0; j < 16; j++) {
			cik_srbm_select(adev, 0, 0, 0, j);
			dev_info(adev->dev, "  VM %d:\n", j);
			dev_info(adev->dev, "  SDMA0_GFX_VIRTUAL_ADDR=0x%08X\n",
				 RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i]));
			dev_info(adev->dev, "  SDMA0_GFX_APE1_CNTL=0x%08X\n",
				 RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i]));
		}
		cik_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}
}
static int cik_sdma_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
		tmp |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
	}
	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
		tmp |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
	}

	if (srbm_soft_reset) {
		cik_sdma_print_status((void *)adev);

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		cik_sdma_print_status((void *)adev);
	}

	return 0;
}
static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	switch (type) {
	case AMDGPU_SDMA_IRQ_TRAP0:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	case AMDGPU_SDMA_IRQ_TRAP1:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}
static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *source,
				     struct amdgpu_iv_entry *entry)
{
	u8 instance_id, queue_id;

	instance_id = (entry->ring_id & 0x3) >> 0;
	queue_id = (entry->ring_id & 0xc) >> 2;
	DRM_DEBUG("IH: SDMA trap\n");
	switch (instance_id) {
	case 0:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[0].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	case 1:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[1].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	}

	return 0;
}
static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *source,
					     struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in SDMA command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}
static int cik_sdma_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	cik_enable_sdma_mgcg(adev, gate);
	cik_enable_sdma_mgls(adev, gate);

	return 0;
}
static int cik_sdma_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}
const struct amd_ip_funcs cik_sdma_ip_funcs = {
	.early_init = cik_sdma_early_init,
	.late_init = NULL,
	.sw_init = cik_sdma_sw_init,
	.sw_fini = cik_sdma_sw_fini,
	.hw_init = cik_sdma_hw_init,
	.hw_fini = cik_sdma_hw_fini,
	.suspend = cik_sdma_suspend,
	.resume = cik_sdma_resume,
	.is_idle = cik_sdma_is_idle,
	.wait_for_idle = cik_sdma_wait_for_idle,
	.soft_reset = cik_sdma_soft_reset,
	.print_status = cik_sdma_print_status,
	.set_clockgating_state = cik_sdma_set_clockgating_state,
	.set_powergating_state = cik_sdma_set_powergating_state,
};
static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
	.get_rptr = cik_sdma_ring_get_rptr,
	.get_wptr = cik_sdma_ring_get_wptr,
	.set_wptr = cik_sdma_ring_set_wptr,
	.parse_cs = NULL,
	.emit_ib = cik_sdma_ring_emit_ib,
	.emit_fence = cik_sdma_ring_emit_fence,
	.emit_semaphore = cik_sdma_ring_emit_semaphore,
	.emit_vm_flush = cik_sdma_ring_emit_vm_flush,
	.emit_hdp_flush = cik_sdma_ring_emit_hdp_flush,
	.test_ring = cik_sdma_ring_test_ring,
	.test_ib = cik_sdma_ring_test_ib,
	.insert_nop = cik_sdma_ring_insert_nop,
};
static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->sdma.instance[i].ring.funcs = &cik_sdma_ring_funcs;
}
static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
	.set = cik_sdma_set_trap_irq_state,
	.process = cik_sdma_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs cik_sdma_illegal_inst_irq_funcs = {
	.process = cik_sdma_process_illegal_inst_irq,
};

static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &cik_sdma_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs;
}
/**
 * cik_sdma_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with the copy packet
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the DMA engine (CIK).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib,
				      uint64_t src_offset,
				      uint64_t dst_offset,
				      uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
	ib->ptr[ib->length_dw++] = byte_count;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}
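
/*
 * Editor's note: this emits 7 dwords per copy (header, byte count,
 * endian-swap flags, src lo/hi, dst lo/hi), which is what the
 * .copy_num_dw field of cik_sdma_buffer_funcs below accounts for.
 */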
/**
 * cik_sdma_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with the fill packet
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to fill
 *
 * Fill GPU buffers using the DMA engine (CIK).
 */
static void cik_sdma_emit_fill_buffer(struct amdgpu_ib *ib,
				      uint32_t src_data,
				      uint64_t dst_offset,
				      uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 0);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count;
}
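
/*
 * Editor's note: the CONSTANT_FILL packet is 5 dwords (header, dst
 * lo/hi, 32-bit fill pattern, byte count), matching .fill_num_dw in
 * cik_sdma_buffer_funcs below.
 */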
static const struct amdgpu_buffer_funcs cik_sdma_buffer_funcs = {
	.copy_max_bytes = 0x1fffff,
	.copy_num_dw = 7,
	.emit_copy_buffer = cik_sdma_emit_copy_buffer,

	.fill_max_bytes = 0x1fffff,
	.fill_num_dw = 5,
	.emit_fill_buffer = cik_sdma_emit_fill_buffer,
};
static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs == NULL) {
		adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
	}
}
static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
	.copy_pte = cik_sdma_vm_copy_pte,
	.write_pte = cik_sdma_vm_write_pte,
	.set_pte_pde = cik_sdma_vm_set_pte_pde,
	.pad_ib = cik_sdma_vm_pad_ib,
};

static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	if (adev->vm_manager.vm_pte_funcs == NULL) {
		adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
		adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
	}
}