/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "../i915_selftest.h"
#include "i915_random.h"
#include "igt_flush_test.h"

#include "mock_drm.h"
#include "mock_gem_device.h"
#include "huge_gem_object.h"

#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
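
/*
 * gpu_fill_dw() builds a small batch buffer that stores 'value' into one
 * dword of each of 'count' pages of the target vma, starting at 'offset'.
 * It returns the batch as a pinned vma, ready to be fed to
 * engine->emit_bb_start().
 */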
static struct i915_vma *
gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
{
	struct drm_i915_gem_object *obj;
	const int gen = INTEL_GEN(vma->vm->i915);
	unsigned long n, size;
	u32 *cmd;
	int err;

	size = (4 * count + 1) * sizeof(u32);
	size = round_up(size, PAGE_SIZE);
	obj = i915_gem_object_create_internal(vma->vm->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
	offset += vma->node.start;
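
	/*
	 * Emit one MI_STORE_DWORD_IMM per page. The command layout differs
	 * between generations (64-bit address on gen8+, 32-bit otherwise),
	 * but each one writes 'value' to the current page offset within the
	 * target vma.
	 */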
	for (n = 0; n < count; n++) {
		if (gen >= 8) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4;
			*cmd++ = lower_32_bits(offset);
			*cmd++ = upper_32_bits(offset);
		} else if (gen >= 4) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
				 (gen < 6 ? MI_USE_GGTT : 0);
			*cmd++ = 0;
			*cmd++ = offset;
		} else {
			*cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
			*cmd++ = offset;
		}
		*cmd++ = value;
		offset += PAGE_SIZE;
	}
	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_object_unpin_map(obj);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		goto err;

	vma = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}
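
/*
 * The test objects are created with huge_gem_object(): they expose a large
 * GEM size (what the GPU maps) backed by a much smaller set of physical
 * pages that are reused cyclically. real_page_count() is the number of
 * physical pages actually allocated, fake_page_count() the number of pages
 * in the GPU view.
 */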
static unsigned long real_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
}

static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
}
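
/*
 * gpu_fill() binds the test object into the context's address space and
 * submits a request on 'engine' whose batch writes the value 'dw' into
 * dword 'dw' of every physical page backing the object.
 */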
static int gpu_fill(struct drm_i915_gem_object *obj,
		    struct i915_gem_context *ctx,
		    struct intel_engine_cs *engine,
		    unsigned int dw)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
	struct i915_request *rq;
	struct i915_vma *vma;
	struct i915_vma *batch;
	unsigned int flags;
	int err;

	GEM_BUG_ON(obj->base.size > vm->total);
	GEM_BUG_ON(!intel_engine_can_store_dword(engine));

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		return err;

	err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
	if (err)
		return err;

	/* Within the GTT the huge object maps every page onto
	 * its 1024 real pages (using phys_pfn = dma_pfn % 1024).
	 * We set the nth dword within the page using the nth
	 * mapping via the GTT - this should exercise the GTT mapping
	 * whilst checking that each context provides a unique view
	 * into the object.
	 */
	batch = gpu_fill_dw(vma,
			    (dw * real_page_count(obj)) << PAGE_SHIFT |
			    (dw * sizeof(u32)),
			    real_page_count(obj),
			    dw);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_vma;
	}
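
	/*
	 * Build and submit the request. On gen2-5 the store-dword commands
	 * emitted by gpu_fill_dw() only take effect from a privileged batch,
	 * hence the I915_DISPATCH_SECURE flag below.
	 */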
	rq = i915_request_alloc(engine, ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_batch;
	}

	flags = 0;
	if (INTEL_GEN(vm->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;

	err = engine->emit_bb_start(rq,
				    batch->node.start, batch->node.size,
				    flags);
	if (err)
		goto err_request;

	err = i915_vma_move_to_active(batch, rq, 0);
	if (err)
		goto skip_request;

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err)
		goto skip_request;

	i915_gem_object_set_active_reference(batch->obj);
	i915_vma_unpin(batch);
	i915_vma_close(batch);

	i915_vma_unpin(vma);

	i915_request_add(rq);

	return 0;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_batch:
	i915_vma_unpin(batch);
err_vma:
	i915_vma_unpin(vma);
	return err;
}
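
/*
 * cpu_fill() seeds every dword of the object's physical pages with 'value'
 * (STACK_MAGIC in practice) via the CPU, flushing caches on non-LLC
 * platforms, so that any dword the GPU does not touch can later be
 * recognised by cpu_check().
 */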
static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
{
	const bool has_llc = HAS_LLC(to_i915(obj->base.dev));
	unsigned int n, m, need_flush;
	int err;

	err = i915_gem_obj_prepare_shmem_write(obj, &need_flush);
	if (err)
		return err;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		for (m = 0; m < DW_PER_PAGE; m++)
			map[m] = value;
		if (!has_llc)
			drm_clflush_virt_range(map, PAGE_SIZE);
		kunmap_atomic(map);
	}

	i915_gem_obj_finish_shmem_access(obj);
	obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
	obj->write_domain = 0;
	return 0;
}
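
/*
 * cpu_check() reads the object back with the CPU: the first 'max' dwords of
 * each physical page should have been overwritten by the GPU with their own
 * index, and every remaining dword should still hold the STACK_MAGIC canary
 * written by cpu_fill().
 */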
static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
{
	unsigned int n, m, needs_flush;
	int err;

	err = i915_gem_obj_prepare_shmem_read(obj, &needs_flush);
	if (err)
		return err;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(map, PAGE_SIZE);

		for (m = 0; m < max; m++) {
			if (map[m] != m) {
				pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
				       n, m, map[m], m);
				err = -EINVAL;
				goto out_unmap;
			}
		}

		for (; m < DW_PER_PAGE; m++) {
			if (map[m] != STACK_MAGIC) {
				pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
				       n, m, map[m], STACK_MAGIC);
				err = -EINVAL;
				goto out_unmap;
			}
		}

out_unmap:
		kunmap_atomic(map);
		if (err)
			break;
	}

	i915_gem_obj_finish_shmem_access(obj);
	return err;
}
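
/*
 * file_add_object() ties the object to the mock drm_file's handle idr so
 * that it is released automatically when the file is freed at the end of
 * the test.
 */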
static int file_add_object(struct drm_file *file,
			   struct drm_i915_gem_object *obj)
{
	int err;

	GEM_BUG_ON(obj->base.handle_count);

	/* tie the object to the drm_file for easy reaping */
	err = idr_alloc(&file->object_idr, &obj->base, 1, 0, GFP_KERNEL);
	if (err < 0)
		return err;

	i915_gem_object_get(obj);
	obj->base.handle_count++;
	return 0;
}
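
/*
 * create_test_object() allocates a huge test object (covering at most half
 * of the context's address space), fills it with the STACK_MAGIC canary
 * using the CPU and queues it on the caller's list for later checking.
 */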
static struct drm_i915_gem_object *
create_test_object(struct i915_gem_context *ctx,
		   struct drm_file *file,
		   struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->vm : &ctx->i915->ggtt.vm;
	u64 size;
	int err;

	size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
	size = round_down(size, DW_PER_PAGE * PAGE_SIZE);

	obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size);
	if (IS_ERR(obj))
		return obj;

	err = file_add_object(file, obj);
	i915_gem_object_put(obj);
	if (err)
		return ERR_PTR(err);

	err = cpu_fill(obj, STACK_MAGIC);
	if (err) {
		pr_err("Failed to fill object with cpu, err=%d\n",
		       err);
		return ERR_PTR(err);
	}

	list_add_tail(&obj->st_link, objects);
	return obj;
}
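
/* Maximum number of gpu_fill() passes (one dword written per page per pass)
 * that the object's GTT view can accommodate.
 */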
static unsigned long max_dwords(struct drm_i915_gem_object *obj)
{
	unsigned long npages = fake_page_count(obj);

	GEM_BUG_ON(!IS_ALIGNED(npages, DW_PER_PAGE));
	return npages / DW_PER_PAGE;
}

static int igt_ctx_exec(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj = NULL;
	struct drm_file *file;
	IGT_TIMEOUT(end_time);
	LIST_HEAD(objects);
	unsigned long ncontexts, ndwords, dw;
	bool first_shared_gtt = true;
	int err = -ENODEV;

	/*
	 * Create a few different contexts (with different mm) and write
	 * through each ctx/mm using the GPU making sure those writes end
	 * up in the expected pages of our obj.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);

	ncontexts = 0;
	ndwords = 0;
	dw = 0;
	while (!time_after(jiffies, end_time)) {
		struct intel_engine_cs *engine;
		struct i915_gem_context *ctx;
		enum intel_engine_id id;

		if (first_shared_gtt) {
			ctx = __create_hw_context(i915, file->driver_priv);
			first_shared_gtt = false;
		} else {
			ctx = i915_gem_create_context(i915, file->driver_priv);
		}
		if (IS_ERR(ctx)) {
			err = PTR_ERR(ctx);
			goto out_unlock;
		}

		for_each_engine(engine, i915, id) {
			if (!engine->context_size)
				continue; /* No logical context support in HW */

			if (!intel_engine_can_store_dword(engine))
				continue;

			if (!obj) {
				obj = create_test_object(ctx, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					goto out_unlock;
				}
			}

			intel_runtime_pm_get(i915);
			err = gpu_fill(obj, ctx, engine, dw);
			intel_runtime_pm_put(i915);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name, ctx->hw_id,
				       yesno(!!ctx->ppgtt), err);
				goto out_unlock;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}
			ndwords++;
		}
		ncontexts++;
	}
	pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n",
		ncontexts, INTEL_INFO(i915)->num_rings, ndwords);
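
	/*
	 * Read everything back with the CPU: each object should contain the
	 * dwords written through every context that targeted it, with any
	 * untouched dwords still holding the STACK_MAGIC canary.
	 */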
	dw = 0;
	list_for_each_entry(obj, &objects, st_link) {
		unsigned int rem =
			min_t(unsigned int, ndwords - dw, max_dwords(obj));

		err = cpu_check(obj, rem);
		if (err)
			break;

		dw += rem;
	}

out_unlock:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	mutex_unlock(&i915->drm.struct_mutex);

	mock_file_free(i915, file);
	return err;
}

static int igt_ctx_readonly(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj = NULL;
	struct drm_file *file;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	LIST_HEAD(objects);
	struct i915_gem_context *ctx;
	struct i915_hw_ppgtt *ppgtt;
	unsigned long ndwords, dw;
	int err = -ENODEV;

	/*
	 * Create a few read-only objects (with the occasional writable object)
	 * and try to write into these objects, checking that the GPU discards
	 * any write to a read-only object.
	 */

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);

	ctx = i915_gem_create_context(i915, file->driver_priv);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_unlock;
	}

	ppgtt = ctx->ppgtt ?: i915->mm.aliasing_ppgtt;
	if (!ppgtt || !ppgtt->vm.has_read_only) {
		err = 0;
		goto out_unlock;
	}

	ndwords = 0;
	dw = 0;
	while (!time_after(jiffies, end_time)) {
		struct intel_engine_cs *engine;
		unsigned int id;

		for_each_engine(engine, i915, id) {
			if (!intel_engine_can_store_dword(engine))
				continue;

			if (!obj) {
				obj = create_test_object(ctx, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					goto out_unlock;
				}

				if (prandom_u32_state(&prng) & 1)
					i915_gem_object_set_readonly(obj);
			}

			intel_runtime_pm_get(i915);
			err = gpu_fill(obj, ctx, engine, dw);
			intel_runtime_pm_put(i915);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name, ctx->hw_id,
				       yesno(!!ctx->ppgtt), err);
				goto out_unlock;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}
			ndwords++;
		}
	}
	pr_info("Submitted %lu dwords (across %u engines)\n",
		ndwords, INTEL_INFO(i915)->num_rings);
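
	/*
	 * Read back with the CPU: writable objects should contain the GPU
	 * writes, while objects marked read-only must still hold nothing but
	 * the STACK_MAGIC canary.
	 */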
	dw = 0;
	list_for_each_entry(obj, &objects, st_link) {
		unsigned int rem =
			min_t(unsigned int, ndwords - dw, max_dwords(obj));
		unsigned int num_writes;

		num_writes = rem;
		if (i915_gem_object_is_readonly(obj))
			num_writes = 0;

		err = cpu_check(obj, num_writes);
		if (err)
			break;

		dw += rem;
	}

out_unlock:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	mutex_unlock(&i915->drm.struct_mutex);

	mock_file_free(i915, file);
	return err;
}

static __maybe_unused const char *
__engine_name(struct drm_i915_private *i915, unsigned int engines)
{
	struct intel_engine_cs *engine;
	unsigned int tmp;

	if (engines == ALL_ENGINES)
		return "all";

	for_each_engine_masked(engine, i915, engines, tmp)
		return engine->name;

	return "none";
}
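
/*
 * Submit a request on each selected engine in 'ctx', then verify that a
 * switch to the kernel context idles those engines in the kernel context
 * and that a second switch does not emit any further requests.
 */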
static int __igt_switch_to_kernel_context(struct drm_i915_private *i915,
					  struct i915_gem_context *ctx,
					  unsigned int engines)
{
	struct intel_engine_cs *engine;
	unsigned int tmp;
	int err;

	GEM_TRACE("Testing %s\n", __engine_name(i915, engines));
	for_each_engine_masked(engine, i915, engines, tmp) {
		struct i915_request *rq;

		rq = i915_request_alloc(engine, ctx);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		i915_request_add(rq);
	}

	err = i915_gem_switch_to_kernel_context(i915);
	if (err)
		return err;

	for_each_engine_masked(engine, i915, engines, tmp) {
		if (!engine_has_kernel_context_barrier(engine)) {
			pr_err("kernel context not last on engine %s!\n",
			       engine->name);
			return -EINVAL;
		}
	}

	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED,
				     MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	GEM_BUG_ON(i915->gt.active_requests);
	for_each_engine_masked(engine, i915, engines, tmp) {
		if (engine->last_retired_context->gem_context != i915->kernel_context) {
			pr_err("engine %s not idling in kernel context!\n",
			       engine->name);
			return -EINVAL;
		}
	}

	err = i915_gem_switch_to_kernel_context(i915);
	if (err)
		return err;

	if (i915->gt.active_requests) {
		pr_err("switch-to-kernel-context emitted %d requests even though it should already be idling in the kernel context\n",
		       i915->gt.active_requests);
		return -EINVAL;
	}

	for_each_engine_masked(engine, i915, engines, tmp) {
		if (!intel_engine_has_kernel_context(engine)) {
			pr_err("kernel context not last on engine %s!\n",
			       engine->name);
			return -EINVAL;
		}
	}

	return 0;
}

static int igt_switch_to_kernel_context(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int err;

	/*
	 * A core premise of switching to the kernel context is that
	 * if an engine is already idling in the kernel context, we
	 * do not emit another request and wake it up. The other being
	 * that we do indeed end up idling in the kernel context.
	 */

	mutex_lock(&i915->drm.struct_mutex);
	ctx = kernel_context(i915);
	if (IS_ERR(ctx)) {
		mutex_unlock(&i915->drm.struct_mutex);
		return PTR_ERR(ctx);
	}

	/* First check idling each individual engine */
	for_each_engine(engine, i915, id) {
		err = __igt_switch_to_kernel_context(i915, ctx, BIT(id));
		if (err)
			goto out_unlock;
	}

	/* Now en masse */
	err = __igt_switch_to_kernel_context(i915, ctx, ALL_ENGINES);
	if (err)
		goto out_unlock;

out_unlock:
	GEM_TRACE_DUMP_ON(err);
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	mutex_unlock(&i915->drm.struct_mutex);

	kernel_context_close(ctx);
	return err;
}
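
/*
 * Helpers for the live selftests below: install a temporary aliasing ppgtt
 * on hardware that normally runs full-ppgtt, purely to exercise that path.
 * Clearing I915_VMA_LOCAL_BIND on the already-bound GGTT vmas forces them
 * to be rebound through the new aliasing ppgtt on next use.
 */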
static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	int err;

	err = i915_gem_init_aliasing_ppgtt(i915);
	if (err)
		return err;

	list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
		if (IS_ERR(vma))
			continue;

		vma->flags &= ~I915_VMA_LOCAL_BIND;
	}

	return 0;
}

static void fake_aliasing_ppgtt_disable(struct drm_i915_private *i915)
{
	i915_gem_fini_aliasing_ppgtt(i915);
}
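
/*
 * Entry points: the mock selftests run against a mock GEM device without
 * touching real hardware, while the live selftests exercise the full driver
 * on the probed device.
 */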
int i915_gem_context_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_switch_to_kernel_context),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	err = i915_subtests(tests, i915);

	drm_dev_put(&i915->drm);
	return err;
}

int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_switch_to_kernel_context),
		SUBTEST(igt_ctx_exec),
		SUBTEST(igt_ctx_readonly),
	};
	bool fake_alias = false;
	int err;

	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return 0;

	/* Install a fake aliasing gtt for exercise */
	if (USES_PPGTT(dev_priv) && !dev_priv->mm.aliasing_ppgtt) {
		mutex_lock(&dev_priv->drm.struct_mutex);
		err = fake_aliasing_ppgtt_enable(dev_priv);
		mutex_unlock(&dev_priv->drm.struct_mutex);
		if (err)
			return err;

		GEM_BUG_ON(!dev_priv->mm.aliasing_ppgtt);
		fake_alias = true;
	}

	err = i915_subtests(tests, dev_priv);

	if (fake_alias) {
		mutex_lock(&dev_priv->drm.struct_mutex);
		fake_aliasing_ppgtt_disable(dev_priv);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	return err;
}