/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of
 * an opaque GPU object which is referenced at times of context saves and
 * restores. With RC6 enabled, the context is also referenced as the GPU
 * enters and exits RC6 (the GPU has its own internal power context, except on
 * gen5). Though something like a context does exist for the media ring, the
 * code only supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU
 * state. The default context only exists to give the GPU some offset to load
 * as the current context in order to invoke a save of the context we actually
 * care about. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that limits
 * the driver's ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These
 * contexts store GPU state, and thus allow GPU clients to not re-emit state
 * (and potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                           0            0           0
 * S1: context created                         1            0           0
 * S2: context is currently running            2            1           X
 * S3: GPU referenced, but not current         2            0           1
 * S4: context is current, but destroyed       1            1           0
 * S5: like S3, but destroyed                  1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and
 *  is on the active list waiting for the next context switch to occur. Until
 *  this happens, the object must remain at the same gtt offset. It is
 *  therefore possible to destroy a context while it is still active.
 */

#include <linux/log2.h>

#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_workarounds.h"

#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
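
/*
 * Tear down the per-context handle lookup state (the handles_list of LUT
 * entries and the handles_vma radix tree) when a context is closed, dropping
 * the object references held on behalf of the user handles.
 */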
static void lut_close(struct i915_gem_context *ctx)
{
	struct i915_lut_handle *lut, *ln;
	struct radix_tree_iter iter;
	void __rcu **slot;

	list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) {
		list_del(&lut->obj_link);
		kmem_cache_free(ctx->i915->luts, lut);
	}

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
		struct i915_vma *vma = rcu_dereference_raw(*slot);

		radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
		__i915_gem_object_release_unless_active(vma->obj);
	}
	rcu_read_unlock();
}
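
/*
 * Final free of a closed context: drop the ppgtt reference, destroy the
 * per-engine intel_context state and return the hw_id to the ida. Called
 * under struct_mutex, and only once the context has been marked closed.
 */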
static void i915_gem_context_free(struct i915_gem_context *ctx)
	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));

	i915_ppgtt_put(ctx->ppgtt);

	kfree(ctx->jump_whitelist);

	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
		struct intel_context *ce = &ctx->__engine[n];

		ce->ops->destroy(ce);

	list_del(&ctx->link);

	ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id);
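
/*
 * Contexts are freed deferred: i915_gem_context_release() pushes them onto a
 * lockless free_list, and they are reaped either from a worker or
 * opportunistically (one at a time) when a new context is created, so that
 * the final free always runs under struct_mutex.
 */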
static void contexts_free(struct drm_i915_private *i915)
	struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
	struct i915_gem_context *ctx, *cn;

	lockdep_assert_held(&i915->drm.struct_mutex);

	llist_for_each_entry_safe(ctx, cn, freed, free_link)
		i915_gem_context_free(ctx);
static void contexts_free_first(struct drm_i915_private *i915)
	struct i915_gem_context *ctx;
	struct llist_node *freed;

	lockdep_assert_held(&i915->drm.struct_mutex);

	freed = llist_del_first(&i915->contexts.free_list);
	if (!freed)
		return;

	ctx = container_of(freed, typeof(*ctx), free_link);
	i915_gem_context_free(ctx);
static void contexts_free_worker(struct work_struct *work)
	struct drm_i915_private *i915 =
		container_of(work, typeof(*i915), contexts.free_work);

	mutex_lock(&i915->drm.struct_mutex);
	contexts_free(i915);
	mutex_unlock(&i915->drm.struct_mutex);
void i915_gem_context_release(struct kref *ref)
	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
	struct drm_i915_private *i915 = ctx->i915;

	trace_i915_context_free(ctx);
	if (llist_add(&ctx->free_link, &i915->contexts.free_list))
		queue_work(i915->wq, &i915->contexts.free_work);
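
/*
 * Mark a context closed and release the resources that can be dropped
 * immediately (handle tables, ppgtt); the context struct itself lives on
 * until its final reference goes away (see the state machine above).
 */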
static void context_close(struct i915_gem_context *ctx)
	i915_gem_context_set_closed(ctx);

	/*
	 * The LUT uses the VMA as a backpointer to unref the object,
	 * so we need to clear the LUT before we close all the VMA (inside
	 * the ppgtt).
	 */
	lut_close(ctx);
	if (ctx->ppgtt)
		i915_ppgtt_close(&ctx->ppgtt->vm);

	ctx->file_priv = ERR_PTR(-EBADF);
	i915_gem_context_put(ctx);
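
/*
 * Every context gets a unique hw_id from an ida. The upper bound depends on
 * the platform and on GuC proxy submission (which reserves the top bit of
 * the id); on failure we flush retired requests and retry once.
 */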
static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
	if (INTEL_GEN(dev_priv) >= 11) {
		max = GEN11_MAX_CONTEXT_HW_ID;
	} else {
		/*
		 * When using GuC in proxy submission, GuC consumes the
		 * highest bit in the context id to indicate proxy submission.
		 */
		if (USES_GUC_SUBMISSION(dev_priv))
			max = MAX_GUC_CONTEXT_HW_ID;
		else
			max = MAX_CONTEXT_HW_ID;
	}

	ret = ida_simple_get(&dev_priv->contexts.hw_ida,
	/* Contexts are only released when no longer active.
	 * Flush any pending retires to hopefully release some
	 * stale contexts and try again.
	 */
	i915_retire_requests(dev_priv);
	ret = ida_simple_get(&dev_priv->contexts.hw_ida,
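
/*
 * Build the default gen8+ context descriptor template: validity and privilege
 * bits plus the addressing mode derived from the ppgtt (legacy 32b vs 48b).
 */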
static u32 default_desc_template(const struct drm_i915_private *i915,
				 const struct i915_hw_ppgtt *ppgtt)
	desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;

	address_mode = INTEL_LEGACY_32B_CONTEXT;
	if (ppgtt && i915_vm_is_48bit(&ppgtt->vm))
		address_mode = INTEL_LEGACY_64B_CONTEXT;
	desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;

	desc |= GEN8_CTX_L3LLC_COHERENT;

	/* TODO: WaDisableLiteRestore when we start using semaphore
	 * signalling between Command Streamers
	 * ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
	 */
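
/*
 * Allocate and initialise the software state for a new context: hw_id,
 * per-engine back-pointers, handle tables and, for user contexts, the idr
 * handle, pid and name used in debug/error output.
 */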
static struct i915_gem_context *
__create_hw_context(struct drm_i915_private *dev_priv,
		    struct drm_i915_file_private *file_priv)
	struct i915_gem_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ret = assign_hw_id(dev_priv, &ctx->hw_id);

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->contexts.list);
	ctx->i915 = dev_priv;
	ctx->sched.priority = I915_PRIORITY_NORMAL;

	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
		struct intel_context *ce = &ctx->__engine[n];

		ce->gem_context = ctx;

	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
	INIT_LIST_HEAD(&ctx->handles_list);

	/* Default context will never have a file_priv */
	ret = DEFAULT_CONTEXT_HANDLE;
	ret = idr_alloc(&file_priv->context_idr, ctx,
			DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
	ctx->user_handle = ret;

	ctx->file_priv = file_priv;
	ctx->pid = get_task_pid(current, PIDTYPE_PID);
	ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x",

	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = ALL_L3_SLICES(dev_priv);

	i915_gem_context_set_bannable(ctx);
	ctx->ring_size = 4 * PAGE_SIZE;
	ctx->desc_template =
		default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);

	/*
	 * GuC requires the ring to be placed in Non-WOPCM memory. If GuC is not
	 * present or not in use we still need a small bias as ring wraparound
	 * at offset 0 sometimes hangs. No idea why.
	 */
	if (USES_GUC(dev_priv))
		ctx->ggtt_offset_bias = dev_priv->guc.ggtt_pin_bias;
	else
		ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;

	ctx->jump_whitelist = NULL;
	ctx->jump_whitelist_cmds = 0;

	idr_remove(&file_priv->context_idr, ctx->user_handle);
static void __destroy_hw_context(struct i915_gem_context *ctx,
				 struct drm_i915_file_private *file_priv)
	idr_remove(&file_priv->context_idr, ctx->user_handle);
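
/*
 * Create a context and, when full ppgtt is in use, give it its own address
 * space. The caller must hold struct_mutex.
 */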
static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *dev_priv,
			struct drm_i915_file_private *file_priv)
	struct i915_gem_context *ctx;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	/* Reap the most stale context */
	contexts_free_first(dev_priv);

	ctx = __create_hw_context(dev_priv, file_priv);

	if (USES_FULL_PPGTT(dev_priv)) {
		struct i915_hw_ppgtt *ppgtt;

		ppgtt = i915_ppgtt_create(dev_priv, file_priv);
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
			__destroy_hw_context(ctx, file_priv);
			return ERR_CAST(ppgtt);

		ctx->desc_template = default_desc_template(dev_priv, ppgtt);

	trace_i915_context_create(ctx);
/**
 * i915_gem_context_create_gvt - create a GVT GEM context
 * @dev: drm device *
 *
 * This function is used to create a GVT specific GEM context.
 *
 * Returns:
 * pointer to i915_gem_context on success, error pointer if failed
 */
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev)
	struct i915_gem_context *ctx;

	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
		return ERR_PTR(-ENODEV);

	ret = i915_mutex_lock_interruptible(dev);

	ctx = __create_hw_context(to_i915(dev), NULL);

	ctx->file_priv = ERR_PTR(-EBADF);
	i915_gem_context_set_closed(ctx); /* not user accessible */
	i915_gem_context_clear_bannable(ctx);
	i915_gem_context_set_force_single_submission(ctx);
	if (!USES_GUC_SUBMISSION(to_i915(dev)))
		ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */

	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));

	mutex_unlock(&dev->struct_mutex);
struct i915_gem_context *
i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
	struct i915_gem_context *ctx;

	ctx = i915_gem_create_context(i915, NULL);

	i915_gem_context_clear_bannable(ctx);
	ctx->sched.priority = prio;
	ctx->ring_size = PAGE_SIZE;

	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
static void
destroy_kernel_context(struct i915_gem_context **ctxp)
	struct i915_gem_context *ctx;

	/* Keep the context ref so that we can free it immediately ourselves */
	ctx = i915_gem_context_get(fetch_and_zero(ctxp));
	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));

	context_close(ctx);
	i915_gem_context_free(ctx);
static bool needs_preempt_context(struct drm_i915_private *i915)
{
	return HAS_LOGICAL_RING_PREEMPTION(i915);
}
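
/*
 * Driver-load initialisation of the context machinery: set up the context
 * lists and hw_id ida, then create the kernel context (lowest priority,
 * hw_id 0) and, where preemption is supported, the preempt context.
 */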
int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
	struct i915_gem_context *ctx;

	/* Reassure ourselves we are only called once */
	GEM_BUG_ON(dev_priv->kernel_context);
	GEM_BUG_ON(dev_priv->preempt_context);

	ret = intel_ctx_workarounds_init(dev_priv);

	INIT_LIST_HEAD(&dev_priv->contexts.list);
	INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
	init_llist_head(&dev_priv->contexts.free_list);

	/* Using the simple ida interface, the max is limited by sizeof(int) */
	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
	BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
	ida_init(&dev_priv->contexts.hw_ida);

	/* lowest priority; idle task */
	ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
		DRM_ERROR("Failed to create default global context\n");
	/*
	 * For easy recognisability, we want the kernel context to be 0 and
	 * then all user contexts will have non-zero hw_id.
	 */
	GEM_BUG_ON(ctx->hw_id);
	dev_priv->kernel_context = ctx;

	/* highest priority; preempting task */
	if (needs_preempt_context(dev_priv)) {
		ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
			dev_priv->preempt_context = ctx;
			DRM_ERROR("Failed to create preempt context; disabling preemption\n");

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			 DRIVER_CAPS(dev_priv)->has_logical_contexts ?
void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	for_each_engine(engine, dev_priv, id)
		intel_engine_lost_context(engine);
void i915_gem_contexts_fini(struct drm_i915_private *i915)
	lockdep_assert_held(&i915->drm.struct_mutex);

	if (i915->preempt_context)
		destroy_kernel_context(&i915->preempt_context);
	destroy_kernel_context(&i915->kernel_context);

	/* Must free all deferred contexts (via flush_workqueue) first */
	ida_destroy(&i915->contexts.hw_ida);
static int context_idr_cleanup(int id, void *p, void *data)
	struct i915_gem_context *ctx = p;
int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file)
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&i915->drm.struct_mutex);
	ctx = i915_gem_create_context(i915, file_priv);
	mutex_unlock(&i915->drm.struct_mutex);
		idr_destroy(&file_priv->context_idr);

	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
void i915_gem_context_close(struct drm_file *file)
	struct drm_i915_file_private *file_priv = file->driver_priv;

	lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
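
/*
 * Find the last request on a timeline that was submitted to the given engine,
 * if any; used below when deciding whether a switch-to-kernel-context barrier
 * is still outstanding.
 */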
static struct i915_request *
last_request_on_engine(struct i915_timeline *timeline,
		       struct intel_engine_cs *engine)
	struct i915_request *rq;

	GEM_BUG_ON(timeline == &engine->timeline);

	rq = i915_gem_active_raw(&timeline->last_request,
				 &engine->i915->drm.struct_mutex);
	if (rq && rq->engine == engine) {
		GEM_TRACE("last request for %s on engine %s: %llx:%d\n",
			  timeline->name, engine->name,
			  rq->fence.context, rq->fence.seqno);
		GEM_BUG_ON(rq->timeline != timeline);
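
/*
 * Check whether this engine already has an effective switch-to-kernel-context
 * barrier queued behind every active timeline, i.e. whether emitting another
 * barrier request would be redundant.
 */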
static bool engine_has_kernel_context_barrier(struct intel_engine_cs *engine)
	struct drm_i915_private *i915 = engine->i915;
	const struct intel_context * const ce =
		to_intel_context(i915->kernel_context, engine);
	struct i915_timeline *barrier = ce->ring->timeline;
	struct intel_ring *ring;
	bool any_active = false;

	lockdep_assert_held(&i915->drm.struct_mutex);
	list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
		struct i915_request *rq;

		rq = last_request_on_engine(ring->timeline, engine);

		if (rq->hw_context == ce)

		/*
		 * Was this request submitted after the previous
		 * switch-to-kernel-context?
		 */
		if (!i915_timeline_sync_is_later(barrier, &rq->fence)) {
			GEM_TRACE("%s needs barrier for %llx:%d\n",
				  ring->timeline->name,

		GEM_TRACE("%s has barrier after %llx:%d\n",
			  ring->timeline->name,

	/*
	 * If any other timeline was still active and behind the last barrier,
	 * then our last switch-to-kernel-context must still be queued and
	 * will run last (leaving the engine in the kernel context when it
	 */

	/* The engine is idle; check that it is idling in the kernel context. */
	return engine->last_retired_context == ce;
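
/*
 * Before idling or suspending, switch every engine over to the kernel
 * context; the barrier requests emitted here are ordered after all other
 * activity on the active timelines so that they run last.
 */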
int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	GEM_TRACE("awake?=%s\n", yesno(i915->gt.awake));

	lockdep_assert_held(&i915->drm.struct_mutex);
	GEM_BUG_ON(!i915->kernel_context);

	i915_retire_requests(i915);

	for_each_engine(engine, i915, id) {
		struct intel_ring *ring;
		struct i915_request *rq;

		GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine));
		if (engine_has_kernel_context_barrier(engine))
			continue;

		GEM_TRACE("emit barrier on %s\n", engine->name);

		rq = i915_request_alloc(engine, i915->kernel_context);

		/* Queue this switch after all other activity */
		list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
			struct i915_request *prev;

			prev = last_request_on_engine(ring->timeline, engine);

			if (prev->gem_context == i915->kernel_context)
				continue;

			GEM_TRACE("add barrier on %s for %llx:%d\n",

			i915_sw_fence_await_sw_fence_gfp(&rq->submit,

			i915_timeline_sync_set(rq->timeline, &prev->fence);

		i915_request_add(rq);
static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
}
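
/*
 * The ioctls below are how userspace creates, configures and destroys its
 * contexts. As a rough illustration only (error handling and the execbuf
 * step omitted), a GPU client might do:
 *
 *	struct drm_i915_gem_context_create create = {};
 *	struct drm_i915_gem_context_destroy destroy = {};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
 *	... submit execbuffers tagged with create.ctx_id ...
 *	destroy.ctx_id = create.ctx_id;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 */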
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;

	if (!DRIVER_CAPS(dev_priv)->has_logical_contexts)
		return -ENODEV;

	if (client_is_banned(file_priv)) {
		DRM_DEBUG("client %s[%d] banned from creating ctx\n",
			  pid_nr(get_task_pid(current, PIDTYPE_PID)));

	ret = i915_mutex_lock_interruptible(dev);

	ctx = i915_gem_create_context(dev_priv, file_priv);
	mutex_unlock(&dev->struct_mutex);

	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG("HW context %d created\n", args->ctx_id);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
		mutex_unlock(&dev->struct_mutex);

	__destroy_hw_context(ctx, file_priv);
	mutex_unlock(&dev->struct_mutex);

	i915_gem_context_put(ctx);
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);

	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
	case I915_CONTEXT_PARAM_GTT_SIZE:
		if (ctx->ppgtt)
			args->value = ctx->ppgtt->vm.total;
		else if (to_i915(dev)->mm.aliasing_ppgtt)
			args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
		else
			args->value = to_i915(dev)->ggtt.vm.total;
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->value = i915_gem_context_no_error_capture(ctx);
	case I915_CONTEXT_PARAM_BANNABLE:
		args->value = i915_gem_context_is_bannable(ctx);
	case I915_CONTEXT_PARAM_PRIORITY:
		args->value = ctx->sched.priority;

	i915_gem_context_put(ctx);
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);

	ret = i915_mutex_lock_interruptible(dev);

	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		ctx->flags &= ~CONTEXT_NO_ZEROMAP;
		ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		else if (args->value)
			i915_gem_context_set_no_error_capture(ctx);
		else
			i915_gem_context_clear_no_error_capture(ctx);
	case I915_CONTEXT_PARAM_BANNABLE:
		else if (!capable(CAP_SYS_ADMIN) && !args->value)
			ret = -EPERM;
		else if (args->value)
			i915_gem_context_set_bannable(ctx);
		else
			i915_gem_context_clear_bannable(ctx);
	case I915_CONTEXT_PARAM_PRIORITY:
		{
			s64 priority = args->value;

			else if (!(to_i915(dev)->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
				ret = -ENODEV;
			else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
				 priority < I915_CONTEXT_MIN_USER_PRIORITY)
				ret = -EINVAL;
			else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
				 !capable(CAP_SYS_NICE))
				ret = -EPERM;
			else
				ctx->sched.priority = priority;
		}

	mutex_unlock(&dev->struct_mutex);

	i915_gem_context_put(ctx);
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_gem_context *ctx;

	if (args->flags || args->pad)
		return -EINVAL;

	ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);

	/*
	 * We opt for unserialised reads here. This may result in tearing
	 * in the extremely unlikely event of a GPU hang on this context
	 * as we are querying them. If we need that extra layer of protection,
	 * we should wrap the hangstats with a seqlock.
	 */

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = atomic_read(&ctx->guilty_count);
	args->batch_pending = atomic_read(&ctx->active_count);
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif