/**************************************************************************
 *
 * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/sync_file.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12
/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
 * after validation is -1, the command is replaced with a NOP. Otherwise no
 * action is taken.
 */
enum vmw_resource_relocation_type {
	vmw_res_rel_normal,
	vmw_res_rel_nop,
	vmw_res_rel_cond_nop,
	vmw_res_rel_max
};
/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of single byte entries into the command buffer where the
 * id that needs fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	unsigned long offset:29;
	enum vmw_resource_relocation_type rel_type:3;
};
/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @switching_backup: The command stream provides a new backup buffer for a
 * resource.
 * @no_buffer_needed: This means @switching_backup is true on first buffer
 * reference. So resource reservation does not need to allocate a backup
 * buffer for the resource.
 */
struct vmw_resource_val_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_dma_buffer *new_backup;
	struct vmw_ctx_binding_state *staged_bindings;
	unsigned long new_backup_offset;
	u32 first_usage : 1;
	u32 switching_backup : 1;
	u32 no_buffer_needed : 1;
};
/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back to handle the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled if guest-backed objects are available.
 * @cmd_name: Name of the command.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
	const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable), #_cmd}
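/*
 * Illustrative use of VMW_CMD_DEF (a sketch, not a verbatim entry from the
 * command table): entries live in a verifier table indexed by command id
 * relative to SVGA_3D_CMD_BASE, e.g.
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
 *		    true, false, false),
 *
 * which expands to a designated initializer filling in @func, @user_allow,
 * @gb_disable, @gb_enable and the stringified command name.
 */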
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p);
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct vmw_dma_buffer *vbo,
				   bool validate_as_mob,
				   uint32_t *p_val_node);
/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
	return (unsigned long) b - (unsigned long) a;
}
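/*
 * Usage sketch: vmw_ptr_diff() records where in the submitted batch an id
 * lives, relative to the start of the command buffer, e.g.
 *
 *	offset = vmw_ptr_diff(sw_context->buf_start, id_loc);
 *
 * The same byte offset is later applied against whichever copy of the
 * batch is patched in vmw_resource_relocations_apply().
 */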
/**
 * vmw_resources_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @sw_context: Pointer to the software context.
 * @backoff: Whether command submission failed.
 */
static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
				    bool backoff)
{
	struct vmw_resource_val_node *val;
	struct list_head *list = &sw_context->resource_list;

	if (sw_context->dx_query_mob && !backoff)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);

	list_for_each_entry(val, list, head) {
		struct vmw_resource *res = val->res;
		bool switch_backup =
			(backoff) ? false : val->switching_backup;

		/*
		 * Transfer staged context bindings to the
		 * persistent context binding tracker.
		 */
		if (unlikely(val->staged_bindings)) {
			if (!backoff) {
				vmw_binding_state_commit
					(vmw_context_binding_state(val->res),
					 val->staged_bindings);
			}

			if (val->staged_bindings != sw_context->staged_bindings)
				vmw_binding_state_free(val->staged_bindings);
			else
				sw_context->staged_bindings_inuse = false;
			val->staged_bindings = NULL;
		}
		vmw_resource_unreserve(res, switch_backup, val->new_backup,
				       val->new_backup_offset);
		vmw_dmabuf_unreference(&val->new_backup);
	}
}
/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
 * added to the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The validation context.
 * @node: The validation node holding this context.
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource_val_node *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings =
			vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged_bindings)) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = PTR_ERR(node->staged_bindings);
			node->staged_bindings = NULL;
			goto out_err;
		}
	} else {
		node->staged_bindings = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	return 0;
out_err:
	return ret;
}
/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *res,
				struct vmw_resource_val_node **p_node)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_resource_val_node *node;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
				    &hash) == 0)) {
		node = container_of(hash, struct vmw_resource_val_node, hash);
		node->first_usage = false;
		if (unlikely(p_node != NULL))
			*p_node = node;
		return 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (unlikely(!node)) {
		DRM_ERROR("Failed to allocate a resource validation "
			  "entry.\n");
		return -ENOMEM;
	}

	node->hash.key = (unsigned long) res;
	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to initialize a resource validation "
			  "entry.\n");
		kfree(node);
		return ret;
	}
	node->res = vmw_resource_reference(res);
	node->first_usage = true;
	if (unlikely(p_node != NULL))
		*p_node = node;

	if (!dev_priv->has_mob) {
		list_add_tail(&node->head, &sw_context->resource_list);
		return 0;
	}

	switch (vmw_res_type(res)) {
	case vmw_res_context:
	case vmw_res_dx_context:
		list_add(&node->head, &sw_context->ctx_resource_list);
		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
		break;
	case vmw_res_cotable:
		list_add_tail(&node->head, &sw_context->ctx_resource_list);
		ret = 0;
		break;
	default:
		list_add_tail(&node->head, &sw_context->resource_list);
		ret = 0;
		break;
	}

	return ret;
}
/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to
 * to the validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 if success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise
	 * it may be swapped out when the view is validated.
	 */
	ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
	if (ret)
		return ret;

	return vmw_resource_val_add(sw_context, view, NULL);
}
/**
 * vmw_view_id_val_add - Look up a view and add it and the surface it's
 * pointing to to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on,
 * or scheduled for creation on. If there is no DX context set, the function
 * will return -EINVAL. Otherwise returns 0 on success and -EINVAL on failure.
 */
static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
			       enum vmw_view_type view_type, u32 id)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return PTR_ERR(view);

	ret = vmw_view_res_val_add(sw_context, view);
	vmw_resource_unreference(&view);

	return ret;
}
/**
 * vmw_resource_context_res_add - Put resources previously bound to a context on
 * the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission
 * sequence.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;

	/* Add all cotables to the validation list. */
	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_resource_val_add(sw_context, res, NULL);
			vmw_resource_unreference(&res);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		/* entry->res is not refcounted */
		res = vmw_resource_reference_unless_doomed(entry->res);
		if (unlikely(res == NULL))
			continue;

		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_resource_val_add(sw_context, entry->res,
						   NULL);
		vmw_resource_unreference(&res);
		if (unlikely(ret != 0))
			break;
	}

	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_dma_buffer *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob)
			ret = vmw_bo_to_validate_list(sw_context,
						      dx_query_mob,
						      true, NULL);
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}
/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct list_head *list,
				       const struct vmw_resource *res,
				       unsigned long offset,
				       enum vmw_resource_relocation_type
				       rel_type)
{
	struct vmw_resource_relocation *rel;

	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
	if (unlikely(!rel)) {
		DRM_ERROR("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	rel->rel_type = rel_type;
	list_add_tail(&rel->head, list);

	return 0;
}
/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	struct vmw_resource_relocation *rel, *n;

	list_for_each_entry_safe(rel, n, list, head) {
		list_del(&rel->head);
		kfree(rel);
	}
}
/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	/* Validate the struct vmw_resource_relocation member size */
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

	list_for_each_entry(rel, list, head) {
		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
		switch (rel->rel_type) {
		case vmw_res_rel_normal:
			*addr = rel->res->id;
			break;
		case vmw_res_rel_nop:
			*addr = SVGA_3D_CMD_NOP;
			break;
		default:
			if (rel->res->id == -1)
				*addr = SVGA_3D_CMD_NOP;
			break;
		}
	}
}
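/*
 * Relocation lifecycle, informally: while commands are parsed, each
 * resource reference records a relocation against the byte offset of its
 * id; once all resources are validated and have device ids, the list is
 * applied to the buffer that will actually be submitted:
 *
 *	vmw_resource_relocation_add(&sw_context->res_relocations, res,
 *				    vmw_ptr_diff(sw_context->buf_start,
 *						 id_loc),
 *				    vmw_res_rel_normal);
 *	...
 *	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
 *	vmw_resource_relocations_free(&sw_context->res_relocations);
 */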
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}
/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @vbo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct vmw_dma_buffer *vbo,
				   bool validate_as_mob,
				   uint32_t *p_val_node)
{
	uint32_t val_node;
	struct vmw_validate_buffer *vval_buf;
	struct ttm_validate_buffer *val_buf;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
				    &hash) == 0)) {
		vval_buf = container_of(hash, struct vmw_validate_buffer,
					hash);
		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
		val_buf = &vval_buf->base;
		val_node = vval_buf - sw_context->val_bufs;
	} else {
		val_node = sw_context->cur_val_buf;
		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
			DRM_ERROR("Max number of DMA buffers per submission "
				  "exceeded.\n");
			return -EINVAL;
		}
		vval_buf = &sw_context->val_bufs[val_node];
		vval_buf->hash.key = (unsigned long) vbo;
		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to initialize a buffer validation "
				  "entry.\n");
			return ret;
		}
		++sw_context->cur_val_buf;
		val_buf = &vval_buf->base;
		val_buf->bo = ttm_bo_reference(&vbo->base);
		val_buf->shared = false;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		vval_buf->validate_as_mob = validate_as_mob;
	}

	if (p_val_node)
		*p_val_node = val_node;

	return 0;
}
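/*
 * Note: sw_context->res_ht doubles as the lookup structure for both
 * resources and buffer objects, keyed on the kernel pointer, so repeated
 * references to the same buffer within one submission reuse the existing
 * validation node instead of consuming another of the
 * VMWGFX_MAX_VALIDATIONS slots.
 */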
/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since vmware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret = 0;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
		if (unlikely(ret != 0))
			return ret;

		if (res->backup) {
			struct vmw_dma_buffer *vbo = res->backup;

			ret = vmw_bo_to_validate_list
				(sw_context, vbo,
				 vmw_resource_needs_backup(res), NULL);

			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (sw_context->dx_query_mob) {
		struct vmw_dma_buffer *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}
/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_dma_buffer *backup = res->backup;

		ret = vmw_resource_validate(res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}

		/* Check if the resource switched backup buffer */
		if (backup && res->backup && (backup != res->backup)) {
			struct vmw_dma_buffer *vbo = res->backup;

			ret = vmw_bo_to_validate_list
				(sw_context, vbo,
				 vmw_resource_needs_backup(res), NULL);
			if (ret) {
				ttm_bo_unreserve(&vbo->base);
				return ret;
			}
		}
	}
	return 0;
}
/**
 * vmw_cmd_res_reloc_add - Add a resource to a software context's
 * relocation- and validation lists.
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @sw_context: Pointer to the software context.
 * @id_loc: Pointer to where the id that needs translation is located.
 * @res: Valid pointer to a struct vmw_resource.
 * @p_val: If non null, a pointer to the struct vmw_resource_validate_node
 * used for this resource is returned here.
 */
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 uint32_t *id_loc,
				 struct vmw_resource *res,
				 struct vmw_resource_val_node **p_val)
{
	int ret;
	struct vmw_resource_val_node *node;

	*p_val = NULL;
	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
					  res,
					  vmw_ptr_diff(sw_context->buf_start,
						       id_loc),
					  vmw_res_rel_normal);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_resource_val_add(sw_context, res, &node);
	if (unlikely(ret != 0))
		return ret;

	if (p_val)
		*p_val = node;

	return 0;
}
/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource_val_node **p_val)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[res_type];
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
	int ret;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (p_val)
			*p_val = NULL;
		if (res_type == vmw_res_context) {
			DRM_ERROR("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	/*
	 * Fastpath in case of repeated commands referencing the same
	 * resource
	 */

	if (likely(rcache->valid && *id_loc == rcache->handle)) {
		const struct vmw_resource *res = rcache->res;

		rcache->node->first_usage = false;
		if (p_val)
			*p_val = rcache->node;

		return vmw_resource_relocation_add
			(&sw_context->res_relocations, res,
			 vmw_ptr_diff(sw_context->buf_start, id_loc),
			 vmw_res_rel_normal);
	}

	ret = vmw_user_resource_lookup_handle(dev_priv,
					      sw_context->fp->tfile,
					      *id_loc,
					      converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use resource 0x%08x.\n",
			  (unsigned) *id_loc);
		dump_stack();
		return ret;
	}

	rcache->valid = true;
	rcache->res = res;
	rcache->handle = *id_loc;

	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
				    res, &node);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	rcache->node = node;
	if (p_val)
		*p_val = node;
	vmw_resource_unreference(&res);
	return 0;

out_no_reloc:
	BUG_ON(sw_context->error_resource != NULL);
	sw_context->error_resource = res;

	return ret;
}
/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_dma_buffer *dx_query_mob;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindAllQuery body;
	} *cmd;

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);

	if (cmd == NULL) {
		DRM_ERROR("Failed to rebind queries.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->base.mem.start;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}
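/*
 * The reserve/commit pair above is the standard device-command pattern in
 * this driver: vmw_fifo_reserve_dx() returns mapped command space (or NULL
 * on failure), the caller fills in the header and body in place, and
 * vmw_fifo_commit() makes the command visible to the device.
 */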
/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		if (unlikely(!val->staged_bindings))
			break;

		ret = vmw_binding_rebind_all
			(vmw_context_binding_state(val->res));
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->res);
		if (ret != 0)
			return ret;
	}

	return 0;
}
/**
 * vmw_view_bindings_add - Add an array of view bindings to a context
 * binding state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_cmdbuf_res_manager *man;
	u32 i;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	man = sw_context->man;
	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_lookup(man, view_type, view_ids[i]);
			if (IS_ERR(view)) {
				DRM_ERROR("View not found.\n");
				return PTR_ERR(view);
			}

			ret = vmw_view_res_val_add(sw_context, view);
			if (ret) {
				DRM_ERROR("Could not add view to "
					  "validation list.\n");
				vmw_resource_unreference(&view);
				return ret;
			}
		}
		binding.bi.ctx = ctx_node->res;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				shader_slot, binding.slot);
		if (view)
			vmw_resource_unreference(&view);
	}

	return 0;
}
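/*
 * Example caller (hypothetical, for exposition only): a set-shader-resources
 * handler whose command body carries a shader type followed by a trailing
 * array of view ids could invoke
 *
 *	ret = vmw_view_bindings_add(sw_context, vmw_view_sr,
 *				    vmw_ctx_binding_sr, shader_slot,
 *				    view_ids, num_views, first_slot);
 *
 * with num_views derived from the command header size.
 */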
/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		uint32_t cid;
	} *cmd;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->cid, NULL);
}
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		DRM_ERROR("Illegal render target type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.target.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;

		binding.bi.ctx = ctx_node->res;
		binding.bi.res = res_node ? res_node->res : NULL;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(ctx_node->staged_bindings,
				&binding.bi, 0, binding.slot);
	}

	return 0;
}
static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}
static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBufferCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest, NULL);
}
static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXPredCopyRegion body;
	} *cmd;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dstSid, NULL);
}
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}
static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}
static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter, &cmd->body.sid,
				 NULL);
}
/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_dma_buffer *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->base.num_pages > 4)) {
			DRM_ERROR("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_bo_to_validate_list(sw_context,
						      sw_context->cur_query_bo,
						      dev_priv->has_mob, NULL);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_bo_to_validate_list(sw_context,
					      dev_priv->dummy_query_bo,
					      dev_priv->has_mob, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}
/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then,
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new - and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */

	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			DRM_ERROR("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_dmabuf_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting
			 * dummy queries in context destroy paths.
			 */

			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_dmabuf_reference(sw_context->cur_query_bo);
		}
	}
}
/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
				     NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use MOB buffer.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->mob_loc = id;
	reloc->location = NULL;

	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}
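/*
 * Typical use (see vmw_cmd_dx_bind_query() below):
 *
 *	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
 *				    &vmw_bo);
 *	...
 *	vmw_dmabuf_unreference(&vmw_bo);
 *
 * The id in the command stream itself is only rewritten later, when the
 * relocation list is applied.
 */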
/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
				     NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}
/**
 * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_dx_define_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineQuery q;
	} *cmd;

	int    ret;
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *cotable_res;

	if (ctx_node == NULL) {
		DRM_ERROR("DX Context not set for query.\n");
		return -EINVAL;
	}

	cmd = container_of(header, struct vmw_dx_define_query_cmd, header);

	if (cmd->q.type <  SVGA3D_QUERYTYPE_MIN ||
	    cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
	ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
	vmw_resource_unreference(&cotable_res);

	return ret;
}
/**
 * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID
 * with its backing MOB. In this function, we take the user mode
 * MOB ID and use vmw_translate_mob_ptr() to translate it to its
 * kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dx_bind_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindQuery q;
	} *cmd;

	struct vmw_dma_buffer *vmw_bo;
	int    ret;

	cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
				    &vmw_bo);

	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}
/**
 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_begin_gb_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginGBQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
			   header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}
/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	struct vmw_begin_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_query_cmd,
			   header);

	if (unlikely(dev_priv->has_mob)) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdBeginGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}
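/*
 * The in-place rewrite above depends on the legacy and guest-backed
 * command structs being the same size (enforced by the BUG_ON), so the
 * legacy command can be overwritten with its GB equivalent and re-parsed
 * through the GB handler without shifting any subsequent commands.
 */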
/**
 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}
/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdEndGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}
/**
 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}
/**
 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdWaitForGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stays in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		DRM_ERROR("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
		DRM_ERROR("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->dma.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->dma.host.sid,
				NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			DRM_ERROR("could not find surface for DMA.\n");
		goto out_no_surface;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
			     header);

out_no_surface:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}
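/*
 * Note on the suffix handling above: the SVGA3dCmdSurfaceDMASuffix sits at
 * the end of the command, and its maximumOffset is clamped against the
 * actual size of the backing buffer object rather than trusted from
 * user-space, so a DMA can never read or write past the end of the
 * backing pages.
 */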
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	} *cmd;

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
		((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_tex_state_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->state.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			DRM_ERROR("Illegal texture/sampler unit %u.\n",
				  (unsigned) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cur_state->value, &res_node);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;

			binding.bi.ctx = ctx_node->res;
			binding.bi.res = res_node ? res_node->res : NULL;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
					0, binding.texture_stage);
		}
	}

	return 0;
}
static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}
/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @val_node: The validation node representing the resource.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving.
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource_val_node *val_node,
				     uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_dma_buffer *dma_buf;
	int ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
	if (ret)
		return ret;

	val_node->switching_backup = true;
	if (val_node->first_usage)
		val_node->no_buffer_needed = true;

	vmw_dmabuf_unreference(&val_node->new_backup);
	val_node->new_backup = dma_buf;
	val_node->new_backup_offset = backup_offset;

	return 0;
}
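/*
 * The actual backup-buffer switch is deferred: the buffer and offset
 * recorded here are handed to vmw_resource_unreserve() by
 * vmw_resources_unreserve() once the command batch has been submitted.
 */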
/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving. It's basically a wrapper
 * around vmw_cmd_res_switch_backup with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter,
				 uint32_t *res_id,
				 uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource_val_node *val_node;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				converter, res_id, &val_node);
	if (ret)
		return ret;

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
					 buf_id, backup_offset);
}
/**
 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter,
				     &cmd->body.sid, &cmd->body.mobid,
				     0);
}
/**
 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}
/**
 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}
/**
 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}
/**
 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}
/**
 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}
2133 * vmw_cmd_invalidate_gb_surface - Validate an
2134 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
2136 * @dev_priv: Pointer to a device private struct.
2137 * @sw_context: The software context being used for this batch.
2138 * @header: Pointer to the command header in the command stream.
2140 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
2141 struct vmw_sw_context *sw_context,
2142 SVGA3dCmdHeader *header)
2144 struct vmw_gb_surface_cmd {
2145 SVGA3dCmdHeader header;
2146 SVGA3dCmdInvalidateGBSurface body;
2149 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2151 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2152 user_surface_converter,
2153 &cmd->body.sid, NULL);
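/*
 * Note on the six wrappers above: the *_GB_IMAGE commands carry an
 * SVGA3dSurfaceImageId (a surface id plus face and mipmap), so they act on
 * a single subresource, while the *_GB_SURFACE commands carry a bare sid
 * and act on the whole surface. Either way, validation only needs to
 * translate the surface id.
 */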
2158 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE command
2161 * @dev_priv: Pointer to a device private struct.
2162 * @sw_context: The software context being used for this batch.
2163 * @header: Pointer to the command header in the command stream.
2165 static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
2166 struct vmw_sw_context *sw_context,
2167 SVGA3dCmdHeader *header)
2169 struct vmw_shader_define_cmd {
2170 SVGA3dCmdHeader header;
2171 SVGA3dCmdDefineShader body;
2175 struct vmw_resource_val_node *val;
2177 cmd = container_of(header, struct vmw_shader_define_cmd,
2180 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2181 user_context_converter, &cmd->body.cid,
2183 if (unlikely(ret != 0))
2186 if (unlikely(!dev_priv->has_mob))
2189 size = cmd->header.size - sizeof(cmd->body);
2190 ret = vmw_compat_shader_add(dev_priv,
2191 vmw_context_res_man(val->res),
2192 cmd->body.shid, cmd + 1,
2193 cmd->body.type, size,
2194 &sw_context->staged_cmd_res);
2195 if (unlikely(ret != 0))
2198 return vmw_resource_relocation_add(&sw_context->res_relocations,
2200 vmw_ptr_diff(sw_context->buf_start,
2206 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY command
2209 * @dev_priv: Pointer to a device private struct.
2210 * @sw_context: The software context being used for this batch.
2211 * @header: Pointer to the command header in the command stream.
2213 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
2214 struct vmw_sw_context *sw_context,
2215 SVGA3dCmdHeader *header)
2217 struct vmw_shader_destroy_cmd {
2218 SVGA3dCmdHeader header;
2219 SVGA3dCmdDestroyShader body;
2222 struct vmw_resource_val_node *val;
2224 cmd = container_of(header, struct vmw_shader_destroy_cmd,
2227 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2228 user_context_converter, &cmd->body.cid,
2230 if (unlikely(ret != 0))
2233 if (unlikely(!dev_priv->has_mob))
2236 ret = vmw_shader_remove(vmw_context_res_man(val->res),
2239 &sw_context->staged_cmd_res);
2240 if (unlikely(ret != 0))
2243 return vmw_resource_relocation_add(&sw_context->res_relocations,
2245 vmw_ptr_diff(sw_context->buf_start,
2251 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER command
2254 * @dev_priv: Pointer to a device private struct.
2255 * @sw_context: The software context being used for this batch.
2256 * @header: Pointer to the command header in the command stream.
2258 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
2259 struct vmw_sw_context *sw_context,
2260 SVGA3dCmdHeader *header)
2262 struct vmw_set_shader_cmd {
2263 SVGA3dCmdHeader header;
2264 SVGA3dCmdSetShader body;
2266 struct vmw_resource_val_node *ctx_node, *res_node = NULL;
2267 struct vmw_ctx_bindinfo_shader binding;
2268 struct vmw_resource *res = NULL;
2271 cmd = container_of(header, struct vmw_set_shader_cmd,
2274 if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
2275 DRM_ERROR("Illegal shader type %u.\n",
2276 (unsigned) cmd->body.type);
2280 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2281 user_context_converter, &cmd->body.cid,
2283 if (unlikely(ret != 0))
2286 if (!dev_priv->has_mob)
2289 if (cmd->body.shid != SVGA3D_INVALID_ID) {
2290 res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
2295 ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
2296 &cmd->body.shid, res,
2298 vmw_resource_unreference(&res);
2299 if (unlikely(ret != 0))
2305 ret = vmw_cmd_res_check(dev_priv, sw_context,
2307 user_shader_converter,
2308 &cmd->body.shid, &res_node);
2309 if (unlikely(ret != 0))
2313 binding.bi.ctx = ctx_node->res;
2314 binding.bi.res = res_node ? res_node->res : NULL;
2315 binding.bi.bt = vmw_ctx_binding_shader;
2316 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2317 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2318 binding.shader_slot, 0);
2323 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST command
2326 * @dev_priv: Pointer to a device private struct.
2327 * @sw_context: The software context being used for this batch.
2328 * @header: Pointer to the command header in the command stream.
2330 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2331 struct vmw_sw_context *sw_context,
2332 SVGA3dCmdHeader *header)
2334 struct vmw_set_shader_const_cmd {
2335 SVGA3dCmdHeader header;
2336 SVGA3dCmdSetShaderConst body;
2340 cmd = container_of(header, struct vmw_set_shader_const_cmd,
2343 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2344 user_context_converter, &cmd->body.cid,
2346 if (unlikely(ret != 0))
2349 if (dev_priv->has_mob)
2350 header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2356 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER command
2359 * @dev_priv: Pointer to a device private struct.
2360 * @sw_context: The software context being used for this batch.
2361 * @header: Pointer to the command header in the command stream.
2363 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2364 struct vmw_sw_context *sw_context,
2365 SVGA3dCmdHeader *header)
2367 struct vmw_bind_gb_shader_cmd {
2368 SVGA3dCmdHeader header;
2369 SVGA3dCmdBindGBShader body;
2372 cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
2375 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2376 user_shader_converter,
2377 &cmd->body.shid, &cmd->body.mobid,
2378 cmd->body.offsetInBytes);
2382 * vmw_cmd_dx_set_single_constant_buffer - Validate an
2383 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2385 * @dev_priv: Pointer to a device private struct.
2386 * @sw_context: The software context being used for this batch.
2387 * @header: Pointer to the command header in the command stream.
2390 vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2391 struct vmw_sw_context *sw_context,
2392 SVGA3dCmdHeader *header)
2395 SVGA3dCmdHeader header;
2396 SVGA3dCmdDXSetSingleConstantBuffer body;
2398 struct vmw_resource_val_node *res_node = NULL;
2399 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2400 struct vmw_ctx_bindinfo_cb binding;
2403 if (unlikely(ctx_node == NULL)) {
2404 DRM_ERROR("DX Context not set.\n");
2408 cmd = container_of(header, typeof(*cmd), header);
2409 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2410 user_surface_converter,
2411 &cmd->body.sid, &res_node);
2412 if (unlikely(ret != 0))
2415 binding.bi.ctx = ctx_node->res;
2416 binding.bi.res = res_node ? res_node->res : NULL;
2417 binding.bi.bt = vmw_ctx_binding_cb;
2418 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2419 binding.offset = cmd->body.offsetInBytes;
2420 binding.size = cmd->body.sizeInBytes;
2421 binding.slot = cmd->body.slot;
2423 if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
2424 binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2425 DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
2426 (unsigned) cmd->body.type,
2427 (unsigned) binding.slot);
2431 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2432 binding.shader_slot, binding.slot);
2438 * vmw_cmd_dx_set_shader_res - Validate an
2439 * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
2441 * @dev_priv: Pointer to a device private struct.
2442 * @sw_context: The software context being used for this batch.
2443 * @header: Pointer to the command header in the command stream.
2445 static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2446 struct vmw_sw_context *sw_context,
2447 SVGA3dCmdHeader *header)
2450 SVGA3dCmdHeader header;
2451 SVGA3dCmdDXSetShaderResources body;
2452 } *cmd = container_of(header, typeof(*cmd), header);
2453 u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2454 sizeof(SVGA3dShaderResourceViewId);
2456 if ((u64) cmd->body.startView + (u64) num_sr_view >
2457 (u64) SVGA3D_DX_MAX_SRVIEWS ||
2458 cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2459 DRM_ERROR("Invalid shader binding.\n");
2463 return vmw_view_bindings_add(sw_context, vmw_view_sr,
2465 cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2466 (void *) &cmd[1], num_sr_view,
2467 cmd->body.startView);
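/*
 * A minimal sketch (illustrative, not driver code) of the size arithmetic
 * used above and by the other variable-length DX commands: the number of
 * trailing ids is derived from the body size reported in the header.
 */
static inline u32 vmw_num_trailing_ids_example(const SVGA3dCmdHeader *header,
					       size_t body_size,
					       size_t id_size)
{
	/* header->size counts the body only, not the header itself. */
	return (header->size - body_size) / id_size;
}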
2471 * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER command
2474 * @dev_priv: Pointer to a device private struct.
2475 * @sw_context: The software context being used for this batch.
2476 * @header: Pointer to the command header in the command stream.
2478 static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2479 struct vmw_sw_context *sw_context,
2480 SVGA3dCmdHeader *header)
2483 SVGA3dCmdHeader header;
2484 SVGA3dCmdDXSetShader body;
2486 struct vmw_resource *res = NULL;
2487 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2488 struct vmw_ctx_bindinfo_shader binding;
2491 if (unlikely(ctx_node == NULL)) {
2492 DRM_ERROR("DX Context not set.\n");
2496 cmd = container_of(header, typeof(*cmd), header);
2498 if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX ||
2499 cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
2500 DRM_ERROR("Illegal shader type %u.\n",
2501 (unsigned) cmd->body.type);
2505 if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2506 res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2508 DRM_ERROR("Could not find shader for binding.\n");
2509 return PTR_ERR(res);
2512 ret = vmw_resource_val_add(sw_context, res, NULL);
2517 binding.bi.ctx = ctx_node->res;
2518 binding.bi.res = res;
2519 binding.bi.bt = vmw_ctx_binding_dx_shader;
2520 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2522 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2523 binding.shader_slot, 0);
2526 vmw_resource_unreference(&res);
2532 * vmw_cmd_dx_set_vertex_buffers - Validate an
2533 * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
2535 * @dev_priv: Pointer to a device private struct.
2536 * @sw_context: The software context being used for this batch.
2537 * @header: Pointer to the command header in the command stream.
2539 static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2540 struct vmw_sw_context *sw_context,
2541 SVGA3dCmdHeader *header)
2543 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2544 struct vmw_ctx_bindinfo_vb binding;
2545 struct vmw_resource_val_node *res_node;
2547 SVGA3dCmdHeader header;
2548 SVGA3dCmdDXSetVertexBuffers body;
2549 SVGA3dVertexBuffer buf[];
2553 if (unlikely(ctx_node == NULL)) {
2554 DRM_ERROR("DX Context not set.\n");
2558 cmd = container_of(header, typeof(*cmd), header);
2559 num = (cmd->header.size - sizeof(cmd->body)) /
2560 sizeof(SVGA3dVertexBuffer);
2561 if ((u64)num + (u64)cmd->body.startBuffer >
2562 (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2563 DRM_ERROR("Invalid number of vertex buffers.\n");
2567 for (i = 0; i < num; i++) {
2568 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2569 user_surface_converter,
2570 &cmd->buf[i].sid, &res_node);
2571 if (unlikely(ret != 0))
2574 binding.bi.ctx = ctx_node->res;
2575 binding.bi.bt = vmw_ctx_binding_vb;
2576 binding.bi.res = ((res_node) ? res_node->res : NULL);
2577 binding.offset = cmd->buf[i].offset;
2578 binding.stride = cmd->buf[i].stride;
2579 binding.slot = i + cmd->body.startBuffer;
2581 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2589 * vmw_cmd_dx_set_index_buffer - Validate an
2590 * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
2592 * @dev_priv: Pointer to a device private struct.
2593 * @sw_context: The software context being used for this batch.
2594 * @header: Pointer to the command header in the command stream.
2596 static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2597 struct vmw_sw_context *sw_context,
2598 SVGA3dCmdHeader *header)
2600 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2601 struct vmw_ctx_bindinfo_ib binding;
2602 struct vmw_resource_val_node *res_node;
2604 SVGA3dCmdHeader header;
2605 SVGA3dCmdDXSetIndexBuffer body;
2609 if (unlikely(ctx_node == NULL)) {
2610 DRM_ERROR("DX Context not set.\n");
2614 cmd = container_of(header, typeof(*cmd), header);
2615 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2616 user_surface_converter,
2617 &cmd->body.sid, &res_node);
2618 if (unlikely(ret != 0))
2621 binding.bi.ctx = ctx_node->res;
2622 binding.bi.res = ((res_node) ? res_node->res : NULL);
2623 binding.bi.bt = vmw_ctx_binding_ib;
2624 binding.offset = cmd->body.offset;
2625 binding.format = cmd->body.format;
2627 vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
2633 * vmw_cmd_dx_set_rendertargets - Validate an
2634 * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
2636 * @dev_priv: Pointer to a device private struct.
2637 * @sw_context: The software context being used for this batch.
2638 * @header: Pointer to the command header in the command stream.
2640 static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2641 struct vmw_sw_context *sw_context,
2642 SVGA3dCmdHeader *header)
2645 SVGA3dCmdHeader header;
2646 SVGA3dCmdDXSetRenderTargets body;
2647 } *cmd = container_of(header, typeof(*cmd), header);
2649 u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2650 sizeof(SVGA3dRenderTargetViewId);
2652 if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2653 DRM_ERROR("Invalid DX Rendertarget binding.\n");
2657 ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
2658 vmw_ctx_binding_ds, 0,
2659 &cmd->body.depthStencilViewId, 1, 0);
2663 return vmw_view_bindings_add(sw_context, vmw_view_rt,
2664 vmw_ctx_binding_dx_rt, 0,
2665 (void *)&cmd[1], num_rt_view, 0);
2669 * vmw_cmd_dx_clear_rendertarget_view - Validate an
2670 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2672 * @dev_priv: Pointer to a device private struct.
2673 * @sw_context: The software context being used for this batch.
2674 * @header: Pointer to the command header in the command stream.
2676 static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2677 struct vmw_sw_context *sw_context,
2678 SVGA3dCmdHeader *header)
2681 SVGA3dCmdHeader header;
2682 SVGA3dCmdDXClearRenderTargetView body;
2683 } *cmd = container_of(header, typeof(*cmd), header);
2685 return vmw_view_id_val_add(sw_context, vmw_view_rt,
2686 cmd->body.renderTargetViewId);
2690 * vmw_cmd_dx_clear_depthstencil_view - Validate an
2691 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2693 * @dev_priv: Pointer to a device private struct.
2694 * @sw_context: The software context being used for this batch.
2695 * @header: Pointer to the command header in the command stream.
2697 static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2698 struct vmw_sw_context *sw_context,
2699 SVGA3dCmdHeader *header)
2702 SVGA3dCmdHeader header;
2703 SVGA3dCmdDXClearDepthStencilView body;
2704 } *cmd = container_of(header, typeof(*cmd), header);
2706 return vmw_view_id_val_add(sw_context, vmw_view_ds,
2707 cmd->body.depthStencilViewId);
2710 static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2711 struct vmw_sw_context *sw_context,
2712 SVGA3dCmdHeader *header)
2714 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2715 struct vmw_resource_val_node *srf_node;
2716 struct vmw_resource *res;
2717 enum vmw_view_type view_type;
2720 * This is based on the fact that all affected define commands have
2721 * the same initial command body layout.
2724 SVGA3dCmdHeader header;
2729 if (unlikely(ctx_node == NULL)) {
2730 DRM_ERROR("DX Context not set.\n");
2734 view_type = vmw_view_cmd_to_type(header->id);
2735 if (view_type == vmw_view_max)
2737 cmd = container_of(header, typeof(*cmd), header);
2738 if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
2739 DRM_ERROR("Invalid surface id.\n");
2742 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2743 user_surface_converter,
2744 &cmd->sid, &srf_node);
2745 if (unlikely(ret != 0))
2748 res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
2749 ret = vmw_cotable_notify(res, cmd->defined_id);
2750 vmw_resource_unreference(&res);
2751 if (unlikely(ret != 0))
2754 return vmw_view_add(sw_context->man,
2760 header->size + sizeof(*header),
2761 &sw_context->staged_cmd_res);
2765 * vmw_cmd_dx_set_so_targets - Validate an
2766 * SVGA_3D_CMD_DX_SET_SOTARGETS command.
2768 * @dev_priv: Pointer to a device private struct.
2769 * @sw_context: The software context being used for this batch.
2770 * @header: Pointer to the command header in the command stream.
2772 static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2773 struct vmw_sw_context *sw_context,
2774 SVGA3dCmdHeader *header)
2776 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2777 struct vmw_ctx_bindinfo_so binding;
2778 struct vmw_resource_val_node *res_node;
2780 SVGA3dCmdHeader header;
2781 SVGA3dCmdDXSetSOTargets body;
2782 SVGA3dSoTarget targets[];
2786 if (unlikely(ctx_node == NULL)) {
2787 DRM_ERROR("DX Context not set.\n");
2791 cmd = container_of(header, typeof(*cmd), header);
2792 num = (cmd->header.size - sizeof(cmd->body)) /
2793 sizeof(SVGA3dSoTarget);
2795 if (num > SVGA3D_DX_MAX_SOTARGETS) {
2796 DRM_ERROR("Invalid DX SO binding.\n");
2800 for (i = 0; i < num; i++) {
2801 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2802 user_surface_converter,
2803 &cmd->targets[i].sid, &res_node);
2804 if (unlikely(ret != 0))
2807 binding.bi.ctx = ctx_node->res;
2808 binding.bi.res = ((res_node) ? res_node->res : NULL);
2809 binding.bi.bt = vmw_ctx_binding_so;
2810 binding.offset = cmd->targets[i].offset;
2811 binding.size = cmd->targets[i].sizeInBytes;
2814 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2821 static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2822 struct vmw_sw_context *sw_context,
2823 SVGA3dCmdHeader *header)
2825 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2826 struct vmw_resource *res;
2828 * This is based on the fact that all affected define commands have
2829 * the same initial command body layout.
2832 SVGA3dCmdHeader header;
2835 enum vmw_so_type so_type;
2838 if (unlikely(ctx_node == NULL)) {
2839 DRM_ERROR("DX Context not set.\n");
2843 so_type = vmw_so_cmd_to_type(header->id);
2844 res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
2845 cmd = container_of(header, typeof(*cmd), header);
2846 ret = vmw_cotable_notify(res, cmd->defined_id);
2847 vmw_resource_unreference(&res);
2853 * vmw_cmd_dx_check_subresource - Validate an
2854 * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
2856 * @dev_priv: Pointer to a device private struct.
2857 * @sw_context: The software context being used for this batch.
2858 * @header: Pointer to the command header in the command stream.
2860 static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2861 struct vmw_sw_context *sw_context,
2862 SVGA3dCmdHeader *header)
2865 SVGA3dCmdHeader header;
2867 SVGA3dCmdDXReadbackSubResource r_body;
2868 SVGA3dCmdDXInvalidateSubResource i_body;
2869 SVGA3dCmdDXUpdateSubResource u_body;
2870 SVGA3dSurfaceId sid;
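	/*
	 * @sid above aliases the surface id of whichever body this command
	 * actually carries; the BUILD_BUG_ON()s below verify at compile
	 * time that all three body types keep their sid at the same offset.
	 */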
2874 BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2875 offsetof(typeof(*cmd), sid));
2876 BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2877 offsetof(typeof(*cmd), sid));
2878 BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2879 offsetof(typeof(*cmd), sid));
2881 cmd = container_of(header, typeof(*cmd), header);
2883 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2884 user_surface_converter,
2888 static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2889 struct vmw_sw_context *sw_context,
2890 SVGA3dCmdHeader *header)
2892 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2894 if (unlikely(ctx_node == NULL)) {
2895 DRM_ERROR("DX Context not set.\n");
2903 * vmw_cmd_dx_view_remove - validate a view remove command and
2904 * schedule the view resource for removal.
2906 * @dev_priv: Pointer to a device private struct.
2907 * @sw_context: The software context being used for this batch.
2908 * @header: Pointer to the command header in the command stream.
2910 * Check that the view exists, and if it was not created using this
2911 * command batch, conditionally make this command a NOP.
2913 static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2914 struct vmw_sw_context *sw_context,
2915 SVGA3dCmdHeader *header)
2917 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2919 SVGA3dCmdHeader header;
2920 union vmw_view_destroy body;
2921 } *cmd = container_of(header, typeof(*cmd), header);
2922 enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2923 struct vmw_resource *view;
2927 DRM_ERROR("DX Context not set.\n");
2931 ret = vmw_view_remove(sw_context->man,
2932 cmd->body.view_id, view_type,
2933 &sw_context->staged_cmd_res,
2939 * If the view wasn't created during this command batch, it might
2940 * have been removed due to a context swapout, so add a
2941 * relocation to conditionally make this command a NOP to avoid
2944 return vmw_resource_relocation_add(&sw_context->res_relocations,
2946 vmw_ptr_diff(sw_context->buf_start,
2948 vmw_res_rel_cond_nop);
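/*
 * Illustrative sketch (not driver code): when the relocation list is
 * applied, a vmw_res_rel_cond_nop entry behaves roughly like
 *
 *	if (rel->res->id == -1)
 *		cb[rel->offset >> 2] = SVGA_3D_CMD_NOP;
 *
 * so the destroy command above is only neutralized when the view did not
 * survive validation.
 */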
2952 * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER command
2955 * @dev_priv: Pointer to a device private struct.
2956 * @sw_context: The software context being used for this batch.
2957 * @header: Pointer to the command header in the command stream.
2959 static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2960 struct vmw_sw_context *sw_context,
2961 SVGA3dCmdHeader *header)
2963 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2964 struct vmw_resource *res;
2966 SVGA3dCmdHeader header;
2967 SVGA3dCmdDXDefineShader body;
2968 } *cmd = container_of(header, typeof(*cmd), header);
2972 DRM_ERROR("DX Context not set.\n");
2976 res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
2977 ret = vmw_cotable_notify(res, cmd->body.shaderId);
2978 vmw_resource_unreference(&res);
2982 return vmw_dx_shader_add(sw_context->man, ctx_node->res,
2983 cmd->body.shaderId, cmd->body.type,
2984 &sw_context->staged_cmd_res);
2988 * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER command
2991 * @dev_priv: Pointer to a device private struct.
2992 * @sw_context: The software context being used for this batch.
2993 * @header: Pointer to the command header in the command stream.
2995 static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2996 struct vmw_sw_context *sw_context,
2997 SVGA3dCmdHeader *header)
2999 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
3001 SVGA3dCmdHeader header;
3002 SVGA3dCmdDXDestroyShader body;
3003 } *cmd = container_of(header, typeof(*cmd), header);
3007 DRM_ERROR("DX Context not set.\n");
3011 ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
3012 &sw_context->staged_cmd_res);
3014 DRM_ERROR("Could not find shader to remove.\n");
3020 * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER command
3023 * @dev_priv: Pointer to a device private struct.
3024 * @sw_context: The software context being used for this batch.
3025 * @header: Pointer to the command header in the command stream.
3027 static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
3028 struct vmw_sw_context *sw_context,
3029 SVGA3dCmdHeader *header)
3031 struct vmw_resource_val_node *ctx_node;
3032 struct vmw_resource_val_node *res_node;
3033 struct vmw_resource *res;
3035 SVGA3dCmdHeader header;
3036 SVGA3dCmdDXBindShader body;
3037 } *cmd = container_of(header, typeof(*cmd), header);
3040 if (cmd->body.cid != SVGA3D_INVALID_ID) {
3041 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
3042 user_context_converter,
3043 &cmd->body.cid, &ctx_node);
3047 ctx_node = sw_context->dx_ctx_node;
3049 DRM_ERROR("DX Context not set.\n");
3054 res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
3057 DRM_ERROR("Could not find shader to bind.\n");
3058 return PTR_ERR(res);
3061 ret = vmw_resource_val_add(sw_context, res, &res_node);
3063 DRM_ERROR("Error creating resource validation node.\n");
3068 ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
3070 cmd->body.offsetInBytes);
3072 vmw_resource_unreference(&res);
3078 * vmw_cmd_dx_genmips - Validate an SVGA_3D_CMD_DX_GENMIPS command
3080 * @dev_priv: Pointer to a device private struct.
3081 * @sw_context: The software context being used for this batch.
3082 * @header: Pointer to the command header in the command stream.
3084 static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
3085 struct vmw_sw_context *sw_context,
3086 SVGA3dCmdHeader *header)
3089 SVGA3dCmdHeader header;
3090 SVGA3dCmdDXGenMips body;
3091 } *cmd = container_of(header, typeof(*cmd), header);
3093 return vmw_view_id_val_add(sw_context, vmw_view_sr,
3094 cmd->body.shaderResourceViewId);
3098 * vmw_cmd_dx_transfer_from_buffer -
3099 * Validate an SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
3101 * @dev_priv: Pointer to a device private struct.
3102 * @sw_context: The software context being used for this batch.
3103 * @header: Pointer to the command header in the command stream.
3105 static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
3106 struct vmw_sw_context *sw_context,
3107 SVGA3dCmdHeader *header)
3110 SVGA3dCmdHeader header;
3111 SVGA3dCmdDXTransferFromBuffer body;
3112 } *cmd = container_of(header, typeof(*cmd), header);
3115 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3116 user_surface_converter,
3117 &cmd->body.srcSid, NULL);
3121 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3122 user_surface_converter,
3123 &cmd->body.destSid, NULL);
3126 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3127 struct vmw_sw_context *sw_context,
3128 void *buf, uint32_t *size)
3130 uint32_t size_remaining = *size;
3133 cmd_id = ((uint32_t *)buf)[0];
3135 case SVGA_CMD_UPDATE:
3136 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3138 case SVGA_CMD_DEFINE_GMRFB:
3139 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3141 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3142 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3144 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3145 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
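/*
 * SVGAFifoCmdBlitScreenToGMRFB has the same size as the GMRFB-to-screen
 * variant, so the sizeof above is equivalent for both directions.
 */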
3148 DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
3152 if (*size > size_remaining) {
3153 DRM_ERROR("Invalid SVGA command (size mismatch):"
3158 if (unlikely(!sw_context->kernel)) {
3159 DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
3163 if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3164 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
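/*
 * Sketch (illustrative; the real VMW_CMD_DEF() macro is defined earlier in
 * this file): each entry below expands to a designated initializer keyed
 * by command id, roughly
 *
 *	[SVGA_3D_CMD_FOO - SVGA_3D_CMD_BASE] =
 *		{ &vmw_cmd_foo, user_allow, gb_disable, gb_enable, ... },
 *
 * which is what lets vmw_cmd_check() and vmw_cmd_describe() index
 * vmw_cmd_entries[] directly by (cmd_id - SVGA_3D_CMD_BASE).
 */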
3169 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3170 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3171 false, false, false),
3172 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3173 false, false, false),
3174 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3175 true, false, false),
3176 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3177 true, false, false),
3178 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3179 true, false, false),
3180 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3181 false, false, false),
3182 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3183 false, false, false),
3184 VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3185 true, false, false),
3186 VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3187 true, false, false),
3188 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3189 true, false, false),
3190 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3191 &vmw_cmd_set_render_target_check, true, false, false),
3192 VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3193 true, false, false),
3194 VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3195 true, false, false),
3196 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3197 true, false, false),
3198 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3199 true, false, false),
3200 VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3201 true, false, false),
3202 VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3203 true, false, false),
3204 VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3205 true, false, false),
3206 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3207 false, false, false),
3208 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3209 true, false, false),
3210 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3211 true, false, false),
3212 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3213 true, false, false),
3214 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3215 true, false, false),
3216 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3217 true, false, false),
3218 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3219 true, false, false),
3220 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3221 true, false, false),
3222 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3223 true, false, false),
3224 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3225 true, false, false),
3226 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3227 true, false, false),
3228 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3229 &vmw_cmd_blt_surf_screen_check, false, false, false),
3230 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3231 false, false, false),
3232 VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3233 false, false, false),
3234 VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3235 false, false, false),
3236 VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3237 false, false, false),
3238 VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3239 false, false, false),
3240 VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
3241 false, false, false),
3242 VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
3243 false, false, false),
3244 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
3245 false, false, false),
3246 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
3247 false, false, false),
3248 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
3249 false, false, false),
3250 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
3251 false, false, false),
3252 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
3253 false, false, false),
3254 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
3255 false, false, false),
3256 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3257 false, false, true),
3258 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3259 false, false, true),
3260 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3261 false, false, true),
3262 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3263 false, false, true),
3264 VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3265 false, false, true),
3266 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3267 false, false, true),
3268 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3269 false, false, true),
3270 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3271 false, false, true),
3272 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3274 VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3275 false, false, true),
3276 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3278 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3279 &vmw_cmd_update_gb_surface, true, false, true),
3280 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3281 &vmw_cmd_readback_gb_image, true, false, true),
3282 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3283 &vmw_cmd_readback_gb_surface, true, false, true),
3284 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3285 &vmw_cmd_invalidate_gb_image, true, false, true),
3286 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3287 &vmw_cmd_invalidate_gb_surface, true, false, true),
3288 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3289 false, false, true),
3290 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3291 false, false, true),
3292 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3293 false, false, true),
3294 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3295 false, false, true),
3296 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3297 false, false, true),
3298 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3299 false, false, true),
3300 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3302 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3303 false, false, true),
3304 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3305 false, false, false),
3306 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3308 VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3310 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3312 VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3314 VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
3316 VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3317 false, false, true),
3318 VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3319 false, false, true),
3320 VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3321 false, false, true),
3322 VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3323 false, false, true),
3324 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3325 false, false, true),
3326 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3327 false, false, true),
3328 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3329 false, false, true),
3330 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3331 false, false, true),
3332 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3333 false, false, true),
3334 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3335 false, false, true),
3336 VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3338 VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3339 false, false, true),
3340 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3341 false, false, true),
3342 VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3343 false, false, true),
3344 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3345 false, false, true),
3350 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3351 false, false, true),
3352 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3353 false, false, true),
3354 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3355 false, false, true),
3356 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3357 false, false, true),
3358 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3359 false, false, true),
3360 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3361 &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3362 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3363 &vmw_cmd_dx_set_shader_res, true, false, true),
3364 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3366 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3368 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3370 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3372 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3374 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3375 &vmw_cmd_dx_cid_check, true, false, true),
3376 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3378 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3379 &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3380 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3381 &vmw_cmd_dx_set_index_buffer, true, false, true),
3382 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3383 &vmw_cmd_dx_set_rendertargets, true, false, true),
3384 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3386 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3387 &vmw_cmd_dx_cid_check, true, false, true),
3388 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3389 &vmw_cmd_dx_cid_check, true, false, true),
3390 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3392 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3394 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3396 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3397 &vmw_cmd_dx_cid_check, true, false, true),
3398 VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3400 VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3402 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3404 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3406 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3408 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3410 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3411 &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3412 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3413 &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3414 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3416 VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3418 VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3419 &vmw_cmd_dx_check_subresource, true, false, true),
3420 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3421 &vmw_cmd_dx_check_subresource, true, false, true),
3422 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3423 &vmw_cmd_dx_check_subresource, true, false, true),
3424 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3425 &vmw_cmd_dx_view_define, true, false, true),
3426 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3427 &vmw_cmd_dx_view_remove, true, false, true),
3428 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3429 &vmw_cmd_dx_view_define, true, false, true),
3430 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3431 &vmw_cmd_dx_view_remove, true, false, true),
3432 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3433 &vmw_cmd_dx_view_define, true, false, true),
3434 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3435 &vmw_cmd_dx_view_remove, true, false, true),
3436 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3437 &vmw_cmd_dx_so_define, true, false, true),
3438 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3439 &vmw_cmd_dx_cid_check, true, false, true),
3440 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3441 &vmw_cmd_dx_so_define, true, false, true),
3442 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3443 &vmw_cmd_dx_cid_check, true, false, true),
3444 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3445 &vmw_cmd_dx_so_define, true, false, true),
3446 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3447 &vmw_cmd_dx_cid_check, true, false, true),
3448 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3449 &vmw_cmd_dx_so_define, true, false, true),
3450 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3451 &vmw_cmd_dx_cid_check, true, false, true),
3452 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3453 &vmw_cmd_dx_so_define, true, false, true),
3454 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3455 &vmw_cmd_dx_cid_check, true, false, true),
3456 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3457 &vmw_cmd_dx_define_shader, true, false, true),
3458 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3459 &vmw_cmd_dx_destroy_shader, true, false, true),
3460 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3461 &vmw_cmd_dx_bind_shader, true, false, true),
3462 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3463 &vmw_cmd_dx_so_define, true, false, true),
3464 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3465 &vmw_cmd_dx_cid_check, true, false, true),
3466 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
3468 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3469 &vmw_cmd_dx_set_so_targets, true, false, true),
3470 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3471 &vmw_cmd_dx_cid_check, true, false, true),
3472 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3473 &vmw_cmd_dx_cid_check, true, false, true),
3474 VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3475 &vmw_cmd_buffer_copy_check, true, false, true),
3476 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3477 &vmw_cmd_pred_copy_check, true, false, true),
3478 VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3479 &vmw_cmd_dx_transfer_from_buffer,
3483 bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
3485 u32 cmd_id = ((u32 *) buf)[0];
3487 if (cmd_id >= SVGA_CMD_MAX) {
3488 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3489 const struct vmw_cmd_entry *entry;
3491 *size = header->size + sizeof(SVGA3dCmdHeader);
3492 cmd_id = header->id;
3493 if (cmd_id >= SVGA_3D_CMD_MAX)
3496 cmd_id -= SVGA_3D_CMD_BASE;
3497 entry = &vmw_cmd_entries[cmd_id];
3498 *cmd = entry->cmd_name;
3503 case SVGA_CMD_UPDATE:
3504 *cmd = "SVGA_CMD_UPDATE";
3505 *size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
3507 case SVGA_CMD_DEFINE_GMRFB:
3508 *cmd = "SVGA_CMD_DEFINE_GMRFB";
3509 *size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
3511 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3512 *cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
3513 *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3515 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3516 *cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
3517 *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
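/*
 * Example use of vmw_cmd_describe() (a sketch only; assumes @stream and
 * @stream_size describe a well-formed batch):
 *
 *	const u8 *buf = stream;
 *	u32 left = stream_size, cmd_size;
 *	const char *name;
 *
 *	while (left > 0 && vmw_cmd_describe(buf, &cmd_size, &name)) {
 *		pr_info("cmd: %s\n", name ? name : "unknown");
 *		buf += cmd_size;
 *		left -= cmd_size;
 *	}
 */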
3528 static int vmw_cmd_check(struct vmw_private *dev_priv,
3529 struct vmw_sw_context *sw_context,
3530 void *buf, uint32_t *size)
3533 uint32_t size_remaining = *size;
3534 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3536 const struct vmw_cmd_entry *entry;
3537 bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3539 cmd_id = ((uint32_t *)buf)[0];
3540 /* Handle any non-3D commands. */
3541 if (unlikely(cmd_id < SVGA_CMD_MAX))
3542 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3545 cmd_id = header->id;
3546 *size = header->size + sizeof(SVGA3dCmdHeader);
3548 cmd_id -= SVGA_3D_CMD_BASE;
3549 if (unlikely(*size > size_remaining))
3552 if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3555 entry = &vmw_cmd_entries[cmd_id];
3556 if (unlikely(!entry->func))
3559 if (unlikely(!entry->user_allow && !sw_context->kernel))
3560 goto out_privileged;
3562 if (unlikely(entry->gb_disable && gb))
3565 if (unlikely(entry->gb_enable && !gb))
3568 ret = entry->func(dev_priv, sw_context, header);
3569 if (unlikely(ret != 0))
3574 DRM_ERROR("Invalid SVGA3D command: %d\n",
3575 cmd_id + SVGA_3D_CMD_BASE);
3578 DRM_ERROR("Privileged SVGA3D command: %d\n",
3579 cmd_id + SVGA_3D_CMD_BASE);
3582 DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
3583 cmd_id + SVGA_3D_CMD_BASE);
3586 DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
3587 cmd_id + SVGA_3D_CMD_BASE);
3591 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3592 struct vmw_sw_context *sw_context,
3596 int32_t cur_size = size;
3599 sw_context->buf_start = buf;
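	/*
	 * Walk the stream one command at a time: each vmw_cmd_check() call
	 * validates a single command and rewrites @size to that command's
	 * total length, so the loop can advance past it.
	 */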
3601 while (cur_size > 0) {
3603 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3604 if (unlikely(ret != 0))
3606 buf = (void *)((unsigned long) buf + size);
3610 if (unlikely(cur_size != 0)) {
3611 DRM_ERROR("Command verifier out of sync.\n");
3618 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3620 sw_context->cur_reloc = 0;
3623 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3626 struct vmw_relocation *reloc;
3627 struct ttm_validate_buffer *validate;
3628 struct ttm_buffer_object *bo;
3630 for (i = 0; i < sw_context->cur_reloc; ++i) {
3631 reloc = &sw_context->relocs[i];
3632 validate = &sw_context->val_bufs[reloc->index].base;
3634 switch (bo->mem.mem_type) {
3636 reloc->location->offset += bo->offset;
3637 reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3640 reloc->location->gmrId = bo->mem.start;
3643 *reloc->mob_loc = bo->mem.start;
3649 vmw_free_relocations(sw_context);
3653 * vmw_resource_list_unreference - Free up a resource list and unreference
3654 * all resources referenced by it.
3656 * @list: The resource list.
3658 static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
3659 struct list_head *list)
3661 struct vmw_resource_val_node *val, *val_next;
3664 * Drop references to resources held during command submission.
3667 list_for_each_entry_safe(val, val_next, list, head) {
3668 list_del_init(&val->head);
3669 vmw_resource_unreference(&val->res);
3671 if (val->staged_bindings) {
3672 if (val->staged_bindings != sw_context->staged_bindings)
3673 vmw_binding_state_free(val->staged_bindings);
3675 sw_context->staged_bindings_inuse = false;
3676 val->staged_bindings = NULL;
3683 static void vmw_clear_validations(struct vmw_sw_context *sw_context)
3685 struct vmw_validate_buffer *entry, *next;
3686 struct vmw_resource_val_node *val;
3689 * Drop references to DMA buffers held during command submission.
3691 list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
3693 list_del(&entry->base.head);
3694 ttm_bo_unref(&entry->base.bo);
3695 (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
3696 sw_context->cur_val_buf--;
3698 BUG_ON(sw_context->cur_val_buf != 0);
3700 list_for_each_entry(val, &sw_context->resource_list, head)
3701 (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
3704 int vmw_validate_single_buffer(struct vmw_private *dev_priv,
3705 struct ttm_buffer_object *bo,
3707 bool validate_as_mob)
3709 struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
3713 if (vbo->pin_count > 0)
3716 if (validate_as_mob)
3717 return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
3721 * Put BO in VRAM if there is space, otherwise as a GMR.
3722 * If there is no space in VRAM and GMR ids are all used up,
3723 * start evicting GMRs to make room. If the DMA buffer can't be
3724 * used as a GMR, this will return -ENOMEM.
3727 ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
3729 if (likely(ret == 0 || ret == -ERESTARTSYS))
3733 * If that failed, try VRAM again, this time evicting
3734 * previous contents.
3737 ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
3741 static int vmw_validate_buffers(struct vmw_private *dev_priv,
3742 struct vmw_sw_context *sw_context)
3744 struct vmw_validate_buffer *entry;
3747 list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
3748 ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
3750 entry->validate_as_mob);
3751 if (unlikely(ret != 0))
3757 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3760 if (likely(sw_context->cmd_bounce_size >= size))
3763 if (sw_context->cmd_bounce_size == 0)
3764 sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3766 while (sw_context->cmd_bounce_size < size) {
3767 sw_context->cmd_bounce_size =
3768 PAGE_ALIGN(sw_context->cmd_bounce_size +
3769 (sw_context->cmd_bounce_size >> 1));
3772 vfree(sw_context->cmd_bounce);
3773 sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3775 if (sw_context->cmd_bounce == NULL) {
3776 DRM_ERROR("Failed to allocate command bounce buffer.\n");
3777 sw_context->cmd_bounce_size = 0;
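/*
 * A minimal sketch (not driver code) of the growth policy above: grow by
 * half the current size, page-aligned, until the request fits. Assumes
 * @cur starts at VMWGFX_CMD_BOUNCE_INIT_SIZE or above, never zero.
 */
static inline size_t vmw_cmd_bounce_grown_size_example(size_t cur,
						       size_t needed)
{
	while (cur < needed)
		cur = PAGE_ALIGN(cur + (cur >> 1));
	return cur;
}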
3785 * vmw_execbuf_fence_commands - create and submit a command stream fence
3787 * Creates a fence object and submits a command stream marker.
3788 * If this fails for some reason, we sync the fifo and return NULL.
3789 * It is then safe to fence buffers with a NULL pointer.
3791 * If @p_handle is not NULL @file_priv must also not be NULL. Creates
3792 * a userspace handle if @p_handle is not NULL, otherwise not.
3795 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3796 struct vmw_private *dev_priv,
3797 struct vmw_fence_obj **p_fence,
3802 bool synced = false;
3804 /* p_handle implies file_priv. */
3805 BUG_ON(p_handle != NULL && file_priv == NULL);
3807 ret = vmw_fifo_send_fence(dev_priv, &sequence);
3808 if (unlikely(ret != 0)) {
3809 DRM_ERROR("Fence submission error. Syncing.\n");
3813 if (p_handle != NULL)
3814 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3815 sequence, p_fence, p_handle);
3817 ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3819 if (unlikely(ret != 0 && !synced)) {
3820 (void) vmw_fallback_wait(dev_priv, false, false,
3822 VMW_FENCE_WAIT_TIMEOUT);
3830 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
3833 * @dev_priv: Pointer to a vmw_private struct.
3834 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3835 * @ret: Return value from fence object creation.
3836 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
3837 * which the information should be copied.
3838 * @fence: Pointer to the fence object.
3839 * @fence_handle: User-space fence handle.
3840 * @out_fence_fd: exported file descriptor for the fence. -1 if not used
3841 * @sync_file: Only used to clean up in case of an error in this function.
3843 * This function copies fence information to user-space. If copying fails,
3844 * the user-space struct drm_vmw_fence_rep::error member should be left
3845 * untouched; if user-space preloaded it with -EFAULT, the failed copy
3846 * will then be detected.
3847 * Also if copying fails, user-space will be unable to signal the fence
3848 * object so we wait for it immediately, and then unreference the
3849 * user-space reference.
3852 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3853 struct vmw_fpriv *vmw_fp,
3855 struct drm_vmw_fence_rep __user *user_fence_rep,
3856 struct vmw_fence_obj *fence,
3857 uint32_t fence_handle,
3858 int32_t out_fence_fd)
3860 struct drm_vmw_fence_rep fence_rep;
3862 if (user_fence_rep == NULL)
3865 memset(&fence_rep, 0, sizeof(fence_rep));
3867 fence_rep.error = ret;
3868 fence_rep.fd = out_fence_fd;
3870 BUG_ON(fence == NULL);
3872 fence_rep.handle = fence_handle;
3873 fence_rep.seqno = fence->base.seqno;
3874 vmw_update_seqno(dev_priv, &dev_priv->fifo);
3875 fence_rep.passed_seqno = dev_priv->last_read_seqno;
3879 * copy_to_user errors will be detected by user space not
3880 * seeing fence_rep::error filled in. Typically
3881 * user-space would have pre-set that member to -EFAULT.
3883 ret = copy_to_user(user_fence_rep, &fence_rep,
3887 * User-space lost the fence object. We need to sync
3888 * and unreference the handle.
3890 if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3891 ttm_ref_object_base_unref(vmw_fp->tfile,
3892 fence_handle, TTM_REF_USAGE);
3893 DRM_ERROR("Fence copy error. Syncing.\n");
3894 (void) vmw_fence_obj_wait(fence, false, false,
3895 VMW_FENCE_WAIT_TIMEOUT);
3898 return ret ? -EFAULT : 0;
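/*
 * Illustrative user-space counterpart (a sketch, not driver code; assumes
 * a libdrm-style wrapper): pre-seeding fence_rep.error with -EFAULT makes
 * a lost copy_to_user() in the kernel detectable, as described above.
 *
 *	struct drm_vmw_fence_rep rep = { .error = -EFAULT };
 *
 *	arg.fence_rep = (unsigned long) &rep;
 *	if (drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg)))
 *		return -errno;
 *	return rep.error;
 *
 * rep.error remains -EFAULT only if the kernel's copy_to_user() was lost.
 */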
3902 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
3905 * @dev_priv: Pointer to a device private structure.
3906 * @kernel_commands: Pointer to the unpatched command batch.
3907 * @command_size: Size of the unpatched command batch.
3908 * @sw_context: Structure holding the relocation lists.
3910 * Side effects: If this function returns 0, then the command batch
3911 * pointed to by @kernel_commands will have been modified.
3913 static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3914 void *kernel_commands,
3916 struct vmw_sw_context *sw_context)
3920 if (sw_context->dx_ctx_node)
3921 cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
3922 sw_context->dx_ctx_node->res->id);
3924 cmd = vmw_fifo_reserve(dev_priv, command_size);
3926 DRM_ERROR("Failed reserving fifo space for commands.\n");
3930 vmw_apply_relocations(sw_context);
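	/*
	 * Buffer relocations patch the bounce buffer in place; resource
	 * relocations are applied to the FIFO copy below, after the
	 * memcpy.
	 */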
3931 memcpy(cmd, kernel_commands, command_size);
3932 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3933 vmw_resource_relocations_free(&sw_context->res_relocations);
3934 vmw_fifo_commit(dev_priv, command_size);
3940 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
3941 * the command buffer manager.
3943 * @dev_priv: Pointer to a device private structure.
3944 * @header: Opaque handle to the command buffer allocation.
3945 * @command_size: Size of the unpatched command batch.
3946 * @sw_context: Structure holding the relocation lists.
3948 * Side effects: If this function returns 0, then the command buffer
3949 * represented by @header will have been modified.
3951 static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3952 struct vmw_cmdbuf_header *header,
3954 struct vmw_sw_context *sw_context)
3956 u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
3958 void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
3961 vmw_apply_relocations(sw_context);
3962 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3963 vmw_resource_relocations_free(&sw_context->res_relocations);
3964 vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3970 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3971 * submission using a command buffer.
3973 * @dev_priv: Pointer to a device private structure.
3974 * @user_commands: User-space pointer to the commands to be submitted.
3975 * @command_size: Size of the unpatched command batch.
3976 * @header: Out parameter returning the opaque pointer to the command buffer.
3978 * This function checks whether we can use the command buffer manager for
3979 * submission and if so, creates a command buffer of suitable size and
3980 * copies the user data into that buffer.
3982 * On successful return, the function returns a pointer to the data in the
3983 * command buffer and *@header is set to non-NULL.
3984 * If command buffers could not be used, the function will return the value
3985 * of @kernel_commands on function call. That value may be NULL. In that case,
3986 * the value of *@header will be set to NULL.
3987 * If an error is encountered, the function will return a pointer error value.
3988 * If the function is interrupted by a signal while sleeping, it will return
3989 * -ERESTARTSYS cast to a pointer error value.
3991 static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3992 void __user *user_commands,
3993 void *kernel_commands,
3995 struct vmw_cmdbuf_header **header)
4001 if (command_size > SVGA_CB_MAX_SIZE) {
4002 DRM_ERROR("Command buffer is too large.\n");
4003 return ERR_PTR(-EINVAL);
4006 if (!dev_priv->cman || kernel_commands)
4007 return kernel_commands;
4009 /* If possible, add a little space for fencing. */
4010 cmdbuf_size = command_size + 512;
4011 cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
4012 kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
4014 if (IS_ERR(kernel_commands))
4015 return kernel_commands;
4017 ret = copy_from_user(kernel_commands, user_commands,
4020 DRM_ERROR("Failed copying commands.\n");
4021 vmw_cmdbuf_header_free(*header);
4023 return ERR_PTR(-EFAULT);
4026 return kernel_commands;
4029 static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
4030 struct vmw_sw_context *sw_context,
4033 struct vmw_resource_val_node *ctx_node;
4034 struct vmw_resource *res;
4037 if (handle == SVGA3D_INVALID_ID)
4040 ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
4041 handle, user_context_converter,
4043 if (unlikely(ret != 0)) {
4044 DRM_ERROR("Could not find or user DX context 0x%08x.\n",
4049 ret = vmw_resource_val_add(sw_context, res, &ctx_node);
4050 if (unlikely(ret != 0))
4053 sw_context->dx_ctx_node = ctx_node;
4054 sw_context->man = vmw_context_res_man(res);
4056 vmw_resource_unreference(&res);
int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands,
			void *kernel_commands,
			uint32_t command_size,
			uint64_t throttle_us,
			uint32_t dx_context_handle,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence,
			uint32_t flags)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_resource *error_resource;
	struct list_head resource_list;
	struct vmw_cmdbuf_header *header;
	struct ww_acquire_ctx ticket;
	uint32_t handle;
	int ret;
	int32_t out_fence_fd = -1;
	struct sync_file *sync_file = NULL;

	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			DRM_ERROR("Failed to get a fence file descriptor.\n");
			return out_fence_fd;
		}
	}

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);
		if (ret)
			goto out_free_fence_fd;
	}

	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
					     kernel_commands, command_size,
					     &header);
	if (IS_ERR(kernel_commands)) {
		ret = PTR_ERR(kernel_commands);
		goto out_free_fence_fd;
	}

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (ret) {
		ret = -ERESTARTSYS;
		goto out_free_header;
	}

	sw_context->kernel = false;
	if (kernel_commands == NULL) {
		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce,
				     user_commands, command_size);
		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			DRM_ERROR("Failed copying commands.\n");
			goto out_unlock;
		}
		kernel_commands = sw_context->cmd_bounce;
	} else if (!header)
		sw_context->kernel = true;

	sw_context->fp = vmw_fpriv(file_priv);
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	INIT_LIST_HEAD(&sw_context->resource_list);
	INIT_LIST_HEAD(&sw_context->ctx_resource_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	sw_context->dx_ctx_node = NULL;
	sw_context->dx_query_mob = NULL;
	sw_context->dx_query_ctx = NULL;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->validate_nodes);
	INIT_LIST_HEAD(&sw_context->res_relocations);
	if (sw_context->staged_bindings)
		vmw_binding_state_reset(sw_context->staged_bindings);

	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;
		sw_context->res_ht_initialized = true;
	}
	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
	INIT_LIST_HEAD(&resource_list);
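
	/*
	 * The batch is handled in two phases: vmw_cmd_check_all() below
	 * parses and verifies the commands and builds up the relocation,
	 * resource and buffer validation lists. Those lists are then
	 * reserved, validated and fenced around the actual submission.
	 */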
	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
	if (unlikely(ret != 0)) {
		list_splice_init(&sw_context->ctx_resource_list,
				 &sw_context->resource_list);
		goto out_err_nores;
	}

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	/*
	 * Merge the resource lists before checking the return status
	 * from vmw_cmd_check_all so that all the open hashtabs will
	 * be handled properly even if vmw_cmd_check_all fails.
	 */
	list_splice_init(&sw_context->ctx_resource_list,
			 &sw_context->resource_list);

	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
				     true, NULL);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_validate(sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @user_fence_rep.
	 */
	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	vmw_resources_unreserve(sw_context, false);

	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
				    (void *) fence);

	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	vmw_clear_validations(sw_context);

	/*
	 * If anything fails here, give up trying to export the fence
	 * and do a sync since the user mode will not be able to sync
	 * the fence itself. This ensures we are still functionally
	 * correct.
	 */
	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		sync_file = sync_file_create(&fence->base);
		if (!sync_file) {
			DRM_ERROR("Unable to create sync file for fence\n");
			put_unused_fd(out_fence_fd);
			out_fence_fd = -1;

			(void) vmw_fence_obj_wait(fence, false, false,
						  VMW_FENCE_WAIT_TIMEOUT);
		}
	}

	ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
					  user_fence_rep, fence, handle,
					  out_fence_fd, sync_file);

	if (sync_file) {
		if (ret) {
			/* usercopy of fence failed, put the file object */
			fput(sync_file->file);
			put_unused_fd(out_fence_fd);
		} else {
			/* Link the fence with the FD created earlier */
			fd_install(out_fence_fd, sync_file->file);
		}
	}

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	list_splice_init(&sw_context->resource_list, &resource_list);
	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(sw_context, &resource_list);

	return 0;
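
	/*
	 * Error paths: the labels below unwind in reverse order of setup,
	 * releasing reservations, relocations and validations before
	 * dropping the cmdbuf_mutex, and unreferencing resources outside
	 * of it.
	 */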
out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
out_err_nores:
	vmw_resources_unreserve(sw_context, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	vmw_clear_validations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	list_splice_init(&sw_context->resource_list, &resource_list);
	error_resource = sw_context->error_resource;
	sw_context->error_resource = NULL;
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(sw_context, &resource_list);
	if (unlikely(error_resource != NULL))
		vmw_resource_unreference(&error_resource);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);
out_free_fence_fd:
	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * extremely rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
 * _after_ a query barrier that flushes all queries touching the current
 * buffer pointed to by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before
 * the next fifo command. (For example on hardware context destructions
 * where the hardware may otherwise leak unfinished queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
 * before calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct list_head validate_list;
	struct ttm_validate_buffer pinned_val, query_val;
	struct vmw_fence_obj *lfence = NULL;
	struct ww_acquire_ctx ticket;

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	INIT_LIST_HEAD(&validate_list);

	pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
	pinned_val.shared = false;
	list_add_tail(&pinned_val.head, &validate_list);

	query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
	query_val.shared = false;
	list_add_tail(&query_val.head, &validate_list);

	ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
				     false, NULL);
	if (unlikely(ret != 0)) {
		vmw_execbuf_unpin_panic(dev_priv);
		goto out_no_reserve;
	}

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (unlikely(ret != 0)) {
			vmw_execbuf_unpin_panic(dev_priv);
			goto out_no_emit;
		}
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);

out_unlock:
	return;

out_no_emit:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_no_reserve:
	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before
 * the next fifo command. (For example on hardware context destructions
 * where the hardware may otherwise leak unfinished queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}
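
/**
 * vmw_execbuf_ioctl - Entry point for the execbuf ioctl.
 *
 * @dev: Pointer to the drm device.
 * @data: User-space address of the, possibly version-extended, ioctl
 * argument.
 * @file_priv: Pointer to the drm file structure identifying the caller.
 * @size: Size of the ioctl argument as announced by the caller, used to
 * determine the argument version.
 *
 * Copies in the ioctl argument, waits on an imported fence fd if one is
 * given, and hands the command batch over to vmw_execbuf_process() under
 * the device's reservation semaphore.
 */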
int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
		      struct drm_file *file_priv, size_t size)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg arg;
	int ret;
	static const size_t copy_offset[] = {
		offsetof(struct drm_vmw_execbuf_arg, context_handle),
		sizeof(struct drm_vmw_execbuf_arg)};
	struct dma_fence *in_fence = NULL;

	if (unlikely(size < copy_offset[0])) {
		DRM_ERROR("Invalid command size, ioctl %d\n",
			  DRM_VMW_EXECBUF);
		return -EINVAL;
	}

	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
		return -EFAULT;

	/*
	 * Extend the ioctl argument while maintaining backwards
	 * compatibility: We take different code paths depending on the
	 * value of arg.version.
	 */
	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
		     arg.version == 0)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		return -EINVAL;
	}

	if (arg.version > 1 &&
	    copy_from_user(&arg.context_handle,
			   (void __user *) (data + copy_offset[0]),
			   copy_offset[arg.version - 1] -
			   copy_offset[0]) != 0)
		return -EFAULT;

	switch (arg.version) {
	case 1:
		arg.context_handle = (uint32_t) -1;
		break;
	case 2:
	default:
		break;
	}

	/* If imported a fence FD from elsewhere, then wait on it */
	if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
		in_fence = sync_file_get_fence(arg.imported_fence_fd);

		if (!in_fence) {
			DRM_ERROR("Cannot get imported fence\n");
			return -EINVAL;
		}

		ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
		if (ret)
			goto out;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg.commands,
				  NULL, arg.command_size, arg.throttle_us,
				  arg.context_handle,
				  (void __user *)(unsigned long)arg.fence_rep,
				  NULL, arg.flags);
	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0))
		goto out;

	vmw_kms_cursor_post_execbuf(dev_priv);

out:
	if (in_fence)
		dma_fence_put(in_fence);
	return ret;
}
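
/*
 * A minimal, illustrative user-space sketch (not part of the driver) of
 * driving the execbuf ioctl with fence fd export through libdrm. The names
 * drm_fd, cmds, size and use_sync_file_fd() are assumptions made for the
 * example only:
 *
 *	struct drm_vmw_fence_rep fence_rep;
 *	struct drm_vmw_execbuf_arg arg = {
 *		.commands = (unsigned long) cmds,
 *		.command_size = size,
 *		.version = DRM_VMW_EXECBUF_VERSION,
 *		.flags = DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD,
 *		.fence_rep = (unsigned long) &fence_rep,
 *		.context_handle = SVGA3D_INVALID_ID,
 *	};
 *
 *	if (drmCommandWrite(drm_fd, DRM_VMW_EXECBUF,
 *			    &arg, sizeof(arg)) == 0 && fence_rep.error == 0)
 *		use_sync_file_fd(fence_rep.fd);
 */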