GNU Linux-libre 4.19.286-gnu1
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
3  *
4  * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 #include <linux/sync_file.h>
28
29 #include "vmwgfx_drv.h"
30 #include "vmwgfx_reg.h"
31 #include <drm/ttm/ttm_bo_api.h>
32 #include <drm/ttm/ttm_placement.h>
33 #include "vmwgfx_so.h"
34 #include "vmwgfx_binding.h"
35
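/*
 * VMW_RES_HT_ORDER is the log2 size of the per-submission resource hash
 * table; drm_ht_create() takes an order, so the table gets 1 << 12 buckets.
 */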
36 #define VMW_RES_HT_ORDER 12
37
38 /**
39  * enum vmw_resource_relocation_type - Relocation type for resources
40  *
41  * @vmw_res_rel_normal: Traditional relocation. The resource id in the
42  * command stream is replaced with the actual id after validation.
43  * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
44  * with a NOP.
45  * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
46  * after validation is -1, the command is replaced with a NOP. Otherwise no
47  * action.
48  */
49 enum vmw_resource_relocation_type {
50         vmw_res_rel_normal,
51         vmw_res_rel_nop,
52         vmw_res_rel_cond_nop,
53         vmw_res_rel_max
54 };
55
56 /**
57  * struct vmw_resource_relocation - Relocation info for resources
58  *
59  * @head: List head for the software context's relocation list.
60  * @res: Non-ref-counted pointer to the resource.
61  * @offset: Byte offset into the command buffer where the id that needs
62  * fixup is located.
63  * @rel_type: Type of relocation.
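 *
 * The widths of the @offset and @rel_type bitfields below are checked
 * against SVGA_CB_MAX_SIZE and vmw_res_rel_max by BUILD_BUG_ON()s in
 * vmw_resource_relocations_apply().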
64  */
65 struct vmw_resource_relocation {
66         struct list_head head;
67         const struct vmw_resource *res;
68         u32 offset:29;
69         enum vmw_resource_relocation_type rel_type:3;
70 };
71
72 /**
73  * struct vmw_resource_val_node - Validation info for resources
74  *
75  * @head: List head for the software context's resource list.
76  * @hash: Hash entry for quick resource to val_node lookup.
77  * @res: Ref-counted pointer to the resource.
79  * @new_backup: Refcounted pointer to the new backup buffer.
80  * @staged_bindings: If @res is a context, tracks bindings set up during
81  * the command batch. Otherwise NULL.
82  * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
83  * @first_usage: Set to true the first time the resource is referenced in
84  * the command stream.
85  * @switching_backup: The command stream provides a new backup buffer for a
86  * resource.
87  * @no_buffer_needed: @switching_backup is known to be true already on
88  * first buffer reference, so resource reservation does not need to
89  * allocate a backup buffer for the resource.
90  */
91 struct vmw_resource_val_node {
92         struct list_head head;
93         struct drm_hash_item hash;
94         struct vmw_resource *res;
95         struct vmw_buffer_object *new_backup;
96         struct vmw_ctx_binding_state *staged_bindings;
97         unsigned long new_backup_offset;
98         u32 first_usage : 1;
99         u32 switching_backup : 1;
100         u32 no_buffer_needed : 1;
101 };
102
103 /**
104  * struct vmw_cmd_entry - Describe a command for the verifier
105  *
106  * @user_allow: Whether allowed from the execbuf ioctl.
107  * @gb_disable: Whether disabled if guest-backed objects are available.
108  * @gb_enable: Whether enabled iff guest-backed objects are available.
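 * @func: Call-back to handle the command.
 * @cmd_name: Name of the command.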
109  */
110 struct vmw_cmd_entry {
111         int (*func) (struct vmw_private *, struct vmw_sw_context *,
112                      SVGA3dCmdHeader *);
113         bool user_allow;
114         bool gb_disable;
115         bool gb_enable;
116         const char *cmd_name;
117 };
118
119 #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)  \
120         [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
121                                        (_gb_disable), (_gb_enable), #_cmd}
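/*
 * Illustrative use (the command table itself lives further down in this
 * file); entries are indexed by command id relative to SVGA_3D_CMD_BASE:
 *
 *   VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
 *               true, false, false),
 *
 * registers vmw_cmd_surface_copy_check() as the verifier for
 * SVGA_3D_CMD_SURFACE_COPY and allows the command from user space.
 */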
122
123 static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
124                                         struct vmw_sw_context *sw_context,
125                                         struct vmw_resource *ctx);
126 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
127                                  struct vmw_sw_context *sw_context,
128                                  SVGAMobId *id,
129                                  struct vmw_buffer_object **vmw_bo_p);
130 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
131                                    struct vmw_buffer_object *vbo,
132                                    bool validate_as_mob,
133                                    uint32_t *p_val_node);
134 /**
135  * vmw_ptr_diff - Compute the offset from a to b in bytes
136  *
137  * @a: A starting pointer.
138  * @b: A pointer in the same address space.
139  *
140  * Returns: The offset in bytes between the two pointers.
141  */
142 static size_t vmw_ptr_diff(void *a, void *b)
143 {
144         return (unsigned long) b - (unsigned long) a;
145 }
146
147 /**
148  * vmw_resources_unreserve - unreserve resources previously reserved for
149  * command submission.
150  *
151  * @sw_context: pointer to the software context
152  * @backoff: Whether command submission failed.
153  */
154 static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
155                                     bool backoff)
156 {
157         struct vmw_resource_val_node *val;
158         struct list_head *list = &sw_context->resource_list;
159
160         if (sw_context->dx_query_mob && !backoff)
161                 vmw_context_bind_dx_query(sw_context->dx_query_ctx,
162                                           sw_context->dx_query_mob);
163
164         list_for_each_entry(val, list, head) {
165                 struct vmw_resource *res = val->res;
166                 bool switch_backup =
167                         (backoff) ? false : val->switching_backup;
168
169                 /*
170                  * Transfer staged context bindings to the
171                  * persistent context binding tracker.
172                  */
173                 if (unlikely(val->staged_bindings)) {
174                         if (!backoff) {
175                                 vmw_binding_state_commit
176                                         (vmw_context_binding_state(val->res),
177                                          val->staged_bindings);
178                         }
179
180                         if (val->staged_bindings != sw_context->staged_bindings)
181                                 vmw_binding_state_free(val->staged_bindings);
182                         else
183                                 sw_context->staged_bindings_inuse = false;
184                         val->staged_bindings = NULL;
185                 }
186                 vmw_resource_unreserve(res, switch_backup, val->new_backup,
187                                        val->new_backup_offset);
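                /*
                 * vmw_resource_unreserve() takes its own reference to the
                 * new backup buffer when switching, so the validation list's
                 * reference can be dropped here.
                 */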
188                 vmw_bo_unreference(&val->new_backup);
189         }
190 }
191
192 /**
193  * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
194  * added to the validate list.
195  *
196  * @dev_priv: Pointer to the device private.
197  * @sw_context: The validation context.
198  * @node: The validation node holding this context.
199  */
200 static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
201                                    struct vmw_sw_context *sw_context,
202                                    struct vmw_resource_val_node *node)
203 {
204         int ret;
205
206         ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
207         if (unlikely(ret != 0))
208                 goto out_err;
209
210         if (!sw_context->staged_bindings) {
211                 sw_context->staged_bindings =
212                         vmw_binding_state_alloc(dev_priv);
213                 if (IS_ERR(sw_context->staged_bindings)) {
214                         DRM_ERROR("Failed to allocate context binding "
215                                   "information.\n");
216                         ret = PTR_ERR(sw_context->staged_bindings);
217                         sw_context->staged_bindings = NULL;
218                         goto out_err;
219                 }
220         }
221
222         if (sw_context->staged_bindings_inuse) {
223                 node->staged_bindings = vmw_binding_state_alloc(dev_priv);
224                 if (IS_ERR(node->staged_bindings)) {
225                         DRM_ERROR("Failed to allocate context binding "
226                                   "information.\n");
227                         ret = PTR_ERR(node->staged_bindings);
228                         node->staged_bindings = NULL;
229                         goto out_err;
230                 }
231         } else {
232                 node->staged_bindings = sw_context->staged_bindings;
233                 sw_context->staged_bindings_inuse = true;
234         }
235
236         return 0;
237 out_err:
238         return ret;
239 }
240
241 /**
242  * vmw_resource_val_add - Add a resource to the software context's
243  * resource list if it's not already on it.
244  *
245  * @sw_context: Pointer to the software context.
246  * @res: Pointer to the resource.
247  * @p_node: If non-NULL on entry, points on successful return to a valid
248  * pointer to a struct vmw_resource_val_node.
249  */
250 static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
251                                 struct vmw_resource *res,
252                                 struct vmw_resource_val_node **p_node)
253 {
254         struct vmw_private *dev_priv = res->dev_priv;
255         struct vmw_resource_val_node *node;
256         struct drm_hash_item *hash;
257         int ret;
258
259         if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
260                                     &hash) == 0)) {
261                 node = container_of(hash, struct vmw_resource_val_node, hash);
262                 node->first_usage = false;
263                 if (unlikely(p_node != NULL))
264                         *p_node = node;
265                 return 0;
266         }
267
268         node = kzalloc(sizeof(*node), GFP_KERNEL);
269         if (unlikely(!node)) {
270                 DRM_ERROR("Failed to allocate a resource validation "
271                           "entry.\n");
272                 return -ENOMEM;
273         }
274
275         node->hash.key = (unsigned long) res;
276         ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
277         if (unlikely(ret != 0)) {
278                 DRM_ERROR("Failed to initialize a resource validation "
279                           "entry.\n");
280                 kfree(node);
281                 return ret;
282         }
283         node->res = vmw_resource_reference(res);
284         node->first_usage = true;
285         if (unlikely(p_node != NULL))
286                 *p_node = node;
287
288         if (!dev_priv->has_mob) {
289                 list_add_tail(&node->head, &sw_context->resource_list);
290                 return 0;
291         }
292
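        /*
         * Contexts are added to the head of the context resource list,
         * cotables to its tail; all other resources go on the main
         * resource list.
         */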
293         switch (vmw_res_type(res)) {
294         case vmw_res_context:
295         case vmw_res_dx_context:
296                 list_add(&node->head, &sw_context->ctx_resource_list);
297                 ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
298                 break;
299         case vmw_res_cotable:
300                 list_add_tail(&node->head, &sw_context->ctx_resource_list);
301                 break;
302         default:
303                 list_add_tail(&node->head, &sw_context->resource_list);
304                 break;
305         }
306
307         return ret;
308 }
309
310 /**
311  * vmw_view_res_val_add - Add a view, and the surface it points to, to
312  * the validation list
313  *
314  * @sw_context: The software context holding the validation list.
315  * @view: Pointer to the view resource.
316  *
317  * Returns 0 if success, negative error code otherwise.
318  */
319 static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
320                                 struct vmw_resource *view)
321 {
322         int ret;
323
324         /*
325          * First add the resource the view is pointing to, otherwise
326          * it may be swapped out when the view is validated.
327          */
328         ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
329         if (ret)
330                 return ret;
331
332         return vmw_resource_val_add(sw_context, view, NULL);
333 }
334
335 /**
336  * vmw_view_id_val_add - Look up a view and add it, and the surface it
337  * points to, to the validation list.
338  *
339  * @sw_context: The software context holding the validation list.
340  * @view_type: The view type to look up.
341  * @id: view id of the view.
342  *
343  * The view is represented by a view id and the DX context it's created on,
344  * or scheduled for creation on. If there is no DX context set, the function
345  * will return -EINVAL. Otherwise returns 0 on success, negative on failure.
346  */
347 static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
348                                enum vmw_view_type view_type, u32 id)
349 {
350         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
351         struct vmw_resource *view;
352         int ret;
353
354         if (!ctx_node) {
355                 DRM_ERROR("DX Context not set.\n");
356                 return -EINVAL;
357         }
358
359         view = vmw_view_lookup(sw_context->man, view_type, id);
360         if (IS_ERR(view))
361                 return PTR_ERR(view);
362
363         ret = vmw_view_res_val_add(sw_context, view);
364         vmw_resource_unreference(&view);
365
366         return ret;
367 }
368
369 /**
370  * vmw_resource_context_res_add - Put resources previously bound to a context on
371  * the validation list
372  *
373  * @dev_priv: Pointer to a device private structure
374  * @sw_context: Pointer to a software context used for this command submission
375  * @ctx: Pointer to the context resource
376  *
377  * This function puts all resources that were previously bound to @ctx on
378  * the resource validation list. This is part of the context state reemission
379  */
380 static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
381                                         struct vmw_sw_context *sw_context,
382                                         struct vmw_resource *ctx)
383 {
384         struct list_head *binding_list;
385         struct vmw_ctx_bindinfo *entry;
386         int ret = 0;
387         struct vmw_resource *res;
388         u32 i;
389
390         /* Add all cotables to the validation list. */
391         if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
392                 for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
393                         res = vmw_context_cotable(ctx, i);
394                         if (IS_ERR(res))
395                                 continue;
396
397                         ret = vmw_resource_val_add(sw_context, res, NULL);
398                         vmw_resource_unreference(&res);
399                         if (unlikely(ret != 0))
400                                 return ret;
401                 }
402         }
403
404
405         /* Add all resources bound to the context to the validation list */
406         mutex_lock(&dev_priv->binding_mutex);
407         binding_list = vmw_context_binding_list(ctx);
408
409         list_for_each_entry(entry, binding_list, ctx_list) {
410                 /* entry->res is not refcounted */
411                 res = vmw_resource_reference_unless_doomed(entry->res);
412                 if (unlikely(res == NULL))
413                         continue;
414
415                 if (vmw_res_type(entry->res) == vmw_res_view)
416                         ret = vmw_view_res_val_add(sw_context, entry->res);
417                 else
418                         ret = vmw_resource_val_add(sw_context, entry->res,
419                                                    NULL);
420                 vmw_resource_unreference(&res);
421                 if (unlikely(ret != 0))
422                         break;
423         }
424
425         if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
426                 struct vmw_buffer_object *dx_query_mob;
427
428                 dx_query_mob = vmw_context_get_dx_query_mob(ctx);
429                 if (dx_query_mob)
430                         ret = vmw_bo_to_validate_list(sw_context,
431                                                       dx_query_mob,
432                                                       true, NULL);
433         }
434
435         mutex_unlock(&dev_priv->binding_mutex);
436         return ret;
437 }
438
439 /**
440  * vmw_resource_relocation_add - Add a relocation to the relocation list
441  *
442  * @list: Pointer to head of relocation list.
443  * @res: The resource.
444  * @offset: Offset into the command buffer currently being parsed where the
445  * id that needs fixup is located. Granularity is one byte.
446  * @rel_type: Relocation type.
447  */
448 static int vmw_resource_relocation_add(struct list_head *list,
449                                        const struct vmw_resource *res,
450                                        unsigned long offset,
451                                        enum vmw_resource_relocation_type
452                                        rel_type)
453 {
454         struct vmw_resource_relocation *rel;
455
456         rel = kmalloc(sizeof(*rel), GFP_KERNEL);
457         if (unlikely(!rel)) {
458                 DRM_ERROR("Failed to allocate a resource relocation.\n");
459                 return -ENOMEM;
460         }
461
462         rel->res = res;
463         rel->offset = offset;
464         rel->rel_type = rel_type;
465         list_add_tail(&rel->head, list);
466
467         return 0;
468 }
469
470 /**
471  * vmw_resource_relocations_free - Free all relocations on a list
472  *
473  * @list: Pointer to the head of the relocation list.
474  */
475 static void vmw_resource_relocations_free(struct list_head *list)
476 {
477         struct vmw_resource_relocation *rel, *n;
478
479         list_for_each_entry_safe(rel, n, list, head) {
480                 list_del(&rel->head);
481                 kfree(rel);
482         }
483 }
484
485 /**
486  * vmw_resource_relocations_apply - Apply all relocations on a list
487  *
488  * @cb: Pointer to the start of the command buffer being patched. This need
489  * not be the same buffer as the one being parsed when the relocation
490  * list was built, but the contents must be the same modulo the
491  * resource ids.
492  * @list: Pointer to the head of the relocation list.
493  */
494 static void vmw_resource_relocations_apply(uint32_t *cb,
495                                            struct list_head *list)
496 {
497         struct vmw_resource_relocation *rel;
498
499         /* Validate the struct vmw_resource_relocation member size */
500         BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
501         BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));
502
503         list_for_each_entry(rel, list, head) {
504                 u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
505                 switch (rel->rel_type) {
506                 case vmw_res_rel_normal:
507                         *addr = rel->res->id;
508                         break;
509                 case vmw_res_rel_nop:
510                         *addr = SVGA_3D_CMD_NOP;
511                         break;
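                /*
                 * vmw_res_rel_cond_nop: replace the command with a NOP only
                 * if the resource id after validation is invalid (-1).
                 */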
512                 default:
513                         if (rel->res->id == -1)
514                                 *addr = SVGA_3D_CMD_NOP;
515                         break;
516                 }
517         }
518 }
519
520 static int vmw_cmd_invalid(struct vmw_private *dev_priv,
521                            struct vmw_sw_context *sw_context,
522                            SVGA3dCmdHeader *header)
523 {
524         return -EINVAL;
525 }
526
527 static int vmw_cmd_ok(struct vmw_private *dev_priv,
528                       struct vmw_sw_context *sw_context,
529                       SVGA3dCmdHeader *header)
530 {
531         return 0;
532 }
533
534 /**
535  * vmw_bo_to_validate_list - add a bo to a validate list
536  *
537  * @sw_context: The software context used for this command submission batch.
538  * @vbo: The buffer object to add.
539  * @validate_as_mob: Validate this buffer as a MOB.
540  * @p_val_node: If non-NULL, will be updated with the validate node number
541  * on return.
542  *
543  * Returns -EINVAL if the limit of number of buffer objects per command
544  * submission is reached.
545  */
546 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
547                                    struct vmw_buffer_object *vbo,
548                                    bool validate_as_mob,
549                                    uint32_t *p_val_node)
550 {
551         uint32_t val_node;
552         struct vmw_validate_buffer *vval_buf;
553         struct ttm_validate_buffer *val_buf;
554         struct drm_hash_item *hash;
555         int ret;
556
557         if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
558                                     &hash) == 0)) {
559                 vval_buf = container_of(hash, struct vmw_validate_buffer,
560                                         hash);
561                 if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
562                         DRM_ERROR("Inconsistent buffer usage.\n");
563                         return -EINVAL;
564                 }
565                 val_buf = &vval_buf->base;
566                 val_node = vval_buf - sw_context->val_bufs;
567         } else {
568                 val_node = sw_context->cur_val_buf;
569                 if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
570                         DRM_ERROR("Max number of DMA buffers per submission "
571                                   "exceeded.\n");
572                         return -EINVAL;
573                 }
574                 vval_buf = &sw_context->val_bufs[val_node];
575                 vval_buf->hash.key = (unsigned long) vbo;
576                 ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
577                 if (unlikely(ret != 0)) {
578                         DRM_ERROR("Failed to initialize a buffer validation "
579                                   "entry.\n");
580                         return ret;
581                 }
582                 ++sw_context->cur_val_buf;
583                 val_buf = &vval_buf->base;
584                 val_buf->bo = ttm_bo_reference(&vbo->base);
585                 val_buf->shared = false;
586                 list_add_tail(&val_buf->head, &sw_context->validate_nodes);
587                 vval_buf->validate_as_mob = validate_as_mob;
588         }
589
590         if (p_val_node)
591                 *p_val_node = val_node;
592
593         return 0;
594 }
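/*
 * Note that sw_context->res_ht doubles as the lookup table for buffer
 * objects; both resources and buffer objects use their kernel pointer as
 * hash key, so the two key spaces cannot collide.
 */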
595
596 /**
597  * vmw_resources_reserve - Reserve all resources on the sw_context's
598  * resource list.
599  *
600  * @sw_context: Pointer to the software context.
601  *
602  * Note that since VMware's command submission is currently protected by
603  * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
604  * since only a single thread at once will attempt this.
605  */
606 static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
607 {
608         struct vmw_resource_val_node *val;
609         int ret = 0;
610
611         list_for_each_entry(val, &sw_context->resource_list, head) {
612                 struct vmw_resource *res = val->res;
613
614                 ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
615                 if (unlikely(ret != 0))
616                         return ret;
617
618                 if (res->backup) {
619                         struct vmw_buffer_object *vbo = res->backup;
620
621                         ret = vmw_bo_to_validate_list
622                                 (sw_context, vbo,
623                                  vmw_resource_needs_backup(res), NULL);
624
625                         if (unlikely(ret != 0))
626                                 return ret;
627                 }
628         }
629
630         if (sw_context->dx_query_mob) {
631                 struct vmw_buffer_object *expected_dx_query_mob;
632
633                 expected_dx_query_mob =
634                         vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
635                 if (expected_dx_query_mob &&
636                     expected_dx_query_mob != sw_context->dx_query_mob) {
637                         ret = -EINVAL;
638                 }
639         }
640
641         return ret;
642 }
643
644 /**
645  * vmw_resources_validate - Validate all resources on the sw_context's
646  * resource list.
647  *
648  * @sw_context: Pointer to the software context.
649  *
650  * Before this function is called, all resource backup buffers must have
651  * been validated.
652  */
653 static int vmw_resources_validate(struct vmw_sw_context *sw_context)
654 {
655         struct vmw_resource_val_node *val;
656         int ret;
657
658         list_for_each_entry(val, &sw_context->resource_list, head) {
659                 struct vmw_resource *res = val->res;
660                 struct vmw_buffer_object *backup = res->backup;
661
662                 ret = vmw_resource_validate(res);
663                 if (unlikely(ret != 0)) {
664                         if (ret != -ERESTARTSYS)
665                                 DRM_ERROR("Failed to validate resource.\n");
666                         return ret;
667                 }
668
669                 /* Check if the resource switched backup buffer */
670                 if (backup && res->backup && (backup != res->backup)) {
671                         struct vmw_buffer_object *vbo = res->backup;
672
673                         ret = vmw_bo_to_validate_list
674                                 (sw_context, vbo,
675                                  vmw_resource_needs_backup(res), NULL);
676                         if (ret) {
677                                 ttm_bo_unreserve(&vbo->base);
678                                 return ret;
679                         }
680                 }
681         }
682         return 0;
683 }
684
685 /**
686  * vmw_cmd_res_reloc_add - Add a resource to a software context's
687  * relocation and validation lists.
688  *
689  * @dev_priv: Pointer to a struct vmw_private identifying the device.
690  * @sw_context: Pointer to the software context.
691  * @id_loc: Pointer to where the id that needs translation is located.
692  * @res: Valid pointer to a struct vmw_resource.
693  * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
694  * used for this resource is returned here.
695  */
696 static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
697                                  struct vmw_sw_context *sw_context,
698                                  uint32_t *id_loc,
699                                  struct vmw_resource *res,
700                                  struct vmw_resource_val_node **p_val)
701 {
702         int ret;
703         struct vmw_resource_val_node *node;
704
705         *p_val = NULL;
706         ret = vmw_resource_relocation_add(&sw_context->res_relocations,
707                                           res,
708                                           vmw_ptr_diff(sw_context->buf_start,
709                                                        id_loc),
710                                           vmw_res_rel_normal);
711         if (unlikely(ret != 0))
712                 return ret;
713
714         ret = vmw_resource_val_add(sw_context, res, &node);
715         if (unlikely(ret != 0))
716                 return ret;
717
718         if (p_val)
719                 *p_val = node;
720
721         return 0;
722 }
723
724
725 /**
726  * vmw_cmd_res_check - Check that a resource is present and if so, put it
727  * on the resource validate list unless it's already there.
728  *
729  * @dev_priv: Pointer to a device private structure.
730  * @sw_context: Pointer to the software context.
731  * @res_type: Resource type.
732  * @converter: User-space visible type specific information.
733  * @id_loc: Pointer to the location in the command buffer currently being
734  * parsed from where the user-space resource id handle is located.
735  * @p_val: Pointer to pointer to resource validation node. Populated
736  * on exit.
737  */
738 static int
739 vmw_cmd_res_check(struct vmw_private *dev_priv,
740                   struct vmw_sw_context *sw_context,
741                   enum vmw_res_type res_type,
742                   const struct vmw_user_resource_conv *converter,
743                   uint32_t *id_loc,
744                   struct vmw_resource_val_node **p_val)
745 {
746         struct vmw_res_cache_entry *rcache =
747                 &sw_context->res_cache[res_type];
748         struct vmw_resource *res;
749         struct vmw_resource_val_node *node;
750         int ret;
751
752         if (*id_loc == SVGA3D_INVALID_ID) {
753                 if (p_val)
754                         *p_val = NULL;
755                 if (res_type == vmw_res_context) {
756                         DRM_ERROR("Illegal context invalid id.\n");
757                         return -EINVAL;
758                 }
759                 return 0;
760         }
761
762         /*
763          * Fastpath in case of repeated commands referencing the same
764          * resource
765          */
766
767         if (likely(rcache->valid && *id_loc == rcache->handle)) {
768                 const struct vmw_resource *res = rcache->res;
769
770                 rcache->node->first_usage = false;
771                 if (p_val)
772                         *p_val = rcache->node;
773
774                 return vmw_resource_relocation_add
775                         (&sw_context->res_relocations, res,
776                          vmw_ptr_diff(sw_context->buf_start, id_loc),
777                          vmw_res_rel_normal);
778         }
779
780         ret = vmw_user_resource_lookup_handle(dev_priv,
781                                               sw_context->fp->tfile,
782                                               *id_loc,
783                                               converter,
784                                               &res);
785         if (unlikely(ret != 0)) {
786                 DRM_ERROR("Could not find or use resource 0x%08x.\n",
787                           (unsigned) *id_loc);
788                 dump_stack();
789                 return ret;
790         }
791
792         rcache->valid = true;
793         rcache->res = res;
794         rcache->handle = *id_loc;
795
796         ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
797                                     res, &node);
798         if (unlikely(ret != 0))
799                 goto out_no_reloc;
800
801         rcache->node = node;
802         if (p_val)
803                 *p_val = node;
804         vmw_resource_unreference(&res);
805         return 0;
806
807 out_no_reloc:
808         BUG_ON(sw_context->error_resource != NULL);
809         sw_context->error_resource = res;
810
811         return ret;
812 }
813
814 /**
815  * vmw_rebind_all_dx_query - Rebind DX queries associated with the context
816  *
817  * @ctx_res: context the query belongs to
818  *
819  * This function assumes binding_mutex is held.
820  */
821 static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
822 {
823         struct vmw_private *dev_priv = ctx_res->dev_priv;
824         struct vmw_buffer_object *dx_query_mob;
825         struct {
826                 SVGA3dCmdHeader header;
827                 SVGA3dCmdDXBindAllQuery body;
828         } *cmd;
829
830
831         dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
832
833         if (!dx_query_mob || dx_query_mob->dx_query_ctx)
834                 return 0;
835
836         cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);
837
838         if (cmd == NULL) {
839                 DRM_ERROR("Failed to rebind queries.\n");
840                 return -ENOMEM;
841         }
842
843         cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
844         cmd->header.size = sizeof(cmd->body);
845         cmd->body.cid = ctx_res->id;
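        /*
         * For buffer objects placed in MOB memory, the TTM memory start
         * offset is what the device uses as the MOB id.
         */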
846         cmd->body.mobid = dx_query_mob->base.mem.start;
847         vmw_fifo_commit(dev_priv, sizeof(*cmd));
848
849         vmw_context_bind_dx_query(ctx_res, dx_query_mob);
850
851         return 0;
852 }
853
854 /**
855  * vmw_rebind_contexts - Rebind all resources previously bound to
856  * referenced contexts.
857  *
858  * @sw_context: Pointer to the software context.
859  *
860  * Rebind context binding points that have been scrubbed because of eviction.
861  */
862 static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
863 {
864         struct vmw_resource_val_node *val;
865         int ret;
866
867         list_for_each_entry(val, &sw_context->resource_list, head) {
868                 if (unlikely(!val->staged_bindings))
869                         break;
870
871                 ret = vmw_binding_rebind_all
872                         (vmw_context_binding_state(val->res));
873                 if (unlikely(ret != 0)) {
874                         if (ret != -ERESTARTSYS)
875                                 DRM_ERROR("Failed to rebind context.\n");
876                         return ret;
877                 }
878
879                 ret = vmw_rebind_all_dx_query(val->res);
880                 if (ret != 0)
881                         return ret;
882         }
883
884         return 0;
885 }
886
887 /**
888  * vmw_view_bindings_add - Add an array of view bindings to a context
889  * binding state tracker.
890  *
891  * @sw_context: The execbuf state used for this command.
892  * @view_type: View type for the bindings.
893  * @binding_type: Binding type for the bindings.
894  * @shader_slot: The shader slot to use for the bindings.
895  * @view_ids: Array of view ids to be bound.
896  * @num_views: Number of view ids in @view_ids.
897  * @first_slot: The binding slot to be used for the first view id in @view_ids.
898  */
899 static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
900                                  enum vmw_view_type view_type,
901                                  enum vmw_ctx_binding_type binding_type,
902                                  uint32 shader_slot,
903                                  uint32 view_ids[], u32 num_views,
904                                  u32 first_slot)
905 {
906         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
907         struct vmw_cmdbuf_res_manager *man;
908         u32 i;
909         int ret;
910
911         if (!ctx_node) {
912                 DRM_ERROR("DX Context not set.\n");
913                 return -EINVAL;
914         }
915
916         man = sw_context->man;
917         for (i = 0; i < num_views; ++i) {
918                 struct vmw_ctx_bindinfo_view binding;
919                 struct vmw_resource *view = NULL;
920
921                 if (view_ids[i] != SVGA3D_INVALID_ID) {
922                         view = vmw_view_lookup(man, view_type, view_ids[i]);
923                         if (IS_ERR(view)) {
924                                 DRM_ERROR("View not found.\n");
925                                 return PTR_ERR(view);
926                         }
927
928                         ret = vmw_view_res_val_add(sw_context, view);
929                         if (ret) {
930                                 DRM_ERROR("Could not add view to "
931                                           "validation list.\n");
932                                 vmw_resource_unreference(&view);
933                                 return ret;
934                         }
935                 }
936                 binding.bi.ctx = ctx_node->res;
937                 binding.bi.res = view;
938                 binding.bi.bt = binding_type;
939                 binding.shader_slot = shader_slot;
940                 binding.slot = first_slot + i;
941                 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
942                                 shader_slot, binding.slot);
943                 if (view)
944                         vmw_resource_unreference(&view);
945         }
946
947         return 0;
948 }
949
950 /**
951  * vmw_cmd_cid_check - Check a command header for valid context information.
952  *
953  * @dev_priv: Pointer to a device private structure.
954  * @sw_context: Pointer to the software context.
955  * @header: A command header with an embedded user-space context handle.
956  *
957  * Convenience function: Call vmw_cmd_res_check with the user-space context
958  * handle embedded in @header.
959  */
960 static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
961                              struct vmw_sw_context *sw_context,
962                              SVGA3dCmdHeader *header)
963 {
964         struct vmw_cid_cmd {
965                 SVGA3dCmdHeader header;
966                 uint32_t cid;
967         } *cmd;
968
969         cmd = container_of(header, struct vmw_cid_cmd, header);
970         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
971                                  user_context_converter, &cmd->cid, NULL);
972 }
973
974 static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
975                                            struct vmw_sw_context *sw_context,
976                                            SVGA3dCmdHeader *header)
977 {
978         struct vmw_sid_cmd {
979                 SVGA3dCmdHeader header;
980                 SVGA3dCmdSetRenderTarget body;
981         } *cmd;
982         struct vmw_resource_val_node *ctx_node;
983         struct vmw_resource_val_node *res_node;
984         int ret;
985
986         cmd = container_of(header, struct vmw_sid_cmd, header);
987
988         if (cmd->body.type >= SVGA3D_RT_MAX) {
989                 DRM_ERROR("Illegal render target type %u.\n",
990                           (unsigned) cmd->body.type);
991                 return -EINVAL;
992         }
993
994         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
995                                 user_context_converter, &cmd->body.cid,
996                                 &ctx_node);
997         if (unlikely(ret != 0))
998                 return ret;
999
1000         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1001                                 user_surface_converter,
1002                                 &cmd->body.target.sid, &res_node);
1003         if (unlikely(ret != 0))
1004                 return ret;
1005
1006         if (dev_priv->has_mob) {
1007                 struct vmw_ctx_bindinfo_view binding;
1008
1009                 binding.bi.ctx = ctx_node->res;
1010                 binding.bi.res = res_node ? res_node->res : NULL;
1011                 binding.bi.bt = vmw_ctx_binding_rt;
1012                 binding.slot = cmd->body.type;
1013                 vmw_binding_add(ctx_node->staged_bindings,
1014                                 &binding.bi, 0, binding.slot);
1015         }
1016
1017         return 0;
1018 }
1019
1020 static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
1021                                       struct vmw_sw_context *sw_context,
1022                                       SVGA3dCmdHeader *header)
1023 {
1024         struct vmw_sid_cmd {
1025                 SVGA3dCmdHeader header;
1026                 SVGA3dCmdSurfaceCopy body;
1027         } *cmd;
1028         int ret;
1029
1030         cmd = container_of(header, struct vmw_sid_cmd, header);
1031
1032         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1033                                 user_surface_converter,
1034                                 &cmd->body.src.sid, NULL);
1035         if (ret)
1036                 return ret;
1037
1038         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1039                                  user_surface_converter,
1040                                  &cmd->body.dest.sid, NULL);
1041 }
1042
1043 static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
1044                                       struct vmw_sw_context *sw_context,
1045                                       SVGA3dCmdHeader *header)
1046 {
1047         struct {
1048                 SVGA3dCmdHeader header;
1049                 SVGA3dCmdDXBufferCopy body;
1050         } *cmd;
1051         int ret;
1052
1053         cmd = container_of(header, typeof(*cmd), header);
1054         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1055                                 user_surface_converter,
1056                                 &cmd->body.src, NULL);
1057         if (ret != 0)
1058                 return ret;
1059
1060         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1061                                  user_surface_converter,
1062                                  &cmd->body.dest, NULL);
1063 }
1064
1065 static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
1066                                    struct vmw_sw_context *sw_context,
1067                                    SVGA3dCmdHeader *header)
1068 {
1069         struct {
1070                 SVGA3dCmdHeader header;
1071                 SVGA3dCmdDXPredCopyRegion body;
1072         } *cmd;
1073         int ret;
1074
1075         cmd = container_of(header, typeof(*cmd), header);
1076         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1077                                 user_surface_converter,
1078                                 &cmd->body.srcSid, NULL);
1079         if (ret != 0)
1080                 return ret;
1081
1082         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1083                                  user_surface_converter,
1084                                  &cmd->body.dstSid, NULL);
1085 }
1086
1087 static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
1088                                      struct vmw_sw_context *sw_context,
1089                                      SVGA3dCmdHeader *header)
1090 {
1091         struct vmw_sid_cmd {
1092                 SVGA3dCmdHeader header;
1093                 SVGA3dCmdSurfaceStretchBlt body;
1094         } *cmd;
1095         int ret;
1096
1097         cmd = container_of(header, struct vmw_sid_cmd, header);
1098         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1099                                 user_surface_converter,
1100                                 &cmd->body.src.sid, NULL);
1101         if (unlikely(ret != 0))
1102                 return ret;
1103         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1104                                  user_surface_converter,
1105                                  &cmd->body.dest.sid, NULL);
1106 }
1107
1108 static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
1109                                          struct vmw_sw_context *sw_context,
1110                                          SVGA3dCmdHeader *header)
1111 {
1112         struct vmw_sid_cmd {
1113                 SVGA3dCmdHeader header;
1114                 SVGA3dCmdBlitSurfaceToScreen body;
1115         } *cmd;
1116
1117         cmd = container_of(header, struct vmw_sid_cmd, header);
1118
1119         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1120                                  user_surface_converter,
1121                                  &cmd->body.srcImage.sid, NULL);
1122 }
1123
1124 static int vmw_cmd_present_check(struct vmw_private *dev_priv,
1125                                  struct vmw_sw_context *sw_context,
1126                                  SVGA3dCmdHeader *header)
1127 {
1128         struct vmw_sid_cmd {
1129                 SVGA3dCmdHeader header;
1130                 SVGA3dCmdPresent body;
1131         } *cmd;
1132
1133
1134         cmd = container_of(header, struct vmw_sid_cmd, header);
1135
1136         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1137                                  user_surface_converter, &cmd->body.sid,
1138                                  NULL);
1139 }
1140
1141 /**
1142  * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
1143  *
1144  * @dev_priv: The device private structure.
1145  * @new_query_bo: The new buffer holding query results.
1146  * @sw_context: The software context used for this command submission.
1147  *
1148  * This function checks whether @new_query_bo is suitable for holding
1149  * query results, and if another buffer currently is pinned for query
1150  * results. If so, the function prepares the state of @sw_context for
1151  * switching pinned buffers after successful submission of the current
1152  * command batch.
1153  */
1154 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
1155                                        struct vmw_buffer_object *new_query_bo,
1156                                        struct vmw_sw_context *sw_context)
1157 {
1158         struct vmw_res_cache_entry *ctx_entry =
1159                 &sw_context->res_cache[vmw_res_context];
1160         int ret;
1161
1162         BUG_ON(!ctx_entry->valid);
1163         sw_context->last_query_ctx = ctx_entry->res;
1164
1165         if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
1166
1167                 if (unlikely(new_query_bo->base.num_pages > 4)) {
1168                         DRM_ERROR("Query buffer too large.\n");
1169                         return -EINVAL;
1170                 }
1171
1172                 if (unlikely(sw_context->cur_query_bo != NULL)) {
1173                         sw_context->needs_post_query_barrier = true;
1174                         ret = vmw_bo_to_validate_list(sw_context,
1175                                                       sw_context->cur_query_bo,
1176                                                       dev_priv->has_mob, NULL);
1177                         if (unlikely(ret != 0))
1178                                 return ret;
1179                 }
1180                 sw_context->cur_query_bo = new_query_bo;
1181
1182                 ret = vmw_bo_to_validate_list(sw_context,
1183                                               dev_priv->dummy_query_bo,
1184                                               dev_priv->has_mob, NULL);
1185                 if (unlikely(ret != 0))
1186                         return ret;
1187
1188         }
1189
1190         return 0;
1191 }
1192
1193
1194 /**
1195  * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
1196  *
1197  * @dev_priv: The device private structure.
1198  * @sw_context: The software context used for this command submission batch.
1199  *
1200  * This function will check if we're switching query buffers, and will then
1201  * issue a dummy occlusion query wait used as a query barrier. When the fence
1202  * object following that query wait has signaled, we are sure that all
1203  * preceding queries have finished, and the old query buffer can be unpinned.
1204  * However, since both the new query buffer and the old one are fenced with
1205  * that fence, we can do an asynchronous unpin now, and be sure that the
1206  * old query buffer won't be moved until the fence has signaled.
1207  *
1208  * As mentioned above, both the new and old query buffers need to be fenced
1209  * using a sequence emitted *after* calling this function.
1210  */
1211 static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
1212                                      struct vmw_sw_context *sw_context)
1213 {
1214         /*
1215          * The validate list should still hold references to all
1216          * contexts here.
1217          */
1218
1219         if (sw_context->needs_post_query_barrier) {
1220                 struct vmw_res_cache_entry *ctx_entry =
1221                         &sw_context->res_cache[vmw_res_context];
1222                 struct vmw_resource *ctx;
1223                 int ret;
1224
1225                 BUG_ON(!ctx_entry->valid);
1226                 ctx = ctx_entry->res;
1227
1228                 ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
1229
1230                 if (unlikely(ret != 0))
1231                         DRM_ERROR("Out of fifo space for dummy query.\n");
1232         }
1233
1234         if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
1235                 if (dev_priv->pinned_bo) {
1236                         vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
1237                         vmw_bo_unreference(&dev_priv->pinned_bo);
1238                 }
1239
1240                 if (!sw_context->needs_post_query_barrier) {
1241                         vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
1242
1243                         /*
1244                          * We pin also the dummy_query_bo buffer so that we
1245                          * don't need to validate it when emitting
1246                          * dummy queries in context destroy paths.
1247                          */
1248
1249                         if (!dev_priv->dummy_query_bo_pinned) {
1250                                 vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
1251                                                     true);
1252                                 dev_priv->dummy_query_bo_pinned = true;
1253                         }
1254
1255                         BUG_ON(sw_context->last_query_ctx == NULL);
1256                         dev_priv->query_cid = sw_context->last_query_ctx->id;
1257                         dev_priv->query_cid_valid = true;
1258                         dev_priv->pinned_bo =
1259                                 vmw_bo_reference(sw_context->cur_query_bo);
1260                 }
1261         }
1262 }
1263
1264 /**
1265  * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
1266  * handle to a MOB id.
1267  *
1268  * @dev_priv: Pointer to a device private structure.
1269  * @sw_context: The software context used for this command batch validation.
1270  * @id: Pointer to the user-space handle to be translated.
1271  * @vmw_bo_p: Points to a location that, on successful return, will carry
1272  * a reference-counted pointer to the DMA buffer identified by the
1273  * user-space handle in @id.
1274  *
1275  * This function saves information needed to translate a user-space buffer
1276  * handle to a MOB id. The translation does not take place immediately, but
1277  * during a call to vmw_apply_relocations(). This function builds a relocation
1278  * list and a list of buffers to validate. The former needs to be freed using
1279  * either vmw_apply_relocations() or vmw_free_relocations(). The latter
1280  * needs to be freed using vmw_clear_validations.
1281  * needs to be freed using vmw_clear_validations().
1282 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
1283                                  struct vmw_sw_context *sw_context,
1284                                  SVGAMobId *id,
1285                                  struct vmw_buffer_object **vmw_bo_p)
1286 {
1287         struct vmw_buffer_object *vmw_bo = NULL;
1288         uint32_t handle = *id;
1289         struct vmw_relocation *reloc;
1290         int ret;
1291
1292         ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
1293         if (unlikely(ret != 0)) {
1294                 DRM_ERROR("Could not find or use MOB buffer.\n");
1295                 ret = -EINVAL;
1296                 goto out_no_reloc;
1297         }
1298
1299         if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
1300                 DRM_ERROR("Max number relocations per submission"
1301                           " exceeded\n");
1302                 ret = -EINVAL;
1303                 goto out_no_reloc;
1304         }
1305
1306         reloc = &sw_context->relocs[sw_context->cur_reloc++];
1307         reloc->mob_loc = id;
1308         reloc->location = NULL;
1309
1310         ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
1311         if (unlikely(ret != 0))
1312                 goto out_no_reloc;
1313
1314         *vmw_bo_p = vmw_bo;
1315         return 0;
1316
1317 out_no_reloc:
1318         vmw_bo_unreference(&vmw_bo);
1319         *vmw_bo_p = NULL;
1320         return ret;
1321 }
1322
1323 /**
1324  * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
1325  * handle to a valid SVGAGuestPtr
1326  *
1327  * @dev_priv: Pointer to a device private structure.
1328  * @sw_context: The software context used for this command batch validation.
1329  * @ptr: Pointer to the user-space handle to be translated.
1330  * @vmw_bo_p: Points to a location that, on successful return, will carry
1331  * a reference-counted pointer to the DMA buffer identified by the
1332  * user-space handle in @ptr.
1333  *
1334  * This function saves information needed to translate a user-space buffer
1335  * handle to a valid SVGAGuestPtr. The translation does not take place
1336  * immediately, but during a call to vmw_apply_relocations().
1337  * This function builds a relocation list and a list of buffers to validate.
1338  * The former needs to be freed using either vmw_apply_relocations() or
1339  * vmw_free_relocations(). The latter needs to be freed using
1340  * vmw_clear_validations().
1341  */
1342 static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
1343                                    struct vmw_sw_context *sw_context,
1344                                    SVGAGuestPtr *ptr,
1345                                    struct vmw_buffer_object **vmw_bo_p)
1346 {
1347         struct vmw_buffer_object *vmw_bo = NULL;
1348         uint32_t handle = ptr->gmrId;
1349         struct vmw_relocation *reloc;
1350         int ret;
1351
1352         ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
1353         if (unlikely(ret != 0)) {
1354                 DRM_ERROR("Could not find or use GMR region.\n");
1355                 ret = -EINVAL;
1356                 goto out_no_reloc;
1357         }
1358
1359         if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
1360                 DRM_ERROR("Max number relocations per submission"
1361                           " exceeded\n");
1362                 ret = -EINVAL;
1363                 goto out_no_reloc;
1364         }
1365
1366         reloc = &sw_context->relocs[sw_context->cur_reloc++];
1367         reloc->location = ptr;
1368
1369         ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
1370         if (unlikely(ret != 0))
1371                 goto out_no_reloc;
1372
1373         *vmw_bo_p = vmw_bo;
1374         return 0;
1375
1376 out_no_reloc:
1377         vmw_bo_unreference(&vmw_bo);
1378         *vmw_bo_p = NULL;
1379         return ret;
1380 }
1381
1382
1383
1384 /**
1385  * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
1386  *
1387  * @dev_priv: Pointer to a device private struct.
1388  * @sw_context: The software context used for this command submission.
1389  * @header: Pointer to the command header in the command stream.
1390  *
1391  * This function adds the new query to the query COTABLE.
1392  */
1393 static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
1394                                    struct vmw_sw_context *sw_context,
1395                                    SVGA3dCmdHeader *header)
1396 {
1397         struct vmw_dx_define_query_cmd {
1398                 SVGA3dCmdHeader header;
1399                 SVGA3dCmdDXDefineQuery q;
1400         } *cmd;
1401
1402         int ret;
1403         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
1404         struct vmw_resource *cotable_res;
1405
1407         if (ctx_node == NULL) {
1408                 DRM_ERROR("DX Context not set for query.\n");
1409                 return -EINVAL;
1410         }
1411
1412         cmd = container_of(header, struct vmw_dx_define_query_cmd, header);
1413
1414         if (cmd->q.type <  SVGA3D_QUERYTYPE_MIN ||
1415             cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
1416                 return -EINVAL;
1417
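             /*
              * Tell the DX query cotable that @queryId is about to be used,
              * so that it can make room for the entry before the command
              * reaches the device.
              */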
1418         cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
1419         ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
1420         vmw_resource_unreference(&cotable_res);
1421
1422         return ret;
1423 }
1424
1427 /**
1428  * vmw_cmd_dx_bind_query - validate an SVGA_3D_CMD_DX_BIND_QUERY command.
1429  *
1430  * @dev_priv: Pointer to a device private struct.
1431  * @sw_context: The software context used for this command submission.
1432  * @header: Pointer to the command header in the command stream.
1433  *
1434  * The query bind operation will eventually associate the query ID
1435  * with its backing MOB.  In this function, we take the user mode
1436  * MOB ID and use vmw_translate_mob_ptr() to translate it to its
1437  * kernel mode equivalent.
1438  */
1439 static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
1440                                  struct vmw_sw_context *sw_context,
1441                                  SVGA3dCmdHeader *header)
1442 {
1443         struct vmw_dx_bind_query_cmd {
1444                 SVGA3dCmdHeader header;
1445                 SVGA3dCmdDXBindQuery q;
1446         } *cmd;
1447
1448         struct vmw_buffer_object *vmw_bo;
1449         int ret;
1450
             if (unlikely(sw_context->dx_ctx_node == NULL)) {
                     DRM_ERROR("DX Context not set for query bind.\n");
                     return -EINVAL;
             }
1451
1452         cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);
1453
1454         /*
1455          * Look up the buffer pointed to by q.mobid, put it on the relocation
1456          * list so its kernel mode MOB ID can be filled in later.
1457          */
1458         ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
1459                                     &vmw_bo);
1460
1461         if (ret != 0)
1462                 return ret;
1463
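             /*
              * Stash the MOB and the DX context; the context is actually
              * associated with the query MOB later, once the whole command
              * stream has validated.
              */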
1464         sw_context->dx_query_mob = vmw_bo;
1465         sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
1466
1467         vmw_bo_unreference(&vmw_bo);
1468
1469         return ret;
1470 }
1471
1474 /**
1475  * vmw_cmd_begin_gb_query - validate an SVGA_3D_CMD_BEGIN_GB_QUERY command.
1476  *
1477  * @dev_priv: Pointer to a device private struct.
1478  * @sw_context: The software context used for this command submission.
1479  * @header: Pointer to the command header in the command stream.
1480  */
1481 static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
1482                                   struct vmw_sw_context *sw_context,
1483                                   SVGA3dCmdHeader *header)
1484 {
1485         struct vmw_begin_gb_query_cmd {
1486                 SVGA3dCmdHeader header;
1487                 SVGA3dCmdBeginGBQuery q;
1488         } *cmd;
1489
1490         cmd = container_of(header, struct vmw_begin_gb_query_cmd,
1491                            header);
1492
1493         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1494                                  user_context_converter, &cmd->q.cid,
1495                                  NULL);
1496 }
1497
1498 /**
1499  * vmw_cmd_begin_query - validate an SVGA_3D_CMD_BEGIN_QUERY command.
1500  *
1501  * @dev_priv: Pointer to a device private struct.
1502  * @sw_context: The software context used for this command submission.
1503  * @header: Pointer to the command header in the command stream.
1504  */
1505 static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
1506                                struct vmw_sw_context *sw_context,
1507                                SVGA3dCmdHeader *header)
1508 {
1509         struct vmw_begin_query_cmd {
1510                 SVGA3dCmdHeader header;
1511                 SVGA3dCmdBeginQuery q;
1512         } *cmd;
1513
1514         cmd = container_of(header, struct vmw_begin_query_cmd,
1515                            header);
1516
1517         if (dev_priv->has_mob) {
1518                 struct {
1519                         SVGA3dCmdHeader header;
1520                         SVGA3dCmdBeginGBQuery q;
1521                 } gb_cmd;
1522
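                     /*
                      * The legacy and guest-backed command structs must be
                      * the same size, since the command is rewritten in
                      * place below.
                      */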
1523                 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1524
1525                 gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1526                 gb_cmd.header.size = cmd->header.size;
1527                 gb_cmd.q.cid = cmd->q.cid;
1528                 gb_cmd.q.type = cmd->q.type;
1529
1530                 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1531                 return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1532         }
1533
1534         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1535                                  user_context_converter, &cmd->q.cid,
1536                                  NULL);
1537 }
1538
1539 /**
1540  * vmw_cmd_end_gb_query - validate an SVGA_3D_CMD_END_GB_QUERY command.
1541  *
1542  * @dev_priv: Pointer to a device private struct.
1543  * @sw_context: The software context used for this command submission.
1544  * @header: Pointer to the command header in the command stream.
1545  */
1546 static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1547                                 struct vmw_sw_context *sw_context,
1548                                 SVGA3dCmdHeader *header)
1549 {
1550         struct vmw_buffer_object *vmw_bo;
1551         struct vmw_query_cmd {
1552                 SVGA3dCmdHeader header;
1553                 SVGA3dCmdEndGBQuery q;
1554         } *cmd;
1555         int ret;
1556
1557         cmd = container_of(header, struct vmw_query_cmd, header);
1558         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1559         if (unlikely(ret != 0))
1560                 return ret;
1561
1562         ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1563                                     &cmd->q.mobid,
1564                                     &vmw_bo);
1565         if (unlikely(ret != 0))
1566                 return ret;
1567
1568         ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1569
1570         vmw_bo_unreference(&vmw_bo);
1571         return ret;
1572 }
1573
1574 /**
1575  * vmw_cmd_end_query - validate an SVGA_3D_CMD_END_QUERY command.
1576  *
1577  * @dev_priv: Pointer to a device private struct.
1578  * @sw_context: The software context used for this command submission.
1579  * @header: Pointer to the command header in the command stream.
1580  */
1581 static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1582                              struct vmw_sw_context *sw_context,
1583                              SVGA3dCmdHeader *header)
1584 {
1585         struct vmw_buffer_object *vmw_bo;
1586         struct vmw_query_cmd {
1587                 SVGA3dCmdHeader header;
1588                 SVGA3dCmdEndQuery q;
1589         } *cmd;
1590         int ret;
1591
1592         cmd = container_of(header, struct vmw_query_cmd, header);
1593         if (dev_priv->has_mob) {
1594                 struct {
1595                         SVGA3dCmdHeader header;
1596                         SVGA3dCmdEndGBQuery q;
1597                 } gb_cmd;
1598
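                     /*
                      * Rewrite in place as a guest-backed query; the legacy
                      * result GMR id/offset pair maps directly onto the
                      * MOB id/offset fields.
                      */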
1599                 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1600
1601                 gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1602                 gb_cmd.header.size = cmd->header.size;
1603                 gb_cmd.q.cid = cmd->q.cid;
1604                 gb_cmd.q.type = cmd->q.type;
1605                 gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1606                 gb_cmd.q.offset = cmd->q.guestResult.offset;
1607
1608                 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1609                 return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1610         }
1611
1612         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1613         if (unlikely(ret != 0))
1614                 return ret;
1615
1616         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1617                                       &cmd->q.guestResult,
1618                                       &vmw_bo);
1619         if (unlikely(ret != 0))
1620                 return ret;
1621
1622         ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1623
1624         vmw_bo_unreference(&vmw_bo);
1625         return ret;
1626 }
1627
1628 /**
1629  * vmw_cmd_wait_gb_query - validate an SVGA_3D_CMD_WAIT_GB_QUERY command.
1630  *
1631  * @dev_priv: Pointer to a device private struct.
1632  * @sw_context: The software context used for this command submission.
1633  * @header: Pointer to the command header in the command stream.
1634  */
1635 static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1636                                  struct vmw_sw_context *sw_context,
1637                                  SVGA3dCmdHeader *header)
1638 {
1639         struct vmw_buffer_object *vmw_bo;
1640         struct vmw_query_cmd {
1641                 SVGA3dCmdHeader header;
1642                 SVGA3dCmdWaitForGBQuery q;
1643         } *cmd;
1644         int ret;
1645
1646         cmd = container_of(header, struct vmw_query_cmd, header);
1647         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1648         if (unlikely(ret != 0))
1649                 return ret;
1650
1651         ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1652                                     &cmd->q.mobid,
1653                                     &vmw_bo);
1654         if (unlikely(ret != 0))
1655                 return ret;
1656
1657         vmw_bo_unreference(&vmw_bo);
1658         return 0;
1659 }
1660
1661 /**
1662  * vmw_cmd_wait_query - validate an SVGA_3D_CMD_WAIT_QUERY command.
1663  *
1664  * @dev_priv: Pointer to a device private struct.
1665  * @sw_context: The software context used for this command submission.
1666  * @header: Pointer to the command header in the command stream.
1667  */
1668 static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1669                               struct vmw_sw_context *sw_context,
1670                               SVGA3dCmdHeader *header)
1671 {
1672         struct vmw_buffer_object *vmw_bo;
1673         struct vmw_query_cmd {
1674                 SVGA3dCmdHeader header;
1675                 SVGA3dCmdWaitForQuery q;
1676         } *cmd;
1677         int ret;
1678
1679         cmd = container_of(header, struct vmw_query_cmd, header);
1680         if (dev_priv->has_mob) {
1681                 struct {
1682                         SVGA3dCmdHeader header;
1683                         SVGA3dCmdWaitForGBQuery q;
1684                 } gb_cmd;
1685
1686                 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1687
1688                 gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1689                 gb_cmd.header.size = cmd->header.size;
1690                 gb_cmd.q.cid = cmd->q.cid;
1691                 gb_cmd.q.type = cmd->q.type;
1692                 gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1693                 gb_cmd.q.offset = cmd->q.guestResult.offset;
1694
1695                 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1696                 return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1697         }
1698
1699         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1700         if (unlikely(ret != 0))
1701                 return ret;
1702
1703         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1704                                       &cmd->q.guestResult,
1705                                       &vmw_bo);
1706         if (unlikely(ret != 0))
1707                 return ret;
1708
1709         vmw_bo_unreference(&vmw_bo);
1710         return 0;
1711 }
1712
1713 static int vmw_cmd_dma(struct vmw_private *dev_priv,
1714                        struct vmw_sw_context *sw_context,
1715                        SVGA3dCmdHeader *header)
1716 {
1717         struct vmw_buffer_object *vmw_bo = NULL;
1718         struct vmw_surface *srf = NULL;
1719         struct vmw_dma_cmd {
1720                 SVGA3dCmdHeader header;
1721                 SVGA3dCmdSurfaceDMA dma;
1722         } *cmd;
1723         int ret;
1724         SVGA3dCmdSurfaceDMASuffix *suffix;
1725         uint32_t bo_size;
1726
1727         cmd = container_of(header, struct vmw_dma_cmd, header);
1728         suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
1729                                                header->size - sizeof(*suffix));
1730
1731         /* Make sure the device and verifier stay in sync. */
1732         if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1733                 DRM_ERROR("Invalid DMA suffix size.\n");
1734                 return -EINVAL;
1735         }
1736
1737         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1738                                       &cmd->dma.guest.ptr,
1739                                       &vmw_bo);
1740         if (unlikely(ret != 0))
1741                 return ret;
1742
1743         /* Make sure DMA doesn't cross BO boundaries. */
1744         bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
1745         if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
1746                 DRM_ERROR("Invalid DMA offset.\n");
1747                 ret = -EINVAL;
                     goto out_no_surface;
1748         }
1749
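             /* Clamp the DMA transfer so it cannot extend past the end of the BO. */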
1750         bo_size -= cmd->dma.guest.ptr.offset;
1751         if (unlikely(suffix->maximumOffset > bo_size))
1752                 suffix->maximumOffset = bo_size;
1753
1754         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1755                                 user_surface_converter, &cmd->dma.host.sid,
1756                                 NULL);
1757         if (unlikely(ret != 0)) {
1758                 if (unlikely(ret != -ERESTARTSYS))
1759                         DRM_ERROR("could not find surface for DMA.\n");
1760                 goto out_no_surface;
1761         }
1762
1763         srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1764
1765         vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
1766                              header);
1767
1768 out_no_surface:
1769         vmw_bo_unreference(&vmw_bo);
1770         return ret;
1771 }
1772
1773 static int vmw_cmd_draw(struct vmw_private *dev_priv,
1774                         struct vmw_sw_context *sw_context,
1775                         SVGA3dCmdHeader *header)
1776 {
1777         struct vmw_draw_cmd {
1778                 SVGA3dCmdHeader header;
1779                 SVGA3dCmdDrawPrimitives body;
1780         } *cmd;
1781         SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1782                 (unsigned long)header + sizeof(*cmd));
1783         SVGA3dPrimitiveRange *range;
1784         uint32_t i;
1785         uint32_t maxnum;
1786         int ret;
1787
1788         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1789         if (unlikely(ret != 0))
1790                 return ret;
1791
1792         cmd = container_of(header, struct vmw_draw_cmd, header);
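             /*
              * The vertex declarations follow the fixed-size body; bound
              * numVertexDecls by what actually fits in the command.
              */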
1793         maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1794
1795         if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1796                 DRM_ERROR("Illegal number of vertex declarations.\n");
1797                 return -EINVAL;
1798         }
1799
1800         for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1801                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1802                                         user_surface_converter,
1803                                         &decl->array.surfaceId, NULL);
1804                 if (unlikely(ret != 0))
1805                         return ret;
1806         }
1807
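             /* The primitive ranges immediately follow the vertex declarations. */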
1808         maxnum = (header->size - sizeof(cmd->body) -
1809                   cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1810         if (unlikely(cmd->body.numRanges > maxnum)) {
1811                 DRM_ERROR("Illegal number of index ranges.\n");
1812                 return -EINVAL;
1813         }
1814
1815         range = (SVGA3dPrimitiveRange *) decl;
1816         for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1817                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1818                                         user_surface_converter,
1819                                         &range->indexArray.surfaceId, NULL);
1820                 if (unlikely(ret != 0))
1821                         return ret;
1822         }
1823         return 0;
1824 }
1825
1827 static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1828                              struct vmw_sw_context *sw_context,
1829                              SVGA3dCmdHeader *header)
1830 {
1831         struct vmw_tex_state_cmd {
1832                 SVGA3dCmdHeader header;
1833                 SVGA3dCmdSetTextureState state;
1834         } *cmd;
1835
1836         SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1837           ((unsigned long) header + header->size + sizeof(header));
1838         SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1839                 ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
1840         struct vmw_resource_val_node *ctx_node;
1841         struct vmw_resource_val_node *res_node;
1842         int ret;
1843
1844         cmd = container_of(header, struct vmw_tex_state_cmd,
1845                            header);
1846
1847         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1848                                 user_context_converter, &cmd->state.cid,
1849                                 &ctx_node);
1850         if (unlikely(ret != 0))
1851                 return ret;
1852
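             /*
              * Walk the variable-length list of texture states, validating
              * the surface bound by each SVGA3D_TS_BIND_TEXTURE entry.
              */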
1853         for (; cur_state < last_state; ++cur_state) {
1854                 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1855                         continue;
1856
1857                 if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1858                         DRM_ERROR("Illegal texture/sampler unit %u.\n",
1859                                   (unsigned) cur_state->stage);
1860                         return -EINVAL;
1861                 }
1862
1863                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1864                                         user_surface_converter,
1865                                         &cur_state->value, &res_node);
1866                 if (unlikely(ret != 0))
1867                         return ret;
1868
1869                 if (dev_priv->has_mob) {
1870                         struct vmw_ctx_bindinfo_tex binding;
1871
1872                         binding.bi.ctx = ctx_node->res;
1873                         binding.bi.res = res_node ? res_node->res : NULL;
1874                         binding.bi.bt = vmw_ctx_binding_tex;
1875                         binding.texture_stage = cur_state->stage;
1876                         vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
1877                                         0, binding.texture_stage);
1878                 }
1879         }
1880
1881         return 0;
1882 }
1883
1884 static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1885                                       struct vmw_sw_context *sw_context,
1886                                       void *buf)
1887 {
1888         struct vmw_buffer_object *vmw_bo;
1889         int ret;
1890
1891         struct {
1892                 uint32_t header;
1893                 SVGAFifoCmdDefineGMRFB body;
1894         } *cmd = buf;
1895
1896         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1897                                       &cmd->body.ptr,
1898                                       &vmw_bo);
1899         if (unlikely(ret != 0))
1900                 return ret;
1901
1902         vmw_bo_unreference(&vmw_bo);
1903
1904         return ret;
1905 }
1906
1908 /**
1909  * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1910  * switching
1911  *
1912  * @dev_priv: Pointer to a device private struct.
1913  * @sw_context: The software context being used for this batch.
1914  * @val_node: The validation node representing the resource.
1915  * @buf_id: Pointer to the user-space backup buffer handle in the command
1916  * stream.
1917  * @backup_offset: Offset of backup into MOB.
1918  *
1919  * This function prepares for registering a switch of backup buffers
1920  * in the resource metadata just prior to unreserving.
1922  */
1923 static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1924                                      struct vmw_sw_context *sw_context,
1925                                      struct vmw_resource_val_node *val_node,
1926                                      uint32_t *buf_id,
1927                                      unsigned long backup_offset)
1928 {
1929         struct vmw_buffer_object *dma_buf;
1930         int ret;
1931
1932         ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
1933         if (ret)
1934                 return ret;
1935
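             /*
              * Only record the new backup here; the switch itself is
              * performed when the resource is unreserved.
              */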
1936         val_node->switching_backup = true;
1937         if (val_node->first_usage)
1938                 val_node->no_buffer_needed = true;
1939
1940         vmw_bo_unreference(&val_node->new_backup);
1941         val_node->new_backup = dma_buf;
1942         val_node->new_backup_offset = backup_offset;
1943
1944         return 0;
1945 }
1946
1948 /**
1949  * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1950  *
1951  * @dev_priv: Pointer to a device private struct.
1952  * @sw_context: The software context being used for this batch.
1953  * @res_type: The resource type.
1954  * @converter: Information about user-space binding for this resource type.
1955  * @res_id: Pointer to the user-space resource handle in the command stream.
1956  * @buf_id: Pointer to the user-space backup buffer handle in the command
1957  * stream.
1958  * @backup_offset: Offset of backup into MOB.
1959  *
1960  * This function prepares for registering a switch of backup buffers
1961  * in the resource metadata just prior to unreserving. It's basically a wrapper
1962  * around vmw_cmd_res_switch_backup with a different interface.
1963  */
1964 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1965                                  struct vmw_sw_context *sw_context,
1966                                  enum vmw_res_type res_type,
1967                                  const struct vmw_user_resource_conv
1968                                  *converter,
1969                                  uint32_t *res_id,
1970                                  uint32_t *buf_id,
1971                                  unsigned long backup_offset)
1972 {
1973         struct vmw_resource_val_node *val_node;
1974         int ret;
1975
1976         ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1977                                 converter, res_id, &val_node);
1978         if (ret)
1979                 return ret;
1980
1981         return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
1982                                          buf_id, backup_offset);
1983 }
1984
1985 /**
1986  * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1987  * command
1988  *
1989  * @dev_priv: Pointer to a device private struct.
1990  * @sw_context: The software context being used for this batch.
1991  * @header: Pointer to the command header in the command stream.
1992  */
1993 static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1994                                    struct vmw_sw_context *sw_context,
1995                                    SVGA3dCmdHeader *header)
1996 {
1997         struct vmw_bind_gb_surface_cmd {
1998                 SVGA3dCmdHeader header;
1999                 SVGA3dCmdBindGBSurface body;
2000         } *cmd;
2001
2002         cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
2003
2004         return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
2005                                      user_surface_converter,
2006                                      &cmd->body.sid, &cmd->body.mobid,
2007                                      0);
2008 }
2009
2010 /**
2011  * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
2012  * command
2013  *
2014  * @dev_priv: Pointer to a device private struct.
2015  * @sw_context: The software context being used for this batch.
2016  * @header: Pointer to the command header in the command stream.
2017  */
2018 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
2019                                    struct vmw_sw_context *sw_context,
2020                                    SVGA3dCmdHeader *header)
2021 {
2022         struct vmw_gb_surface_cmd {
2023                 SVGA3dCmdHeader header;
2024                 SVGA3dCmdUpdateGBImage body;
2025         } *cmd;
2026
2027         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2028
2029         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2030                                  user_surface_converter,
2031                                  &cmd->body.image.sid, NULL);
2032 }
2033
2034 /**
2035  * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
2036  * command
2037  *
2038  * @dev_priv: Pointer to a device private struct.
2039  * @sw_context: The software context being used for this batch.
2040  * @header: Pointer to the command header in the command stream.
2041  */
2042 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
2043                                      struct vmw_sw_context *sw_context,
2044                                      SVGA3dCmdHeader *header)
2045 {
2046         struct vmw_gb_surface_cmd {
2047                 SVGA3dCmdHeader header;
2048                 SVGA3dCmdUpdateGBSurface body;
2049         } *cmd;
2050
2051         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2052
2053         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2054                                  user_surface_converter,
2055                                  &cmd->body.sid, NULL);
2056 }
2057
2058 /**
2059  * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
2060  * command
2061  *
2062  * @dev_priv: Pointer to a device private struct.
2063  * @sw_context: The software context being used for this batch.
2064  * @header: Pointer to the command header in the command stream.
2065  */
2066 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
2067                                      struct vmw_sw_context *sw_context,
2068                                      SVGA3dCmdHeader *header)
2069 {
2070         struct vmw_gb_surface_cmd {
2071                 SVGA3dCmdHeader header;
2072                 SVGA3dCmdReadbackGBImage body;
2073         } *cmd;
2074
2075         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2076
2077         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2078                                  user_surface_converter,
2079                                  &cmd->body.image.sid, NULL);
2080 }
2081
2082 /**
2083  * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
2084  * command
2085  *
2086  * @dev_priv: Pointer to a device private struct.
2087  * @sw_context: The software context being used for this batch.
2088  * @header: Pointer to the command header in the command stream.
2089  */
2090 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
2091                                        struct vmw_sw_context *sw_context,
2092                                        SVGA3dCmdHeader *header)
2093 {
2094         struct vmw_gb_surface_cmd {
2095                 SVGA3dCmdHeader header;
2096                 SVGA3dCmdReadbackGBSurface body;
2097         } *cmd;
2098
2099         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2100
2101         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2102                                  user_surface_converter,
2103                                  &cmd->body.sid, NULL);
2104 }
2105
2106 /**
2107  * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
2108  * command
2109  *
2110  * @dev_priv: Pointer to a device private struct.
2111  * @sw_context: The software context being used for this batch.
2112  * @header: Pointer to the command header in the command stream.
2113  */
2114 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
2115                                        struct vmw_sw_context *sw_context,
2116                                        SVGA3dCmdHeader *header)
2117 {
2118         struct vmw_gb_surface_cmd {
2119                 SVGA3dCmdHeader header;
2120                 SVGA3dCmdInvalidateGBImage body;
2121         } *cmd;
2122
2123         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2124
2125         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2126                                  user_surface_converter,
2127                                  &cmd->body.image.sid, NULL);
2128 }
2129
2130 /**
2131  * vmw_cmd_invalidate_gb_surface - Validate an
2132  * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
2133  *
2134  * @dev_priv: Pointer to a device private struct.
2135  * @sw_context: The software context being used for this batch.
2136  * @header: Pointer to the command header in the command stream.
2137  */
2138 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
2139                                          struct vmw_sw_context *sw_context,
2140                                          SVGA3dCmdHeader *header)
2141 {
2142         struct vmw_gb_surface_cmd {
2143                 SVGA3dCmdHeader header;
2144                 SVGA3dCmdInvalidateGBSurface body;
2145         } *cmd;
2146
2147         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2148
2149         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2150                                  user_surface_converter,
2151                                  &cmd->body.sid, NULL);
2152 }
2153
2155 /**
2156  * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
2157  * command
2158  *
2159  * @dev_priv: Pointer to a device private struct.
2160  * @sw_context: The software context being used for this batch.
2161  * @header: Pointer to the command header in the command stream.
2162  */
2163 static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
2164                                  struct vmw_sw_context *sw_context,
2165                                  SVGA3dCmdHeader *header)
2166 {
2167         struct vmw_shader_define_cmd {
2168                 SVGA3dCmdHeader header;
2169                 SVGA3dCmdDefineShader body;
2170         } *cmd;
2171         int ret;
2172         size_t size;
2173         struct vmw_resource_val_node *val;
2174
2175         cmd = container_of(header, struct vmw_shader_define_cmd,
2176                            header);
2177
2178         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2179                                 user_context_converter, &cmd->body.cid,
2180                                 &val);
2181         if (unlikely(ret != 0))
2182                 return ret;
2183
2184         if (unlikely(!dev_priv->has_mob))
2185                 return 0;
2186
2187         size = cmd->header.size - sizeof(cmd->body);
2188         ret = vmw_compat_shader_add(dev_priv,
2189                                     vmw_context_res_man(val->res),
2190                                     cmd->body.shid, cmd + 1,
2191                                     cmd->body.type, size,
2192                                     &sw_context->staged_cmd_res);
2193         if (unlikely(ret != 0))
2194                 return ret;
2195
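             /*
              * The shader is now managed by the kernel; replace the define
              * command with a NOP so it never reaches the device.
              */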
2196         return vmw_resource_relocation_add(&sw_context->res_relocations,
2197                                            NULL,
2198                                            vmw_ptr_diff(sw_context->buf_start,
2199                                                         &cmd->header.id),
2200                                            vmw_res_rel_nop);
2201 }
2202
2203 /**
2204  * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
2205  * command
2206  *
2207  * @dev_priv: Pointer to a device private struct.
2208  * @sw_context: The software context being used for this batch.
2209  * @header: Pointer to the command header in the command stream.
2210  */
2211 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
2212                                   struct vmw_sw_context *sw_context,
2213                                   SVGA3dCmdHeader *header)
2214 {
2215         struct vmw_shader_destroy_cmd {
2216                 SVGA3dCmdHeader header;
2217                 SVGA3dCmdDestroyShader body;
2218         } *cmd;
2219         int ret;
2220         struct vmw_resource_val_node *val;
2221
2222         cmd = container_of(header, struct vmw_shader_destroy_cmd,
2223                            header);
2224
2225         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2226                                 user_context_converter, &cmd->body.cid,
2227                                 &val);
2228         if (unlikely(ret != 0))
2229                 return ret;
2230
2231         if (unlikely(!dev_priv->has_mob))
2232                 return 0;
2233
2234         ret = vmw_shader_remove(vmw_context_res_man(val->res),
2235                                 cmd->body.shid,
2236                                 cmd->body.type,
2237                                 &sw_context->staged_cmd_res);
2238         if (unlikely(ret != 0))
2239                 return ret;
2240
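             /*
              * The managed shader entry has been removed; NOP out the
              * destroy command as well.
              */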
2241         return vmw_resource_relocation_add(&sw_context->res_relocations,
2242                                            NULL,
2243                                            vmw_ptr_diff(sw_context->buf_start,
2244                                                         &cmd->header.id),
2245                                            vmw_res_rel_nop);
2246 }
2247
2248 /**
2249  * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
2250  * command
2251  *
2252  * @dev_priv: Pointer to a device private struct.
2253  * @sw_context: The software context being used for this batch.
2254  * @header: Pointer to the command header in the command stream.
2255  */
2256 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
2257                               struct vmw_sw_context *sw_context,
2258                               SVGA3dCmdHeader *header)
2259 {
2260         struct vmw_set_shader_cmd {
2261                 SVGA3dCmdHeader header;
2262                 SVGA3dCmdSetShader body;
2263         } *cmd;
2264         struct vmw_resource_val_node *ctx_node, *res_node = NULL;
2265         struct vmw_ctx_bindinfo_shader binding;
2266         struct vmw_resource *res = NULL;
2267         int ret;
2268
2269         cmd = container_of(header, struct vmw_set_shader_cmd,
2270                            header);
2271
2272         if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX ||
                 cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
2273                 DRM_ERROR("Illegal shader type %u.\n",
2274                           (unsigned) cmd->body.type);
2275                 return -EINVAL;
2276         }
2277
2278         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2279                                 user_context_converter, &cmd->body.cid,
2280                                 &ctx_node);
2281         if (unlikely(ret != 0))
2282                 return ret;
2283
2284         if (!dev_priv->has_mob)
2285                 return 0;
2286
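             /*
              * Look for the shader among the context's managed (compat)
              * shaders first; fall back to a user-space shader object if
              * none is found.
              */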
2287         if (cmd->body.shid != SVGA3D_INVALID_ID) {
2288                 res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
2289                                         cmd->body.shid,
2290                                         cmd->body.type);
2291
2292                 if (!IS_ERR(res)) {
2293                         ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
2294                                                     &cmd->body.shid, res,
2295                                                     &res_node);
2296                         vmw_resource_unreference(&res);
2297                         if (unlikely(ret != 0))
2298                                 return ret;
2299                 }
2300         }
2301
2302         if (!res_node) {
2303                 ret = vmw_cmd_res_check(dev_priv, sw_context,
2304                                         vmw_res_shader,
2305                                         user_shader_converter,
2306                                         &cmd->body.shid, &res_node);
2307                 if (unlikely(ret != 0))
2308                         return ret;
2309         }
2310
2311         binding.bi.ctx = ctx_node->res;
2312         binding.bi.res = res_node ? res_node->res : NULL;
2313         binding.bi.bt = vmw_ctx_binding_shader;
2314         binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2315         vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2316                         binding.shader_slot, 0);
2317         return 0;
2318 }
2319
2320 /**
2321  * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
2322  * command
2323  *
2324  * @dev_priv: Pointer to a device private struct.
2325  * @sw_context: The software context being used for this batch.
2326  * @header: Pointer to the command header in the command stream.
2327  */
2328 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2329                                     struct vmw_sw_context *sw_context,
2330                                     SVGA3dCmdHeader *header)
2331 {
2332         struct vmw_set_shader_const_cmd {
2333                 SVGA3dCmdHeader header;
2334                 SVGA3dCmdSetShaderConst body;
2335         } *cmd;
2336         int ret;
2337
2338         cmd = container_of(header, struct vmw_set_shader_const_cmd,
2339                            header);
2340
2341         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2342                                 user_context_converter, &cmd->body.cid,
2343                                 NULL);
2344         if (unlikely(ret != 0))
2345                 return ret;
2346
2347         if (dev_priv->has_mob)
2348                 header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2349
2350         return 0;
2351 }
2352
2353 /**
2354  * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
2355  * command
2356  *
2357  * @dev_priv: Pointer to a device private struct.
2358  * @sw_context: The software context being used for this batch.
2359  * @header: Pointer to the command header in the command stream.
2360  */
2361 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2362                                   struct vmw_sw_context *sw_context,
2363                                   SVGA3dCmdHeader *header)
2364 {
2365         struct vmw_bind_gb_shader_cmd {
2366                 SVGA3dCmdHeader header;
2367                 SVGA3dCmdBindGBShader body;
2368         } *cmd;
2369
2370         cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
2371                            header);
2372
2373         return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2374                                      user_shader_converter,
2375                                      &cmd->body.shid, &cmd->body.mobid,
2376                                      cmd->body.offsetInBytes);
2377 }
2378
2379 /**
2380  * vmw_cmd_dx_set_single_constant_buffer - Validate an
2381  * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2382  *
2383  * @dev_priv: Pointer to a device private struct.
2384  * @sw_context: The software context being used for this batch.
2385  * @header: Pointer to the command header in the command stream.
2386  */
2387 static int
2388 vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2389                                       struct vmw_sw_context *sw_context,
2390                                       SVGA3dCmdHeader *header)
2391 {
2392         struct {
2393                 SVGA3dCmdHeader header;
2394                 SVGA3dCmdDXSetSingleConstantBuffer body;
2395         } *cmd;
2396         struct vmw_resource_val_node *res_node = NULL;
2397         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2398         struct vmw_ctx_bindinfo_cb binding;
2399         int ret;
2400
2401         if (unlikely(ctx_node == NULL)) {
2402                 DRM_ERROR("DX Context not set.\n");
2403                 return -EINVAL;
2404         }
2405
2406         cmd = container_of(header, typeof(*cmd), header);
2407         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2408                                 user_surface_converter,
2409                                 &cmd->body.sid, &res_node);
2410         if (unlikely(ret != 0))
2411                 return ret;
2412
2413         binding.bi.ctx = ctx_node->res;
2414         binding.bi.res = res_node ? res_node->res : NULL;
2415         binding.bi.bt = vmw_ctx_binding_cb;
2416         binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2417         binding.offset = cmd->body.offsetInBytes;
2418         binding.size = cmd->body.sizeInBytes;
2419         binding.slot = cmd->body.slot;
2420
2421         if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
2422             binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2423                 DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
2424                           (unsigned) cmd->body.type,
2425                           (unsigned) binding.slot);
2426                 return -EINVAL;
2427         }
2428
2429         vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2430                         binding.shader_slot, binding.slot);
2431
2432         return 0;
2433 }
2434
2435 /**
2436  * vmw_cmd_dx_set_shader_res - Validate an
2437  * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
2438  *
2439  * @dev_priv: Pointer to a device private struct.
2440  * @sw_context: The software context being used for this batch.
2441  * @header: Pointer to the command header in the command stream.
2442  */
2443 static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2444                                      struct vmw_sw_context *sw_context,
2445                                      SVGA3dCmdHeader *header)
2446 {
2447         struct {
2448                 SVGA3dCmdHeader header;
2449                 SVGA3dCmdDXSetShaderResources body;
2450         } *cmd = container_of(header, typeof(*cmd), header);
2451         u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2452                 sizeof(SVGA3dShaderResourceViewId);
2453
2454         if ((u64) cmd->body.startView + (u64) num_sr_view >
2455             (u64) SVGA3D_DX_MAX_SRVIEWS ||
2456             cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2457                 DRM_ERROR("Invalid shader binding.\n");
2458                 return -EINVAL;
2459         }
2460
2461         return vmw_view_bindings_add(sw_context, vmw_view_sr,
2462                                      vmw_ctx_binding_sr,
2463                                      cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2464                                      (void *) &cmd[1], num_sr_view,
2465                                      cmd->body.startView);
2466 }
2467
2468 /**
2469  * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
2470  * command
2471  *
2472  * @dev_priv: Pointer to a device private struct.
2473  * @sw_context: The software context being used for this batch.
2474  * @header: Pointer to the command header in the command stream.
2475  */
2476 static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2477                                  struct vmw_sw_context *sw_context,
2478                                  SVGA3dCmdHeader *header)
2479 {
2480         struct {
2481                 SVGA3dCmdHeader header;
2482                 SVGA3dCmdDXSetShader body;
2483         } *cmd;
2484         struct vmw_resource *res = NULL;
2485         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2486         struct vmw_ctx_bindinfo_shader binding;
2487         int ret = 0;
2488
2489         if (unlikely(ctx_node == NULL)) {
2490                 DRM_ERROR("DX Context not set.\n");
2491                 return -EINVAL;
2492         }
2493
2494         cmd = container_of(header, typeof(*cmd), header);
2495
2496         if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX ||
2497             cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
2498                 DRM_ERROR("Illegal shader type %u.\n",
2499                           (unsigned) cmd->body.type);
2500                 return -EINVAL;
2501         }
2502
2503         if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2504                 res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2505                 if (IS_ERR(res)) {
2506                         DRM_ERROR("Could not find shader for binding.\n");
2507                         return PTR_ERR(res);
2508                 }
2509
2510                 ret = vmw_resource_val_add(sw_context, res, NULL);
2511                 if (ret)
2512                         goto out_unref;
2513         }
2514
2515         binding.bi.ctx = ctx_node->res;
2516         binding.bi.res = res;
2517         binding.bi.bt = vmw_ctx_binding_dx_shader;
2518         binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2519
2520         vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2521                         binding.shader_slot, 0);
2522 out_unref:
2523         if (res)
2524                 vmw_resource_unreference(&res);
2525
2526         return ret;
2527 }
2528
2529 /**
2530  * vmw_cmd_dx_set_vertex_buffers - Validate an
2531  * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
2532  *
2533  * @dev_priv: Pointer to a device private struct.
2534  * @sw_context: The software context being used for this batch.
2535  * @header: Pointer to the command header in the command stream.
2536  */
2537 static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2538                                          struct vmw_sw_context *sw_context,
2539                                          SVGA3dCmdHeader *header)
2540 {
2541         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2542         struct vmw_ctx_bindinfo_vb binding;
2543         struct vmw_resource_val_node *res_node;
2544         struct {
2545                 SVGA3dCmdHeader header;
2546                 SVGA3dCmdDXSetVertexBuffers body;
2547                 SVGA3dVertexBuffer buf[];
2548         } *cmd;
2549         int i, ret, num;
2550
2551         if (unlikely(ctx_node == NULL)) {
2552                 DRM_ERROR("DX Context not set.\n");
2553                 return -EINVAL;
2554         }
2555
2556         cmd = container_of(header, typeof(*cmd), header);
2557         num = (cmd->header.size - sizeof(cmd->body)) /
2558                 sizeof(SVGA3dVertexBuffer);
2559         if ((u64)num + (u64)cmd->body.startBuffer >
2560             (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2561                 DRM_ERROR("Invalid number of vertex buffers.\n");
2562                 return -EINVAL;
2563         }
2564
2565         for (i = 0; i < num; i++) {
2566                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2567                                         user_surface_converter,
2568                                         &cmd->buf[i].sid, &res_node);
2569                 if (unlikely(ret != 0))
2570                         return ret;
2571
2572                 binding.bi.ctx = ctx_node->res;
2573                 binding.bi.bt = vmw_ctx_binding_vb;
2574                 binding.bi.res = ((res_node) ? res_node->res : NULL);
2575                 binding.offset = cmd->buf[i].offset;
2576                 binding.stride = cmd->buf[i].stride;
2577                 binding.slot = i + cmd->body.startBuffer;
2578
2579                 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2580                                 0, binding.slot);
2581         }
2582
2583         return 0;
2584 }
2585
2586 /**
2587  * vmw_cmd_dx_set_index_buffer - Validate an
2588  * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
2589  *
2590  * @dev_priv: Pointer to a device private struct.
2591  * @sw_context: The software context being used for this batch.
2592  * @header: Pointer to the command header in the command stream.
2593  */
2594 static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2595                                        struct vmw_sw_context *sw_context,
2596                                        SVGA3dCmdHeader *header)
2597 {
2598         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2599         struct vmw_ctx_bindinfo_ib binding;
2600         struct vmw_resource_val_node *res_node;
2601         struct {
2602                 SVGA3dCmdHeader header;
2603                 SVGA3dCmdDXSetIndexBuffer body;
2604         } *cmd;
2605         int ret;
2606
2607         if (unlikely(ctx_node == NULL)) {
2608                 DRM_ERROR("DX Context not set.\n");
2609                 return -EINVAL;
2610         }
2611
2612         cmd = container_of(header, typeof(*cmd), header);
2613         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2614                                 user_surface_converter,
2615                                 &cmd->body.sid, &res_node);
2616         if (unlikely(ret != 0))
2617                 return ret;
2618
2619         binding.bi.ctx = ctx_node->res;
2620         binding.bi.res = ((res_node) ? res_node->res : NULL);
2621         binding.bi.bt = vmw_ctx_binding_ib;
2622         binding.offset = cmd->body.offset;
2623         binding.format = cmd->body.format;
2624
2625         vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
2626
2627         return 0;
2628 }
2629
2630 /**
2631  * vmw_cmd_dx_set_rendertargets - Validate an
2632  * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
2633  *
2634  * @dev_priv: Pointer to a device private struct.
2635  * @sw_context: The software context being used for this batch.
2636  * @header: Pointer to the command header in the command stream.
2637  */
2638 static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2639                                         struct vmw_sw_context *sw_context,
2640                                         SVGA3dCmdHeader *header)
2641 {
2642         struct {
2643                 SVGA3dCmdHeader header;
2644                 SVGA3dCmdDXSetRenderTargets body;
2645         } *cmd = container_of(header, typeof(*cmd), header);
2646         int ret;
2647         u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2648                 sizeof(SVGA3dRenderTargetViewId);
2649
2650         if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2651                 DRM_ERROR("Invalid DX Rendertarget binding.\n");
2652                 return -EINVAL;
2653         }
2654
2655         ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
2656                                     vmw_ctx_binding_ds, 0,
2657                                     &cmd->body.depthStencilViewId, 1, 0);
2658         if (ret)
2659                 return ret;
2660
2661         return vmw_view_bindings_add(sw_context, vmw_view_rt,
2662                                      vmw_ctx_binding_dx_rt, 0,
2663                                      (void *)&cmd[1], num_rt_view, 0);
2664 }
2665
2666 /**
2667  * vmw_cmd_dx_clear_rendertarget_view - Validate an
2668  * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2669  *
2670  * @dev_priv: Pointer to a device private struct.
2671  * @sw_context: The software context being used for this batch.
2672  * @header: Pointer to the command header in the command stream.
2673  */
2674 static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2675                                               struct vmw_sw_context *sw_context,
2676                                               SVGA3dCmdHeader *header)
2677 {
2678         struct {
2679                 SVGA3dCmdHeader header;
2680                 SVGA3dCmdDXClearRenderTargetView body;
2681         } *cmd = container_of(header, typeof(*cmd), header);
2682
2683         return vmw_view_id_val_add(sw_context, vmw_view_rt,
2684                                    cmd->body.renderTargetViewId);
2685 }
2686
2687 /**
2688  * vmw_cmd_dx_clear_depthstencil_view - Validate an
2689  * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2690  *
2691  * @dev_priv: Pointer to a device private struct.
2692  * @sw_context: The software context being used for this batch.
2693  * @header: Pointer to the command header in the command stream.
2694  */
2695 static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2696                                               struct vmw_sw_context *sw_context,
2697                                               SVGA3dCmdHeader *header)
2698 {
2699         struct {
2700                 SVGA3dCmdHeader header;
2701                 SVGA3dCmdDXClearDepthStencilView body;
2702         } *cmd = container_of(header, typeof(*cmd), header);
2703
2704         return vmw_view_id_val_add(sw_context, vmw_view_ds,
2705                                    cmd->body.depthStencilViewId);
2706 }
2707
2708 static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2709                                   struct vmw_sw_context *sw_context,
2710                                   SVGA3dCmdHeader *header)
2711 {
2712         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2713         struct vmw_resource_val_node *srf_node;
2714         struct vmw_resource *res;
2715         enum vmw_view_type view_type;
2716         int ret;
2717         /*
2718          * This is based on the fact that all affected define commands have
2719          * the same initial command body layout.
2720          */
2721         struct {
2722                 SVGA3dCmdHeader header;
2723                 uint32 defined_id;
2724                 uint32 sid;
2725         } *cmd;
2726
2727         if (unlikely(ctx_node == NULL)) {
2728                 DRM_ERROR("DX Context not set.\n");
2729                 return -EINVAL;
2730         }
2731
2732         view_type = vmw_view_cmd_to_type(header->id);
2733         if (view_type == vmw_view_max)
2734                 return -EINVAL;
2735         cmd = container_of(header, typeof(*cmd), header);
2736         if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
2737                 DRM_ERROR("Invalid surface id.\n");
2738                 return -EINVAL;
2739         }
2740         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2741                                 user_surface_converter,
2742                                 &cmd->sid, &srf_node);
2743         if (unlikely(ret != 0))
2744                 return ret;
2745
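             /*
              * Make room for the new view in the view cotable, then track
              * the view as a staged command-submission resource.
              */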
2746         res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
2747         ret = vmw_cotable_notify(res, cmd->defined_id);
2748         vmw_resource_unreference(&res);
2749         if (unlikely(ret != 0))
2750                 return ret;
2751
2752         return vmw_view_add(sw_context->man,
2753                             ctx_node->res,
2754                             srf_node->res,
2755                             view_type,
2756                             cmd->defined_id,
2757                             header,
2758                             header->size + sizeof(*header),
2759                             &sw_context->staged_cmd_res);
2760 }
2761
2762 /**
2763  * vmw_cmd_dx_set_so_targets - Validate an
2764  * SVGA_3D_CMD_DX_SET_SOTARGETS command.
2765  *
2766  * @dev_priv: Pointer to a device private struct.
2767  * @sw_context: The software context being used for this batch.
2768  * @header: Pointer to the command header in the command stream.
2769  */
2770 static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2771                                      struct vmw_sw_context *sw_context,
2772                                      SVGA3dCmdHeader *header)
2773 {
2774         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2775         struct vmw_ctx_bindinfo_so binding;
2776         struct vmw_resource_val_node *res_node;
2777         struct {
2778                 SVGA3dCmdHeader header;
2779                 SVGA3dCmdDXSetSOTargets body;
2780                 SVGA3dSoTarget targets[];
2781         } *cmd;
2782         int i, ret, num;
2783
2784         if (unlikely(ctx_node == NULL)) {
2785                 DRM_ERROR("DX Context not set.\n");
2786                 return -EINVAL;
2787         }
2788
2789         cmd = container_of(header, typeof(*cmd), header);
2790         num = (cmd->header.size - sizeof(cmd->body)) /
2791                 sizeof(SVGA3dSoTarget);
2792
2793         if (num > SVGA3D_DX_MAX_SOTARGETS) {
2794                 DRM_ERROR("Invalid DX SO binding.\n");
2795                 return -EINVAL;
2796         }
2797
2798         for (i = 0; i < num; i++) {
2799                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2800                                         user_surface_converter,
2801                                         &cmd->targets[i].sid, &res_node);
2802                 if (unlikely(ret != 0))
2803                         return ret;
2804
2805                 binding.bi.ctx = ctx_node->res;
2806                 binding.bi.res = ((res_node) ? res_node->res : NULL);
2807                 binding.bi.bt = vmw_ctx_binding_so;
2808                 binding.offset = cmd->targets[i].offset;
2809                 binding.size = cmd->targets[i].sizeInBytes;
2810                 binding.slot = i;
2811
2812                 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2813                                 0, binding.slot);
2814         }
2815
2816         return 0;
2817 }
2818
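/**
 * vmw_cmd_dx_so_define - Validate an SVGA_3D_CMD_DX_DEFINE_* command whose
 * defined object is backed by a state-object cotable.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */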
2819 static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2820                                 struct vmw_sw_context *sw_context,
2821                                 SVGA3dCmdHeader *header)
2822 {
2823         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2824         struct vmw_resource *res;
2825         /*
2826          * This is based on the fact that all affected define commands have
2827          * the same initial command body layout.
2828          */
2829         struct {
2830                 SVGA3dCmdHeader header;
2831                 uint32 defined_id;
2832         } *cmd;
2833         enum vmw_so_type so_type;
2834         int ret;
2835
2836         if (unlikely(ctx_node == NULL)) {
2837                 DRM_ERROR("DX Context not set.\n");
2838                 return -EINVAL;
2839         }
2840
2841         so_type = vmw_so_cmd_to_type(header->id);
2842         res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
2843         cmd = container_of(header, typeof(*cmd), header);
2844         ret = vmw_cotable_notify(res, cmd->defined_id);
2845         vmw_resource_unreference(&res);
2846
2847         return ret;
2848 }
2849
2850 /**
2851  * vmw_cmd_dx_check_subresource - Validate an
2852  * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
2853  *
2854  * @dev_priv: Pointer to a device private struct.
2855  * @sw_context: The software context being used for this batch.
2856  * @header: Pointer to the command header in the command stream.
2857  */
2858 static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2859                                         struct vmw_sw_context *sw_context,
2860                                         SVGA3dCmdHeader *header)
2861 {
2862         struct {
2863                 SVGA3dCmdHeader header;
2864                 union {
2865                         SVGA3dCmdDXReadbackSubResource r_body;
2866                         SVGA3dCmdDXInvalidateSubResource i_body;
2867                         SVGA3dCmdDXUpdateSubResource u_body;
2868                         SVGA3dSurfaceId sid;
2869                 };
2870         } *cmd;
2871
2872         BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2873                      offsetof(typeof(*cmd), sid));
2874         BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2875                      offsetof(typeof(*cmd), sid));
2876         BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2877                      offsetof(typeof(*cmd), sid));
2878
2879         cmd = container_of(header, typeof(*cmd), header);
2880
2881         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2882                                  user_surface_converter,
2883                                  &cmd->sid, NULL);
2884 }
2885
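/**
 * vmw_cmd_dx_cid_check - Validate a command that takes no resource
 * arguments but requires a DX context to be set.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */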
2886 static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2887                                 struct vmw_sw_context *sw_context,
2888                                 SVGA3dCmdHeader *header)
2889 {
2890         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2891
2892         if (unlikely(ctx_node == NULL)) {
2893                 DRM_ERROR("DX Context not set.\n");
2894                 return -EINVAL;
2895         }
2896
2897         return 0;
2898 }
2899
2900 /**
2901  * vmw_cmd_dx_view_remove - Validate a view remove command and
2902  * schedule the view resource for removal.
2903  *
2904  * @dev_priv: Pointer to a device private struct.
2905  * @sw_context: The software context being used for this batch.
2906  * @header: Pointer to the command header in the command stream.
2907  *
2908  * Check that the view exists, and if it was not created using this
2909  * command batch, conditionally make this command a NOP.
2910  */
2911 static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2912                                   struct vmw_sw_context *sw_context,
2913                                   SVGA3dCmdHeader *header)
2914 {
2915         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2916         struct {
2917                 SVGA3dCmdHeader header;
2918                 union vmw_view_destroy body;
2919         } *cmd = container_of(header, typeof(*cmd), header);
2920         enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2921         struct vmw_resource *view;
2922         int ret;
2923
2924         if (!ctx_node) {
2925                 DRM_ERROR("DX Context not set.\n");
2926                 return -EINVAL;
2927         }
2928
2929         ret = vmw_view_remove(sw_context->man,
2930                               cmd->body.view_id, view_type,
2931                               &sw_context->staged_cmd_res,
2932                               &view);
2933         if (ret || !view)
2934                 return ret;
2935
2936         /*
2937          * If the view wasn't created during this command batch, it might
2938          * have been removed due to a context swapout, so add a
2939          * relocation to conditionally make this command a NOP to avoid
2940          * device errors.
2941          */
2942         return vmw_resource_relocation_add(&sw_context->res_relocations,
2943                                            view,
2944                                            vmw_ptr_diff(sw_context->buf_start,
2945                                                         &cmd->header.id),
2946                                            vmw_res_rel_cond_nop);
2947 }
2948
2949 /**
2950  * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
2951  * command
2952  *
2953  * @dev_priv: Pointer to a device private struct.
2954  * @sw_context: The software context being used for this batch.
2955  * @header: Pointer to the command header in the command stream.
2956  */
2957 static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2958                                     struct vmw_sw_context *sw_context,
2959                                     SVGA3dCmdHeader *header)
2960 {
2961         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2962         struct vmw_resource *res;
2963         struct {
2964                 SVGA3dCmdHeader header;
2965                 SVGA3dCmdDXDefineShader body;
2966         } *cmd = container_of(header, typeof(*cmd), header);
2967         int ret;
2968
2969         if (!ctx_node) {
2970                 DRM_ERROR("DX Context not set.\n");
2971                 return -EINVAL;
2972         }
2973
2974         res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
2975         ret = vmw_cotable_notify(res, cmd->body.shaderId);
2976         vmw_resource_unreference(&res);
2977         if (ret)
2978                 return ret;
2979
2980         return vmw_dx_shader_add(sw_context->man, ctx_node->res,
2981                                  cmd->body.shaderId, cmd->body.type,
2982                                  &sw_context->staged_cmd_res);
2983 }
2984
2985 /**
2986  * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
2987  * command
2988  *
2989  * @dev_priv: Pointer to a device private struct.
2990  * @sw_context: The software context being used for this batch.
2991  * @header: Pointer to the command header in the command stream.
2992  */
2993 static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2994                                      struct vmw_sw_context *sw_context,
2995                                      SVGA3dCmdHeader *header)
2996 {
2997         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2998         struct {
2999                 SVGA3dCmdHeader header;
3000                 SVGA3dCmdDXDestroyShader body;
3001         } *cmd = container_of(header, typeof(*cmd), header);
3002         int ret;
3003
3004         if (!ctx_node) {
3005                 DRM_ERROR("DX Context not set.\n");
3006                 return -EINVAL;
3007         }
3008
3009         ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
3010                                 &sw_context->staged_cmd_res);
3011         if (ret)
3012                 DRM_ERROR("Could not find shader to remove.\n");
3013
3014         return ret;
3015 }
3016
3017 /**
3018  * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
3019  * command
3020  *
3021  * @dev_priv: Pointer to a device private struct.
3022  * @sw_context: The software context being used for this batch.
3023  * @header: Pointer to the command header in the command stream.
3024  */
3025 static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
3026                                   struct vmw_sw_context *sw_context,
3027                                   SVGA3dCmdHeader *header)
3028 {
3029         struct vmw_resource_val_node *ctx_node;
3030         struct vmw_resource_val_node *res_node;
3031         struct vmw_resource *res;
3032         struct {
3033                 SVGA3dCmdHeader header;
3034                 SVGA3dCmdDXBindShader body;
3035         } *cmd = container_of(header, typeof(*cmd), header);
3036         int ret;
3037
3038         if (cmd->body.cid != SVGA3D_INVALID_ID) {
3039                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
3040                                         user_context_converter,
3041                                         &cmd->body.cid, &ctx_node);
3042                 if (ret)
3043                         return ret;
3044         } else {
3045                 ctx_node = sw_context->dx_ctx_node;
3046                 if (!ctx_node) {
3047                         DRM_ERROR("DX Context not set.\n");
3048                         return -EINVAL;
3049                 }
3050         }
3051
3052         res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
3053                                 cmd->body.shid, 0);
3054         if (IS_ERR(res)) {
3055                 DRM_ERROR("Could not find shader to bind.\n");
3056                 return PTR_ERR(res);
3057         }
3058
3059         ret = vmw_resource_val_add(sw_context, res, &res_node);
3060         if (ret) {
3061                 DRM_ERROR("Error creating resource validation node.\n");
3062                 goto out_unref;
3063         }
3064
3066         ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
3067                                         &cmd->body.mobid,
3068                                         cmd->body.offsetInBytes);
3069 out_unref:
3070         vmw_resource_unreference(&res);
3071
3072         return ret;
3073 }
3074
3075 /**
3076  * vmw_cmd_dx_genmips - Validate an SVGA_3D_CMD_DX_GENMIPS command
3077  *
3078  * @dev_priv: Pointer to a device private struct.
3079  * @sw_context: The software context being used for this batch.
3080  * @header: Pointer to the command header in the command stream.
3081  */
3082 static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
3083                               struct vmw_sw_context *sw_context,
3084                               SVGA3dCmdHeader *header)
3085 {
3086         struct {
3087                 SVGA3dCmdHeader header;
3088                 SVGA3dCmdDXGenMips body;
3089         } *cmd = container_of(header, typeof(*cmd), header);
3090
3091         return vmw_view_id_val_add(sw_context, vmw_view_sr,
3092                                    cmd->body.shaderResourceViewId);
3093 }
3094
3095 /**
3096  * vmw_cmd_dx_transfer_from_buffer -
3097  * Validate an SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
3098  *
3099  * @dev_priv: Pointer to a device private struct.
3100  * @sw_context: The software context being used for this batch.
3101  * @header: Pointer to the command header in the command stream.
3102  */
3103 static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
3104                                            struct vmw_sw_context *sw_context,
3105                                            SVGA3dCmdHeader *header)
3106 {
3107         struct {
3108                 SVGA3dCmdHeader header;
3109                 SVGA3dCmdDXTransferFromBuffer body;
3110         } *cmd = container_of(header, typeof(*cmd), header);
3111         int ret;
3112
3113         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3114                                 user_surface_converter,
3115                                 &cmd->body.srcSid, NULL);
3116         if (ret != 0)
3117                 return ret;
3118
3119         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3120                                  user_surface_converter,
3121                                  &cmd->body.destSid, NULL);
3122 }
3123
3124 /**
3125  * vmw_cmd_intra_surface_copy -
3126  * Validate an SVGA_3D_CMD_INTRA_SURFACE_COPY command
3127  *
3128  * @dev_priv: Pointer to a device private struct.
3129  * @sw_context: The software context being used for this batch.
3130  * @header: Pointer to the command header in the command stream.
3131  */
3132 static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
3133                                       struct vmw_sw_context *sw_context,
3134                                       SVGA3dCmdHeader *header)
3135 {
3136         struct {
3137                 SVGA3dCmdHeader header;
3138                 SVGA3dCmdIntraSurfaceCopy body;
3139         } *cmd = container_of(header, typeof(*cmd), header);
3140
3141         if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
3142                 return -EINVAL;
3143
3144         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3145                                  user_surface_converter,
3146                                  &cmd->body.surface.sid, NULL);
3147 }
3148
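/**
 * vmw_cmd_check_not_3d - Validate a non-3D SVGA FIFO command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: Inputs the remaining size of the command stream. Outputs the
 * size of the validated command.
 */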
3150 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3151                                 struct vmw_sw_context *sw_context,
3152                                 void *buf, uint32_t *size)
3153 {
3154         uint32_t size_remaining = *size;
3155         uint32_t cmd_id;
3156
3157         cmd_id = ((uint32_t *)buf)[0];
3158         switch (cmd_id) {
3159         case SVGA_CMD_UPDATE:
3160                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3161                 break;
3162         case SVGA_CMD_DEFINE_GMRFB:
3163                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3164                 break;
3165         case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3166                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3167                 break;
3168         case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
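                /* The two blit command structs have the same size. */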
3169                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3170                 break;
3171         default:
3172                 DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
3173                 return -EINVAL;
3174         }
3175
3176         if (*size > size_remaining) {
3177                 DRM_ERROR("Invalid SVGA command (size mismatch): %u.\n",
3178                           cmd_id);
3179                 return -EINVAL;
3180         }
3181
3182         if (unlikely(!sw_context->kernel)) {
3183                 DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
3184                 return -EPERM;
3185         }
3186
3187         if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3188                 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3189
3190         return 0;
3191 }
3192
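/*
 * Per-command validation table, indexed by command id relative to
 * SVGA_3D_CMD_BASE. The three booleans of each entry are, in order,
 * user_allow, gb_disable and gb_enable.
 */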
3193 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3194         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3195                     false, false, false),
3196         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3197                     false, false, false),
3198         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3199                     true, false, false),
3200         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3201                     true, false, false),
3202         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3203                     true, false, false),
3204         VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3205                     false, false, false),
3206         VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3207                     false, false, false),
3208         VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3209                     true, false, false),
3210         VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3211                     true, false, false),
3212         VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3213                     true, false, false),
3214         VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3215                     &vmw_cmd_set_render_target_check, true, false, false),
3216         VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3217                     true, false, false),
3218         VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3219                     true, false, false),
3220         VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3221                     true, false, false),
3222         VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3223                     true, false, false),
3224         VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3225                     true, false, false),
3226         VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3227                     true, false, false),
3228         VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3229                     true, false, false),
3230         VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3231                     false, false, false),
3232         VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3233                     true, false, false),
3234         VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3235                     true, false, false),
3236         VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3237                     true, false, false),
3238         VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3239                     true, false, false),
3240         VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3241                     true, false, false),
3242         VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3243                     true, false, false),
3244         VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3245                     true, false, false),
3246         VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3247                     true, false, false),
3248         VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3249                     true, false, false),
3250         VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3251                     true, false, false),
3252         VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3253                     &vmw_cmd_blt_surf_screen_check, false, false, false),
3254         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3255                     false, false, false),
3256         VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3257                     false, false, false),
3258         VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3259                     false, false, false),
3260         VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3261                     false, false, false),
3262         VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3263                     false, false, false),
3264         VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
3265                     false, false, false),
3266         VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
3267                     false, false, false),
3268         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
3269                     false, false, false),
3270         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
3271                     false, false, false),
3272         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
3273                     false, false, false),
3274         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
3275                     false, false, false),
3276         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
3277                     false, false, false),
3278         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
3279                     false, false, false),
3280         VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3281                     false, false, true),
3282         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3283                     false, false, true),
3284         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3285                     false, false, true),
3286         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3287                     false, false, true),
3288         VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3289                     false, false, true),
3290         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3291                     false, false, true),
3292         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3293                     false, false, true),
3294         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3295                     false, false, true),
3296         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3297                     true, false, true),
3298         VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3299                     false, false, true),
3300         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3301                     true, false, true),
3302         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3303                     &vmw_cmd_update_gb_surface, true, false, true),
3304         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3305                     &vmw_cmd_readback_gb_image, true, false, true),
3306         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3307                     &vmw_cmd_readback_gb_surface, true, false, true),
3308         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3309                     &vmw_cmd_invalidate_gb_image, true, false, true),
3310         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3311                     &vmw_cmd_invalidate_gb_surface, true, false, true),
3312         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3313                     false, false, true),
3314         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3315                     false, false, true),
3316         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3317                     false, false, true),
3318         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3319                     false, false, true),
3320         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3321                     false, false, true),
3322         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3323                     false, false, true),
3324         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3325                     true, false, true),
3326         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3327                     false, false, true),
3328         VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3329                     false, false, false),
3330         VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3331                     true, false, true),
3332         VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3333                     true, false, true),
3334         VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3335                     true, false, true),
3336         VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3337                     true, false, true),
3338         VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
3339                     true, false, true),
3340         VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3341                     false, false, true),
3342         VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3343                     false, false, true),
3344         VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3345                     false, false, true),
3346         VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3347                     false, false, true),
3348         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3349                     false, false, true),
3350         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3351                     false, false, true),
3352         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3353                     false, false, true),
3354         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3355                     false, false, true),
3356         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3357                     false, false, true),
3358         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3359                     false, false, true),
3360         VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3361                     true, false, true),
3362         VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3363                     false, false, true),
3364         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3365                     false, false, true),
3366         VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3367                     false, false, true),
3368         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3369                     false, false, true),
3370
3371         /*
3372          * DX commands
3373          */
3374         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3375                     false, false, true),
3376         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3377                     false, false, true),
3378         VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3379                     false, false, true),
3380         VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3381                     false, false, true),
3382         VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3383                     false, false, true),
3384         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3385                     &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3386         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3387                     &vmw_cmd_dx_set_shader_res, true, false, true),
3388         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3389                     true, false, true),
3390         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3391                     true, false, true),
3392         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3393                     true, false, true),
3394         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3395                     true, false, true),
3396         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3397                     true, false, true),
3398         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3399                     &vmw_cmd_dx_cid_check, true, false, true),
3400         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3401                     true, false, true),
3402         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3403                     &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3404         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3405                     &vmw_cmd_dx_set_index_buffer, true, false, true),
3406         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3407                     &vmw_cmd_dx_set_rendertargets, true, false, true),
3408         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3409                     true, false, true),
3410         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3411                     &vmw_cmd_dx_cid_check, true, false, true),
3412         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3413                     &vmw_cmd_dx_cid_check, true, false, true),
3414         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3415                     true, false, true),
3416         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3417                     true, false, true),
3418         VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3419                     true, false, true),
3420         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3421                     &vmw_cmd_dx_cid_check, true, false, true),
3422         VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3423                     true, false, true),
3424         VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3425                     true, false, true),
3426         VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3427                     true, false, true),
3428         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3429                     true, false, true),
3430         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3431                     true, false, true),
3432         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3433                     true, false, true),
3434         VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3435                     &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3436         VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3437                     &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3438         VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3439                     true, false, true),
3440         VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3441                     true, false, true),
3442         VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3443                     &vmw_cmd_dx_check_subresource, true, false, true),
3444         VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3445                     &vmw_cmd_dx_check_subresource, true, false, true),
3446         VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3447                     &vmw_cmd_dx_check_subresource, true, false, true),
3448         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3449                     &vmw_cmd_dx_view_define, true, false, true),
3450         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3451                     &vmw_cmd_dx_view_remove, true, false, true),
3452         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3453                     &vmw_cmd_dx_view_define, true, false, true),
3454         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3455                     &vmw_cmd_dx_view_remove, true, false, true),
3456         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3457                     &vmw_cmd_dx_view_define, true, false, true),
3458         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3459                     &vmw_cmd_dx_view_remove, true, false, true),
3460         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3461                     &vmw_cmd_dx_so_define, true, false, true),
3462         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3463                     &vmw_cmd_dx_cid_check, true, false, true),
3464         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3465                     &vmw_cmd_dx_so_define, true, false, true),
3466         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3467                     &vmw_cmd_dx_cid_check, true, false, true),
3468         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3469                     &vmw_cmd_dx_so_define, true, false, true),
3470         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3471                     &vmw_cmd_dx_cid_check, true, false, true),
3472         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3473                     &vmw_cmd_dx_so_define, true, false, true),
3474         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3475                     &vmw_cmd_dx_cid_check, true, false, true),
3476         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3477                     &vmw_cmd_dx_so_define, true, false, true),
3478         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3479                     &vmw_cmd_dx_cid_check, true, false, true),
3480         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3481                     &vmw_cmd_dx_define_shader, true, false, true),
3482         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3483                     &vmw_cmd_dx_destroy_shader, true, false, true),
3484         VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3485                     &vmw_cmd_dx_bind_shader, true, false, true),
3486         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3487                     &vmw_cmd_dx_so_define, true, false, true),
3488         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3489                     &vmw_cmd_dx_cid_check, true, false, true),
3490         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
3491                     true, false, true),
3492         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3493                     &vmw_cmd_dx_set_so_targets, true, false, true),
3494         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3495                     &vmw_cmd_dx_cid_check, true, false, true),
3496         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3497                     &vmw_cmd_dx_cid_check, true, false, true),
3498         VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3499                     &vmw_cmd_buffer_copy_check, true, false, true),
3500         VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3501                     &vmw_cmd_pred_copy_check, true, false, true),
3502         VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3503                     &vmw_cmd_dx_transfer_from_buffer,
3504                     true, false, true),
3505         VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
3506                     true, false, true),
3507 };
3508
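/**
 * vmw_cmd_describe - Obtain the name and size of a command for debugging
 * purposes.
 *
 * @buf: Pointer to the command.
 * @size: Outputs the size of the command in bytes.
 * @cmd: Outputs a pointer to a static string naming the command.
 *
 * Returns true if the command was recognized, false otherwise.
 */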
3509 bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
3510 {
3511         u32 cmd_id = ((u32 *) buf)[0];
3512
3513         if (cmd_id >= SVGA_CMD_MAX) {
3514                 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3515                 const struct vmw_cmd_entry *entry;
3516
3517                 *size = header->size + sizeof(SVGA3dCmdHeader);
3518                 cmd_id = header->id;
3519                 if (cmd_id >= SVGA_3D_CMD_MAX)
3520                         return false;
3521
3522                 cmd_id -= SVGA_3D_CMD_BASE;
3523                 entry = &vmw_cmd_entries[cmd_id];
3524                 *cmd = entry->cmd_name;
3525                 return true;
3526         }
3527
3528         switch (cmd_id) {
3529         case SVGA_CMD_UPDATE:
3530                 *cmd = "SVGA_CMD_UPDATE";
3531                 *size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
3532                 break;
3533         case SVGA_CMD_DEFINE_GMRFB:
3534                 *cmd = "SVGA_CMD_DEFINE_GMRFB";
3535                 *size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
3536                 break;
3537         case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3538                 *cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
3539                 *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3540                 break;
3541         case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3542                 *cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
3543                 *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3544                 break;
3545         default:
3546                 *cmd = "UNKNOWN";
3547                 *size = 0;
3548                 return false;
3549         }
3550
3551         return true;
3552 }
3553
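/**
 * vmw_cmd_check - Validate a single command in the command stream and
 * dispatch it to the command-specific validation function from
 * vmw_cmd_entries[].
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: Inputs the remaining size of the command stream. Outputs the
 * size of the validated command.
 */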
3554 static int vmw_cmd_check(struct vmw_private *dev_priv,
3555                          struct vmw_sw_context *sw_context,
3556                          void *buf, uint32_t *size)
3557 {
3558         uint32_t cmd_id;
3559         uint32_t size_remaining = *size;
3560         SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3561         int ret;
3562         const struct vmw_cmd_entry *entry;
3563         bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3564
3565         cmd_id = ((uint32_t *)buf)[0];
3566         /* Handle any non-3D commands. */
3567         if (unlikely(cmd_id < SVGA_CMD_MAX))
3568                 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3569
3571         cmd_id = header->id;
3572         *size = header->size + sizeof(SVGA3dCmdHeader);
3573
3574         cmd_id -= SVGA_3D_CMD_BASE;
3575         if (unlikely(*size > size_remaining))
3576                 goto out_invalid;
3577
3578         if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3579                 goto out_invalid;
3580
3581         entry = &vmw_cmd_entries[cmd_id];
3582         if (unlikely(!entry->func))
3583                 goto out_invalid;
3584
3585         if (unlikely(!entry->user_allow && !sw_context->kernel))
3586                 goto out_privileged;
3587
3588         if (unlikely(entry->gb_disable && gb))
3589                 goto out_old;
3590
3591         if (unlikely(entry->gb_enable && !gb))
3592                 goto out_new;
3593
3594         ret = entry->func(dev_priv, sw_context, header);
3595         if (unlikely(ret != 0))
3596                 goto out_invalid;
3597
3598         return 0;
3599 out_invalid:
3600         DRM_ERROR("Invalid SVGA3D command: %d\n",
3601                   cmd_id + SVGA_3D_CMD_BASE);
3602         return -EINVAL;
3603 out_privileged:
3604         DRM_ERROR("Privileged SVGA3D command: %d\n",
3605                   cmd_id + SVGA_3D_CMD_BASE);
3606         return -EPERM;
3607 out_old:
3608         DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
3609                   cmd_id + SVGA_3D_CMD_BASE);
3610         return -EINVAL;
3611 out_new:
3612         DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
3613                   cmd_id + SVGA_3D_CMD_BASE);
3614         return -EINVAL;
3615 }
3616
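/**
 * vmw_cmd_check_all - Validate all commands in a command batch by
 * repeatedly calling vmw_cmd_check() until the batch is consumed.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the start of the command batch.
 * @size: Size of the command batch in bytes.
 */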
3617 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3618                              struct vmw_sw_context *sw_context,
3619                              void *buf,
3620                              uint32_t size)
3621 {
3622         int32_t cur_size = size;
3623         int ret;
3624
3625         sw_context->buf_start = buf;
3626
3627         while (cur_size > 0) {
3628                 size = cur_size;
3629                 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3630                 if (unlikely(ret != 0))
3631                         return ret;
3632                 buf = (void *)((unsigned long) buf + size);
3633                 cur_size -= size;
3634         }
3635
3636         if (unlikely(cur_size != 0)) {
3637                 DRM_ERROR("Command verifier out of sync.\n");
3638                 return -EINVAL;
3639         }
3640
3641         return 0;
3642 }
3643
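/*
 * The buffer-object relocations are kept in a fixed-size array in the
 * software context, so freeing them amounts to resetting the counter.
 */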
3644 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3645 {
3646         sw_context->cur_reloc = 0;
3647 }
3648
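/**
 * vmw_apply_relocations - Rewrite buffer-object references in the command
 * stream with the locations the buffers were finally validated at.
 *
 * @sw_context: The software context holding the relocation list.
 */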
3649 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3650 {
3651         uint32_t i;
3652         struct vmw_relocation *reloc;
3653         struct ttm_validate_buffer *validate;
3654         struct ttm_buffer_object *bo;
3655
3656         for (i = 0; i < sw_context->cur_reloc; ++i) {
3657                 reloc = &sw_context->relocs[i];
3658                 validate = &sw_context->val_bufs[reloc->index].base;
3659                 bo = validate->bo;
3660                 switch (bo->mem.mem_type) {
3661                 case TTM_PL_VRAM:
3662                         reloc->location->offset += bo->offset;
3663                         reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3664                         break;
3665                 case VMW_PL_GMR:
3666                         reloc->location->gmrId = bo->mem.start;
3667                         break;
3668                 case VMW_PL_MOB:
3669                         *reloc->mob_loc = bo->mem.start;
3670                         break;
3671                 default:
3672                         BUG();
3673                 }
3674         }
3675         vmw_free_relocations(sw_context);
3676 }
3677
3678 /**
3679  * vmw_resource_list_unreference - Free up a resource list and unreference
3680  * all resources referenced by it.
3681  *
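 * @sw_context: The software context being used for this batch.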
3682  * @list: The resource list.
3683  */
3684 static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
3685                                           struct list_head *list)
3686 {
3687         struct vmw_resource_val_node *val, *val_next;
3688
3689         /*
3690          * Drop references to resources held during command submission.
3691          */
3692
3693         list_for_each_entry_safe(val, val_next, list, head) {
3694                 list_del_init(&val->head);
3695                 vmw_resource_unreference(&val->res);
3696
3697                 if (val->staged_bindings) {
3698                         if (val->staged_bindings != sw_context->staged_bindings)
3699                                 vmw_binding_state_free(val->staged_bindings);
3700                         else
3701                                 sw_context->staged_bindings_inuse = false;
3702                         val->staged_bindings = NULL;
3703                 }
3704
3705                 kfree(val);
3706         }
3707 }
3708
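/**
 * vmw_clear_validations - Drop references to the buffer objects held
 * during command submission, and remove the corresponding hash table
 * entries of both buffers and resources.
 *
 * @sw_context: The software context.
 */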
3709 static void vmw_clear_validations(struct vmw_sw_context *sw_context)
3710 {
3711         struct vmw_validate_buffer *entry, *next;
3712         struct vmw_resource_val_node *val;
3713
3714         /*
3715          * Drop references to DMA buffers held during command submission.
3716          */
3717         list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
3718                                  base.head) {
3719                 list_del(&entry->base.head);
3720                 ttm_bo_unref(&entry->base.bo);
3721                 (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
3722                 sw_context->cur_val_buf--;
3723         }
3724         BUG_ON(sw_context->cur_val_buf != 0);
3725
3726         list_for_each_entry(val, &sw_context->resource_list, head)
3727                 (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
3728 }
3729
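/**
 * vmw_validate_single_buffer - Place a single buffer object in
 * device-accessible memory.
 *
 * @dev_priv: Pointer to a device private struct.
 * @bo: The buffer object to validate.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 * @validate_as_mob: Whether the buffer must be placed as a MOB.
 */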
3730 int vmw_validate_single_buffer(struct vmw_private *dev_priv,
3731                                struct ttm_buffer_object *bo,
3732                                bool interruptible,
3733                                bool validate_as_mob)
3734 {
3735         struct vmw_buffer_object *vbo =
3736                 container_of(bo, struct vmw_buffer_object, base);
3737         struct ttm_operation_ctx ctx = { interruptible, false };
3738         int ret;
3739
3740         if (vbo->pin_count > 0)
3741                 return 0;
3742
3743         if (validate_as_mob)
3744                 return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
3745
3746         /*
3747          * Put BO in VRAM if there is space, otherwise as a GMR.
3748          * If there is no space in VRAM and GMR ids are all used up,
3749          * start evicting GMRs to make room. If the DMA buffer can't be
3750          * used as a GMR, this will return -ENOMEM.
3751          */
3752
3753         ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
3754         if (likely(ret == 0 || ret == -ERESTARTSYS))
3755                 return ret;
3756
3757         /*
3758          * If that failed, try VRAM again, this time evicting
3759          * previous contents.
3760          */
3761
3762         ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
3763         return ret;
3764 }
3765
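/**
 * vmw_validate_buffers - Validate all buffer objects on the software
 * context's validation list.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context holding the validation list.
 */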
3766 static int vmw_validate_buffers(struct vmw_private *dev_priv,
3767                                 struct vmw_sw_context *sw_context)
3768 {
3769         struct vmw_validate_buffer *entry;
3770         int ret;
3771
3772         list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
3773                 ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
3774                                                  true,
3775                                                  entry->validate_as_mob);
3776                 if (unlikely(ret != 0))
3777                         return ret;
3778         }
3779         return 0;
3780 }
3781
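/**
 * vmw_resize_cmd_bounce - Make sure the command bounce buffer is large
 * enough for @size bytes, growing it by at least 50% per iteration,
 * page-aligned, if it is not.
 *
 * @sw_context: The software context owning the bounce buffer.
 * @size: The required size in bytes.
 */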
3782 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3783                                  uint32_t size)
3784 {
3785         if (likely(sw_context->cmd_bounce_size >= size))
3786                 return 0;
3787
3788         if (sw_context->cmd_bounce_size == 0)
3789                 sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3790
3791         while (sw_context->cmd_bounce_size < size) {
3792                 sw_context->cmd_bounce_size =
3793                         PAGE_ALIGN(sw_context->cmd_bounce_size +
3794                                    (sw_context->cmd_bounce_size >> 1));
3795         }
3796
3797         vfree(sw_context->cmd_bounce);
3798         sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3799
3800         if (sw_context->cmd_bounce == NULL) {
3801                 DRM_ERROR("Failed to allocate command bounce buffer.\n");
3802                 sw_context->cmd_bounce_size = 0;
3803                 return -ENOMEM;
3804         }
3805
3806         return 0;
3807 }
3808
3809 /**
3810  * vmw_execbuf_fence_commands - create and submit a command stream fence
3811  *
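 * @file_priv: Pointer to the struct drm_file of the calling process, or
 * NULL if no user-space handle is requested.
 * @dev_priv: Pointer to a device private struct.
 * @p_fence: Outputs a pointer to the created fence object.
 * @p_handle: If non-NULL, outputs a user-space handle for the fence.
 *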
3812  * Creates a fence object and submits a command stream marker.
3813  * If this fails for some reason, we sync the fifo and return NULL.
3814  * It is then safe to fence buffers with a NULL pointer.
3815  *
3816  * If @p_handle is not NULL, @file_priv must also not be NULL, and a
3817  * user-space handle is created for the fence; otherwise it is not.
3818  */
3819
3820 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3821                                struct vmw_private *dev_priv,
3822                                struct vmw_fence_obj **p_fence,
3823                                uint32_t *p_handle)
3824 {
3825         uint32_t sequence;
3826         int ret;
3827         bool synced = false;
3828
3829         /* p_handle implies file_priv. */
3830         BUG_ON(p_handle != NULL && file_priv == NULL);
3831
3832         ret = vmw_fifo_send_fence(dev_priv, &sequence);
3833         if (unlikely(ret != 0)) {
3834                 DRM_ERROR("Fence submission error. Syncing.\n");
3835                 synced = true;
3836         }
3837
3838         if (p_handle != NULL)
3839                 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3840                                             sequence, p_fence, p_handle);
3841         else
3842                 ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3843
3844         if (unlikely(ret != 0 && !synced)) {
3845                 (void) vmw_fallback_wait(dev_priv, false, false,
3846                                          sequence, false,
3847                                          VMW_FENCE_WAIT_TIMEOUT);
3848                 *p_fence = NULL;
3849         }
3850
3851         return ret;
3852 }
3853
3854 /**
3855  * vmw_execbuf_copy_fence_user - copy fence object information to
3856  * user-space.
3857  *
3858  * @dev_priv: Pointer to a vmw_private struct.
3859  * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3860  * @ret: Return value from fence object creation.
3861  * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
3862  * which the information should be copied.
3863  * @fence: Pointer to the fence object.
3864  * @fence_handle: User-space fence handle.
3865  * @out_fence_fd: Exported file descriptor for the fence. -1 if not used.
3866  * @sync_file:  Only used to clean up in case of an error in this function.
3867  *
3868  * This function copies fence information to user-space. If copying
3869  * fails, the user-space struct drm_vmw_fence_rep::error member is
3870  * hopefully left untouched, and if user-space has preloaded it with
3871  * -EFAULT, the error will hopefully be detected.
3872  * Also, if copying fails, user-space will be unable to signal the
3873  * fence object, so we wait for it immediately and then unreference
3874  * the user-space reference.
3875  */
3876 int
3877 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3878                             struct vmw_fpriv *vmw_fp,
3879                             int ret,
3880                             struct drm_vmw_fence_rep __user *user_fence_rep,
3881                             struct vmw_fence_obj *fence,
3882                             uint32_t fence_handle,
3883                             int32_t out_fence_fd)
3884 {
3885         struct drm_vmw_fence_rep fence_rep;
3886
3887         if (user_fence_rep == NULL)
3888                 return 0;
3889
3890         memset(&fence_rep, 0, sizeof(fence_rep));
3891
3892         fence_rep.error = ret;
3893         fence_rep.fd = out_fence_fd;
3894         if (ret == 0) {
3895                 BUG_ON(fence == NULL);
3896
3897                 fence_rep.handle = fence_handle;
3898                 fence_rep.seqno = fence->base.seqno;
3899                 vmw_update_seqno(dev_priv, &dev_priv->fifo);
3900                 fence_rep.passed_seqno = dev_priv->last_read_seqno;
3901         }
3902
3903         /*
3904          * copy_to_user errors will be detected by user space not
3905          * seeing fence_rep::error filled in. Typically
3906          * user-space would have pre-set that member to -EFAULT.
3907          */
3908         ret = copy_to_user(user_fence_rep, &fence_rep,
3909                            sizeof(fence_rep));
3910
3911         /*
3912          * User-space lost the fence object. We need to sync
3913          * and unreference the handle.
3914          */
3915         if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3916                 ttm_ref_object_base_unref(vmw_fp->tfile,
3917                                           fence_handle, TTM_REF_USAGE);
3918                 DRM_ERROR("Fence copy error. Syncing.\n");
3919                 (void) vmw_fence_obj_wait(fence, false, false,
3920                                           VMW_FENCE_WAIT_TIMEOUT);
3921         }
3922
3923         return ret ? -EFAULT : 0;
3924 }
3925
3926 /**
3927  * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
3928  * the fifo.
3929  *
3930  * @dev_priv: Pointer to a device private structure.
3931  * @kernel_commands: Pointer to the unpatched command batch.
3932  * @command_size: Size of the unpatched command batch.
3933  * @sw_context: Structure holding the relocation lists.
3934  *
3935  * Side effects: If this function returns 0, then the command batch
3936  * pointed to by @kernel_commands will have been modified.
3937  */
3938 static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3939                                    void *kernel_commands,
3940                                    u32 command_size,
3941                                    struct vmw_sw_context *sw_context)
3942 {
3943         void *cmd;
3944
3945         if (sw_context->dx_ctx_node)
3946                 cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
3947                                           sw_context->dx_ctx_node->res->id);
3948         else
3949                 cmd = vmw_fifo_reserve(dev_priv, command_size);
3950         if (!cmd) {
3951                 DRM_ERROR("Failed reserving fifo space for commands.\n");
3952                 return -ENOMEM;
3953         }
3954
3955         vmw_apply_relocations(sw_context);
3956         memcpy(cmd, kernel_commands, command_size);
3957         vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3958         vmw_resource_relocations_free(&sw_context->res_relocations);
3959         vmw_fifo_commit(dev_priv, command_size);
3960
3961         return 0;
3962 }
3963
3964 /**
3965  * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
3966  * the command buffer manager.
3967  *
3968  * @dev_priv: Pointer to a device private structure.
3969  * @header: Opaque handle to the command buffer allocation.
3970  * @command_size: Size of the unpatched command batch.
3971  * @sw_context: Structure holding the relocation lists.
3972  *
3973  * Side effects: If this function returns 0, then the command buffer
3974  * represented by @header will have been modified.
3975  */
3976 static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3977                                      struct vmw_cmdbuf_header *header,
3978                                      u32 command_size,
3979                                      struct vmw_sw_context *sw_context)
3980 {
3981         u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
3982                   SVGA3D_INVALID_ID);
3983         void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
3984                                        id, false, header);
3985
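        /*
         * Reserving from the preallocated header, which vmw_execbuf_cmdbuf()
         * sized to hold this batch, is expected to succeed, so the returned
         * pointer is not checked here.
         */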
3986         vmw_apply_relocations(sw_context);
3987         vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3988         vmw_resource_relocations_free(&sw_context->res_relocations);
3989         vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3990
3991         return 0;
3992 }
3993
3994 /**
3995  * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3996  * submission using a command buffer.
3997  *
3998  * @dev_priv: Pointer to a device private structure.
3999  * @user_commands: User-space pointer to the commands to be submitted.
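 * @kernel_commands: Pointer to an already existing kernel-side copy of the
 * command batch, or NULL if none exists.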
4000  * @command_size: Size of the unpatched command batch.
4001  * @header: Out parameter returning the opaque pointer to the command buffer.
4002  *
4003  * This function checks whether we can use the command buffer manager for
4004  * submission and if so, creates a command buffer of suitable size and
4005  * copies the user data into that buffer.
4006  *
4007  * On successful return, the function returns a pointer to the data in the
4008  * command buffer and *@header is set to non-NULL.
4009  * If command buffers could not be used, the function will return the value
4010  * of @kernel_commands on function call. That value may be NULL. In that case,
4011  * the value of *@header will be set to NULL.
4012  * If an error is encountered, the function will return a pointer error value.
4013  * If the function is interrupted by a signal while sleeping, it will return
4014  * -ERESTARTSYS casted to a pointer error value.
4015  */
4016 static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
4017                                 void __user *user_commands,
4018                                 void *kernel_commands,
4019                                 u32 command_size,
4020                                 struct vmw_cmdbuf_header **header)
4021 {
4022         size_t cmdbuf_size;
4023         int ret;
4024
4025         *header = NULL;
4026         if (command_size > SVGA_CB_MAX_SIZE) {
4027                 DRM_ERROR("Command buffer is too large.\n");
4028                 return ERR_PTR(-EINVAL);
4029         }
4030
4031         if (!dev_priv->cman || kernel_commands)
4032                 return kernel_commands;
4033
4034         /* If possible, add a little space for fencing. */
4035         cmdbuf_size = command_size + 512;
4036         cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
4037         kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
4038                                            true, header);
4039         if (IS_ERR(kernel_commands))
4040                 return kernel_commands;
4041
4042         ret = copy_from_user(kernel_commands, user_commands,
4043                              command_size);
4044         if (ret) {
4045                 DRM_ERROR("Failed copying commands.\n");
4046                 vmw_cmdbuf_header_free(*header);
4047                 *header = NULL;
4048                 return ERR_PTR(-EFAULT);
4049         }
4050
4051         return kernel_commands;
4052 }
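
/*
 * A minimal sketch of how a caller handles the tri-state return value of
 * vmw_execbuf_cmdbuf(); it mirrors what vmw_execbuf_process() does below:
 *
 *	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
 *					     kernel_commands, command_size,
 *					     &header);
 *	if (IS_ERR(kernel_commands))
 *		return PTR_ERR(kernel_commands);
 *	if (header)
 *		// Submit through the command buffer manager, e.g. with
 *		// vmw_execbuf_submit_cmdbuf().
 *	else
 *		// NULL means "copy from user_commands into the bounce
 *		// buffer"; otherwise submit via vmw_execbuf_submit_fifo().
 */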
4053
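/**
 * vmw_execbuf_tie_context - Validate and set up a DX context for a
 * command batch.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context of the current submission.
 * @handle: User-space handle of the DX context, or SVGA3D_INVALID_ID if
 * the batch is not tied to a DX context.
 *
 * Looks up the context resource, adds it to the software context's
 * resource list and makes its command buffer resource manager available
 * through @sw_context->man.
 */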
4054 static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
4055                                    struct vmw_sw_context *sw_context,
4056                                    uint32_t handle)
4057 {
4058         struct vmw_resource_val_node *ctx_node;
4059         struct vmw_resource *res;
4060         int ret;
4061
4062         if (handle == SVGA3D_INVALID_ID)
4063                 return 0;
4064
4065         ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
4066                                               handle, user_context_converter,
4067                                               &res);
4068         if (unlikely(ret != 0)) {
4069                 DRM_ERROR("Could not find or use DX context 0x%08x.\n",
4070                           (unsigned) handle);
4071                 return ret;
4072         }
4073
4074         ret = vmw_resource_val_add(sw_context, res, &ctx_node);
4075         if (unlikely(ret != 0))
4076                 goto out_err;
4077
4078         sw_context->dx_ctx_node = ctx_node;
4079         sw_context->man = vmw_context_res_man(res);
4080 out_err:
4081         vmw_resource_unreference(&res);
4082         return ret;
4083 }
4084
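/**
 * vmw_execbuf_process - Validate, patch and submit a command batch.
 *
 * @file_priv: Pointer to the drm file private structure.
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the command batch, or NULL.
 * @kernel_commands: Kernel pointer to the command batch, or NULL.
 * @command_size: Size of the command batch.
 * @throttle_us: If non-zero, throttle the submission until the fifo lag is
 * below this value, in microseconds.
 * @dx_context_handle: Handle of the DX context to tie the batch to, or
 * SVGA3D_INVALID_ID.
 * @user_fence_rep: User-space pointer to a struct drm_vmw_fence_rep to
 * receive fence information, or NULL.
 * @out_fence: If non-NULL, used to return a ref-counted pointer to the
 * fence created for the batch.
 * @flags: DRM_VMW_EXECBUF_FLAG_* flags describing the submission.
 *
 * Returns 0 on success, or a negative error code on failure.
 */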
4085 int vmw_execbuf_process(struct drm_file *file_priv,
4086                         struct vmw_private *dev_priv,
4087                         void __user *user_commands,
4088                         void *kernel_commands,
4089                         uint32_t command_size,
4090                         uint64_t throttle_us,
4091                         uint32_t dx_context_handle,
4092                         struct drm_vmw_fence_rep __user *user_fence_rep,
4093                         struct vmw_fence_obj **out_fence,
4094                         uint32_t flags)
4095 {
4096         struct vmw_sw_context *sw_context = &dev_priv->ctx;
4097         struct vmw_fence_obj *fence = NULL;
4098         struct vmw_resource *error_resource;
4099         struct list_head resource_list;
4100         struct vmw_cmdbuf_header *header;
4101         struct ww_acquire_ctx ticket;
4102         uint32_t handle;
4103         int ret;
4104         int32_t out_fence_fd = -1;
4105         struct sync_file *sync_file = NULL;
4106
4108         if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4109                 out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
4110                 if (out_fence_fd < 0) {
4111                         DRM_ERROR("Failed to get a fence file descriptor.\n");
4112                         return out_fence_fd;
4113                 }
4114         }
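
        /*
         * Note that only a file descriptor has been reserved at this point;
         * the sync_file backing it is created and installed further down,
         * after the batch has been submitted and fenced.
         */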
4115
4116         if (throttle_us) {
4117                 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
4118                                    throttle_us);
4119
4120                 if (ret)
4121                         goto out_free_fence_fd;
4122         }
4123
4124         kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
4125                                              kernel_commands, command_size,
4126                                              &header);
4127         if (IS_ERR(kernel_commands)) {
4128                 ret = PTR_ERR(kernel_commands);
4129                 goto out_free_fence_fd;
4130         }
4131
4132         ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
4133         if (ret) {
4134                 ret = -ERESTARTSYS;
4135                 goto out_free_header;
4136         }
4137
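        /*
         * The software context is embedded in the device private structure
         * and reused across submissions; the cmdbuf_mutex taken above
         * serializes all access to it.
         */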
4138         sw_context->kernel = false;
4139         if (kernel_commands == NULL) {
4140                 ret = vmw_resize_cmd_bounce(sw_context, command_size);
4141                 if (unlikely(ret != 0))
4142                         goto out_unlock;
4143
4145                 ret = copy_from_user(sw_context->cmd_bounce,
4146                                      user_commands, command_size);
4147
4148                 if (unlikely(ret != 0)) {
4149                         ret = -EFAULT;
4150                         DRM_ERROR("Failed copying commands.\n");
4151                         goto out_unlock;
4152                 }
4153                 kernel_commands = sw_context->cmd_bounce;
4154         } else if (!header) {
4155                 sw_context->kernel = true;
        }
4156
4157         sw_context->fp = vmw_fpriv(file_priv);
4158         sw_context->cur_reloc = 0;
4159         sw_context->cur_val_buf = 0;
4160         INIT_LIST_HEAD(&sw_context->resource_list);
4161         INIT_LIST_HEAD(&sw_context->ctx_resource_list);
4162         sw_context->cur_query_bo = dev_priv->pinned_bo;
4163         sw_context->last_query_ctx = NULL;
4164         sw_context->needs_post_query_barrier = false;
4165         sw_context->dx_ctx_node = NULL;
4166         sw_context->dx_query_mob = NULL;
4167         sw_context->dx_query_ctx = NULL;
4168         memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
4169         INIT_LIST_HEAD(&sw_context->validate_nodes);
4170         INIT_LIST_HEAD(&sw_context->res_relocations);
4171         if (sw_context->staged_bindings)
4172                 vmw_binding_state_reset(sw_context->staged_bindings);
4173
4174         if (!sw_context->res_ht_initialized) {
4175                 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
4176                 if (unlikely(ret != 0))
4177                         goto out_unlock;
4178                 sw_context->res_ht_initialized = true;
4179         }
4180         INIT_LIST_HEAD(&sw_context->staged_cmd_res);
4181         INIT_LIST_HEAD(&resource_list);
4182         ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
4183         if (unlikely(ret != 0)) {
4184                 list_splice_init(&sw_context->ctx_resource_list,
4185                                  &sw_context->resource_list);
4186                 goto out_err_nores;
4187         }
4188
4189         ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
4190                                 command_size);
4191         /*
4192          * Merge the resource lists before checking the return status
4193          * from vmw_cmd_check_all so that all the open hashtabs will
4194          * be handled properly even if vmw_cmd_check_all fails.
4195          */
4196         list_splice_init(&sw_context->ctx_resource_list,
4197                          &sw_context->resource_list);
4198
4199         if (unlikely(ret != 0))
4200                 goto out_err_nores;
4201
4202         ret = vmw_resources_reserve(sw_context);
4203         if (unlikely(ret != 0))
4204                 goto out_err_nores;
4205
4206         ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
4207                                      true, NULL);
4208         if (unlikely(ret != 0))
4209                 goto out_err_nores;
4210
4211         ret = vmw_validate_buffers(dev_priv, sw_context);
4212         if (unlikely(ret != 0))
4213                 goto out_err;
4214
4215         ret = vmw_resources_validate(sw_context);
4216         if (unlikely(ret != 0))
4217                 goto out_err;
4218
4219         ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
4220         if (unlikely(ret != 0)) {
4221                 ret = -ERESTARTSYS;
4222                 goto out_err;
4223         }
4224
4225         if (dev_priv->has_mob) {
4226                 ret = vmw_rebind_contexts(sw_context);
4227                 if (unlikely(ret != 0))
4228                         goto out_unlock_binding;
4229         }
4230
4231         if (!header) {
4232                 ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
4233                                               command_size, sw_context);
4234         } else {
4235                 ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
4236                                                 sw_context);
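                /*
                 * The command buffer manager now owns @header; clear the
                 * local pointer so the error paths won't free it again.
                 */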
4237                 header = NULL;
4238         }
4239         mutex_unlock(&dev_priv->binding_mutex);
4240         if (ret)
4241                 goto out_err;
4242
4243         vmw_query_bo_switch_commit(dev_priv, sw_context);
4244         ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
4245                                          &fence,
4246                                          (user_fence_rep) ? &handle : NULL);
4247         /*
4248          * This error is harmless, because if fence submission fails,
4249          * vmw_fifo_send_fence will sync. The error will be propagated to
4250          * user-space in @user_fence_rep.
4251          */
4252
4253         if (ret != 0)
4254                 DRM_ERROR("Fence submission error. Syncing.\n");
4255
4256         vmw_resources_unreserve(sw_context, false);
4257
4258         ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
4259                                     (void *) fence);
4260
4261         if (unlikely(dev_priv->pinned_bo != NULL &&
4262                      !dev_priv->query_cid_valid))
4263                 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
4264
4265         vmw_clear_validations(sw_context);
4266
4267         /*
4268          * If anything fails here, give up trying to export the fence
4269          * and do a sync since the user mode will not be able to sync
4270          * the fence itself.  This ensures we are still functionally
4271          * correct.
4272          */
4273         if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4275                 sync_file = sync_file_create(&fence->base);
4276                 if (!sync_file) {
4277                         DRM_ERROR("Unable to create sync file for fence\n");
4278                         put_unused_fd(out_fence_fd);
4279                         out_fence_fd = -1;
4280
4281                         (void) vmw_fence_obj_wait(fence, false, false,
4282                                                   VMW_FENCE_WAIT_TIMEOUT);
4283                 }
4284         }
4285
4286         ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
4287                                           user_fence_rep, fence, handle,
                                          out_fence_fd);
4288
4289         if (sync_file) {
4290                 if (ret) {
4291                         /* usercopy of fence failed, put the file object */
4292                         fput(sync_file->file);
4293                         put_unused_fd(out_fence_fd);
4294                 } else {
4295                         /* Link the fence with the FD created earlier */
4296                         fd_install(out_fence_fd, sync_file->file);
4297                 }
4298         }
4299
4300         /* Don't unreference when handing fence out */
4301         if (unlikely(out_fence != NULL)) {
4302                 *out_fence = fence;
4303                 fence = NULL;
4304         } else if (likely(fence != NULL)) {
4305                 vmw_fence_obj_unreference(&fence);
4306         }
4307
4308         list_splice_init(&sw_context->resource_list, &resource_list);
4309         vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
4310         mutex_unlock(&dev_priv->cmdbuf_mutex);
4311
4312         /*
4313          * Unreference resources outside of the cmdbuf_mutex to
4314          * avoid deadlocks in resource destruction paths.
4315          */
4316         vmw_resource_list_unreference(sw_context, &resource_list);
4317
4318         return ret;
4319
4320 out_unlock_binding:
4321         mutex_unlock(&dev_priv->binding_mutex);
4322 out_err:
4323         ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
4324 out_err_nores:
4325         vmw_resources_unreserve(sw_context, true);
4326         vmw_resource_relocations_free(&sw_context->res_relocations);
4327         vmw_free_relocations(sw_context);
4328         vmw_clear_validations(sw_context);
4329         if (unlikely(dev_priv->pinned_bo != NULL &&
4330                      !dev_priv->query_cid_valid))
4331                 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4332 out_unlock:
4333         list_splice_init(&sw_context->resource_list, &resource_list);
4334         error_resource = sw_context->error_resource;
4335         sw_context->error_resource = NULL;
4336         vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
4337         mutex_unlock(&dev_priv->cmdbuf_mutex);
4338
4339         /*
4340          * Unreference resources outside of the cmdbuf_mutex to
4341          * avoid deadlocks in resource destruction paths.
4342          */
4343         vmw_resource_list_unreference(sw_context, &resource_list);
4344         if (unlikely(error_resource != NULL))
4345                 vmw_resource_unreference(&error_resource);
4346 out_free_header:
4347         if (header)
4348                 vmw_cmdbuf_header_free(header);
4349 out_free_fence_fd:
4350         if (out_fence_fd >= 0)
4351                 put_unused_fd(out_fence_fd);
4352
4353         return ret;
4354 }
4355
4356 /**
4357  * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
4358  *
4359  * @dev_priv: The device private structure.
4360  *
4361  * This function is called to idle the fifo and unpin the query buffer
4362  * when the normal way of doing so fails, which should be extremely
4363  * rare.
4364  */
4365 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
4366 {
4367         DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
4368
4369         (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
4370         vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4371         if (dev_priv->dummy_query_bo_pinned) {
4372                 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4373                 dev_priv->dummy_query_bo_pinned = false;
4374         }
4375 }
4376
4378 /**
4379  * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
4380  * query bo.
4381  *
4382  * @dev_priv: The device private structure.
4383  * @fence: If non-NULL, should point to a struct vmw_fence_obj issued
4384  * _after_ a query barrier that flushes all queries touching the current
4385  * buffer pointed to by @dev_priv->pinned_bo.
4386  *
4387  * This function should be used to unpin the pinned query bo, or
4388  * as a query barrier when we need to make sure that all queries have
4389  * finished before the next fifo command. (For example on hardware
4390  * context destructions where the hardware may otherwise leak unfinished
4391  * queries).
4392  *
4393  * This function does not return any failure codes, but makes attempts
4394  * to unpin safely in case of errors.
4395  *
4396  * The function will synchronize on the previous query barrier, and will
4397  * thus not finish until that barrier has executed.
4398  *
4399  * The @dev_priv->cmdbuf_mutex must be held by the current thread before
4400  * calling this function.
4401  */
4402 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
4403                                      struct vmw_fence_obj *fence)
4404 {
4405         int ret = 0;
4406         struct list_head validate_list;
4407         struct ttm_validate_buffer pinned_val, query_val;
4408         struct vmw_fence_obj *lfence = NULL;
4409         struct ww_acquire_ctx ticket;
4410
4411         if (dev_priv->pinned_bo == NULL)
4412                 goto out_unlock;
4413
4414         INIT_LIST_HEAD(&validate_list);
4415
4416         pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
4417         pinned_val.shared = false;
4418         list_add_tail(&pinned_val.head, &validate_list);
4419
4420         query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
4421         query_val.shared = false;
4422         list_add_tail(&query_val.head, &validate_list);
4423
4424         ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
4425                                      false, NULL);
4426         if (unlikely(ret != 0)) {
4427                 vmw_execbuf_unpin_panic(dev_priv);
4428                 goto out_no_reserve;
4429         }
4430
4431         if (dev_priv->query_cid_valid) {
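                /*
                 * A fence argument may only be passed in after a query
                 * barrier has been emitted, which implies that the query
                 * cid is no longer valid here.
                 */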
4432                 BUG_ON(fence != NULL);
4433                 ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
4434                 if (unlikely(ret != 0)) {
4435                         vmw_execbuf_unpin_panic(dev_priv);
4436                         goto out_no_emit;
4437                 }
4438                 dev_priv->query_cid_valid = false;
4439         }
4440
4441         vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4442         if (dev_priv->dummy_query_bo_pinned) {
4443                 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4444                 dev_priv->dummy_query_bo_pinned = false;
4445         }
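        /*
         * If the caller didn't supply a fence issued after the query
         * barrier, emit one now so that the buffer objects are fenced
         * correctly.
         */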
4446         if (fence == NULL) {
4447                 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
4448                                                   NULL);
4449                 fence = lfence;
4450         }
4451         ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
4452         if (lfence != NULL)
4453                 vmw_fence_obj_unreference(&lfence);
4454
4455         ttm_bo_unref(&query_val.bo);
4456         ttm_bo_unref(&pinned_val.bo);
4457         vmw_bo_unreference(&dev_priv->pinned_bo);
4458 out_unlock:
4459         return;
4460
4461 out_no_emit:
4462         ttm_eu_backoff_reservation(&ticket, &validate_list);
4463 out_no_reserve:
4464         ttm_bo_unref(&query_val.bo);
4465         ttm_bo_unref(&pinned_val.bo);
4466         vmw_bo_unreference(&dev_priv->pinned_bo);
4467 }
4468
4469 /**
4470  * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
4471  * query bo.
4472  *
4473  * @dev_priv: The device private structure.
4474  *
4475  * This function should be used to unpin the pinned query bo, or
4476  * as a query barrier when we need to make sure that all queries have
4477  * finished before the next fifo command. (For example on hardware
4478  * context destructions where the hardware may otherwise leak unfinished
4479  * queries).
4480  *
4481  * This function does not return any failure codes, but makes attempts
4482  * to unpin safely in case of errors.
4483  *
4484  * The function will synchronize on the previous query barrier, and will
4485  * thus not finish until that barrier has executed.
4486  */
4487 void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
4488 {
4489         mutex_lock(&dev_priv->cmdbuf_mutex);
4490         if (dev_priv->query_cid_valid)
4491                 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4492         mutex_unlock(&dev_priv->cmdbuf_mutex);
4493 }
4494
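/**
 * vmw_execbuf_ioctl - The DRM_VMW_EXECBUF ioctl entry point.
 *
 * @dev: Pointer to the drm device.
 * @data: The address of the ioctl argument as an unsigned long.
 * @file_priv: Pointer to the drm file private structure.
 * @size: The size of the ioctl argument as declared by user-space.
 *
 * Copies in the versioned argument, optionally waits on an imported fence
 * fd and hands the batch to vmw_execbuf_process() under the read side of
 * the reservation semaphore.
 */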
4495 int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
4496                       struct drm_file *file_priv, size_t size)
4497 {
4498         struct vmw_private *dev_priv = vmw_priv(dev);
4499         struct drm_vmw_execbuf_arg arg;
4500         int ret;
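        /*
         * Offsets into struct drm_vmw_execbuf_arg delimiting the part of
         * the argument that each version defines; only that much is copied
         * in from user-space.
         */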
4501         static const size_t copy_offset[] = {
4502                 offsetof(struct drm_vmw_execbuf_arg, context_handle),
4503                 sizeof(struct drm_vmw_execbuf_arg)};
4504         struct dma_fence *in_fence = NULL;
4505
4506         if (unlikely(size < copy_offset[0])) {
4507                 DRM_ERROR("Invalid command size, ioctl %d\n",
4508                           DRM_VMW_EXECBUF);
4509                 return -EINVAL;
4510         }
4511
4512         if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
4513                 return -EFAULT;
4514
4515         /*
4516          * Extend the ioctl argument while maintaining backwards
4517          * compatibility: we take different code paths depending on
4518          * the value of arg.version.
4520          */
4521
4522         if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
4523                      arg.version == 0)) {
4524                 DRM_ERROR("Incorrect execbuf version.\n");
4525                 return -EINVAL;
4526         }
4527
4528         if (arg.version > 1 &&
4529             copy_from_user(&arg.context_handle,
4530                            (void __user *) (data + copy_offset[0]),
4531                            copy_offset[arg.version - 1] -
4532                            copy_offset[0]) != 0)
4533                 return -EFAULT;
4534
4535         switch (arg.version) {
4536         case 1:
4537                 arg.context_handle = (uint32_t) -1;
4538                 break;
4539         case 2:
4540         default:
4541                 break;
4542         }
4543
4545         /* If imported a fence FD from elsewhere, then wait on it */
4546         if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
4547                 in_fence = sync_file_get_fence(arg.imported_fence_fd);
4548
4549                 if (!in_fence) {
4550                         DRM_ERROR("Cannot get imported fence\n");
4551                         return -EINVAL;
4552                 }
4553
4554                 ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
4555                 if (ret)
4556                         goto out;
4557         }
4558
4559         ret = ttm_read_lock(&dev_priv->reservation_sem, true);
4560         if (unlikely(ret != 0))
4561                 return ret;
4562
4563         ret = vmw_execbuf_process(file_priv, dev_priv,
4564                                   (void __user *)(unsigned long)arg.commands,
4565                                   NULL, arg.command_size, arg.throttle_us,
4566                                   arg.context_handle,
4567                                   (void __user *)(unsigned long)arg.fence_rep,
4568                                   NULL,
4569                                   arg.flags);
4570         ttm_read_unlock(&dev_priv->reservation_sem);
4571         if (unlikely(ret != 0))
4572                 goto out;
4573
4574         vmw_kms_cursor_post_execbuf(dev_priv);
4575
4576 out:
4577         if (in_fence)
4578                 dma_fence_put(in_fence);
4579         return ret;
4580 }
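
/*
 * A minimal user-space sketch of driving this ioctl, assuming libdrm's
 * drmCommandWrite() and the UAPI in include/uapi/drm/vmwgfx_drm.h; command
 * buffer contents and device fd setup are omitted:
 *
 *	struct drm_vmw_fence_rep fence_rep;
 *	struct drm_vmw_execbuf_arg arg = {
 *		.commands = (unsigned long) cmd_buf,
 *		.command_size = cmd_size,
 *		.throttle_us = 0,
 *		.fence_rep = (unsigned long) &fence_rep,
 *		.version = DRM_VMW_EXECBUF_VERSION,
 *		.context_handle = (uint32_t) -1,  // no DX context
 *	};
 *	int ret = drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 */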