/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

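/*
 * Undo partially completed reservations: walk the list backwards from
 * @entry (not including @entry itself) and unreserve every buffer that
 * was already reserved, so a failed reservation pass can be unwound.
 */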
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
                                              struct ttm_validate_buffer *entry)
{
        list_for_each_entry_continue_reverse(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

                __ttm_bo_unreserve(bo);
        }
}

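/* Remove every buffer on the list from the LRU; the caller holds lru_lock. */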
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
        struct ttm_validate_buffer *entry;

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;
                ttm_bo_del_from_lru(bo);
        }
}

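/*
 * Undo a successful ttm_eu_reserve_buffers() call: put every buffer back
 * on the LRU, drop its reservation and, if a ticket was used, finish the
 * ww_acquire context.
 */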
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
                                struct list_head *list)
{
        struct ttm_validate_buffer *entry;
        struct ttm_bo_global *glob;

        if (list_empty(list))
                return;

        entry = list_first_entry(list, struct ttm_validate_buffer, head);
        glob = entry->bo->glob;

        spin_lock(&glob->lru_lock);
        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

                ttm_bo_add_to_lru(bo);
                __ttm_bo_unreserve(bo);
        }
        spin_unlock(&glob->lru_lock);

        if (ticket)
                ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
                           struct list_head *list, bool intr,
                           struct list_head *dups)
{
        struct ttm_bo_global *glob;
        struct ttm_validate_buffer *entry;
        int ret;

        if (list_empty(list))
                return 0;

        entry = list_first_entry(list, struct ttm_validate_buffer, head);
        glob = entry->bo->glob;

        if (ticket)
                ww_acquire_init(ticket, &reservation_ww_class);

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

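                /*
                 * Try to reserve the buffer; a buffer with pending CPU
                 * access (cpu_writers > 0) is treated as busy and takes
                 * the back-off path below.
                 */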
                ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
                if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
                        __ttm_bo_unreserve(bo);

                        ret = -EBUSY;

                } else if (ret == -EALREADY && dups) {
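                        /*
                         * -EALREADY: the buffer is already reserved under
                         * this ticket (a duplicate entry); park it on the
                         * dups list and continue with the next buffer.
                         */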
                        struct ttm_validate_buffer *safe = entry;
                        entry = list_prev_entry(entry, head);
                        list_del(&safe->head);
                        list_add(&safe->head, dups);
                        continue;
                }

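                /*
                 * Reservation succeeded; for shared (read) access also make
                 * sure a shared-fence slot is available for the fence that
                 * ttm_eu_fence_buffer_objects() will add later.
                 */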
                if (!ret) {
                        if (!entry->shared)
                                continue;

                        ret = reservation_object_reserve_shared(bo->resv);
                        if (!ret)
                                continue;
                }

                /* uh oh, we lost out, drop every reservation and try
                 * to only reserve this buffer, then start over if
                 * this succeeds.
                 */
                ttm_eu_backoff_reservation_reverse(list, entry);

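                /*
                 * -EDEADLK from the ww_mutex means another ticket won the
                 * ordering; all our reservations are dropped above, so
                 * acquire this buffer's lock via the slow path before
                 * restarting the loop.
                 */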
                if (ret == -EDEADLK && intr) {
                        ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
                                                               ticket);
                } else if (ret == -EDEADLK) {
                        ww_mutex_lock_slow(&bo->resv->lock, ticket);
                        ret = 0;
                }

                if (!ret && entry->shared)
                        ret = reservation_object_reserve_shared(bo->resv);

                if (unlikely(ret != 0)) {
                        if (ret == -EINTR)
                                ret = -ERESTARTSYS;
                        if (ticket) {
                                ww_acquire_done(ticket);
                                ww_acquire_fini(ticket);
                        }
                        return ret;
                }

                /* move this item to the front of the list,
                 * forces correct iteration of the loop without keeping track
                 */
                list_del(&entry->head);
                list_add(&entry->head, list);
        }

        if (ticket)
                ww_acquire_done(ticket);
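        /*
         * All buffers are reserved; take them off the LRU so eviction
         * cannot pick them while they are in use.
         */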
        spin_lock(&glob->lru_lock);
        ttm_eu_del_from_lru_locked(list);
        spin_unlock(&glob->lru_lock);
        return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

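/*
 * Attach @fence to every reserved buffer (as a shared or exclusive fence
 * depending on the requested access), put the buffers back on the LRU and
 * drop the reservations and the acquire ticket.
 */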
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
                                 struct list_head *list,
                                 struct dma_fence *fence)
{
        struct ttm_validate_buffer *entry;
        struct ttm_buffer_object *bo;
        struct ttm_bo_global *glob;
        struct ttm_bo_device *bdev;
        struct ttm_bo_driver *driver;

        if (list_empty(list))
                return;

        bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
        bdev = bo->bdev;
        driver = bdev->driver;
        glob = bo->glob;

        spin_lock(&glob->lru_lock);

        list_for_each_entry(entry, list, head) {
                bo = entry->bo;
                if (entry->shared)
                        reservation_object_add_shared_fence(bo->resv, fence);
                else
                        reservation_object_add_excl_fence(bo->resv, fence);
                ttm_bo_add_to_lru(bo);
                __ttm_bo_unreserve(bo);
        }
        spin_unlock(&glob->lru_lock);
        if (ticket)
                ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
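
/*
 * Illustrative usage sketch (not part of the original file): roughly how a
 * TTM-based driver ties the three helpers above together on its execbuf
 * path.  Only the ttm_eu_*() calls, struct ttm_validate_buffer and the
 * list helpers are real APIs; bos[], num_bufs, MAX_BUFS, fence and
 * my_driver_validate() are hypothetical placeholders.
 *
 *      struct ww_acquire_ctx ticket;
 *      struct list_head list, dups;
 *      struct ttm_validate_buffer vbufs[MAX_BUFS];
 *      int ret, i;
 *
 *      INIT_LIST_HEAD(&list);
 *      INIT_LIST_HEAD(&dups);
 *      for (i = 0; i < num_bufs; ++i) {
 *              vbufs[i].bo = bos[i];
 *              vbufs[i].shared = true;         // read-only GPU access
 *              list_add_tail(&vbufs[i].head, &list);
 *      }
 *
 *      // Reserve all buffers, interruptibly, with deadlock avoidance.
 *      ret = ttm_eu_reserve_buffers(&ticket, &list, true, &dups);
 *      if (ret)
 *              return ret;
 *
 *      ret = my_driver_validate(&list);        // driver-specific placement
 *      if (ret) {
 *              // Failure: put the buffers back and drop the ticket.
 *              ttm_eu_backoff_reservation(&ticket, &list);
 *              return ret;
 *      }
 *
 *      // Success: attach the job's fence and release the reservations.
 *      ttm_eu_fence_buffer_objects(&ticket, &list, fence);
 *      return 0;
 */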