/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/export.h>

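/*
 * Unreserve all reserved buffers on @list and return any buffer that
 * ttm_eu_del_from_lru_locked() removed back to its LRU list.
 * Caller must hold the global LRU lock.
 */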
static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (!entry->reserved)
			continue;

		entry->reserved = false;
		if (entry->removed) {
			ttm_bo_add_to_lru(bo);
			entry->removed = false;
		}
		__ttm_bo_unreserve(bo);
	}
}

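/*
 * Remove all reserved buffers on @list from the LRU lists so they cannot
 * be picked for eviction while validation is in progress. The LRU list
 * reference counts returned by ttm_bo_del_from_lru() are recorded in
 * put_count and dropped later by ttm_eu_list_ref_sub().
 * Caller must hold the global LRU lock.
 */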
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (!entry->reserved)
			continue;

		if (!entry->removed) {
			entry->put_count = ttm_bo_del_from_lru(bo);
			entry->removed = true;
		}
	}
}

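/*
 * Drop the LRU list references that ttm_eu_del_from_lru_locked()
 * recorded in put_count for each buffer, and reset the counts.
 */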
static void ttm_eu_list_ref_sub(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->put_count) {
			ttm_bo_list_ref_sub(bo, entry->put_count, true);
			entry->put_count = 0;
		}
	}
}

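/**
 * ttm_eu_backoff_reservation - undo a set of buffer reservations
 *
 * @ticket: ww_acquire_ctx used for the reservations, or NULL
 * @list:   list of struct ttm_validate_buffer
 *
 * Unreserves all reserved buffers on @list, returns them to their LRU
 * lists and, if @ticket is non-NULL, ends the acquire context.
 */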
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;
	spin_lock(&glob->lru_lock);
	ttm_eu_backoff_reservation_locked(list);
	if (ticket)
		ww_acquire_fini(ticket);
	spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off with
 * -EBUSY so the caller can wait for that buffer to become free for
 * GPU access.
 *
 * If a buffer is reserved by another validator, the validator with the
 * younger ticket (higher acquire stamp) gets -EDEADLK and backs off:
 * it drops all of its reservations, waits for the contended buffer to
 * become unreserved and then starts over. This prevents deadlocks when
 * validating multiple buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	list_for_each_entry(entry, list, head) {
		entry->reserved = false;
		entry->put_count = 0;
		entry->removed = false;
	}

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);
retry:
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		/* already slowpath reserved? */
		if (entry->reserved)
			continue;

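		/*
		 * Reserve interruptibly. Without a ticket there is no
		 * deadlock arbitration, so fall back to a trylock
		 * (no_wait == true) instead of blocking.
		 */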
		ret = __ttm_bo_reserve(bo, true, (ticket == NULL), true,
				       ticket);

		if (ret == -EDEADLK) {
			/* uh oh, we lost out, drop every reservation and try
			 * to only reserve this buffer, then start over if
			 * this succeeds.
			 */
			BUG_ON(ticket == NULL);
			spin_lock(&glob->lru_lock);
			ttm_eu_backoff_reservation_locked(list);
			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
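			/*
			 * Sleep waiting for the contended buffer with our
			 * ticket; now that all other reservations are
			 * dropped, this acquisition cannot deadlock.
			 */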
			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
							       ticket);
			if (unlikely(ret != 0)) {
				if (ret == -EINTR)
					ret = -ERESTARTSYS;
				goto err_fini;
			}

			entry->reserved = true;
			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
				ret = -EBUSY;
				goto err;
			}
			goto retry;
		} else if (ret)
			goto err;

		entry->reserved = true;
		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			ret = -EBUSY;
			goto err;
		}
	}

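	/*
	 * All buffers are now reserved; close the acquire phase so that
	 * no further ww_mutexes are taken with this ticket, then pull
	 * the buffers off the LRU lists.
	 */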
	if (ticket)
		ww_acquire_done(ticket);
	spin_lock(&glob->lru_lock);
	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);
	return 0;

err:
	spin_lock(&glob->lru_lock);
	ttm_eu_backoff_reservation_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);
err_fini:
	if (ticket) {
		ww_acquire_done(ticket);
		ww_acquire_fini(ticket);
	}
	return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

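/**
 * ttm_eu_fence_buffer_objects - fence and unreserve validated buffers
 *
 * @ticket:   ww_acquire_ctx used to reserve the buffers, or NULL
 * @list:     list of struct ttm_validate_buffer
 * @sync_obj: driver-specific sync object marking completion of the GPU
 *            commands that use the buffers
 *
 * Attaches a reference to @sync_obj to every buffer on @list, returns
 * the buffers to their LRU lists and unreserves them. The sync objects
 * previously attached to the buffers are unreferenced afterwards,
 * outside the locks.
 */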
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list, void *sync_obj)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

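	/*
	 * lru_lock protects the LRU lists, fence_lock protects
	 * bo->sync_obj; holding both makes fencing and unreserving
	 * appear atomic to concurrent eviction.
	 */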
	spin_lock(&glob->lru_lock);
	spin_lock(&bdev->fence_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		entry->old_sync_obj = bo->sync_obj;
		bo->sync_obj = driver->sync_obj_ref(sync_obj);
		ttm_bo_add_to_lru(bo);
		__ttm_bo_unreserve(bo);
		entry->reserved = false;
	}
	spin_unlock(&bdev->fence_lock);
	spin_unlock(&glob->lru_lock);
	if (ticket)
		ww_acquire_fini(ticket);

	list_for_each_entry(entry, list, head) {
		if (entry->old_sync_obj)
			driver->sync_obj_unref(&entry->old_sync_obj);
	}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
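
/*
 * Typical call sequence (a hedged sketch; the names exec_ctx,
 * driver_validate() and driver_fence_obj below are made up for
 * illustration and are not part of this API):
 *
 *	struct ww_acquire_ctx exec_ctx;
 *	LIST_HEAD(validate_list);	list of ttm_validate_buffer entries
 *
 *	ret = ttm_eu_reserve_buffers(&exec_ctx, &validate_list);
 *	if (ret)
 *		return ret;	nothing is left reserved on error
 *
 *	ret = driver_validate(&validate_list);	e.g. ttm_bo_validate() per buffer
 *	if (ret) {
 *		ttm_eu_backoff_reservation(&exec_ctx, &validate_list);
 *		return ret;
 *	}
 *
 *	submit the GPU commands, then fence and unreserve in one call:
 *	ttm_eu_fence_buffer_objects(&exec_ctx, &validate_list, driver_fence_obj);
 */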