/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "../i915_selftest.h"
#include "igt_flush_test.h"

#include "mock_context.h"

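/*
 * A "spinner" is the tiny GPU workload used by these selftests: a batch
 * buffer that reports a seqno to a scratch status page (hws) and then
 * loops on itself indefinitely, keeping the engine busy until the test
 * explicitly terminates it with spinner_end().
 */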
struct spinner {
	struct drm_i915_private *i915;
	struct drm_i915_gem_object *hws;
	struct drm_i915_gem_object *obj;
	u32 *batch;
	void *seqno;
};

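/*
 * Allocate the two backing objects for a spinner: one page for the status
 * writes (hws, mapped write-back) and one page for the looping batch (obj,
 * mapped WB or WC depending on whether the platform has an LLC). Only
 * gen8+ is supported, as enforced by the GEM_BUG_ON.
 */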
static int spinner_init(struct spinner *spin, struct drm_i915_private *i915)
{
	unsigned int mode;
	void *vaddr;
	int err;

	GEM_BUG_ON(INTEL_GEN(i915) < 8);

	memset(spin, 0, sizeof(*spin));
	spin->i915 = i915;

	spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(spin->hws)) {
		err = PTR_ERR(spin->hws);
		goto err;
	}

	spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(spin->obj)) {
		err = PTR_ERR(spin->obj);
		goto err_hws;
	}

	i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);

	mode = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
	vaddr = i915_gem_object_pin_map(spin->obj, mode);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	spin->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(spin->hws);
err_obj:
	i915_gem_object_put(spin->obj);
err_hws:
	i915_gem_object_put(spin->hws);
err:
	return err;
}

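/*
 * Each spinning request writes its seqno at an offset derived from its
 * fence context, so multiple contexts can share the same status page
 * without overwriting each other's reports.
 */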
static unsigned int seqno_offset(u64 fence)
{
	return offset_in_page(sizeof(u32) * fence);
}

static u64 hws_address(const struct i915_vma *hws,
		       const struct i915_request *rq)
{
	return hws->node.start + seqno_offset(rq->fence.context);
}

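/*
 * Build and submit the spinning batch for @rq. The batch stores the
 * request's seqno into the status page, executes the caller-supplied
 * arbitration command (MI_ARB_CHECK to provide a point at which the batch
 * may be preempted, MI_NOOP to provide none) and then branches back to its
 * own start with MI_BATCH_BUFFER_START, so it runs forever until
 * spinner_end() overwrites the first dword with MI_BATCH_BUFFER_END.
 */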
static int emit_recurse_batch(struct spinner *spin,
			      struct i915_request *rq,
			      u32 arbitration_command)
{
	struct i915_address_space *vm = &rq->ctx->ppgtt->base;
	struct i915_vma *hws, *vma;
	u32 *batch;
	int err;

	vma = i915_vma_instance(spin->obj, vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	hws = i915_vma_instance(spin->hws, vm, NULL);
	if (IS_ERR(hws))
		return PTR_ERR(hws);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return err;

	err = i915_vma_pin(hws, 0, 0, PIN_USER);
	if (err)
		goto unpin_vma;

	i915_vma_move_to_active(vma, rq, 0);
	if (!i915_gem_object_has_active_reference(vma->obj)) {
		i915_gem_object_get(vma->obj);
		i915_gem_object_set_active_reference(vma->obj);
	}

	i915_vma_move_to_active(hws, rq, 0);
	if (!i915_gem_object_has_active_reference(hws->obj)) {
		i915_gem_object_get(hws->obj);
		i915_gem_object_set_active_reference(hws->obj);
	}

	batch = spin->batch;

	*batch++ = MI_STORE_DWORD_IMM_GEN4;
	*batch++ = lower_32_bits(hws_address(hws, rq));
	*batch++ = upper_32_bits(hws_address(hws, rq));
	*batch++ = rq->fence.seqno;

	*batch++ = arbitration_command;

	*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
	*batch++ = lower_32_bits(vma->node.start);
	*batch++ = upper_32_bits(vma->node.start);
	*batch++ = MI_BATCH_BUFFER_END; /* not reached */

	i915_gem_chipset_flush(spin->i915);

	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);

	i915_vma_unpin(hws);
unpin_vma:
	i915_vma_unpin(vma);
	return err;
}

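/*
 * Allocate a request on @engine for @ctx and attach the spinning batch to
 * it. If building the batch fails, the partially constructed request is
 * still added so that it can be retired rather than leaked.
 */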
static struct i915_request *
spinner_create_request(struct spinner *spin,
		       struct i915_gem_context *ctx,
		       struct intel_engine_cs *engine,
		       u32 arbitration_command)
{
	struct i915_request *rq;
	int err;

	rq = i915_request_alloc(engine, ctx);
	if (IS_ERR(rq))
		return rq;

	err = emit_recurse_batch(spin, rq, arbitration_command);
	if (err) {
		__i915_request_add(rq, false);
		return ERR_PTR(err);
	}

	return rq;
}

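/* Read back the seqno the spinner has (or has not yet) written to the hws. */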
static u32 hws_seqno(const struct spinner *spin, const struct i915_request *rq)
{
	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

	return READ_ONCE(*seqno);
}

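/*
 * Terminate a spinner by turning the first instruction of its batch into
 * MI_BATCH_BUFFER_END; the looping batch picks this up on its next pass
 * and completes.
 */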
static void spinner_end(struct spinner *spin)
{
	*spin->batch = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(spin->i915);
}

static void spinner_fini(struct spinner *spin)
{
	spinner_end(spin);

	i915_gem_object_unpin_map(spin->obj);
	i915_gem_object_put(spin->obj);

	i915_gem_object_unpin_map(spin->hws);
	i915_gem_object_put(spin->hws);
}

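/*
 * Wait (briefly) for the request to reach the hardware and then for the
 * spinning batch to report its seqno to the status page. Returns true only
 * if the spinner is confirmed to be executing.
 */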
static bool wait_for_spinner(struct spinner *spin, struct i915_request *rq)
{
	if (!wait_event_timeout(rq->execute,
				READ_ONCE(rq->global_seqno),
				msecs_to_jiffies(10)))
		return false;

	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
					       rq->fence.seqno),
			     10) &&
		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
					    rq->fence.seqno),
			  1000));
}

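/*
 * live_sanitycheck: submit a single spinner on every engine and check that
 * it starts executing, i.e. that basic execlists submission works at all,
 * before terminating it and flushing the engine.
 */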
static int live_sanitycheck(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	struct spinner spin;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_CONTEXTS(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);

	if (spinner_init(&spin, i915))
		goto err_unlock;

	ctx = kernel_context(i915);
	if (!ctx)
		goto err_spin;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = spinner_create_request(&spin, ctx, engine, MI_NOOP);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx;
		}

		i915_request_add(rq);
		if (!wait_for_spinner(&spin, rq)) {
			GEM_TRACE("spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx;
		}

		spinner_end(&spin);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx;
		}
	}

	err = 0;
err_ctx:
	kernel_context_close(ctx);
err_spin:
	spinner_fini(&spin);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

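/*
 * live_preempt: one context is created at maximum user priority and one at
 * minimum. A low priority spinner (with an MI_ARB_CHECK arbitration point)
 * is started on each engine, then a high priority spinner is submitted;
 * the test passes only if the high priority request starts running while
 * the low priority batch is still spinning, i.e. it was preempted.
 */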
static int live_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);

	if (spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;
	ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
					    MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!wait_for_spinner(&spin_lo, rq)) {
			GEM_TRACE("lo spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = spinner_create_request(&spin_hi, ctx_hi, engine,
					    MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!wait_for_spinner(&spin_hi, rq)) {
			GEM_TRACE("hi spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		spinner_end(&spin_hi);
		spinner_end(&spin_lo);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	spinner_fini(&spin_lo);
err_spin_hi:
	spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

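/*
 * live_late_preempt: both contexts start at the default priority, so the
 * second spinner must not overtake the first. The test then raises the
 * priority of the queued high priority request via engine->schedule() and
 * expects the scheduler to preempt the still-spinning low priority batch
 * after the fact.
 */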
static int live_late_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	struct i915_sched_attr attr = {};
	enum intel_engine_id id;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);

	if (spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
					    MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!wait_for_spinner(&spin_lo, rq)) {
			pr_err("First context failed to start\n");
			goto err_wedged;
		}

		rq = spinner_create_request(&spin_hi, ctx_hi, engine, MI_NOOP);
		if (IS_ERR(rq)) {
			spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (wait_for_spinner(&spin_hi, rq)) {
			pr_err("Second context overtook first?\n");
			goto err_wedged;
		}

		attr.priority = I915_PRIORITY_MAX;
		engine->schedule(rq, &attr);

		if (!wait_for_spinner(&spin_hi, rq)) {
			pr_err("High priority context failed to preempt the low priority context\n");
			GEM_TRACE_DUMP();
			goto err_wedged;
		}

		spinner_end(&spin_hi);
		spinner_end(&spin_lo);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	spinner_fini(&spin_lo);
err_spin_hi:
	spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	spinner_end(&spin_hi);
	spinner_end(&spin_lo);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_ctx_lo;
}

int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_sanitycheck),
		SUBTEST(live_preempt),
		SUBTEST(live_late_preempt),
	};

	if (!HAS_EXECLISTS(i915))
		return 0;

	return i915_subtests(tests, i915);
}