/*	$NetBSD: vmwgfx_fence.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $	*/

// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2011-2014 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vmwgfx_fence.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $");

#include <linux/sched/signal.h>

#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)

struct vmw_fence_manager {
	int num_fence_objects;
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work;
	u32 user_fence_size;
	u32 fence_size;
	u32 event_fence_action_size;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
	u64 ctx;
};

struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @event: A struct drm_pending_event that controls the event delivery.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event stuff.
 * @tv_sec: If non-null, the variable pointed to will be assigned the
 * current time tv_sec value when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec value when the fence signals.
 */
struct vmw_event_fence_action {
	struct vmw_fence_action action;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};

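/*
 * fman_from_fence - Return the fence manager a fence was created on.
 *
 * dma_fence_init() borrows the manager's spinlock for the fence base
 * object, so the manager can be recovered from the lock pointer.
 */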
static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}

/**
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE
 * irq is received. When the last fence waiter is gone, that IRQ is masked
 * away.
 *
 * In situations where there are no waiters and we don't submit any new fences,
 * fence objects may not be signaled. This is perfectly OK, since there are
 * no consumers of the signaled data, but that is NOT ok when there are fence
 * actions attached to a fence. The fencing subsystem then makes use of the
 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
 * which has an action attached, and each time vmw_fences_update is called,
 * the subsystem makes sure the fence goal seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */

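/*
 * vmw_fence_obj_destroy - dma_fence release callback.
 *
 * Unlinks the fence from the manager's fence list and hands the
 * final teardown to the fence's destroy function.
 */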
static void vmw_fence_obj_destroy(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);

	spin_lock(&fman->lock);
	list_del_init(&fence->head);
	--fman->num_fence_objects;
	spin_unlock(&fman->lock);
	fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
	return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
	return "svga";
}

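/*
 * vmw_fence_enable_signaling - dma_fence enable_signaling callback.
 *
 * Returns false if the fence seqno has already passed on the device.
 * Otherwise pings the host to make sure the fifo is processed, so that
 * the fence will eventually be signaled from irq context.
 */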
static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;

	u32 *fifo_mem = dev_priv->mmio_virt;
	u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
		return false;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

	return true;
}

struct vmwgfx_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

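/* Fence callback: wake the task that is sleeping in vmw_fence_wait(). */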
static void
vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct vmwgfx_wait_cb *wait =
		container_of(cb, struct vmwgfx_wait_cb, base);

	wake_up_process(wait->task);
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);

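/*
 * vmw_fence_wait - dma_fence wait callback.
 *
 * Open-codes the wait so that __vmw_fences_update() can run under the
 * fence spinlock on each wakeup; the device only delivers fence irqs
 * while seqno waiters are registered.
 */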
static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
	struct vmwgfx_wait_cb cb;
	long ret = timeout;

	if (likely(vmw_fence_obj_signaled(fence)))
		return timeout;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	vmw_seqno_waiter_add(dev_priv);

	spin_lock(f->lock);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
		goto out;

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	cb.base.func = vmwgfx_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &f->cb_list);

	for (;;) {
		__vmw_fences_update(fman);

		/*
		 * We can use the barrier free __set_current_state() since
		 * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
		 * fence spinlock.
		 */
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
			if (ret == 0 && timeout > 0)
				ret = 1;
			break;
		}

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (ret == 0)
			break;

		spin_unlock(f->lock);

		ret = schedule_timeout(ret);

		spin_lock(f->lock);
	}
	__set_current_state(TASK_RUNNING);
	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);

out:
	spin_unlock(f->lock);

	vmw_seqno_waiter_remove(dev_priv);

	return ret;
}

static const struct dma_fence_ops vmw_fence_ops = {
	.get_driver_name = vmw_fence_get_driver_name,
	.get_timeline_name = vmw_fence_get_timeline_name,
	.enable_signaling = vmw_fence_enable_signaling,
	.wait = vmw_fence_wait,
	.release = vmw_fence_obj_destroy,
};


/**
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */

static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we should be able to manipulate the
		 * list heads of the actions we have on the private list,
		 * hence fman->lock need not be held.
		 */

		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}

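/*
 * vmw_fence_manager_init - Allocate and initialize a fence manager.
 *
 * Returns NULL on allocation failure. The manager starts with the
 * fifo marked down, so vmw_fence_fifo_up() must be called before
 * fence objects can be created.
 */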
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(!fman))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	fman->fifo_down = true;
	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) +
		TTM_OBJ_EXTRA_SIZE;
	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
	fman->event_fence_action_size =
		ttm_round_pot(sizeof(struct vmw_event_fence_action));
	mutex_init(&fman->goal_irq_mutex);
	fman->ctx = dma_fence_context_alloc(1);

	return fman;
}

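/*
 * vmw_fence_manager_takedown - Tear down and free a fence manager.
 *
 * By the time this is called, all fences must have signaled and all
 * cleanup work must have run; both lists are required to be empty.
 */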
void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);

	spin_lock(&fman->lock);
	lists_empty = list_empty(&fman->fence_list) &&
		      list_empty(&fman->cleanup_list);
	spin_unlock(&fman->lock);

	BUG_ON(!lists_empty);
	kfree(fman);
}

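/*
 * vmw_fence_obj_init - Initialize a fence and link it into the manager.
 *
 * Fails with -EBUSY while the fifo is down, since the fence could
 * never signal in that state.
 */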
static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence, u32 seqno,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	int ret = 0;

	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
		       fman->ctx, seqno);
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->destroy = destroy;

	spin_lock(&fman->lock);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);
	++fman->num_fence_objects;

out_unlock:
	spin_unlock(&fman->lock);
	return ret;
}

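/*
 * vmw_fences_perform_actions - Run seq_passed callbacks and queue cleanup.
 *
 * Called with the fence manager lock held. Each action's seq_passed
 * callback runs immediately; the action is then moved to the cleanup
 * list so its destructor can run from the worker, outside atomic
 * context.
 */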
static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				       struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */

		list_add_tail(&action->head, &fman->cleanup_list);
	}
}

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * Returns true if the device goal seqno was updated, false otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	u32 *fifo_mem;
	struct vmw_fence_obj *fence;

	if (likely(!fman->seqno_valid))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			vmw_mmio_write(fence->base.seqno,
				       fifo_mem + SVGA_FIFO_FENCE_GOAL);
			break;
		}
	}

	return true;
}


/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * Returns true if the device goal seqno was updated, false otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	u32 goal_seqno;
	u32 *fifo_mem;

	if (dma_fence_is_signaled_locked(&fence->base))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

	vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
	fman->seqno_valid = true;

	return true;
}

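/*
 * __vmw_fences_update - Signal all fences passed by the device.
 *
 * Called with the fence manager lock held. Walks the fence list in
 * submission order, signals every fence whose seqno has passed, and
 * re-reads the device seqno if the fence goal was moved, to close the
 * race with a missed fence goal irq.
 */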
static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;
	u32 *fifo_mem = fman->dev_priv->mmio_virt;

	seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
rerun:
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			dma_fence_signal_locked(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		} else
			break;
	}

	/*
	 * Rerun if the fence goal seqno was updated, and the
	 * hardware might have raced with that update, so that
	 * we missed a fence_goal irq.
	 */

	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
	if (unlikely(needs_rerun)) {
		new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
}

void vmw_fences_update(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	__vmw_fences_update(fman);
	spin_unlock(&fman->lock);
}

bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
		return true;

	vmw_fences_update(fman);

	return dma_fence_is_signaled(&fence->base);
}

int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);

	if (likely(ret > 0))
		return 0;
	else if (ret == 0)
		return -EBUSY;
	else
		return ret;
}

void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
	struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	dma_fence_free(&fence->base);
}

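/*
 * vmw_fence_create - Allocate a kernel-only fence object for @seqno.
 *
 * Fails with -EBUSY while the fifo is down and with -ENOMEM on
 * allocation failure.
 */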
int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(!fence))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
	return ret;
}


static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	ttm_base_object_kfree(ufence, base);
	/*
	 * Free kernel space accounting.
	 */
	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
			    fman->user_fence_size);
}

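/*
 * vmw_user_fence_base_release - ttm base object destructor.
 *
 * Drops the fence reference held by the base object; when that was
 * the last reference, vmw_user_fence_destroy() frees the whole
 * vmw_user_fence.
 */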
static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}

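/*
 * vmw_user_fence_create - Create a fence with a user-space handle.
 *
 * Accounts the object against the global TTM memory limit, wraps the
 * fence in a ttm base object and returns both the fence and the
 * handle user-space will use to look it up.
 */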
int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	int ret;

	/*
	 * Kernel memory space accounting, since this object may
	 * be created by a user-space request.
	 */

	ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
				   &ctx);
	if (unlikely(ret != 0))
		return ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(!ufence)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);
	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release, NULL);

	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.handle;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	ttm_mem_global_free(mem_glob, fman->user_fence_size);
	return ret;
}


/**
 * vmw_wait_dma_fence - Wait for a dma fence
 *
 * @fman: pointer to a fence manager
 * @fence: DMA fence to wait on
 *
 * This function handles the case when the fence is actually a fence
 * array. If that's the case, it'll wait on each of the child fences.
 */
int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
		       struct dma_fence *fence)
{
	struct dma_fence_array *fence_array;
	int ret = 0;
	int i;

	if (dma_fence_is_signaled(fence))
		return 0;

	if (!dma_fence_is_array(fence))
		return dma_fence_wait(fence, true);

	/* From i915: Note that if the fence-array was created in
	 * signal-on-any mode, we should *not* decompose it into its individual
	 * fences. However, we don't currently store which mode the fence-array
	 * is operating in. Fortunately, the only user of signal-on-any is
	 * private to amdgpu and we should not see any incoming fence-array
	 * from sync-file being in signal-on-any mode.
	 */

	fence_array = to_dma_fence_array(fence);
	for (i = 0; i < fence_array->num_fences; i++) {
		struct dma_fence *child = fence_array->fences[i];

		ret = dma_fence_wait(child, true);

		if (ret < 0)
			return ret;
	}

	return 0;
}


/**
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 */

void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */

	spin_lock(&fman->lock);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		dma_fence_get(&fence->base);
		spin_unlock(&fman->lock);

		ret = vmw_fence_obj_wait(fence, false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			dma_fence_signal(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		}

		BUG_ON(!list_empty(&fence->head));
		dma_fence_put(&fence->base);
		spin_lock(&fman->lock);
	}
	spin_unlock(&fman->lock);
}

void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	fman->fifo_down = false;
	spin_unlock(&fman->lock);
}


/**
 * vmw_fence_obj_lookup - Look up a user-space fence object
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @handle: A handle identifying the fence object.
 * @return: A struct vmw_user_fence base ttm object on success or
 * an error pointer on failure.
 *
 * The fence object is looked up and type-checked. The caller needs
 * to have opened the fence object first, but since that happens on
 * creation and fence objects aren't shareable, that's not an
 * issue currently.
 */
static struct ttm_base_object *
vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

	if (!base) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	if (base->refcount_release != vmw_user_fence_base_release) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		ttm_base_object_unref(&base);
		return ERR_PTR(-EINVAL);
	}

	return base;
}


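/*
 * vmw_fence_obj_wait_ioctl - Wait for a user fence to signal.
 *
 * The timeout is anchored to a cookie stored in the argument on the
 * first call, so a restarted ioctl does not wait longer than the
 * caller originally asked for.
 */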
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
		(struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division not present on 32-bit systems, so do an
	 * approximation. (Divide by 1000000).
	 */

	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
		(wait_timeout >> 26);

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */

	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle,
						 TTM_REF_USAGE);
	return ret;
}

int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fman_from_fence(fence);

	arg->signaled = vmw_fence_obj_signaled(fence);

	arg->signaled_flags = arg->flags;
	spin_lock(&fman->lock);
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}


int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

/**
 * vmw_event_fence_action_seq_passed
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_pending_event *event = eaction->event;

	if (unlikely(event == NULL))
		return;

	spin_lock_irq(&dev->event_lock);

	if (likely(eaction->tv_sec != NULL)) {
		struct timespec64 ts;

		ktime_get_ts64(&ts);
		/* monotonic time, so no y2038 overflow */
		*eaction->tv_sec = ts.tv_sec;
		*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	}

	drm_send_event_locked(dev, eaction->event);
	eaction->event = NULL;
	spin_unlock_irq(&dev->event_lock);
}

/**
 * vmw_event_fence_action_cleanup
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);

	vmw_fence_obj_unreference(&eaction->fence);
	kfree(eaction);
}


/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
				     struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock(&fman->lock);

	fman->pending_actions[action->type]++;
	if (dma_fence_is_signaled_locked(&fence->base)) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock(&fman->lock);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);
}

/**
 * vmw_event_fence_action_queue - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been allocated
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-null, the variable pointed to will be assigned the
 * event time tv_sec value when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set; the variable pointed to will
 * be assigned the event time tv_usec value when the fence signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */

int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(!eaction))
		return -ENOMEM;

	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = fman->dev_priv->dev;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}

struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};

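/*
 * vmw_event_fence_action_create - Allocate a DRM pending event and
 * queue it for delivery when @fence signals.
 *
 * On error the pending event is cancelled and freed here; the caller
 * only needs to drop its own references.
 */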
static int vmw_event_fence_action_create(struct drm_file *file_priv,
					 struct vmw_fence_obj *fence,
					 uint32_t flags,
					 uint64_t user_data,
					 bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = fman->dev_priv->dev;
	int ret;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(!event)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_space;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(*event);
	event->event.user_data = user_data;

	ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		kfree(event);
		goto out_no_space;
	}

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	drm_event_cancel_free(dev, &event->base);
out_no_space:
	return ret;
}

int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct ttm_object_file *tfile = vmw_fp->tfile;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object,
	 * and if user-space wants a new reference,
	 * add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			vmw_fence_obj_lookup(tfile, arg->handle);

		if (IS_ERR(base))
			return PTR_ERR(base);

		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 TTM_REF_USAGE, NULL, false);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->handle;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle, -1, NULL);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}