      1  1.14  riastrad /*	$NetBSD: i915_active.c,v 1.14 2022/03/16 23:32:52 riastradh Exp $	*/
      2   1.1  riastrad 
      3   1.1  riastrad /*
      4   1.1  riastrad  * SPDX-License-Identifier: MIT
      5   1.1  riastrad  *
      6   1.1  riastrad  * Copyright © 2019 Intel Corporation
      7   1.1  riastrad  */
      8   1.1  riastrad 
      9   1.1  riastrad #include <sys/cdefs.h>
     10  1.14  riastrad __KERNEL_RCSID(0, "$NetBSD: i915_active.c,v 1.14 2022/03/16 23:32:52 riastradh Exp $");
     11   1.1  riastrad 
     12   1.1  riastrad #include <linux/debugobjects.h>
     13   1.1  riastrad 
     14   1.1  riastrad #include "gt/intel_context.h"
     15   1.1  riastrad #include "gt/intel_engine_pm.h"
     16   1.1  riastrad #include "gt/intel_ring.h"
     17   1.1  riastrad 
     18   1.1  riastrad #include "i915_drv.h"
     19   1.1  riastrad #include "i915_active.h"
     20   1.1  riastrad #include "i915_globals.h"
     21   1.1  riastrad 
     22   1.4  riastrad #include <linux/nbsd-namespace.h>
     23   1.4  riastrad 
     24   1.1  riastrad /*
     25   1.1  riastrad  * Active refs memory management
     26   1.1  riastrad  *
     27   1.1  riastrad  * To be more economical with memory, we reap all the i915_active trees as
     28   1.1  riastrad  * they idle (when we know the active requests are inactive) and allocate the
     29   1.1  riastrad  * nodes from a local slab cache to hopefully reduce the fragmentation.
     30   1.1  riastrad  */
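                         /*
                          * Node lifecycle, for orientation: nodes are carved out of
                          * global.slab_cache in active_instance() and
                          * i915_active_acquire_preallocate_barrier(), and handed back to
                          * it in __active_retire() once the whole tree has idled.
                          */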
     31   1.1  riastrad static struct i915_global_active {
     32   1.1  riastrad 	struct i915_global base;
     33   1.1  riastrad 	struct kmem_cache *slab_cache;
     34   1.1  riastrad } global;
     35   1.1  riastrad 
     36   1.1  riastrad struct active_node {
     37   1.1  riastrad 	struct i915_active_fence base;
     38   1.1  riastrad 	struct i915_active *ref;
     39   1.1  riastrad 	struct rb_node node;
     40   1.1  riastrad 	u64 timeline;
     41   1.4  riastrad 	struct intel_engine_cs *engine;
     42   1.1  riastrad };
     43   1.1  riastrad 
     44   1.1  riastrad static inline struct active_node *
     45   1.1  riastrad node_from_active(struct i915_active_fence *active)
     46   1.1  riastrad {
     47   1.1  riastrad 	return container_of(active, struct active_node, base);
     48   1.1  riastrad }
     49   1.1  riastrad 
     50   1.1  riastrad #define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)
     51   1.1  riastrad 
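                         /*
                          * Barrier proto-nodes are marked by storing ERR_PTR(-EAGAIN) in
                          * their fence slot (see i915_active_acquire_preallocate_barrier());
                          * that is the IS_ERR() condition tested below.
                          */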
     52   1.1  riastrad static inline bool is_barrier(const struct i915_active_fence *active)
     53   1.1  riastrad {
     54   1.1  riastrad 	return IS_ERR(rcu_access_pointer(active->fence));
     55   1.1  riastrad }
     56   1.1  riastrad 
     57   1.1  riastrad static inline struct llist_node *barrier_to_ll(struct active_node *node)
     58   1.1  riastrad {
     59   1.1  riastrad 	GEM_BUG_ON(!is_barrier(&node->base));
     60   1.4  riastrad 	return &node->base.llist;
     61   1.1  riastrad }
     62   1.1  riastrad 
     63   1.1  riastrad static inline struct intel_engine_cs *
     64   1.1  riastrad __barrier_to_engine(struct active_node *node)
     65   1.1  riastrad {
     66   1.4  riastrad 	return READ_ONCE(node->engine);
     67   1.1  riastrad }
     68   1.1  riastrad 
     69   1.1  riastrad static inline struct intel_engine_cs *
     70   1.1  riastrad barrier_to_engine(struct active_node *node)
     71   1.1  riastrad {
     72   1.1  riastrad 	GEM_BUG_ON(!is_barrier(&node->base));
     73   1.1  riastrad 	return __barrier_to_engine(node);
     74   1.1  riastrad }
     75   1.1  riastrad 
     76   1.1  riastrad static inline struct active_node *barrier_from_ll(struct llist_node *x)
     77   1.1  riastrad {
     78   1.4  riastrad 	return container_of(x, struct active_node, base.llist);
     79   1.1  riastrad }
     80   1.1  riastrad 
     81   1.1  riastrad #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)
     82   1.1  riastrad 
     83   1.1  riastrad static void *active_debug_hint(void *addr)
     84   1.1  riastrad {
     85   1.1  riastrad 	struct i915_active *ref = addr;
     86   1.1  riastrad 
     87   1.1  riastrad 	return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
     88   1.1  riastrad }
     89   1.1  riastrad 
     90   1.1  riastrad static struct debug_obj_descr active_debug_desc = {
     91   1.1  riastrad 	.name = "i915_active",
     92   1.1  riastrad 	.debug_hint = active_debug_hint,
     93   1.1  riastrad };
     94   1.1  riastrad 
     95   1.1  riastrad static void debug_active_init(struct i915_active *ref)
     96   1.1  riastrad {
     97   1.1  riastrad 	debug_object_init(ref, &active_debug_desc);
     98   1.1  riastrad }
     99   1.1  riastrad 
    100   1.1  riastrad static void debug_active_activate(struct i915_active *ref)
    101   1.1  riastrad {
    102   1.1  riastrad 	lockdep_assert_held(&ref->tree_lock);
    103   1.1  riastrad 	if (!atomic_read(&ref->count)) /* before the first inc */
    104   1.1  riastrad 		debug_object_activate(ref, &active_debug_desc);
    105   1.1  riastrad }
    106   1.1  riastrad 
    107   1.1  riastrad static void debug_active_deactivate(struct i915_active *ref)
    108   1.1  riastrad {
    109   1.1  riastrad 	lockdep_assert_held(&ref->tree_lock);
    110   1.1  riastrad 	if (!atomic_read(&ref->count)) /* after the last dec */
    111   1.1  riastrad 		debug_object_deactivate(ref, &active_debug_desc);
    112   1.1  riastrad }
    113   1.1  riastrad 
    114   1.1  riastrad static void debug_active_fini(struct i915_active *ref)
    115   1.1  riastrad {
    116   1.1  riastrad 	debug_object_free(ref, &active_debug_desc);
    117   1.1  riastrad }
    118   1.1  riastrad 
    119   1.1  riastrad static void debug_active_assert(struct i915_active *ref)
    120   1.1  riastrad {
    121   1.1  riastrad 	debug_object_assert_init(ref, &active_debug_desc);
    122   1.1  riastrad }
    123   1.1  riastrad 
    124   1.1  riastrad #else
    125   1.1  riastrad 
    126   1.1  riastrad static inline void debug_active_init(struct i915_active *ref) { }
    127   1.1  riastrad static inline void debug_active_activate(struct i915_active *ref) { }
    128   1.1  riastrad static inline void debug_active_deactivate(struct i915_active *ref) { }
    129   1.1  riastrad static inline void debug_active_fini(struct i915_active *ref) { }
    130   1.1  riastrad static inline void debug_active_assert(struct i915_active *ref) { }
    131   1.1  riastrad 
    132   1.1  riastrad #endif
    133   1.1  riastrad 
    134   1.4  riastrad #ifdef __NetBSD__
    135   1.4  riastrad 
    136   1.4  riastrad static int
    137   1.4  riastrad compare_nodes(void *cookie, const void *va, const void *vb)
    138   1.4  riastrad {
    139   1.4  riastrad 	const struct active_node *a = va;
    140   1.4  riastrad 	const struct active_node *b = vb;
    141   1.4  riastrad 
    142   1.4  riastrad 	if (a->timeline < b->timeline)
    143   1.4  riastrad 		return -1;
    144   1.4  riastrad 	if (a->timeline > b->timeline)
    145   1.4  riastrad 		return +1;
    146   1.9  riastrad 	if ((uintptr_t)a < (uintptr_t)b)
    147   1.9  riastrad 		return -1;
    148   1.9  riastrad 	if ((uintptr_t)a > (uintptr_t)b)
    149   1.9  riastrad 		return +1;
    150   1.4  riastrad 	return 0;
    151   1.4  riastrad }
    152   1.4  riastrad 
    153   1.4  riastrad static int
    154   1.4  riastrad compare_node_key(void *cookie, const void *vn, const void *vk)
    155   1.4  riastrad {
    156   1.4  riastrad 	const struct active_node *a = vn;
    157   1.4  riastrad 	const uint64_t *k = vk;
    158   1.4  riastrad 
    159   1.4  riastrad 	if (a->timeline < *k)
    160   1.4  riastrad 		return -1;
    161   1.4  riastrad 	if (a->timeline > *k)
    162   1.4  riastrad 		return +1;
    163   1.4  riastrad 	return 0;
    164   1.4  riastrad }
    165   1.4  riastrad 
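                         /*
                          * The tree is keyed on the timeline (fence context), with the
                          * node address as a tie-breaker, so several nodes (e.g. idle
                          * barriers for the kernel_context) may share a timeline;
                          * compare_node_key() matches any node carrying the requested
                          * timeline.
                          */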
    166   1.4  riastrad static const rb_tree_ops_t active_rb_ops = {
    167   1.4  riastrad 	.rbto_compare_nodes = compare_nodes,
    168   1.4  riastrad 	.rbto_compare_key = compare_node_key,
    169   1.4  riastrad 	.rbto_node_offset = offsetof(struct active_node, node),
    170   1.4  riastrad };
    171   1.4  riastrad 
    172   1.4  riastrad #endif
    173   1.4  riastrad 
    174   1.1  riastrad static void
    175   1.1  riastrad __active_retire(struct i915_active *ref)
    176   1.1  riastrad {
    177   1.1  riastrad 	struct active_node *it, *n;
    178   1.1  riastrad 	struct rb_root root;
    179   1.1  riastrad 	unsigned long flags;
    180   1.1  riastrad 
    181   1.1  riastrad 	GEM_BUG_ON(i915_active_is_idle(ref));
    182   1.1  riastrad 
    183   1.1  riastrad 	/* return the unused nodes to our slabcache -- flushing the allocator */
    184   1.1  riastrad 	if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
    185   1.1  riastrad 		return;
    186   1.1  riastrad 
    187   1.1  riastrad 	GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
    188   1.1  riastrad 	debug_active_deactivate(ref);
    189   1.1  riastrad 
    190   1.4  riastrad #ifdef __NetBSD__
    191  1.13  riastrad 	rb_move(&root, &ref->tree);
    192   1.4  riastrad 	rb_tree_init(&ref->tree.rbr_tree, &active_rb_ops);
    193   1.4  riastrad #else
    194  1.13  riastrad 	root = ref->tree;
    195   1.1  riastrad 	ref->tree = RB_ROOT;
    196   1.4  riastrad #endif
    197   1.1  riastrad 	ref->cache = NULL;
    198   1.1  riastrad 
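                         	/* Wake any i915_active_wait() callers sleeping on tree_wq. */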
    199  1.12  riastrad 	DRM_SPIN_WAKEUP_ALL(&ref->tree_wq, &ref->tree_lock);
    200  1.12  riastrad 
    201   1.1  riastrad 	spin_unlock_irqrestore(&ref->tree_lock, flags);
    202   1.1  riastrad 
    203   1.1  riastrad 	/* After the final retire, the entire struct may be freed */
    204   1.1  riastrad 	if (ref->retire)
    205   1.1  riastrad 		ref->retire(ref);
    206   1.1  riastrad 
    207   1.1  riastrad 	/* ... except if you wait on it, you must manage your own references! */
    208   1.1  riastrad 
    209   1.1  riastrad 	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
    210   1.1  riastrad 		GEM_BUG_ON(i915_active_fence_isset(&it->base));
    211   1.1  riastrad 		kmem_cache_free(global.slab_cache, it);
    212   1.1  riastrad 	}
    213   1.1  riastrad }
    214   1.1  riastrad 
    215   1.1  riastrad static void
    216   1.1  riastrad active_work(struct work_struct *wrk)
    217   1.1  riastrad {
    218   1.1  riastrad 	struct i915_active *ref = container_of(wrk, typeof(*ref), work);
    219   1.1  riastrad 
    220   1.1  riastrad 	GEM_BUG_ON(!atomic_read(&ref->count));
    221   1.1  riastrad 	if (atomic_add_unless(&ref->count, -1, 1))
    222   1.1  riastrad 		return;
    223   1.1  riastrad 
    224   1.1  riastrad 	__active_retire(ref);
    225   1.1  riastrad }
    226   1.1  riastrad 
    227   1.1  riastrad static void
    228   1.1  riastrad active_retire(struct i915_active *ref)
    229   1.1  riastrad {
    230   1.1  riastrad 	GEM_BUG_ON(!atomic_read(&ref->count));
    231   1.1  riastrad 	if (atomic_add_unless(&ref->count, -1, 1))
    232   1.1  riastrad 		return;
    233   1.1  riastrad 
    234   1.1  riastrad 	if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
    235   1.1  riastrad 		queue_work(system_unbound_wq, &ref->work);
    236   1.1  riastrad 		return;
    237   1.1  riastrad 	}
    238   1.1  riastrad 
    239   1.1  riastrad 	__active_retire(ref);
    240   1.1  riastrad }
    241   1.1  riastrad 
    242   1.1  riastrad static inline struct dma_fence **
    243   1.1  riastrad __active_fence_slot(struct i915_active_fence *active)
    244   1.1  riastrad {
    245   1.1  riastrad 	return (struct dma_fence ** __force)&active->fence;
    246   1.1  riastrad }
    247   1.1  riastrad 
    248   1.1  riastrad static inline bool
    249   1.1  riastrad active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
    250   1.1  riastrad {
    251   1.1  riastrad 	struct i915_active_fence *active =
    252   1.1  riastrad 		container_of(cb, typeof(*active), cb);
    253   1.1  riastrad 
    254   1.1  riastrad 	return cmpxchg(__active_fence_slot(active), fence, NULL) == fence;
    255   1.1  riastrad }
    256   1.1  riastrad 
    257   1.1  riastrad static void
    258   1.1  riastrad node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
    259   1.1  riastrad {
    260   1.1  riastrad 	if (active_fence_cb(fence, cb))
    261   1.1  riastrad 		active_retire(container_of(cb, struct active_node, base.cb)->ref);
    262   1.1  riastrad }
    263   1.1  riastrad 
    264   1.1  riastrad static void
    265   1.1  riastrad excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
    266   1.1  riastrad {
    267   1.1  riastrad 	if (active_fence_cb(fence, cb))
    268   1.1  riastrad 		active_retire(container_of(cb, struct i915_active, excl.cb));
    269   1.1  riastrad }
    270   1.1  riastrad 
    271   1.1  riastrad static struct i915_active_fence *
    272   1.1  riastrad active_instance(struct i915_active *ref, struct intel_timeline *tl)
    273   1.1  riastrad {
    274   1.1  riastrad 	struct active_node *node, *prealloc;
    275   1.1  riastrad 	struct rb_node **p, *parent;
    276   1.1  riastrad 	u64 idx = tl->fence_context;
    277   1.1  riastrad 
    278   1.1  riastrad 	/*
    279   1.1  riastrad 	 * We track the most recently used timeline to skip a rbtree search
    280   1.1  riastrad 	 * for the common case, under typical loads we never need the rbtree
    281   1.1  riastrad 	 * at all. We can reuse the last slot if it is empty, that is
    282   1.1  riastrad 	 * after the previous activity has been retired, or if it matches the
    283   1.1  riastrad 	 * current timeline.
    284   1.1  riastrad 	 */
    285   1.1  riastrad 	node = READ_ONCE(ref->cache);
    286   1.1  riastrad 	if (node && node->timeline == idx)
    287   1.1  riastrad 		return &node->base;
    288   1.1  riastrad 
    289   1.1  riastrad 	/* Preallocate a replacement, just in case */
    290   1.1  riastrad 	prealloc = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
    291   1.1  riastrad 	if (!prealloc)
    292   1.1  riastrad 		return NULL;
    293   1.8  riastrad 	memset(prealloc, 0, sizeof(*prealloc));
    294   1.1  riastrad 
    295   1.1  riastrad 	spin_lock_irq(&ref->tree_lock);
    296   1.1  riastrad 	GEM_BUG_ON(i915_active_is_idle(ref));
    297   1.1  riastrad 
    298   1.3  riastrad #ifdef __NetBSD__
    299   1.3  riastrad 	__USE(parent);
    300   1.3  riastrad 	__USE(p);
    301   1.4  riastrad 	node = rb_tree_find_node(&ref->tree.rbr_tree, &idx);
    302   1.3  riastrad 	if (node) {
    303   1.3  riastrad 		KASSERT(node->timeline == idx);
    304   1.3  riastrad 		goto out;
    305   1.3  riastrad 	}
    306   1.3  riastrad #else
    307   1.1  riastrad 	parent = NULL;
    308   1.1  riastrad 	p = &ref->tree.rb_node;
    309   1.1  riastrad 	while (*p) {
    310   1.1  riastrad 		parent = *p;
    311   1.1  riastrad 
    312   1.1  riastrad 		node = rb_entry(parent, struct active_node, node);
    313   1.1  riastrad 		if (node->timeline == idx) {
    314   1.1  riastrad 			kmem_cache_free(global.slab_cache, prealloc);
    315   1.1  riastrad 			goto out;
    316   1.1  riastrad 		}
    317   1.1  riastrad 
    318   1.1  riastrad 		if (node->timeline < idx)
    319   1.1  riastrad 			p = &parent->rb_right;
    320   1.1  riastrad 		else
    321   1.1  riastrad 			p = &parent->rb_left;
    322   1.1  riastrad 	}
    323   1.3  riastrad #endif
    324   1.1  riastrad 
    325   1.1  riastrad 	node = prealloc;
    326  1.14  riastrad 	prealloc = NULL;
    327   1.1  riastrad 	__i915_active_fence_init(&node->base, NULL, node_retire);
    328   1.1  riastrad 	node->ref = ref;
    329   1.1  riastrad 	node->timeline = idx;
    330   1.1  riastrad 
    331   1.3  riastrad #ifdef __NetBSD__
    332   1.4  riastrad 	struct active_node *collision __diagused;
    333   1.4  riastrad 	collision = rb_tree_insert_node(&ref->tree.rbr_tree, node);
    334   1.3  riastrad 	KASSERT(collision == node);
    335   1.3  riastrad #else
    336   1.1  riastrad 	rb_link_node(&node->node, parent, p);
    337   1.1  riastrad 	rb_insert_color(&node->node, &ref->tree);
    338   1.3  riastrad #endif
    339   1.1  riastrad 
    340   1.1  riastrad out:
    341   1.1  riastrad 	ref->cache = node;
    342   1.1  riastrad 	spin_unlock_irq(&ref->tree_lock);
    343   1.1  riastrad 
    344  1.14  riastrad #ifdef __NetBSD__
    345  1.14  riastrad 	if (prealloc)
    346  1.14  riastrad 		kmem_cache_free(global.slab_cache, prealloc);
    347  1.14  riastrad #endif
    348  1.14  riastrad 
    349   1.1  riastrad 	BUILD_BUG_ON(offsetof(typeof(*node), base));
    350   1.1  riastrad 	return &node->base;
    351   1.1  riastrad }
    352   1.1  riastrad 
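                         /*
                          * Typically reached via the i915_active_init() wrapper (declared
                          * in i915_active.h), which is assumed here to supply per-call-site
                          * lock_class_key cookies; the keys are only consumed for lockdep.
                          */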
    353   1.1  riastrad void __i915_active_init(struct i915_active *ref,
    354   1.1  riastrad 			int (*active)(struct i915_active *ref),
    355   1.1  riastrad 			void (*retire)(struct i915_active *ref),
    356   1.1  riastrad 			struct lock_class_key *mkey,
    357   1.1  riastrad 			struct lock_class_key *wkey)
    358   1.1  riastrad {
    359   1.1  riastrad 	unsigned long bits;
    360   1.1  riastrad 
    361   1.1  riastrad 	debug_active_init(ref);
    362   1.1  riastrad 
    363   1.1  riastrad 	ref->flags = 0;
    364   1.1  riastrad 	ref->active = active;
    365   1.1  riastrad 	ref->retire = ptr_unpack_bits(retire, &bits, 2);
    366   1.1  riastrad 	if (bits & I915_ACTIVE_MAY_SLEEP)
    367   1.1  riastrad 		ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;
    368   1.1  riastrad 
    369   1.1  riastrad 	spin_lock_init(&ref->tree_lock);
    370   1.4  riastrad 	DRM_INIT_WAITQUEUE(&ref->tree_wq, "i915act");
    371   1.3  riastrad #ifdef __NetBSD__
    372   1.4  riastrad 	rb_tree_init(&ref->tree.rbr_tree, &active_rb_ops);
    373   1.3  riastrad #else
    374   1.1  riastrad 	ref->tree = RB_ROOT;
    375   1.3  riastrad #endif
    376   1.1  riastrad 	ref->cache = NULL;
    377   1.1  riastrad 
    378   1.1  riastrad 	init_llist_head(&ref->preallocated_barriers);
    379   1.1  riastrad 	atomic_set(&ref->count, 0);
    380   1.1  riastrad 	__mutex_init(&ref->mutex, "i915_active", mkey);
    381   1.1  riastrad 	__i915_active_fence_init(&ref->excl, NULL, excl_retire);
    382   1.1  riastrad 	INIT_WORK(&ref->work, active_work);
    383   1.1  riastrad #if IS_ENABLED(CONFIG_LOCKDEP)
    384   1.1  riastrad 	lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0);
    385   1.1  riastrad #endif
    386   1.1  riastrad }
    387   1.1  riastrad 
    388   1.1  riastrad static bool ____active_del_barrier(struct i915_active *ref,
    389   1.1  riastrad 				   struct active_node *node,
    390   1.1  riastrad 				   struct intel_engine_cs *engine)
    391   1.1  riastrad 
    392   1.1  riastrad {
    393   1.1  riastrad 	struct llist_node *head = NULL, *tail = NULL;
    394   1.1  riastrad 	struct llist_node *pos, *next;
    395   1.1  riastrad 
    396   1.1  riastrad 	GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);
    397   1.1  riastrad 
    398   1.1  riastrad 	/*
    399   1.1  riastrad 	 * Rebuild the llist excluding our node. We may perform this
    400   1.1  riastrad 	 * outside of the kernel_context timeline mutex and so someone
    401   1.1  riastrad 	 * else may be manipulating the engine->barrier_tasks, in
    402   1.1  riastrad 	 * which case either we or they will be upset :)
    403   1.1  riastrad 	 *
    404   1.1  riastrad 	 * A second __active_del_barrier() will report failure to claim
    405   1.1  riastrad 	 * the active_node and the caller will just shrug and know not to
    406   1.1  riastrad 	 * claim ownership of its node.
    407   1.1  riastrad 	 *
    408   1.1  riastrad 	 * A concurrent i915_request_add_active_barriers() will miss adding
    409   1.1  riastrad 	 * any of the tasks, but we will try again on the next -- and since
    410   1.1  riastrad 	 * we are actively using the barrier, we know that there will be
    411   1.1  riastrad 	 * at least another opportunity when we idle.
    412   1.1  riastrad 	 */
    413   1.1  riastrad 	llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) {
    414   1.1  riastrad 		if (node == barrier_from_ll(pos)) {
    415   1.1  riastrad 			node = NULL;
    416   1.1  riastrad 			continue;
    417   1.1  riastrad 		}
    418   1.1  riastrad 
    419   1.1  riastrad 		pos->next = head;
    420   1.1  riastrad 		head = pos;
    421   1.1  riastrad 		if (!tail)
    422   1.1  riastrad 			tail = pos;
    423   1.1  riastrad 	}
    424   1.1  riastrad 	if (head)
    425   1.1  riastrad 		llist_add_batch(head, tail, &engine->barrier_tasks);
    426   1.1  riastrad 
    427   1.1  riastrad 	return !node;
    428   1.1  riastrad }
    429   1.1  riastrad 
    430   1.1  riastrad static bool
    431   1.1  riastrad __active_del_barrier(struct i915_active *ref, struct active_node *node)
    432   1.1  riastrad {
    433   1.1  riastrad 	return ____active_del_barrier(ref, node, barrier_to_engine(node));
    434   1.1  riastrad }
    435   1.1  riastrad 
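                         /*
                          * Illustrative call pattern (hypothetical caller): with tl->mutex
                          * held, a request rq executing on timeline tl is recorded against
                          * the tracker:
                          *
                          *	err = i915_active_ref(ref, tl, &rq->fence);
                          *
                          * The tracker then remains busy until that fence signals and
                          * node_retire() drops the reference taken below.
                          */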
    436   1.1  riastrad int i915_active_ref(struct i915_active *ref,
    437   1.1  riastrad 		    struct intel_timeline *tl,
    438   1.1  riastrad 		    struct dma_fence *fence)
    439   1.1  riastrad {
    440   1.1  riastrad 	struct i915_active_fence *active;
    441   1.1  riastrad 	int err;
    442   1.1  riastrad 
    443   1.1  riastrad 	lockdep_assert_held(&tl->mutex);
    444   1.1  riastrad 
    445   1.1  riastrad 	/* Prevent reaping in case we malloc/wait while building the tree */
    446   1.1  riastrad 	err = i915_active_acquire(ref);
    447   1.1  riastrad 	if (err)
    448   1.1  riastrad 		return err;
    449   1.1  riastrad 
    450   1.1  riastrad 	active = active_instance(ref, tl);
    451   1.1  riastrad 	if (!active) {
    452   1.1  riastrad 		err = -ENOMEM;
    453   1.1  riastrad 		goto out;
    454   1.1  riastrad 	}
    455   1.1  riastrad 
    456   1.1  riastrad 	if (is_barrier(active)) { /* proto-node used by our idle barrier */
    457   1.1  riastrad 		/*
    458   1.1  riastrad 		 * This request is on the kernel_context timeline, and so
     459   1.1  riastrad 		 * we can use it to substitute for the pending idle-barrier
    460   1.1  riastrad 		 * request that we want to emit on the kernel_context.
    461   1.1  riastrad 		 */
    462   1.1  riastrad 		__active_del_barrier(ref, node_from_active(active));
    463   1.1  riastrad 		RCU_INIT_POINTER(active->fence, NULL);
    464   1.1  riastrad 		atomic_dec(&ref->count);
    465   1.1  riastrad 	}
    466   1.1  riastrad 	if (!__i915_active_fence_set(active, fence))
    467   1.1  riastrad 		atomic_inc(&ref->count);
    468   1.1  riastrad 
    469   1.1  riastrad out:
    470   1.1  riastrad 	i915_active_release(ref);
    471   1.1  riastrad 	return err;
    472   1.1  riastrad }
    473   1.1  riastrad 
    474   1.1  riastrad void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
    475   1.1  riastrad {
    476   1.1  riastrad 	/* We expect the caller to manage the exclusive timeline ordering */
    477   1.1  riastrad 	GEM_BUG_ON(i915_active_is_idle(ref));
    478   1.1  riastrad 
    479   1.1  riastrad 	if (!__i915_active_fence_set(&ref->excl, f))
    480   1.1  riastrad 		atomic_inc(&ref->count);
    481   1.1  riastrad }
    482   1.1  riastrad 
    483   1.1  riastrad bool i915_active_acquire_if_busy(struct i915_active *ref)
    484   1.1  riastrad {
    485   1.1  riastrad 	debug_active_assert(ref);
    486   1.1  riastrad 	return atomic_add_unless(&ref->count, 1, 0);
    487   1.1  riastrad }
    488   1.1  riastrad 
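                         /*
                          * Every successful i915_active_acquire() must be balanced by an
                          * i915_active_release(); the 0->1 transition invokes ref->active(),
                          * and dropping the final reference retires the tracker via
                          * active_retire().
                          */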
    489   1.1  riastrad int i915_active_acquire(struct i915_active *ref)
    490   1.1  riastrad {
    491   1.1  riastrad 	int err;
    492   1.1  riastrad 
    493   1.1  riastrad 	if (i915_active_acquire_if_busy(ref))
    494   1.1  riastrad 		return 0;
    495   1.1  riastrad 
    496   1.1  riastrad 	err = mutex_lock_interruptible(&ref->mutex);
    497   1.1  riastrad 	if (err)
    498   1.1  riastrad 		return err;
    499   1.1  riastrad 
    500   1.1  riastrad 	if (likely(!i915_active_acquire_if_busy(ref))) {
    501   1.1  riastrad 		if (ref->active)
    502   1.1  riastrad 			err = ref->active(ref);
    503   1.1  riastrad 		if (!err) {
    504   1.1  riastrad 			spin_lock_irq(&ref->tree_lock); /* __active_retire() */
    505   1.1  riastrad 			debug_active_activate(ref);
    506   1.1  riastrad 			atomic_inc(&ref->count);
    507   1.1  riastrad 			spin_unlock_irq(&ref->tree_lock);
    508   1.1  riastrad 		}
    509   1.1  riastrad 	}
    510   1.1  riastrad 
    511   1.1  riastrad 	mutex_unlock(&ref->mutex);
    512   1.1  riastrad 
    513   1.1  riastrad 	return err;
    514   1.1  riastrad }
    515   1.1  riastrad 
    516   1.1  riastrad void i915_active_release(struct i915_active *ref)
    517   1.1  riastrad {
    518   1.1  riastrad 	debug_active_assert(ref);
    519   1.1  riastrad 	active_retire(ref);
    520   1.1  riastrad }
    521   1.1  riastrad 
    522   1.1  riastrad static void enable_signaling(struct i915_active_fence *active)
    523   1.1  riastrad {
    524   1.1  riastrad 	struct dma_fence *fence;
    525   1.1  riastrad 
    526   1.1  riastrad 	fence = i915_active_fence_get(active);
    527   1.1  riastrad 	if (!fence)
    528   1.1  riastrad 		return;
    529   1.1  riastrad 
    530   1.1  riastrad 	dma_fence_enable_sw_signaling(fence);
    531   1.1  riastrad 	dma_fence_put(fence);
    532   1.1  riastrad }
    533   1.1  riastrad 
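                         /*
                          * Flush lazy signalling on every tracked fence, then sleep on
                          * tree_wq until __active_retire() reports the tracker idle, and
                          * finally flush any deferred retirement work.
                          */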
    534   1.1  riastrad int i915_active_wait(struct i915_active *ref)
    535   1.1  riastrad {
    536   1.1  riastrad 	struct active_node *it, *n;
    537   1.1  riastrad 	int err = 0;
    538   1.1  riastrad 
    539   1.1  riastrad 	might_sleep();
    540   1.1  riastrad 
    541   1.1  riastrad 	if (!i915_active_acquire_if_busy(ref))
    542   1.1  riastrad 		return 0;
    543   1.1  riastrad 
    544   1.1  riastrad 	/* Flush lazy signals */
    545   1.1  riastrad 	enable_signaling(&ref->excl);
    546   1.1  riastrad 	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
    547   1.1  riastrad 		if (is_barrier(&it->base)) /* unconnected idle barrier */
    548   1.1  riastrad 			continue;
    549   1.1  riastrad 
    550   1.1  riastrad 		enable_signaling(&it->base);
    551   1.1  riastrad 	}
    552   1.1  riastrad 	/* Any fence added after the wait begins will not be auto-signaled */
    553   1.1  riastrad 
    554   1.1  riastrad 	i915_active_release(ref);
    555   1.1  riastrad 	if (err)
    556   1.1  riastrad 		return err;
    557   1.1  riastrad 
    558   1.4  riastrad 	spin_lock(&ref->tree_lock);
    559   1.4  riastrad 	DRM_SPIN_WAIT_UNTIL(err, &ref->tree_wq, &ref->tree_lock,
    560   1.4  riastrad 	    i915_active_is_idle(ref));
    561   1.4  riastrad 	spin_unlock(&ref->tree_lock);
    562   1.4  riastrad 	if (err)
    563   1.4  riastrad 		return err;
    564   1.1  riastrad 
    565   1.1  riastrad 	flush_work(&ref->work);
    566   1.1  riastrad 	return 0;
    567   1.1  riastrad }
    568   1.1  riastrad 
    569   1.1  riastrad int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
    570   1.1  riastrad {
    571   1.1  riastrad 	int err = 0;
    572   1.1  riastrad 
    573   1.1  riastrad 	if (rcu_access_pointer(ref->excl.fence)) {
    574   1.1  riastrad 		struct dma_fence *fence;
    575   1.1  riastrad 
    576   1.1  riastrad 		rcu_read_lock();
    577   1.1  riastrad 		fence = dma_fence_get_rcu_safe(&ref->excl.fence);
    578   1.1  riastrad 		rcu_read_unlock();
    579   1.1  riastrad 		if (fence) {
    580   1.1  riastrad 			err = i915_request_await_dma_fence(rq, fence);
    581   1.1  riastrad 			dma_fence_put(fence);
    582   1.1  riastrad 		}
    583   1.1  riastrad 	}
    584   1.1  riastrad 
    585   1.1  riastrad 	/* In the future we may choose to await on all fences */
    586   1.1  riastrad 
    587   1.1  riastrad 	return err;
    588   1.1  riastrad }
    589   1.1  riastrad 
    590   1.1  riastrad void i915_active_fini(struct i915_active *ref)
    591   1.1  riastrad {
    592   1.1  riastrad 	debug_active_fini(ref);
    593   1.1  riastrad 	GEM_BUG_ON(atomic_read(&ref->count));
    594   1.1  riastrad 	GEM_BUG_ON(work_pending(&ref->work));
    595   1.1  riastrad 	GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
    596   1.1  riastrad 	mutex_destroy(&ref->mutex);
    597   1.7  riastrad 	spin_lock_destroy(&ref->tree_lock);
    598   1.1  riastrad }
    599   1.1  riastrad 
    600   1.1  riastrad static inline bool is_idle_barrier(struct active_node *node, u64 idx)
    601   1.1  riastrad {
    602   1.1  riastrad 	return node->timeline == idx && !i915_active_fence_isset(&node->base);
    603   1.1  riastrad }
    604   1.1  riastrad 
    605   1.1  riastrad static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
    606   1.1  riastrad {
    607   1.1  riastrad 	struct rb_node *prev, *p;
    608   1.1  riastrad 
    609   1.1  riastrad 	if (RB_EMPTY_ROOT(&ref->tree))
    610   1.1  riastrad 		return NULL;
    611   1.1  riastrad 
    612   1.1  riastrad 	spin_lock_irq(&ref->tree_lock);
    613   1.1  riastrad 	GEM_BUG_ON(i915_active_is_idle(ref));
    614   1.1  riastrad 
    615   1.1  riastrad 	/*
    616   1.1  riastrad 	 * Try to reuse any existing barrier nodes already allocated for this
    617   1.1  riastrad 	 * i915_active, due to overlapping active phases there is likely a
    618   1.1  riastrad 	 * node kept alive (as we reuse before parking). We prefer to reuse
    619   1.1  riastrad 	 * completely idle barriers (less hassle in manipulating the llists),
    620   1.1  riastrad 	 * but otherwise any will do.
    621   1.1  riastrad 	 */
    622   1.1  riastrad 	if (ref->cache && is_idle_barrier(ref->cache, idx)) {
    623   1.1  riastrad 		p = &ref->cache->node;
    624   1.1  riastrad 		goto match;
    625   1.1  riastrad 	}
    626   1.1  riastrad 
    627   1.4  riastrad #ifdef __NetBSD__
    628   1.4  riastrad     {
    629   1.4  riastrad 	struct active_node *node =
    630   1.4  riastrad 	    rb_tree_find_node_leq(&ref->tree.rbr_tree, &idx);
    631   1.4  riastrad 	if (node) {
    632   1.4  riastrad 		if (node->timeline == idx && is_idle_barrier(node, idx)) {
    633   1.4  riastrad 			p = &node->node;
    634   1.4  riastrad 			goto match;
    635   1.4  riastrad 		}
    636   1.4  riastrad 		prev = &node->node;
    637   1.4  riastrad 	} else {
    638   1.4  riastrad 		prev = NULL;
    639   1.4  riastrad 	}
    640   1.4  riastrad     }
    641   1.4  riastrad #else
    642   1.1  riastrad 	prev = NULL;
    643   1.1  riastrad 	p = ref->tree.rb_node;
    644   1.1  riastrad 	while (p) {
    645   1.1  riastrad 		struct active_node *node =
    646   1.1  riastrad 			rb_entry(p, struct active_node, node);
    647   1.1  riastrad 
    648   1.1  riastrad 		if (is_idle_barrier(node, idx))
    649   1.1  riastrad 			goto match;
    650   1.1  riastrad 
    651   1.1  riastrad 		prev = p;
    652   1.1  riastrad 		if (node->timeline < idx)
    653   1.1  riastrad 			p = p->rb_right;
    654   1.1  riastrad 		else
    655   1.1  riastrad 			p = p->rb_left;
    656   1.1  riastrad 	}
    657   1.4  riastrad #endif
    658   1.1  riastrad 
    659   1.1  riastrad 	/*
    660   1.1  riastrad 	 * No quick match, but we did find the leftmost rb_node for the
    661   1.1  riastrad 	 * kernel_context. Walk the rb_tree in-order to see if there were
    662   1.1  riastrad 	 * any idle-barriers on this timeline that we missed, or just use
    663   1.1  riastrad 	 * the first pending barrier.
    664   1.1  riastrad 	 */
    665   1.4  riastrad 	for (p = prev; p; p = rb_next2(&ref->tree, p)) {
    666   1.1  riastrad 		struct active_node *node =
    667   1.1  riastrad 			rb_entry(p, struct active_node, node);
    668   1.1  riastrad 		struct intel_engine_cs *engine;
    669   1.1  riastrad 
    670   1.1  riastrad 		if (node->timeline > idx)
    671   1.1  riastrad 			break;
    672   1.1  riastrad 
    673   1.1  riastrad 		if (node->timeline < idx)
    674   1.1  riastrad 			continue;
    675   1.1  riastrad 
    676   1.1  riastrad 		if (is_idle_barrier(node, idx))
    677   1.1  riastrad 			goto match;
    678   1.1  riastrad 
    679   1.1  riastrad 		/*
    680   1.1  riastrad 		 * The list of pending barriers is protected by the
    681   1.1  riastrad 		 * kernel_context timeline, which notably we do not hold
    682   1.1  riastrad 		 * here. i915_request_add_active_barriers() may consume
    683   1.1  riastrad 		 * the barrier before we claim it, so we have to check
    684   1.1  riastrad 		 * for success.
    685   1.1  riastrad 		 */
    686   1.1  riastrad 		engine = __barrier_to_engine(node);
    687   1.1  riastrad 		smp_rmb(); /* serialise with add_active_barriers */
    688   1.1  riastrad 		if (is_barrier(&node->base) &&
    689   1.1  riastrad 		    ____active_del_barrier(ref, node, engine))
    690   1.1  riastrad 			goto match;
    691   1.1  riastrad 	}
    692   1.1  riastrad 
    693   1.1  riastrad 	spin_unlock_irq(&ref->tree_lock);
    694   1.1  riastrad 
    695   1.1  riastrad 	return NULL;
    696   1.1  riastrad 
    697   1.1  riastrad match:
    698   1.1  riastrad 	rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
    699   1.1  riastrad 	if (p == &ref->cache->node)
    700   1.1  riastrad 		ref->cache = NULL;
    701   1.1  riastrad 	spin_unlock_irq(&ref->tree_lock);
    702   1.1  riastrad 
    703   1.1  riastrad 	return rb_entry(p, struct active_node, node);
    704   1.1  riastrad }
    705   1.1  riastrad 
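                         /*
                          * Barrier flow, in outline: i915_active_acquire_preallocate_barrier()
                          * allocates (or reuses) one proto-node per physical engine and parks
                          * it on ref->preallocated_barriers; i915_active_acquire_barrier()
                          * then moves the nodes into the rbtree and onto
                          * engine->barrier_tasks; and i915_request_add_active_barriers()
                          * attaches them to the next kernel_context request, whose completion
                          * drops the reference taken here.
                          */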
    706   1.1  riastrad int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
    707   1.1  riastrad 					    struct intel_engine_cs *engine)
    708   1.1  riastrad {
    709   1.1  riastrad 	intel_engine_mask_t tmp, mask = engine->mask;
    710   1.1  riastrad 	struct llist_node *first = NULL, *last = NULL;
    711   1.1  riastrad 	struct intel_gt *gt = engine->gt;
    712   1.1  riastrad 	int err;
    713   1.1  riastrad 
    714   1.1  riastrad 	GEM_BUG_ON(i915_active_is_idle(ref));
    715   1.1  riastrad 
    716   1.1  riastrad 	/* Wait until the previous preallocation is completed */
    717   1.1  riastrad 	while (!llist_empty(&ref->preallocated_barriers))
    718   1.1  riastrad 		cond_resched();
    719   1.1  riastrad 
    720   1.1  riastrad 	/*
    721   1.1  riastrad 	 * Preallocate a node for each physical engine supporting the target
    722   1.1  riastrad 	 * engine (remember virtual engines have more than one sibling).
    723   1.1  riastrad 	 * We can then use the preallocated nodes in
    724   1.1  riastrad 	 * i915_active_acquire_barrier()
    725   1.1  riastrad 	 */
    726   1.1  riastrad 	for_each_engine_masked(engine, gt, mask, tmp) {
    727   1.1  riastrad 		u64 idx = engine->kernel_context->timeline->fence_context;
    728   1.1  riastrad 		struct llist_node *prev = first;
    729   1.1  riastrad 		struct active_node *node;
    730   1.1  riastrad 
    731   1.1  riastrad 		node = reuse_idle_barrier(ref, idx);
    732   1.1  riastrad 		if (!node) {
    733   1.1  riastrad 			node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
    734   1.1  riastrad 			if (!node) {
     735   1.1  riastrad 				err = -ENOMEM;
    736   1.1  riastrad 				goto unwind;
    737   1.1  riastrad 			}
    738   1.1  riastrad 
    739   1.8  riastrad 			memset(node, 0, sizeof(*node));
    740   1.1  riastrad 			RCU_INIT_POINTER(node->base.fence, NULL);
    741   1.1  riastrad 			node->base.cb.func = node_retire;
    742   1.1  riastrad 			node->timeline = idx;
    743   1.1  riastrad 			node->ref = ref;
    744   1.1  riastrad 		}
    745   1.1  riastrad 
    746   1.1  riastrad 		if (!i915_active_fence_isset(&node->base)) {
    747   1.1  riastrad 			/*
    748   1.1  riastrad 			 * Mark this as being *our* unconnected proto-node.
    749   1.1  riastrad 			 *
    750   1.1  riastrad 			 * Since this node is not in any list, and we have
    751   1.1  riastrad 			 * decoupled it from the rbtree, we can reuse the
    752   1.1  riastrad 			 * request to indicate this is an idle-barrier node
    753   1.1  riastrad 			 * and then we can use the rb_node and list pointers
    754   1.1  riastrad 			 * for our tracking of the pending barrier.
    755   1.1  riastrad 			 */
    756   1.1  riastrad 			RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
    757   1.4  riastrad 			node->engine = engine;
    758   1.1  riastrad 			atomic_inc(&ref->count);
    759   1.1  riastrad 		}
    760   1.1  riastrad 		GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));
    761   1.1  riastrad 
    762   1.1  riastrad 		GEM_BUG_ON(barrier_to_engine(node) != engine);
    763   1.1  riastrad 		first = barrier_to_ll(node);
    764   1.1  riastrad 		first->next = prev;
    765   1.1  riastrad 		if (!last)
    766   1.1  riastrad 			last = first;
    767   1.1  riastrad 		intel_engine_pm_get(engine);
    768   1.1  riastrad 	}
    769   1.1  riastrad 
    770   1.1  riastrad 	GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
    771   1.1  riastrad 	llist_add_batch(first, last, &ref->preallocated_barriers);
    772   1.1  riastrad 
    773   1.1  riastrad 	return 0;
    774   1.1  riastrad 
    775   1.1  riastrad unwind:
    776   1.1  riastrad 	while (first) {
    777   1.1  riastrad 		struct active_node *node = barrier_from_ll(first);
    778   1.1  riastrad 
    779   1.1  riastrad 		first = first->next;
    780   1.1  riastrad 
    781   1.1  riastrad 		atomic_dec(&ref->count);
    782   1.1  riastrad 		intel_engine_pm_put(barrier_to_engine(node));
    783   1.1  riastrad 
    784   1.1  riastrad 		kmem_cache_free(global.slab_cache, node);
    785   1.1  riastrad 	}
    786   1.1  riastrad 	return err;
    787   1.1  riastrad }
    788   1.1  riastrad 
    789   1.1  riastrad void i915_active_acquire_barrier(struct i915_active *ref)
    790   1.1  riastrad {
    791   1.1  riastrad 	struct llist_node *pos, *next;
    792   1.1  riastrad 	unsigned long flags;
    793   1.1  riastrad 
    794   1.1  riastrad 	GEM_BUG_ON(i915_active_is_idle(ref));
    795   1.1  riastrad 
    796   1.1  riastrad 	/*
    797   1.1  riastrad 	 * Transfer the list of preallocated barriers into the
    798   1.1  riastrad 	 * i915_active rbtree, but only as proto-nodes. They will be
    799   1.1  riastrad 	 * populated by i915_request_add_active_barriers() to point to the
    800   1.1  riastrad 	 * request that will eventually release them.
    801   1.1  riastrad 	 */
    802   1.1  riastrad 	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
    803   1.1  riastrad 		struct active_node *node = barrier_from_ll(pos);
    804   1.1  riastrad 		struct intel_engine_cs *engine = barrier_to_engine(node);
    805   1.1  riastrad 		struct rb_node **p, *parent;
    806   1.1  riastrad 
    807   1.1  riastrad 		spin_lock_irqsave_nested(&ref->tree_lock, flags,
    808   1.1  riastrad 					 SINGLE_DEPTH_NESTING);
    809   1.4  riastrad #ifdef __NetBSD__
    810   1.4  riastrad 		__USE(p);
    811   1.4  riastrad 		__USE(parent);
    812   1.4  riastrad 		struct active_node *collision __diagused;
    813   1.4  riastrad 		collision = rb_tree_insert_node(&ref->tree.rbr_tree, node);
    814   1.4  riastrad 		KASSERT(collision == node);
    815   1.4  riastrad #else
    816   1.1  riastrad 		parent = NULL;
    817   1.1  riastrad 		p = &ref->tree.rb_node;
    818   1.1  riastrad 		while (*p) {
    819   1.1  riastrad 			struct active_node *it;
    820   1.1  riastrad 
    821   1.1  riastrad 			parent = *p;
    822   1.1  riastrad 
    823   1.1  riastrad 			it = rb_entry(parent, struct active_node, node);
    824   1.1  riastrad 			if (it->timeline < node->timeline)
    825   1.1  riastrad 				p = &parent->rb_right;
    826   1.1  riastrad 			else
    827   1.1  riastrad 				p = &parent->rb_left;
    828   1.1  riastrad 		}
    829   1.1  riastrad 		rb_link_node(&node->node, parent, p);
    830   1.1  riastrad 		rb_insert_color(&node->node, &ref->tree);
    831   1.4  riastrad #endif
    832   1.1  riastrad 		spin_unlock_irqrestore(&ref->tree_lock, flags);
    833   1.1  riastrad 
    834   1.1  riastrad 		GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
    835   1.1  riastrad 		llist_add(barrier_to_ll(node), &engine->barrier_tasks);
    836   1.1  riastrad 		intel_engine_pm_put(engine);
    837   1.1  riastrad 	}
    838   1.1  riastrad }
    839   1.1  riastrad 
    840   1.1  riastrad static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
    841   1.1  riastrad {
    842   1.1  riastrad 	return __active_fence_slot(&barrier_from_ll(node)->base);
    843   1.1  riastrad }
    844   1.1  riastrad 
    845   1.1  riastrad void i915_request_add_active_barriers(struct i915_request *rq)
    846   1.1  riastrad {
    847   1.1  riastrad 	struct intel_engine_cs *engine = rq->engine;
    848   1.1  riastrad 	struct llist_node *node, *next;
    849   1.1  riastrad 	unsigned long flags;
    850   1.1  riastrad 
    851   1.1  riastrad 	GEM_BUG_ON(!intel_context_is_barrier(rq->context));
    852   1.1  riastrad 	GEM_BUG_ON(intel_engine_is_virtual(engine));
    853   1.1  riastrad 	GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);
    854   1.1  riastrad 
    855   1.1  riastrad 	node = llist_del_all(&engine->barrier_tasks);
    856   1.1  riastrad 	if (!node)
    857   1.1  riastrad 		return;
    858   1.1  riastrad 	/*
    859   1.1  riastrad 	 * Attach the list of proto-fences to the in-flight request such
    860   1.1  riastrad 	 * that the parent i915_active will be released when this request
    861   1.1  riastrad 	 * is retired.
    862   1.1  riastrad 	 */
    863   1.1  riastrad 	spin_lock_irqsave(&rq->lock, flags);
    864   1.1  riastrad 	llist_for_each_safe(node, next, node) {
    865   1.1  riastrad 		/* serialise with reuse_idle_barrier */
    866   1.1  riastrad 		smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
    867   1.4  riastrad #ifdef __NetBSD__
    868   1.8  riastrad 		/* XXX ugh bletch */
    869   1.5  riastrad 		struct i915_active_fence *active =
    870   1.4  riastrad 		    container_of(node, struct i915_active_fence, llist);
    871   1.4  riastrad 		/* XXX something bad went wrong in making this code */
    872   1.5  riastrad 		KASSERT(active->cb.func == node_retire ||
    873   1.5  riastrad 		    active->cb.func == excl_retire ||
    874   1.5  riastrad 		    active->cb.func == i915_active_noop);
    875   1.8  riastrad 		KASSERTMSG(active->fence == &rq->fence,
    876   1.8  riastrad 		    "active=%p fence=%p; rq=%p fence=%p",
    877   1.8  riastrad 		    active, active->fence, rq, &rq->fence);
    878   1.8  riastrad 		KASSERTMSG(!active->cb.fcb_onqueue, "active=%p", active);
    879   1.8  riastrad 		active->cb.fcb_onqueue = true;
    880   1.8  riastrad 		TAILQ_INSERT_TAIL(&rq->fence.f_callbacks, &active->cb,
    881   1.8  riastrad 		    fcb_entry);
    882   1.4  riastrad #else
    883   1.1  riastrad 		list_add_tail((struct list_head *)node, &rq->fence.cb_list);
    884   1.4  riastrad #endif
    885   1.1  riastrad 	}
    886   1.1  riastrad 	spin_unlock_irqrestore(&rq->lock, flags);
    887   1.1  riastrad }
    888   1.1  riastrad 
    889   1.1  riastrad /*
    890   1.1  riastrad  * __i915_active_fence_set: Update the last active fence along its timeline
    891   1.1  riastrad  * @active: the active tracker
    892   1.1  riastrad  * @fence: the new fence (under construction)
    893   1.1  riastrad  *
    894   1.1  riastrad  * Records the new @fence as the last active fence along its timeline in
    895   1.1  riastrad  * this active tracker, moving the tracking callbacks from the previous
    896   1.1  riastrad  * fence onto this one. Returns the previous fence (if not already completed),
    897   1.1  riastrad  * which the caller must ensure is executed before the new fence. To ensure
    898   1.1  riastrad  * that the order of fences within the timeline of the i915_active_fence is
    899   1.1  riastrad  * understood, it should be locked by the caller.
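                          * For a concrete example of that ordering, see i915_active_fence_set()
                          * below, which makes the new request await the returned previous fence.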
    900   1.1  riastrad  */
    901   1.1  riastrad struct dma_fence *
    902   1.1  riastrad __i915_active_fence_set(struct i915_active_fence *active,
    903   1.1  riastrad 			struct dma_fence *fence)
    904   1.1  riastrad {
    905   1.1  riastrad 	struct dma_fence *prev;
    906   1.1  riastrad 	unsigned long flags;
    907   1.1  riastrad 
    908   1.1  riastrad 	if (fence == rcu_access_pointer(active->fence))
    909   1.1  riastrad 		return fence;
    910   1.1  riastrad 
    911   1.1  riastrad 	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
    912   1.1  riastrad 
    913   1.1  riastrad 	/*
    914   1.1  riastrad 	 * Consider that we have two threads arriving (A and B), with
    915   1.1  riastrad 	 * C already resident as the active->fence.
    916   1.1  riastrad 	 *
    917   1.1  riastrad 	 * A does the xchg first, and so it sees C or NULL depending
    918   1.1  riastrad 	 * on the timing of the interrupt handler. If it is NULL, the
    919   1.1  riastrad 	 * previous fence must have been signaled and we know that
    920   1.1  riastrad 	 * we are first on the timeline. If it is still present,
    921   1.1  riastrad 	 * we acquire the lock on that fence and serialise with the interrupt
    922   1.1  riastrad 	 * handler, in the process removing it from any future interrupt
    923   1.1  riastrad 	 * callback. A will then wait on C before executing (if present).
    924   1.1  riastrad 	 *
    925   1.1  riastrad 	 * As B is second, it sees A as the previous fence and so waits for
    926   1.1  riastrad 	 * it to complete its transition and takes over the occupancy for
    927   1.1  riastrad 	 * itself -- remembering that it needs to wait on A before executing.
    928   1.1  riastrad 	 *
    929   1.1  riastrad 	 * Note the strong ordering of the timeline also provides consistent
    930   1.1  riastrad 	 * nesting rules for the fence->lock; the inner lock is always the
    931   1.1  riastrad 	 * older lock.
    932   1.1  riastrad 	 */
    933   1.1  riastrad 	spin_lock_irqsave(fence->lock, flags);
    934   1.1  riastrad 	prev = xchg(__active_fence_slot(active), fence);
    935   1.1  riastrad 	if (prev) {
    936   1.1  riastrad 		GEM_BUG_ON(prev == fence);
    937   1.8  riastrad 		spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
    938   1.4  riastrad #ifdef __NetBSD__
    939   1.8  riastrad 		/* XXX ugh bletch */
    940   1.5  riastrad 		KASSERT(active->cb.func == node_retire ||
    941   1.5  riastrad 		    active->cb.func == excl_retire ||
    942   1.5  riastrad 		    active->cb.func == i915_active_noop);
    943   1.8  riastrad 		if (active->cb.fcb_onqueue) {
    944   1.8  riastrad 			TAILQ_REMOVE(&prev->f_callbacks, &active->cb,
    945   1.8  riastrad 			    fcb_entry);
    946   1.8  riastrad 			active->cb.fcb_onqueue = false;
    947   1.8  riastrad 		}
    948   1.4  riastrad #else
    949   1.1  riastrad 		__list_del_entry(&active->cb.node);
    950   1.8  riastrad #endif
    951   1.1  riastrad 		spin_unlock(prev->lock); /* serialise with prev->cb_list */
    952   1.1  riastrad 	}
    953   1.1  riastrad 	GEM_BUG_ON(rcu_access_pointer(active->fence) != fence);
    954   1.8  riastrad #ifdef __NetBSD__
    955   1.8  riastrad 	/* XXX ugh bletch */
    956   1.8  riastrad 	KASSERT(!active->cb.fcb_onqueue);
    957   1.8  riastrad 	active->cb.fcb_onqueue = true;
    958   1.8  riastrad 	TAILQ_INSERT_TAIL(&fence->f_callbacks, &active->cb, fcb_entry);
    959   1.8  riastrad #else
    960   1.1  riastrad 	list_add_tail(&active->cb.node, &fence->cb_list);
    961   1.4  riastrad #endif
    962   1.1  riastrad 	spin_unlock_irqrestore(fence->lock, flags);
    963   1.1  riastrad 
    964   1.1  riastrad 	return prev;
    965   1.1  riastrad }
    966   1.1  riastrad 
    967   1.1  riastrad int i915_active_fence_set(struct i915_active_fence *active,
    968   1.1  riastrad 			  struct i915_request *rq)
    969   1.1  riastrad {
    970   1.1  riastrad 	struct dma_fence *fence;
    971   1.1  riastrad 	int err = 0;
    972   1.1  riastrad 
    973   1.1  riastrad 	/* Must maintain timeline ordering wrt previous active requests */
    974   1.1  riastrad 	rcu_read_lock();
    975   1.1  riastrad 	fence = __i915_active_fence_set(active, &rq->fence);
    976   1.1  riastrad 	if (fence) /* but the previous fence may not belong to that timeline! */
    977   1.1  riastrad 		fence = dma_fence_get_rcu(fence);
    978   1.1  riastrad 	rcu_read_unlock();
    979   1.1  riastrad 	if (fence) {
    980   1.1  riastrad 		err = i915_request_await_dma_fence(rq, fence);
    981   1.1  riastrad 		dma_fence_put(fence);
    982   1.1  riastrad 	}
    983   1.1  riastrad 
    984   1.1  riastrad 	return err;
    985   1.1  riastrad }
    986   1.1  riastrad 
    987   1.1  riastrad void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
    988   1.1  riastrad {
    989   1.1  riastrad 	active_fence_cb(fence, cb);
    990   1.1  riastrad }
    991   1.1  riastrad 
    992   1.1  riastrad #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
    993   1.1  riastrad #include "selftests/i915_active.c"
    994   1.1  riastrad #endif
    995   1.1  riastrad 
    996   1.1  riastrad static void i915_global_active_shrink(void)
    997   1.1  riastrad {
    998   1.1  riastrad 	kmem_cache_shrink(global.slab_cache);
    999   1.1  riastrad }
   1000   1.1  riastrad 
   1001   1.1  riastrad static void i915_global_active_exit(void)
   1002   1.1  riastrad {
   1003   1.1  riastrad 	kmem_cache_destroy(global.slab_cache);
   1004   1.1  riastrad }
   1005   1.1  riastrad 
   1006   1.1  riastrad static struct i915_global_active global = { {
   1007   1.1  riastrad 	.shrink = i915_global_active_shrink,
   1008   1.1  riastrad 	.exit = i915_global_active_exit,
   1009   1.1  riastrad } };
   1010   1.1  riastrad 
   1011   1.1  riastrad int __init i915_global_active_init(void)
   1012   1.1  riastrad {
   1013   1.1  riastrad 	global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
   1014   1.1  riastrad 	if (!global.slab_cache)
   1015   1.1  riastrad 		return -ENOMEM;
   1016   1.1  riastrad 
   1017   1.1  riastrad 	i915_global_register(&global.base);
   1018   1.1  riastrad 	return 0;
   1019   1.1  riastrad }
   1020