/*	$NetBSD: i915_globals.c,v 1.3 2021/12/19 11:49:11 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_globals.c,v 1.3 2021/12/19 11:49:11 riastradh Exp $");

#include <linux/slab.h>
#include <linux/workqueue.h>

#include "i915_active.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_object.h"
#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"
#include "i915_vma.h"

#include <linux/nbsd-namespace.h>

static LIST_HEAD(globals);

static atomic_t active;
static atomic_t epoch;
static struct park_work {
	struct delayed_work work;
	struct rcu_head rcu;
	unsigned long flags;
#define PENDING 0
	int epoch;
} park;

static void i915_globals_shrink(void)
{
	struct i915_global *global;

	/*
	 * kmem_cache_shrink() discards empty slabs and reorders partially
	 * filled slabs to prioritise allocating from the mostly full slabs,
	 * with the aim of reducing fragmentation.
	 */
	list_for_each_entry(global, &globals, link)
		global->shrink();
}

static void __i915_globals_grace(struct rcu_head *rcu)
{
	/* Ratelimit parking as shrinking is quite slow */
	schedule_delayed_work(&park.work, round_jiffies_up_relative(2 * HZ));
}

static void __i915_globals_queue_rcu(void)
{
	park.epoch = atomic_inc_return(&epoch);
	if (!atomic_read(&active)) {
		init_rcu_head(&park.rcu);
		call_rcu(&park.rcu, __i915_globals_grace);
	}
}

static void __i915_globals_park(struct work_struct *work)
{
	destroy_rcu_head(&park.rcu);

	/* Confirm nothing woke up in the last grace period */
	if (park.epoch != atomic_read(&epoch)) {
		__i915_globals_queue_rcu();
		return;
	}

	clear_bit(PENDING, &park.flags);
	i915_globals_shrink();
}

void __init i915_global_register(struct i915_global *global)
{
	GEM_BUG_ON(!global->shrink);
	GEM_BUG_ON(!global->exit);

	list_add_tail(&global->link, &globals);
}
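
/*
 * Illustrative sketch (not compiled here): a client subsystem embeds a
 * struct i915_global, points the shrink/exit callbacks at its own slab
 * cache maintenance, and registers it from its init hook.  All names in
 * this block (slab_example, example_object, i915_global_example_*) are
 * hypothetical stand-ins for the real per-subsystem definitions.
 */
#if 0
static struct kmem_cache *slab_example;

static void i915_global_example_shrink(void)
{
	/* Return empty slabs from this subsystem's cache. */
	kmem_cache_shrink(slab_example);
}

static void i915_global_example_exit(void)
{
	kmem_cache_destroy(slab_example);
}

static struct i915_global global_example = {
	.shrink = i915_global_example_shrink,
	.exit = i915_global_example_exit,
};

int __init i915_global_example_init(void)
{
	slab_example = kmem_cache_create("i915_example",
					 sizeof(struct example_object), /* hypothetical type */
					 0, SLAB_HWCACHE_ALIGN, NULL);
	if (!slab_example)
		return -ENOMEM;

	i915_global_register(&global_example);
	return 0;
}
#endif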

static void __i915_globals_cleanup(void)
{
	struct i915_global *global, *next;

	list_for_each_entry_safe_reverse(global, next, &globals, link)
		global->exit();
}

static __initconst int (* const initfn[])(void) = {
	i915_global_active_init,
	i915_global_buddy_init,
	i915_global_context_init,
	i915_global_gem_context_init,
	i915_global_objects_init,
	i915_global_request_init,
	i915_global_scheduler_init,
	i915_global_vma_init,
};

int __init i915_globals_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(initfn); i++) {
		int err;

		err = initfn[i]();
		if (err) {
			__i915_globals_cleanup();
			return err;
		}
	}

	INIT_DELAYED_WORK(&park.work, __i915_globals_park);
	return 0;
}
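
/*
 * Illustrative sketch (not compiled here): module init/exit is expected to
 * bracket everything else with these calls, so the global slab caches exist
 * before any request, context or VMA is allocated and are torn down last.
 * The function names below are hypothetical placeholders for the driver's
 * real module hooks.
 */
#if 0
static int __init example_module_init(void)
{
	int err;

	err = i915_globals_init();
	if (err)
		return err;

	/* ... register the PCI driver, etc. ... */
	return 0;
}

static void __exit example_module_exit(void)
{
	/* ... unregister the PCI driver, etc. ... */
	i915_globals_exit();
}
#endif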

void i915_globals_park(void)
{
	/*
	 * Defer shrinking the global slab caches (and other work) until
	 * after an RCU grace period has completed with no activity. This
	 * is to try and reduce the latency impact on consumers caused by
	 * shrinking the caches at the same time as they are trying to
	 * allocate, the assumption being that if we idle long enough for
	 * an RCU grace period to elapse since the last use, it is likely
	 * to be even longer until we need the caches again.
	 */
	if (!atomic_dec_and_test(&active))
		return;

	/* Queue cleanup after the next RCU grace period has freed slabs */
	if (!test_and_set_bit(PENDING, &park.flags))
		__i915_globals_queue_rcu();
}

void i915_globals_unpark(void)
{
	atomic_inc(&epoch);
	atomic_inc(&active);
}
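
/*
 * Illustrative sketch (not compiled here): callers pair unpark/park around
 * GPU activity.  Waking bumps both counters, so grace-period work already
 * queued sees a changed epoch and defers rather than shrinking underneath
 * an active driver; the final park (active drops to zero) schedules the
 * deferred shrink.  The function names below are hypothetical placeholders
 * for the real power-management hooks.
 */
#if 0
static void example_gt_wakeup(void)
{
	i915_globals_unpark();
	/* ... power up the GT and resume submission ... */
}

static void example_gt_idle(void)
{
	/* ... flush outstanding work and power down the GT ... */
	i915_globals_park();
}
#endif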

static void __exit __i915_globals_flush(void)
{
	atomic_inc(&active); /* skip shrinking */

	rcu_barrier(); /* wait for the work to be queued */
	flush_delayed_work(&park.work);

	atomic_dec(&active);
}

void __exit i915_globals_exit(void)
{
	GEM_BUG_ON(atomic_read(&active));

	__i915_globals_flush();
	__i915_globals_cleanup();

	/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
	rcu_barrier();
}