Home | History | Annotate | Line # | Download | only in i915
      1  1.12  riastrad /*	$NetBSD: intel_runtime_pm.c,v 1.12 2021/12/19 12:32:15 riastradh Exp $	*/
      2   1.1  riastrad 
      3   1.1  riastrad /*
      4   1.1  riastrad  * Copyright  2012-2014 Intel Corporation
      5   1.1  riastrad  *
      6   1.1  riastrad  * Permission is hereby granted, free of charge, to any person obtaining a
      7   1.1  riastrad  * copy of this software and associated documentation files (the "Software"),
      8   1.1  riastrad  * to deal in the Software without restriction, including without limitation
      9   1.1  riastrad  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     10   1.1  riastrad  * and/or sell copies of the Software, and to permit persons to whom the
     11   1.1  riastrad  * Software is furnished to do so, subject to the following conditions:
     12   1.1  riastrad  *
     13   1.1  riastrad  * The above copyright notice and this permission notice (including the next
     14   1.1  riastrad  * paragraph) shall be included in all copies or substantial portions of the
     15   1.1  riastrad  * Software.
     16   1.1  riastrad  *
     17   1.1  riastrad  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     18   1.1  riastrad  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     19   1.1  riastrad  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     20   1.1  riastrad  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     21   1.1  riastrad  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     22   1.1  riastrad  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
     23   1.1  riastrad  * IN THE SOFTWARE.
     24   1.1  riastrad  *
     25   1.1  riastrad  * Authors:
     26   1.1  riastrad  *    Eugeni Dodonov <eugeni.dodonov (at) intel.com>
     27   1.1  riastrad  *    Daniel Vetter <daniel.vetter (at) ffwll.ch>
     28   1.1  riastrad  *
     29   1.1  riastrad  */
     30   1.1  riastrad 
     31   1.1  riastrad #include <sys/cdefs.h>
     32  1.12  riastrad __KERNEL_RCSID(0, "$NetBSD: intel_runtime_pm.c,v 1.12 2021/12/19 12:32:15 riastradh Exp $");
     33   1.1  riastrad 
     34   1.1  riastrad #include <linux/pm_runtime.h>
     35  1.10  riastrad 
     36  1.10  riastrad #include <drm/drm_print.h>
     37   1.1  riastrad 
     38   1.1  riastrad #include "i915_drv.h"
     39  1.10  riastrad #include "i915_trace.h"
     40   1.1  riastrad 
     41   1.8  riastrad #include <linux/nbsd-namespace.h>
     42   1.8  riastrad 
     43   1.1  riastrad /**
     44   1.1  riastrad  * DOC: runtime pm
     45   1.1  riastrad  *
     46   1.1  riastrad  * The i915 driver supports dynamic enabling and disabling of entire hardware
     47   1.1  riastrad  * blocks at runtime. This is especially important on the display side where
     48   1.1  riastrad  * software is supposed to control many power gates manually on recent hardware,
     49   1.1  riastrad  * since on the GT side a lot of the power management is done by the hardware.
     50   1.1  riastrad  * But even there some manual control at the device level is required.
     51   1.1  riastrad  *
     52   1.1  riastrad  * Since i915 supports a diverse set of platforms with a unified codebase and
     53   1.1  riastrad  * hardware engineers just love to shuffle functionality around between power
     54   1.1  riastrad  * domains there's a sizeable amount of indirection required. This file provides
     55   1.1  riastrad  * generic functions to the driver for grabbing and releasing references for
     56   1.1  riastrad  * abstract power domains. It then maps those to the actual power wells
     57   1.1  riastrad  * present for a given platform.
     58   1.1  riastrad  */
     59   1.1  riastrad 
     60  1.10  riastrad #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
     61   1.1  riastrad 
     62  1.10  riastrad #include <linux/sort.h>
     63   1.1  riastrad 
     64  1.10  riastrad #define STACKDEPTH 8
     65   1.1  riastrad 
     66  1.10  riastrad static noinline depot_stack_handle_t __save_depot_stack(void)
     67   1.1  riastrad {
     68  1.10  riastrad 	unsigned long entries[STACKDEPTH];
     69  1.10  riastrad 	unsigned int n;
     70   1.1  riastrad 
     71  1.10  riastrad 	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
     72  1.10  riastrad 	return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
     73   1.1  riastrad }
     74   1.1  riastrad 
     75  1.10  riastrad static void __print_depot_stack(depot_stack_handle_t stack,
     76  1.10  riastrad 				char *buf, int sz, int indent)
     77   1.4  riastrad {
     78  1.10  riastrad 	unsigned long *entries;
     79  1.10  riastrad 	unsigned int nr_entries;
     80   1.4  riastrad 
     81  1.10  riastrad 	nr_entries = stack_depot_fetch(stack, &entries);
     82  1.10  riastrad 	stack_trace_snprint(buf, sz, entries, nr_entries, indent);
     83   1.4  riastrad }
     84   1.4  riastrad 
/*
 * Initialise the wakeref-tracking debug state for @rpm.  Only the lock
 * needs explicit setup; the counters and owner array start zeroed.
 */
static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	spin_lock_init(&rpm->debug.lock);
}
     89   1.1  riastrad 
/*
 * Tear down the wakeref-tracking debug state (NetBSD requires an
 * explicit spin_lock_fini to pair with spin_lock_init).
 */
static void fini_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	spin_lock_fini(&rpm->debug.lock);
}
     94  1.12  riastrad 
/*
 * Record the current call stack against @rpm as the owner of one newly
 * acquired wakeref.  Returns the depot handle used as the wakeref
 * cookie, or -1 if tracking is unavailable (no runtime PM support, or
 * a GFP_NOWAIT allocation failed).
 */
static noinline depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	depot_stack_handle_t stack, *stacks;
	unsigned long flags;

	if (!rpm->available)
		return -1;

	stack = __save_depot_stack();
	if (!stack)
		return -1;

	spin_lock_irqsave(&rpm->debug.lock, flags);

	/* First wakeref after idle: remember where it was taken. */
	if (!rpm->debug.count)
		rpm->debug.last_acquire = stack;

	/* Grow the owner array by one slot; must not sleep under the lock. */
	stacks = krealloc(rpm->debug.owners,
			  (rpm->debug.count + 1) * sizeof(*stacks),
			  GFP_NOWAIT | __GFP_NOWARN);
	if (stacks) {
		stacks[rpm->debug.count++] = stack;
		rpm->debug.owners = stacks;
	} else {
		/* Could not record an owner; hand back the untracked cookie. */
		stack = -1;
	}

	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	return stack;
}
    127   1.1  riastrad 
/*
 * Remove @stack (the cookie returned by track_intel_runtime_pm_wakeref())
 * from @rpm's owner list.  If the cookie is not found — an unbalanced
 * put — warn and dump both the offending stack and the last-release
 * stack to aid debugging.
 */
static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
					     depot_stack_handle_t stack)
{
	unsigned long flags, n;
	bool found = false;

	/* -1 means the acquire was never tracked; nothing to undo. */
	if (unlikely(stack == -1))
		return;

	spin_lock_irqsave(&rpm->debug.lock, flags);
	for (n = rpm->debug.count; n--; ) {
		if (rpm->debug.owners[n] == stack) {
			/* Close the gap left by the removed entry. */
			memmove(rpm->debug.owners + n,
				rpm->debug.owners + n + 1,
				(--rpm->debug.count - n) * sizeof(stack));
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	if (WARN(!found,
		 "Unmatched wakeref (tracking %lu), count %u\n",
		 rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
		char *buf;

		/* Best-effort diagnostics only; give up silently on OOM. */
		buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
		if (!buf)
			return;

		__print_depot_stack(stack, buf, PAGE_SIZE, 2);
		DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);

		stack = READ_ONCE(rpm->debug.last_release);
		if (stack) {
			__print_depot_stack(stack, buf, PAGE_SIZE, 2);
			DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
		}

		kfree(buf);
	}
}
    170   1.1  riastrad 
    171  1.10  riastrad static int cmphandle(const void *_a, const void *_b)
    172   1.1  riastrad {
    173  1.10  riastrad 	const depot_stack_handle_t * const a = _a, * const b = _b;
    174   1.1  riastrad 
    175  1.10  riastrad 	if (*a < *b)
    176  1.10  riastrad 		return -1;
    177  1.10  riastrad 	else if (*a > *b)
    178  1.10  riastrad 		return 1;
    179  1.10  riastrad 	else
    180  1.10  riastrad 		return 0;
    181   1.1  riastrad }
    182   1.1  riastrad 
/*
 * Pretty-print the wakeref debug snapshot @dbg to @p: last acquire and
 * release stacks, the total count, and each distinct owner stack with a
 * repeat count.  Sorts @dbg->owners in place so duplicates coalesce.
 * Silently does nothing if the scratch buffer cannot be allocated.
 */
static void
__print_intel_runtime_pm_wakeref(struct drm_printer *p,
				 const struct intel_runtime_pm_debug *dbg)
{
	unsigned long i;
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
	if (!buf)
		return;

	if (dbg->last_acquire) {
		__print_depot_stack(dbg->last_acquire, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref last acquired:\n%s", buf);
	}

	if (dbg->last_release) {
		__print_depot_stack(dbg->last_release, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref last released:\n%s", buf);
	}

	drm_printf(p, "Wakeref count: %lu\n", dbg->count);

	/* Sort so identical owner stacks become adjacent runs. */
	sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);

	for (i = 0; i < dbg->count; i++) {
		depot_stack_handle_t stack = dbg->owners[i];
		unsigned long rep;

		/* Collapse a run of identical stacks into one line with a count. */
		rep = 1;
		while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
			rep++, i++;
		__print_depot_stack(stack, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
	}

	kfree(buf);
}
    221   1.1  riastrad 
/*
 * Move the whole tracking state from @debug into @saved (caller owns
 * the owner array afterwards) and reset @debug to empty, recording the
 * current stack as the point of last release.  Caller holds debug.lock.
 */
static noinline void
__untrack_all_wakerefs(struct intel_runtime_pm_debug *debug,
		       struct intel_runtime_pm_debug *saved)
{
	*saved = *debug;

	debug->owners = NULL;
	debug->count = 0;
	debug->last_release = __save_depot_stack();
}
    232   1.1  riastrad 
/*
 * Print any leaked-wakeref stacks captured in @debug, then free the
 * owner array.  Must be called outside debug.lock since printing may
 * be slow.
 */
static void
dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
{
	if (debug->count) {
		struct drm_printer p = drm_debug_printer("i915");

		__print_intel_runtime_pm_wakeref(&p, debug);
	}

	kfree(debug->owners);
}
    244   1.1  riastrad 
/*
 * Drop one wakeref; if that was the last one, atomically take the
 * debug lock, detach the tracking state, and dump any stacks that
 * were still outstanding.  Returns early when other refs remain.
 */
static noinline void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
	struct intel_runtime_pm_debug dbg = {};
	unsigned long flags;

	/* Only the final decrement proceeds past this point, lock held. */
	if (!atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
					 &rpm->debug.lock,
					 flags))
		return;

	__untrack_all_wakerefs(&rpm->debug, &dbg);
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	/* Report leaks outside the lock. */
	dump_and_free_wakeref_tracking(&dbg);
}
    261   1.1  riastrad 
/*
 * Unconditionally detach and dump all tracked wakerefs for @rpm, used
 * at driver teardown regardless of the current wakeref count.
 */
static noinline void
untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
	struct intel_runtime_pm_debug dbg = {};
	unsigned long flags;

	spin_lock_irqsave(&rpm->debug.lock, flags);
	__untrack_all_wakerefs(&rpm->debug, &dbg);
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	/* Report leaks outside the lock. */
	dump_and_free_wakeref_tracking(&dbg);
}
    274   1.1  riastrad 
/*
 * Dump the live wakeref debug state of @rpm to @p.  The owner array is
 * snapshotted into a local copy under the lock; if the count grew past
 * the local allocation between iterations, the buffer is enlarged
 * (without the lock held) and the snapshot retried.
 */
void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
				    struct drm_printer *p)
{
	struct intel_runtime_pm_debug dbg = {};

	do {
		unsigned long alloc = dbg.count;
		depot_stack_handle_t *s;

		spin_lock_irq(&rpm->debug.lock);
		dbg.count = rpm->debug.count;
		/* Copy only if the snapshot fits the local allocation. */
		if (dbg.count <= alloc) {
			memcpy(dbg.owners,
			       rpm->debug.owners,
			       dbg.count * sizeof(*s));
		}
		dbg.last_acquire = rpm->debug.last_acquire;
		dbg.last_release = rpm->debug.last_release;
		spin_unlock_irq(&rpm->debug.lock);
		if (dbg.count <= alloc)
			break;

		/* Grow the local buffer outside the lock and retry. */
		s = krealloc(dbg.owners,
			     dbg.count * sizeof(*s),
			     GFP_NOWAIT | __GFP_NOWARN);
		if (!s)
			goto out;

		dbg.owners = s;
	} while (1);

	__print_intel_runtime_pm_wakeref(p, &dbg);

out:
	kfree(dbg.owners);
}
    311   1.1  riastrad 
    312  1.10  riastrad #else
    313   1.1  riastrad 
/* No-op stub: wakeref tracking disabled without DEBUG_RUNTIME_PM. */
static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
}
    317   1.1  riastrad 
/* No-op stub: wakeref tracking disabled without DEBUG_RUNTIME_PM. */
static void fini_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
}
    321  1.12  riastrad 
/* Stub: always hand out the "untracked" cookie when tracking is off. */
static depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	return -1;
}
    327   1.1  riastrad 
/* No-op stub: nothing was tracked, so nothing to untrack. */
static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
					     intel_wakeref_t wref)
{
}
    332   1.1  riastrad 
/* Stub: just drop the reference; no leak checking without tracking. */
static void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
	atomic_dec(&rpm->wakeref_count);
}
    338   1.1  riastrad 
/* No-op stub: no tracked wakerefs to flush when tracking is off. */
static void
untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
}
    343   1.1  riastrad 
    344  1.10  riastrad #endif
    345   1.1  riastrad 
    346  1.10  riastrad static void
    347  1.10  riastrad intel_runtime_pm_acquire(struct intel_runtime_pm *rpm, bool wakelock)
    348   1.1  riastrad {
    349  1.10  riastrad 	if (wakelock) {
    350  1.10  riastrad 		atomic_add(1 + INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
    351  1.10  riastrad 		assert_rpm_wakelock_held(rpm);
    352  1.10  riastrad 	} else {
    353  1.10  riastrad 		atomic_inc(&rpm->wakeref_count);
    354  1.10  riastrad 		assert_rpm_raw_wakeref_held(rpm);
    355   1.1  riastrad 	}
    356   1.1  riastrad }
    357   1.1  riastrad 
    358  1.10  riastrad static void
    359  1.10  riastrad intel_runtime_pm_release(struct intel_runtime_pm *rpm, int wakelock)
    360   1.1  riastrad {
    361  1.10  riastrad 	if (wakelock) {
    362  1.10  riastrad 		assert_rpm_wakelock_held(rpm);
    363  1.10  riastrad 		atomic_sub(INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
    364   1.1  riastrad 	} else {
    365  1.10  riastrad 		assert_rpm_raw_wakeref_held(rpm);
    366   1.1  riastrad 	}
    367   1.1  riastrad 
    368  1.10  riastrad 	__intel_wakeref_dec_and_check_tracking(rpm);
    369   1.1  riastrad }
    370   1.1  riastrad 
/*
 * Common implementation for the get() variants: take a device-level
 * runtime PM reference (powering the device up synchronously), account
 * it, and return the tracking cookie.
 */
static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
					      bool wakelock)
{
	int ret;

	ret = pm_runtime_get_sync(rpm->kdev);
	WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);

	intel_runtime_pm_acquire(rpm, wakelock);

	return track_intel_runtime_pm_wakeref(rpm);
}
    383   1.1  riastrad 
/**
 * intel_runtime_pm_get_raw - grab a raw runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for
 * asynchronous PM management from display code) and ensures that it is powered
 * up. Raw references are not considered during wakelock assert checks.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put_raw() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put_raw(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */
intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get(rpm, false);
}
    405   1.1  riastrad 
/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */
intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get(rpm, true);
}
    422   1.1  riastrad 
/**
 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference if the device is
 * already in use and ensures that it is powered up. It is illegal to try
 * and access the HW should intel_runtime_pm_get_if_in_use() report failure.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
{
	if (IS_ENABLED(CONFIG_PM)) {
		/*
		 * In cases runtime PM is disabled by the RPM core and we get
		 * an -EINVAL return value we are not supposed to call this
		 * function, since the power state is undefined. This applies
		 * atm to the late/early system suspend/resume handlers.
		 */
		if (pm_runtime_get_if_in_use(rpm->kdev) <= 0)
			return 0;
	}

	/* Without CONFIG_PM the device is always considered in use. */
	intel_runtime_pm_acquire(rpm, true);

	return track_intel_runtime_pm_wakeref(rpm);
}
    454   1.1  riastrad 
/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on.  Therefore it is only valid to call this functions from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks. That pretty much means only the system
 * suspend/resume code where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm)
{
	/* The caller must already hold a wakeref; see above. */
	assert_rpm_wakelock_held(rpm);
	pm_runtime_get_noresume(rpm->kdev);

	intel_runtime_pm_acquire(rpm, true);

	return track_intel_runtime_pm_wakeref(rpm);
}
    483  1.10  riastrad 
/*
 * Common implementation for the put() variants: forget the tracking
 * cookie, undo the local accounting, then hand the device-level
 * reference back to the RPM core with autosuspend semantics.
 */
static void __intel_runtime_pm_put(struct intel_runtime_pm *rpm,
				   intel_wakeref_t wref,
				   bool wakelock)
{
	struct device *kdev = rpm->kdev;

	untrack_intel_runtime_pm_wakeref(rpm, wref);

	intel_runtime_pm_release(rpm, wakelock);

	/* Restart the autosuspend idle timer before dropping the ref. */
	pm_runtime_mark_last_busy(kdev);
	pm_runtime_put_autosuspend(kdev);
}
    497   1.1  riastrad 
/**
 * intel_runtime_pm_put_raw - release a raw runtime pm reference
 * @rpm: the intel_runtime_pm structure
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get_raw() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void
intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	__intel_runtime_pm_put(rpm, wref, false);
}
    512   1.1  riastrad 
/**
 * intel_runtime_pm_put_unchecked - release an unchecked runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 *
 * This function exists only for historical reasons and should be avoided in
 * new code, as the correctness of its use cannot be checked. Always use
 * intel_runtime_pm_put() instead.
 */
void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm)
{
	/* -1 is the "untracked" cookie, so no owner-list lookup happens. */
	__intel_runtime_pm_put(rpm, -1, true);
}
    529  1.10  riastrad 
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	__intel_runtime_pm_put(rpm, wref, true);
}
#endif
    545   1.1  riastrad 
/**
 * intel_runtime_pm_enable - enable runtime pm
 * @rpm: the intel_runtime_pm structure
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does currently not enable runtime pm for the
 * subordinate display power domains. That is done by
 * intel_power_domains_enable().
 */
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
{
	struct device *kdev = rpm->kdev;

	/*
	 * Disable the system suspend direct complete optimization, which can
	 * leave the device suspended skipping the driver's suspend handlers
	 * if the device was already runtime suspended. This is needed due to
	 * the difference in our runtime and system suspend sequence and
	 * because the HDA driver may require us to enable the audio power
	 * domain during system suspend.
	 */
	dev_pm_set_driver_flags(kdev, DPM_FLAG_NEVER_SKIP);

	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
	pm_runtime_mark_last_busy(kdev);

	/*
	 * Take a permanent reference to disable the RPM functionality and drop
	 * it only when unloading the driver. Use the low level get/put helpers,
	 * so the driver's own RPM reference tracking asserts also work on
	 * platforms without RPM support.
	 */
	if (!rpm->available) {
		int ret;

		pm_runtime_dont_use_autosuspend(kdev);
		ret = pm_runtime_get_sync(kdev);
		WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
	} else {
		pm_runtime_use_autosuspend(kdev);
	}

	/*
	 * The core calls the driver load handler with an RPM reference held.
	 * We drop that here and will reacquire it during unloading in
	 * intel_power_domains_fini().
	 */
	pm_runtime_put_autosuspend(kdev);
}
    596  1.10  riastrad 
/*
 * Disable runtime pm during driver unload: reacquire the reference
 * dropped in intel_runtime_pm_enable() so ownership returns to the RPM
 * core, and drop the permanent no-RPM reference taken there if the
 * platform lacks runtime PM support.
 */
void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
{
	struct device *kdev = rpm->kdev;

	/* Transfer rpm ownership back to core */
	WARN(pm_runtime_get_sync(kdev) < 0,
	     "Failed to pass rpm ownership back to core\n");

	pm_runtime_dont_use_autosuspend(kdev);

	if (!rpm->available)
		pm_runtime_put(kdev);
}
    610  1.10  riastrad 
/*
 * Final cleanup of the wakeref machinery at driver release: warn about
 * any wakerefs still outstanding (split into raw and wakelock counts),
 * dump their tracked owner stacks, and tear down the debug state.
 */
void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
{
	int count = atomic_read(&rpm->wakeref_count);

	WARN(count,
	     "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
	     intel_rpm_raw_wakeref_count(count),
	     intel_rpm_wakelock_count(count));

	untrack_all_intel_runtime_pm_wakerefs(rpm);
	fini_intel_runtime_pm_wakeref(rpm);
}
    623  1.10  riastrad 
/*
 * Early init of the runtime PM bookkeeping: cache the PCI device's
 * generic device handle, record whether the platform supports runtime
 * PM at all, and set up wakeref tracking.
 */
void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 =
			container_of(rpm, struct drm_i915_private, runtime_pm);
	struct pci_dev *pdev = i915->drm.pdev;
	struct device *kdev = pci_dev_dev(pdev);

	rpm->kdev = kdev;
	rpm->available = HAS_RUNTIME_PM(i915);

	init_intel_runtime_pm_wakeref(rpm);
}
    636