/*	$NetBSD: intel_gt_types.h,v 1.2 2021/12/18 23:45:30 riastradh Exp $	*/

/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_GT_TYPES__
#define __INTEL_GT_TYPES__

#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include "uc/intel_uc.h"

#include "i915_vma.h"
#include "intel_engine_types.h"
#include "intel_llc_types.h"
#include "intel_reset_types.h"
#include "intel_rc6_types.h"
#include "intel_rps_types.h"
#include "intel_wakeref.h"

struct drm_i915_private;
struct i915_ggtt;
struct intel_engine_cs;
struct intel_uncore;

struct intel_gt {
	struct drm_i915_private *i915;
	struct intel_uncore *uncore;
	struct i915_ggtt *ggtt;

	struct intel_uc uc;

	struct intel_gt_timelines {
		spinlock_t lock; /* protects active_list */
		struct list_head active_list;

		/* Pack multiple timelines' seqnos into the same page */
		spinlock_t hwsp_lock;
		struct list_head hwsp_free_list;
	} timelines;
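
	/*
	 * Illustrative sketch (not compiled) of how hwsp_free_list can
	 * pack many timelines' seqnos into one page, each claiming a
	 * cacheline-sized slot.  The names intel_timeline_hwsp,
	 * free_link, free_bitmap and hwsp_alloc_slot() are assumptions
	 * for illustration only; the real allocator lives in
	 * intel_timeline.c.
	 *
	 *	spin_lock_irq(&gt->timelines.hwsp_lock);
	 *	hwsp = list_first_entry_or_null(&gt->timelines.hwsp_free_list,
	 *					struct intel_timeline_hwsp,
	 *					free_link);
	 *	if (hwsp) {
	 *		offset = hwsp_alloc_slot(hwsp); // claim one cacheline
	 *		if (!hwsp->free_bitmap)		// page fully packed
	 *			list_del(&hwsp->free_link);
	 *	}
	 *	spin_unlock_irq(&gt->timelines.hwsp_lock);
	 */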

	struct intel_gt_requests {
		/**
		 * We leave the user IRQ off as much as possible,
		 * but this means that requests will finish and never
		 * be retired once the system goes idle. Set a timer to
		 * fire periodically while the ring is running. When it
		 * fires, go retire requests.
		 */
		struct delayed_work retire_work;
	} requests;
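
	/*
	 * A minimal sketch, assuming only the standard kernel workqueue
	 * API, of how retire_work can re-arm itself so requests are
	 * retired roughly once a second while the GT stays busy.  The
	 * driver's real handler lives in intel_gt_requests.c; this body
	 * is illustrative, not a copy.
	 *
	 *	static void retire_work_handler(struct work_struct *work)
	 *	{
	 *		struct intel_gt *gt =
	 *			container_of(work, typeof(*gt),
	 *				     requests.retire_work.work);
	 *
	 *		// Re-queue first so a slow retirement pass
	 *		// cannot stall the next tick.
	 *		schedule_delayed_work(&gt->requests.retire_work,
	 *				      round_jiffies_up_relative(HZ));
	 *		intel_gt_retire_requests(gt);
	 *	}
	 */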

	struct intel_wakeref wakeref;
	atomic_t user_wakeref;

	struct list_head closed_vma;
	spinlock_t closed_lock; /* guards the list of closed_vma */

	struct intel_reset reset;

	/**
	 * Is the GPU currently considered idle, or busy executing
	 * userspace requests? Whilst idle, we allow runtime power
	 * management to power down the hardware and display clocks.
	 * In order to reduce the effect on performance, there
	 * is a slight delay before we do so.
	 */
	intel_wakeref_t awake;
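
	/*
	 * A hedged usage sketch: hold a wakeref around any hardware
	 * access so runtime PM cannot power the GT down mid-operation.
	 * intel_gt_pm_get()/intel_gt_pm_put() are the driver's wrappers
	 * (see intel_gt_pm.h); poke_hw() is a hypothetical stand-in.
	 *
	 *	intel_gt_pm_get(gt);	// pins @awake, waking the GT
	 *	poke_hw(gt);		// safe: runtime suspend held off
	 *	intel_gt_pm_put(gt);	// on final put, idling may begin
	 */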

	struct intel_llc llc;
	struct intel_rc6 rc6;
	struct intel_rps rps;

	ktime_t last_init_time;

	struct i915_vma *scratch;

	spinlock_t irq_lock;
	u32 gt_imr;
	u32 pm_ier;
	u32 pm_imr;

	u32 pm_guc_events;

	struct intel_engine_cs *engine[I915_NUM_ENGINES];
	struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
					    [MAX_ENGINE_INSTANCE + 1];
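
	/*
	 * Engines are addressable two ways: by global index (engine[])
	 * or by (class, instance) pair (engine_class[][]), e.g. when
	 * decoding hardware interrupts.  A minimal bounds-checked lookup
	 * sketch; the driver provides its own helpers and a
	 * for_each_engine() iterator, so this is illustrative only:
	 *
	 *	static inline struct intel_engine_cs *
	 *	gt_engine_by_class(struct intel_gt *gt, u8 class, u8 inst)
	 *	{
	 *		if (class > MAX_ENGINE_CLASS ||
	 *		    inst > MAX_ENGINE_INSTANCE)
	 *			return NULL;
	 *		return gt->engine_class[class][inst];
	 *	}
	 */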

	/*
	 * Default address space (either GGTT or ppGTT depending on arch).
	 *
	 * Reserved for exclusive use by the kernel.
	 */
	struct i915_address_space *vm;
};

enum intel_gt_scratch_field {
	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_DEFAULT = 0,

	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH = 128,

	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256,

	/* 6 * 8 bytes */
	INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR = 2048,

	/* 4 bytes */
	INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1 = 2096,
};
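
/*
 * Each enumerator above is a byte offset into the single scratch page
 * pointed to by intel_gt.scratch, and the size comments give what each
 * field occupies at that offset.  A sketch of how a field becomes a
 * GGTT address usable from a batch buffer, modelled on the driver's
 * intel_gt_scratch_offset() helper in intel_gt.h:
 *
 *	static inline u32
 *	gt_scratch_offset(const struct intel_gt *gt,
 *			  enum intel_gt_scratch_field field)
 *	{
 *		return i915_ggtt_offset(gt->scratch) + field;
 *	}
 */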

#endif /* __INTEL_GT_TYPES__ */