Home | History | Annotate | Line # | Download | only in uc
      1 /*	$NetBSD: intel_guc.h,v 1.2 2021/12/18 23:45:31 riastradh Exp $	*/
      2 
      3 /* SPDX-License-Identifier: MIT */
      4 /*
 * Copyright © 2014-2019 Intel Corporation
      6  */
      7 
      8 #ifndef _INTEL_GUC_H_
      9 #define _INTEL_GUC_H_
     10 
     11 #include "intel_uncore.h"
     12 #include "intel_guc_fw.h"
     13 #include "intel_guc_fwif.h"
     14 #include "intel_guc_ct.h"
     15 #include "intel_guc_log.h"
     16 #include "intel_guc_reg.h"
     17 #include "intel_uc_fw.h"
     18 #include "i915_utils.h"
     19 #include "i915_vma.h"
     20 
     21 struct __guc_ads_blob;
     22 
     23 /*
     24  * Top level structure of GuC. It handles firmware loading and manages client
     25  * pool. intel_guc owns a intel_guc_client to replace the legacy ExecList
     26  * submission.
     27  */
struct intel_guc {
	struct intel_uc_fw fw;		/* GuC firmware blob and load state */
	struct intel_guc_log log;	/* firmware log buffer management */
	struct intel_guc_ct ct;		/* command transport (CTB) channels */

	/* intel_guc_recv interrupt related state */
	spinlock_t irq_lock;		/* protects msg_enabled_mask */
	unsigned int msg_enabled_mask;	/* GuC-to-host message types we process */

	/*
	 * Interrupt management vtable; implementations vary per platform
	 * (selected during init).
	 */
	struct {
		bool enabled;
		void (*reset)(struct intel_guc *guc);
		void (*enable)(struct intel_guc *guc);
		void (*disable)(struct intel_guc *guc);
	} interrupts;

	/* true when GuC command submission may be used on this platform */
	bool submission_supported;

	/* Additional Data Struct (ADS) passed to the firmware */
	struct i915_vma *ads_vma;
	struct __guc_ads_blob *ads_blob;

	/* submission stage descriptor pool shared with the firmware */
	struct i915_vma *stage_desc_pool;
	void *stage_desc_pool_vaddr;

	/* submission work queue; wq_lock serializes writes into it */
	struct i915_vma *workqueue;
	void *workqueue_vaddr;
	spinlock_t wq_lock;

	/* process descriptor shared with the firmware */
	struct i915_vma *proc_desc;
	void *proc_desc_vaddr;

	/* Control params for fw initialization */
	u32 params[GUC_CTL_MAX_DWORDS];

	/* GuC's FW specific registers used in MMIO send */
	struct {
		u32 base;			/* first register in the MMIO send range */
		unsigned int count;		/* number of registers in the range */
		enum forcewake_domains fw_domains; /* forcewake needed to access them */
	} send_regs;

	/* register used to send interrupts to the GuC FW */
	i915_reg_t notify_reg;

	/* Store msg (e.g. log flush) that we see while CTBs are disabled */
	u32 mmio_msg;

	/* To serialize the intel_guc_send actions */
	struct mutex send_mutex;
};
     78 
     79 static
     80 inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
     81 {
     82 	return intel_guc_ct_send(&guc->ct, action, len, NULL, 0);
     83 }
     84 
     85 static inline int
     86 intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len,
     87 			   u32 *response_buf, u32 response_buf_size)
     88 {
     89 	return intel_guc_ct_send(&guc->ct, action, len,
     90 				 response_buf, response_buf_size);
     91 }
     92 
     93 static inline void intel_guc_to_host_event_handler(struct intel_guc *guc)
     94 {
     95 	intel_guc_ct_event_handler(&guc->ct);
     96 }
     97 
     98 /* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
     99 #define GUC_GGTT_TOP	0xFEE00000
    100 
    101 /**
    102  * intel_guc_ggtt_offset() - Get and validate the GGTT offset of @vma
    103  * @guc: intel_guc structure.
    104  * @vma: i915 graphics virtual memory area.
    105  *
    106  * GuC does not allow any gfx GGTT address that falls into range
    107  * [0, ggtt.pin_bias), which is reserved for Boot ROM, SRAM and WOPCM.
    108  * Currently, in order to exclude [0, ggtt.pin_bias) address space from
    109  * GGTT, all gfx objects used by GuC are allocated with intel_guc_allocate_vma()
    110  * and pinned with PIN_OFFSET_BIAS along with the value of ggtt.pin_bias.
    111  *
    112  * Return: GGTT offset of the @vma.
    113  */
    114 static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
    115 					struct i915_vma *vma)
    116 {
    117 	u32 offset = i915_ggtt_offset(vma);
    118 
    119 	GEM_BUG_ON(offset < i915_ggtt_pin_bias(vma));
    120 	GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));
    121 
    122 	return offset;
    123 }
    124 
    125 void intel_guc_init_early(struct intel_guc *guc);
    126 void intel_guc_init_send_regs(struct intel_guc *guc);
    127 void intel_guc_write_params(struct intel_guc *guc);
    128 int intel_guc_init(struct intel_guc *guc);
    129 void intel_guc_fini(struct intel_guc *guc);
    130 void intel_guc_notify(struct intel_guc *guc);
    131 int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
    132 			u32 *response_buf, u32 response_buf_size);
    133 int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
    134 				       const u32 *payload, u32 len);
    135 int intel_guc_sample_forcewake(struct intel_guc *guc);
    136 int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
    137 int intel_guc_suspend(struct intel_guc *guc);
    138 int intel_guc_resume(struct intel_guc *guc);
    139 struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
    140 int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
    141 				   struct i915_vma **out_vma, void **out_vaddr);
    142 
    143 static inline bool intel_guc_is_supported(struct intel_guc *guc)
    144 {
    145 	return intel_uc_fw_is_supported(&guc->fw);
    146 }
    147 
    148 static inline bool intel_guc_is_enabled(struct intel_guc *guc)
    149 {
    150 	return intel_uc_fw_is_enabled(&guc->fw);
    151 }
    152 
    153 static inline bool intel_guc_is_running(struct intel_guc *guc)
    154 {
    155 	return intel_uc_fw_is_running(&guc->fw);
    156 }
    157 
/*
 * Reset software state after the GPU (and thus the GuC) has been reset:
 * mark the firmware as no longer loaded and drop any MMIO message stashed
 * while the CTBs were down. Always returns 0.
 */
static inline int intel_guc_sanitize(struct intel_guc *guc)
{
	intel_uc_fw_sanitize(&guc->fw);
	guc->mmio_msg = 0;

	return 0;
}
    165 
    166 static inline bool intel_guc_is_submission_supported(struct intel_guc *guc)
    167 {
    168 	return guc->submission_supported;
    169 }
    170 
/*
 * Enable processing of the GuC-to-host message types in @mask.
 * irq_lock serializes against the interrupt-time reader of
 * msg_enabled_mask.
 */
static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask |= mask;
	spin_unlock_irq(&guc->irq_lock);
}
    177 
/*
 * Disable processing of the GuC-to-host message types in @mask.
 * Counterpart to intel_guc_enable_msg(); same locking rules.
 */
static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask &= ~mask;
	spin_unlock_irq(&guc->irq_lock);
}
    184 
    185 int intel_guc_reset_engine(struct intel_guc *guc,
    186 			   struct intel_engine_cs *engine);
    187 
    188 #endif
    189