/*	$NetBSD: radeon_kv_smc.c,v 1.1 2018/08/27 14:38:20 riastradh Exp $	*/

/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: radeon_kv_smc.c,v 1.1 2018/08/27 14:38:20 riastradh Exp $");

#include "drmP.h"
#include "radeon.h"
#include "cikd.h"
#include "kv_dpm.h"

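/*
 * Post a message ID to the SMC message register and busy-wait (up to
 * rdev->usec_timeout microseconds) for a response in SMC_RESP_0.  A
 * response of 1 means success; the 0xFF and 0xFE error responses are
 * reported as -EINVAL.
 */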
int kv_notify_message_to_smu(struct radeon_device *rdev, u32 id)
{
	u32 i;
	u32 tmp = 0;

	WREG32(SMC_MESSAGE_0, id & SMC_MSG_MASK);

	for (i = 0; i < rdev->usec_timeout; i++) {
		if ((RREG32(SMC_RESP_0) & SMC_RESP_MASK) != 0)
			break;
		udelay(1);
	}
	tmp = RREG32(SMC_RESP_0) & SMC_RESP_MASK;

	if (tmp != 1) {
		if (tmp == 0xFF)
			return -EINVAL;
		else if (tmp == 0xFE)
			return -EINVAL;
	}

	return 0;
}

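/*
 * Ask the SMC which SCLK DPM levels are currently enabled; on success
 * the mask is read back from the SMC message argument register.
 */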
int kv_dpm_get_enable_mask(struct radeon_device *rdev, u32 *enable_mask)
{
	int ret;

	ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_SCLKDPM_GetEnabledMask);

	if (ret == 0)
		*enable_mask = RREG32_SMC(SMC_SYSCON_MSG_ARG_0);

	return ret;
}

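/*
 * Write a 32-bit argument into SMC_MSG_ARG_0, then issue the message
 * itself via kv_notify_message_to_smu().
 */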
int kv_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
				      PPSMC_Msg msg, u32 parameter)
{

	WREG32(SMC_MSG_ARG_0, parameter);

	return kv_notify_message_to_smu(rdev, msg);
}

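/*
 * Select a dword-aligned SMC SRAM address through the indirect index
 * register, with auto-increment disabled, after bounds-checking it
 * against the caller-supplied limit.
 */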
static int kv_set_smc_sram_address(struct radeon_device *rdev,
				   u32 smc_address, u32 limit)
{
	if (smc_address & 3)
		return -EINVAL;
	if ((smc_address + 3) > limit)
		return -EINVAL;

	WREG32(SMC_IND_INDEX_0, smc_address);
	WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);

	return 0;
}

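/*
 * Read one dword from SMC SRAM at the given dword-aligned address.
 */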
int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
			   u32 *value, u32 limit)
{
	int ret;

	ret = kv_set_smc_sram_address(rdev, smc_address, limit);
	if (ret)
		return ret;

	*value = RREG32(SMC_IND_DATA_0);
	return 0;
}

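/*
 * Enable or disable dynamic power management in the SMC firmware.
 */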
int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Enable);
	else
		return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Disable);
}

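/*
 * Enable or disable BAPM in the SMC firmware.
 */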
int kv_smc_bapm_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		return kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableBAPM);
	else
		return kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableBAPM);
}

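/*
 * Copy a byte buffer into SMC SRAM.  The SMC address space is big-endian
 * and only dword-accessible, so an unaligned head and a partial tail are
 * each handled with a read-modify-write that preserves the neighbouring
 * bytes already in SRAM; everything in between is written four bytes at
 * a time.
 */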
int kv_copy_bytes_to_smc(struct radeon_device *rdev,
			 u32 smc_start_address,
			 const u8 *src, u32 byte_count, u32 limit)
{
	int ret;
	u32 data, original_data, addr, extra_shift, t_byte, count, mask;

	if ((smc_start_address + byte_count) > limit)
		return -EINVAL;

	addr = smc_start_address;
	t_byte = addr & 3;

	/* RMW for the initial bytes */
	if (t_byte != 0) {
		addr -= t_byte;

		ret = kv_set_smc_sram_address(rdev, addr, limit);
		if (ret)
			return ret;

		original_data = RREG32(SMC_IND_DATA_0);

		data = 0;
		mask = 0;
		count = 4;
		while (count > 0) {
			if (t_byte > 0) {
				mask = (mask << 8) | 0xff;
				t_byte--;
			} else if (byte_count > 0) {
				data = (data << 8) + *src++;
				byte_count--;
				mask <<= 8;
			} else {
				data <<= 8;
				mask = (mask << 8) | 0xff;
			}
			count--;
		}

		data |= original_data & mask;

		ret = kv_set_smc_sram_address(rdev, addr, limit);
		if (ret)
			return ret;

		WREG32(SMC_IND_DATA_0, data);

		addr += 4;
	}

	while (byte_count >= 4) {
		/* SMC address space is BE */
		data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];

		ret = kv_set_smc_sram_address(rdev, addr, limit);
		if (ret)
			return ret;

		WREG32(SMC_IND_DATA_0, data);

		src += 4;
		byte_count -= 4;
		addr += 4;
	}

	/* RMW for the final bytes */
	if (byte_count > 0) {
		data = 0;

		ret = kv_set_smc_sram_address(rdev, addr, limit);
		if (ret)
			return ret;

		original_data = RREG32(SMC_IND_DATA_0);

		extra_shift = 8 * (4 - byte_count);

		while (byte_count > 0) {
			/* SMC address space is BE */
			data = (data << 8) + *src++;
			byte_count--;
		}

		data <<= extra_shift;

		data |= (original_data & ~((~0UL) << extra_shift));

		ret = kv_set_smc_sram_address(rdev, addr, limit);
		if (ret)
			return ret;

		WREG32(SMC_IND_DATA_0, data);
	}
	return 0;
}