/*	$NetBSD: amdgpu_atombios.c,v 1.3.6.2 2019/06/10 22:07:57 christos Exp $	*/

/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_atombios.c,v 1.3.6.2 2019/06/10 22:07:57 christos Exp $");

#include <asm/byteorder.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"

#include "atom.h"
#include "atom-bits.h"
#include "atombios_encoders.h"
#include "bif/bif_4_1_d.h"

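/*
 * Hook for board-specific fixups to an i2c GPIO assignment before it
 * is used; this version applies no quirks and leaves the entry as-is.
 */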
static void amdgpu_atombios_lookup_i2c_gpio_quirks(struct amdgpu_device *adev,
						   ATOM_GPIO_I2C_ASSIGMENT *gpio,
						   u8 index)
{

}

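/*
 * Translate an AtomBIOS i2c GPIO assignment into the driver's bus
 * record: register indices and bit masks for the clock and data lines,
 * plus hw_capable/mm_i2c flags.  The record is only marked valid if a
 * clock mask register is present.
 */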
static struct amdgpu_i2c_bus_rec amdgpu_atombios_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio)
{
	struct amdgpu_i2c_bus_rec i2c;

	memset(&i2c, 0, sizeof(struct amdgpu_i2c_bus_rec));

	i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex);
	i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex);
	i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex);
	i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex);
	i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex);
	i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex);
	i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex);
	i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex);
	i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
	i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
	i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
	i2c.en_data_mask = (1 << gpio->ucDataEnShift);
	i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
	i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
	i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
	i2c.a_data_mask = (1 << gpio->ucDataA_Shift);

	if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
		i2c.hw_capable = true;
	else
		i2c.hw_capable = false;

	if (gpio->sucI2cId.ucAccess == 0xa0)
		i2c.mm_i2c = true;
	else
		i2c.mm_i2c = false;

	i2c.i2c_id = gpio->sucI2cId.ucAccess;

	if (i2c.mask_clk_reg)
		i2c.valid = true;
	else
		i2c.valid = false;

	return i2c;
}

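/*
 * Walk the GPIO_I2C_Info data table and return the bus record whose
 * i2c id matches; the record comes back with valid == false when the
 * table is absent or no entry matches.
 */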
struct amdgpu_i2c_bus_rec amdgpu_atombios_lookup_i2c_gpio(struct amdgpu_device *adev,
							  uint8_t id)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	ATOM_GPIO_I2C_ASSIGMENT *gpio;
	struct amdgpu_i2c_bus_rec i2c;
	int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
	struct _ATOM_GPIO_I2C_INFO *i2c_info;
	uint16_t data_offset, size;
	int i, num_indices;

	memset(&i2c, 0, sizeof(struct amdgpu_i2c_bus_rec));
	i2c.valid = false;

	if (amdgpu_atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
		i2c_info = (struct _ATOM_GPIO_I2C_INFO *)((char *)ctx->bios + data_offset);

		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
			sizeof(ATOM_GPIO_I2C_ASSIGMENT);

		gpio = &i2c_info->asGPIO_Info[0];
		for (i = 0; i < num_indices; i++) {

			amdgpu_atombios_lookup_i2c_gpio_quirks(adev, gpio, i);

			if (gpio->sucI2cId.ucAccess == id) {
				i2c = amdgpu_atombios_get_bus_rec_for_i2c_gpio(gpio);
				break;
			}
			gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
				((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
		}
	}

	return i2c;
}

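/*
 * Create a kernel i2c bus for every valid entry in the GPIO_I2C_Info
 * table, named after the entry's i2c id.
 */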
void amdgpu_atombios_i2c_init(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	ATOM_GPIO_I2C_ASSIGMENT *gpio;
	struct amdgpu_i2c_bus_rec i2c;
	int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
	struct _ATOM_GPIO_I2C_INFO *i2c_info;
	uint16_t data_offset, size;
	int i, num_indices;
	char stmp[32];

	if (amdgpu_atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
		i2c_info = (struct _ATOM_GPIO_I2C_INFO *)((char *)ctx->bios + data_offset);

		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
			sizeof(ATOM_GPIO_I2C_ASSIGMENT);

		gpio = &i2c_info->asGPIO_Info[0];
		for (i = 0; i < num_indices; i++) {
			amdgpu_atombios_lookup_i2c_gpio_quirks(adev, gpio, i);

			i2c = amdgpu_atombios_get_bus_rec_for_i2c_gpio(gpio);

			if (i2c.valid) {
				snprintf(stmp, sizeof stmp, "0x%x", i2c.i2c_id);
				adev->i2c_bus[i] = amdgpu_i2c_create(adev->ddev, &i2c, stmp);
			}
			gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
				((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
		}
	}
}

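/*
 * Look up a pin by GPIO id in the GPIO_Pin_LUT data table and return
 * its register index, shift and mask; valid == false on failure.
 */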
struct amdgpu_gpio_rec
amdgpu_atombios_lookup_gpio(struct amdgpu_device *adev,
			    u8 id)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	struct amdgpu_gpio_rec gpio;
	int index = GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT);
	struct _ATOM_GPIO_PIN_LUT *gpio_info;
	ATOM_GPIO_PIN_ASSIGNMENT *pin;
	u16 data_offset, size;
	int i, num_indices;

	memset(&gpio, 0, sizeof(struct amdgpu_gpio_rec));
	gpio.valid = false;

	if (amdgpu_atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
		gpio_info = (struct _ATOM_GPIO_PIN_LUT *)((char *)ctx->bios + data_offset);

		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
			sizeof(ATOM_GPIO_PIN_ASSIGNMENT);

		pin = gpio_info->asGPIO_Pin;
		for (i = 0; i < num_indices; i++) {
			if (id == pin->ucGPIO_ID) {
				gpio.id = pin->ucGPIO_ID;
				gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex);
				gpio.shift = pin->ucGpioPinBitShift;
				gpio.mask = (1 << pin->ucGpioPinBitShift);
				gpio.valid = true;
				break;
			}
			pin = (ATOM_GPIO_PIN_ASSIGNMENT *)
				((u8 *)pin + sizeof(ATOM_GPIO_PIN_ASSIGNMENT));
		}
	}

	return gpio;
}

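/*
 * Map a GPIO pin onto one of the six hot-plug detect (HPD) lines by
 * matching its bit position within the HPD GPIO register.
 */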
static struct amdgpu_hpd
amdgpu_atombios_get_hpd_info_from_gpio(struct amdgpu_device *adev,
				       struct amdgpu_gpio_rec *gpio)
{
	struct amdgpu_hpd hpd;
	u32 reg;

	memset(&hpd, 0, sizeof(struct amdgpu_hpd));

	reg = amdgpu_display_hpd_get_gpio_reg(adev);

	hpd.gpio = *gpio;
	if (gpio->reg == reg) {
		switch(gpio->mask) {
		case (1 << 0):
			hpd.hpd = AMDGPU_HPD_1;
			break;
		case (1 << 8):
			hpd.hpd = AMDGPU_HPD_2;
			break;
		case (1 << 16):
			hpd.hpd = AMDGPU_HPD_3;
			break;
		case (1 << 24):
			hpd.hpd = AMDGPU_HPD_4;
			break;
		case (1 << 26):
			hpd.hpd = AMDGPU_HPD_5;
			break;
		case (1 << 28):
			hpd.hpd = AMDGPU_HPD_6;
			break;
		default:
			hpd.hpd = AMDGPU_HPD_NONE;
			break;
		}
	} else
		hpd.hpd = AMDGPU_HPD_NONE;
	return hpd;
}

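/*
 * Hook for board-specific connector fixups; this version applies none
 * and accepts every connector as parsed from the table.
 */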
static bool amdgpu_atombios_apply_quirks(struct amdgpu_device *adev,
					 uint32_t supported_device,
					 int *connector_type,
					 struct amdgpu_i2c_bus_rec *i2c_bus,
					 uint16_t *line_mux,
					 struct amdgpu_hpd *hpd)
{
	return true;
}

static const int object_connector_convert[] = {
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_DVII,
	DRM_MODE_CONNECTOR_DVII,
	DRM_MODE_CONNECTOR_DVID,
	DRM_MODE_CONNECTOR_DVID,
	DRM_MODE_CONNECTOR_VGA,
	DRM_MODE_CONNECTOR_Composite,
	DRM_MODE_CONNECTOR_SVIDEO,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_9PinDIN,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_HDMIA,
	DRM_MODE_CONNECTOR_HDMIB,
	DRM_MODE_CONNECTOR_LVDS,
	DRM_MODE_CONNECTOR_9PinDIN,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_DisplayPort,
	DRM_MODE_CONNECTOR_eDP,
	DRM_MODE_CONNECTOR_Unknown
};

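/*
 * Parse the Object_Header data table: walk each display path, decode
 * its encoder, router and connector objects (including DDC bus and HPD
 * records) and register the results with the display code.
 */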
bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct atom_context *ctx = mode_info->atom_context;
	int index = GetIndexIntoMasterTable(DATA, Object_Header);
	u16 size, data_offset;
	u8 frev, crev;
	ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
	ATOM_ENCODER_OBJECT_TABLE *enc_obj;
	ATOM_OBJECT_TABLE *router_obj;
	ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
	ATOM_OBJECT_HEADER *obj_header;
	int i, j, k, path_size, device_support;
	int connector_type;
	u16 conn_id, connector_object_id;
	struct amdgpu_i2c_bus_rec ddc_bus;
	struct amdgpu_router router;
	struct amdgpu_gpio_rec gpio;
	struct amdgpu_hpd hpd;

	if (!amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
		return false;

	if (crev < 2)
		return false;

	obj_header = (ATOM_OBJECT_HEADER *) ((char *)ctx->bios + data_offset);
	path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
		((char *)ctx->bios + data_offset +
		 le16_to_cpu(obj_header->usDisplayPathTableOffset));
	con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *)
		((char *)ctx->bios + data_offset +
		 le16_to_cpu(obj_header->usConnectorObjectTableOffset));
	enc_obj = (ATOM_ENCODER_OBJECT_TABLE *)
		((char *)ctx->bios + data_offset +
		 le16_to_cpu(obj_header->usEncoderObjectTableOffset));
	router_obj = (ATOM_OBJECT_TABLE *)
		((char *)ctx->bios + data_offset +
		 le16_to_cpu(obj_header->usRouterObjectTableOffset));
	device_support = le16_to_cpu(obj_header->usDeviceSupport);

	path_size = 0;
	for (i = 0; i < path_obj->ucNumOfDispPath; i++) {
		uint8_t *addr = (uint8_t *) path_obj->asDispPath;
		ATOM_DISPLAY_OBJECT_PATH *path;
		addr += path_size;
		path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
		path_size += le16_to_cpu(path->usSize);

		if (device_support & le16_to_cpu(path->usDeviceTag)) {
			uint8_t con_obj_id, con_obj_num __unused, con_obj_type __unused;

			con_obj_id =
			    (le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK)
			    >> OBJECT_ID_SHIFT;
			con_obj_num =
			    (le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK)
			    >> ENUM_ID_SHIFT;
			con_obj_type =
			    (le16_to_cpu(path->usConnObjectId) &
			     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;

			/* Skip TV/CV support */
			if ((le16_to_cpu(path->usDeviceTag) ==
			     ATOM_DEVICE_TV1_SUPPORT) ||
			    (le16_to_cpu(path->usDeviceTag) ==
			     ATOM_DEVICE_CV_SUPPORT))
				continue;

			if (con_obj_id >= ARRAY_SIZE(object_connector_convert)) {
				DRM_ERROR("invalid con_obj_id %d for device tag 0x%04x\n",
					  con_obj_id, le16_to_cpu(path->usDeviceTag));
				continue;
			}

			connector_type =
				object_connector_convert[con_obj_id];
			connector_object_id = con_obj_id;

			if (connector_type == DRM_MODE_CONNECTOR_Unknown)
				continue;

			router.ddc_valid = false;
			router.cd_valid = false;
			for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
				uint8_t grph_obj_id __unused, grph_obj_num __unused, grph_obj_type __unused;

				grph_obj_id =
				    (le16_to_cpu(path->usGraphicObjIds[j]) &
				     OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
				grph_obj_num =
				    (le16_to_cpu(path->usGraphicObjIds[j]) &
				     ENUM_ID_MASK) >> ENUM_ID_SHIFT;
				grph_obj_type =
				    (le16_to_cpu(path->usGraphicObjIds[j]) &
				     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;

				if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
					for (k = 0; k < enc_obj->ucNumberOfObjects; k++) {
						u16 encoder_obj = le16_to_cpu(enc_obj->asObjects[k].usObjectID);
						if (le16_to_cpu(path->usGraphicObjIds[j]) == encoder_obj) {
							ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
								((char *)ctx->bios + data_offset +
								 le16_to_cpu(enc_obj->asObjects[k].usRecordOffset));
							ATOM_ENCODER_CAP_RECORD *cap_record;
							u16 caps = 0;

							while (record->ucRecordSize > 0 &&
							       record->ucRecordType > 0 &&
							       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
								switch (record->ucRecordType) {
								case ATOM_ENCODER_CAP_RECORD_TYPE:
									cap_record = (ATOM_ENCODER_CAP_RECORD *)
										record;
									caps = le16_to_cpu(cap_record->usEncoderCap);
									break;
								}
								record = (ATOM_COMMON_RECORD_HEADER *)
									((char *)record + record->ucRecordSize);
							}
							amdgpu_display_add_encoder(adev, encoder_obj,
										   le16_to_cpu(path->usDeviceTag),
										   caps);
						}
					}
				} else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
					for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
						u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
						if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) {
							ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
								((char *)ctx->bios + data_offset +
								 le16_to_cpu(router_obj->asObjects[k].usRecordOffset));
							ATOM_I2C_RECORD *i2c_record;
							ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
							ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path;
							ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path;
							ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table =
								(ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
								((char *)ctx->bios + data_offset +
								 le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset));
							u8 *num_dst_objs = (u8 *)
								((u8 *)router_src_dst_table + 1 +
								 (router_src_dst_table->ucNumberOfSrc * 2));
							u16 *dst_objs = (u16 *)(num_dst_objs + 1);
							int enum_id;

							router.router_id = router_obj_id;
							for (enum_id = 0; enum_id < (*num_dst_objs); enum_id++) {
								if (le16_to_cpu(path->usConnObjectId) ==
								    le16_to_cpu(dst_objs[enum_id]))
									break;
							}

							while (record->ucRecordSize > 0 &&
							       record->ucRecordType > 0 &&
							       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
								switch (record->ucRecordType) {
								case ATOM_I2C_RECORD_TYPE:
									i2c_record =
										(ATOM_I2C_RECORD *)record;
									i2c_config =
										(ATOM_I2C_ID_CONFIG_ACCESS *)
										&i2c_record->sucI2cId;
									router.i2c_info =
										amdgpu_atombios_lookup_i2c_gpio(adev,
														i2c_config->ucAccess);
									router.i2c_addr = i2c_record->ucI2CAddr >> 1;
									break;
								case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE:
									ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *)
										record;
									router.ddc_valid = true;
									router.ddc_mux_type = ddc_path->ucMuxType;
									router.ddc_mux_control_pin = ddc_path->ucMuxControlPin;
									router.ddc_mux_state = ddc_path->ucMuxState[enum_id];
									break;
								case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE:
									cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *)
										record;
									router.cd_valid = true;
									router.cd_mux_type = cd_path->ucMuxType;
									router.cd_mux_control_pin = cd_path->ucMuxControlPin;
									router.cd_mux_state = cd_path->ucMuxState[enum_id];
									break;
								}
								record = (ATOM_COMMON_RECORD_HEADER *)
									((char *)record + record->ucRecordSize);
							}
						}
					}
				}
			}

			/* look up gpio for ddc, hpd */
			ddc_bus.valid = false;
			hpd.hpd = AMDGPU_HPD_NONE;
			if ((le16_to_cpu(path->usDeviceTag) &
			     (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
				for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
					if (le16_to_cpu(path->usConnObjectId) ==
					    le16_to_cpu(con_obj->asObjects[j].usObjectID)) {
						ATOM_COMMON_RECORD_HEADER *record =
							(ATOM_COMMON_RECORD_HEADER *)
							((char *)ctx->bios + data_offset +
							 le16_to_cpu(con_obj->asObjects[j].usRecordOffset));
						ATOM_I2C_RECORD *i2c_record;
						ATOM_HPD_INT_RECORD *hpd_record;
						ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;

						while (record->ucRecordSize > 0 &&
						       record->ucRecordType > 0 &&
						       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
							switch (record->ucRecordType) {
							case ATOM_I2C_RECORD_TYPE:
								i2c_record =
									(ATOM_I2C_RECORD *)record;
								i2c_config =
									(ATOM_I2C_ID_CONFIG_ACCESS *)
									&i2c_record->sucI2cId;
								ddc_bus = amdgpu_atombios_lookup_i2c_gpio(adev,
													  i2c_config->ucAccess);
								break;
							case ATOM_HPD_INT_RECORD_TYPE:
								hpd_record =
									(ATOM_HPD_INT_RECORD *)record;
								gpio = amdgpu_atombios_lookup_gpio(adev,
												   hpd_record->ucHPDIntGPIOID);
								hpd = amdgpu_atombios_get_hpd_info_from_gpio(adev, &gpio);
								hpd.plugged_state = hpd_record->ucPlugged_PinState;
								break;
							}
							record = (ATOM_COMMON_RECORD_HEADER *)
								((char *)record + record->ucRecordSize);
						}
						break;
					}
				}
			}

			/* needed for aux chan transactions */
			ddc_bus.hpd = hpd.hpd;

			conn_id = le16_to_cpu(path->usConnObjectId);

			if (!amdgpu_atombios_apply_quirks
			    (adev, le16_to_cpu(path->usDeviceTag), &connector_type,
			     &ddc_bus, &conn_id, &hpd))
				continue;

			amdgpu_display_add_connector(adev,
						     conn_id,
						     le16_to_cpu(path->usDeviceTag),
						     connector_type, &ddc_bus,
						     connector_object_id,
						     &hpd,
						     &router);

		}
	}

	amdgpu_link_encoder_connector(adev->ddev);

	return true;
}

union firmware_info {
	ATOM_FIRMWARE_INFO info;
	ATOM_FIRMWARE_INFO_V1_2 info_12;
	ATOM_FIRMWARE_INFO_V1_3 info_13;
	ATOM_FIRMWARE_INFO_V1_4 info_14;
	ATOM_FIRMWARE_INFO_V2_1 info_21;
	ATOM_FIRMWARE_INFO_V2_2 info_22;
};

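/*
 * Read reference clocks, PLL input/output limits and the default
 * engine, memory and display clocks from the FirmwareInfo table,
 * substituting sane defaults for fields older tables leave at zero.
 */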
int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
	uint8_t frev, crev;
	uint16_t data_offset;
	int ret = -EINVAL;

	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		int i;
		struct amdgpu_pll *ppll = &adev->clock.ppll[0];
		struct amdgpu_pll *spll = &adev->clock.spll;
		struct amdgpu_pll *mpll = &adev->clock.mpll;
		union firmware_info *firmware_info =
			(union firmware_info *)((char *)mode_info->atom_context->bios +
						data_offset);
		/* pixel clocks */
		ppll->reference_freq =
			le16_to_cpu(firmware_info->info.usReferenceClock);
		ppll->reference_div = 0;

		ppll->pll_out_min =
			le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
		ppll->pll_out_max =
			le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);

		ppll->lcd_pll_out_min =
			le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
		if (ppll->lcd_pll_out_min == 0)
			ppll->lcd_pll_out_min = ppll->pll_out_min;
		ppll->lcd_pll_out_max =
			le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
		if (ppll->lcd_pll_out_max == 0)
			ppll->lcd_pll_out_max = ppll->pll_out_max;

		if (ppll->pll_out_min == 0)
			ppll->pll_out_min = 64800;

		ppll->pll_in_min =
			le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Input);
		ppll->pll_in_max =
			le16_to_cpu(firmware_info->info.usMaxPixelClockPLL_Input);

		ppll->min_post_div = 2;
		ppll->max_post_div = 0x7f;
		ppll->min_frac_feedback_div = 0;
		ppll->max_frac_feedback_div = 9;
		ppll->min_ref_div = 2;
		ppll->max_ref_div = 0x3ff;
		ppll->min_feedback_div = 4;
		ppll->max_feedback_div = 0xfff;
		ppll->best_vco = 0;

		for (i = 1; i < AMDGPU_MAX_PPLL; i++)
			adev->clock.ppll[i] = *ppll;

		/* system clock */
		spll->reference_freq =
			le16_to_cpu(firmware_info->info_21.usCoreReferenceClock);
		spll->reference_div = 0;

		spll->pll_out_min =
			le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Output);
		spll->pll_out_max =
			le32_to_cpu(firmware_info->info.ulMaxEngineClockPLL_Output);

		/* ??? */
		if (spll->pll_out_min == 0)
			spll->pll_out_min = 64800;

		spll->pll_in_min =
			le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Input);
		spll->pll_in_max =
			le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input);

		spll->min_post_div = 1;
		spll->max_post_div = 1;
		spll->min_ref_div = 2;
		spll->max_ref_div = 0xff;
		spll->min_feedback_div = 4;
		spll->max_feedback_div = 0xff;
		spll->best_vco = 0;

		/* memory clock */
		mpll->reference_freq =
			le16_to_cpu(firmware_info->info_21.usMemoryReferenceClock);
		mpll->reference_div = 0;

		mpll->pll_out_min =
			le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Output);
		mpll->pll_out_max =
			le32_to_cpu(firmware_info->info.ulMaxMemoryClockPLL_Output);

		/* ??? */
		if (mpll->pll_out_min == 0)
			mpll->pll_out_min = 64800;

		mpll->pll_in_min =
			le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Input);
		mpll->pll_in_max =
			le16_to_cpu(firmware_info->info.usMaxMemoryClockPLL_Input);

		adev->clock.default_sclk =
			le32_to_cpu(firmware_info->info.ulDefaultEngineClock);
		adev->clock.default_mclk =
			le32_to_cpu(firmware_info->info.ulDefaultMemoryClock);

		mpll->min_post_div = 1;
		mpll->max_post_div = 1;
		mpll->min_ref_div = 2;
		mpll->max_ref_div = 0xff;
		mpll->min_feedback_div = 4;
		mpll->max_feedback_div = 0xff;
		mpll->best_vco = 0;

		/* disp clock */
		adev->clock.default_dispclk =
			le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
		/* set a reasonable default for DP */
		if (adev->clock.default_dispclk < 53900) {
			DRM_INFO("Changing default dispclk from %dMHz to 600MHz\n",
				 adev->clock.default_dispclk / 100);
			adev->clock.default_dispclk = 60000;
		} else if (adev->clock.default_dispclk <= 60000) {
			DRM_INFO("Changing default dispclk from %dMHz to 625MHz\n",
				 adev->clock.default_dispclk / 100);
			adev->clock.default_dispclk = 62500;
		}
		adev->clock.dp_extclk =
			le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
		adev->clock.current_dispclk = adev->clock.default_dispclk;

		adev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock);
		if (adev->clock.max_pixel_clock == 0)
			adev->clock.max_pixel_clock = 40000;

		/* not technically a clock, but... */
		adev->mode_info.firmware_flags =
			le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);

		ret = 0;
	}

	adev->pm.current_sclk = adev->clock.default_sclk;
	adev->pm.current_mclk = adev->clock.default_mclk;

	return ret;
}

union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9 info_9;
};

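/*
 * On IGP/APU systems the IntegratedSystemInfo table can override the
 * spread spectrum percentage and rate for TMDS, HDMI and LVDS.
 */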
static void amdgpu_atombios_get_igp_ss_overrides(struct amdgpu_device *adev,
						 struct amdgpu_atom_ss *ss,
						 int id)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	u16 data_offset, size;
	union igp_info *igp_info;
	u8 frev, crev;
	u16 percentage = 0, rate = 0;

	/* get any igp specific overrides */
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
					  &frev, &crev, &data_offset)) {
		igp_info = (union igp_info *)
			((char *)mode_info->atom_context->bios + data_offset);
		switch (crev) {
		case 6:
			switch (id) {
			case ASIC_INTERNAL_SS_ON_TMDS:
				percentage = le16_to_cpu(igp_info->info_6.usDVISSPercentage);
				rate = le16_to_cpu(igp_info->info_6.usDVISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_HDMI:
				percentage = le16_to_cpu(igp_info->info_6.usHDMISSPercentage);
				rate = le16_to_cpu(igp_info->info_6.usHDMISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_LVDS:
				percentage = le16_to_cpu(igp_info->info_6.usLvdsSSPercentage);
				rate = le16_to_cpu(igp_info->info_6.usLvdsSSpreadRateIn10Hz);
				break;
			}
			break;
		case 7:
			switch (id) {
			case ASIC_INTERNAL_SS_ON_TMDS:
				percentage = le16_to_cpu(igp_info->info_7.usDVISSPercentage);
				rate = le16_to_cpu(igp_info->info_7.usDVISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_HDMI:
				percentage = le16_to_cpu(igp_info->info_7.usHDMISSPercentage);
				rate = le16_to_cpu(igp_info->info_7.usHDMISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_LVDS:
				percentage = le16_to_cpu(igp_info->info_7.usLvdsSSPercentage);
				rate = le16_to_cpu(igp_info->info_7.usLvdsSSpreadRateIn10Hz);
				break;
			}
			break;
		case 8:
			switch (id) {
			case ASIC_INTERNAL_SS_ON_TMDS:
				percentage = le16_to_cpu(igp_info->info_8.usDVISSPercentage);
				rate = le16_to_cpu(igp_info->info_8.usDVISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_HDMI:
				percentage = le16_to_cpu(igp_info->info_8.usHDMISSPercentage);
				rate = le16_to_cpu(igp_info->info_8.usHDMISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_LVDS:
				percentage = le16_to_cpu(igp_info->info_8.usLvdsSSPercentage);
				rate = le16_to_cpu(igp_info->info_8.usLvdsSSpreadRateIn10Hz);
				break;
			}
			break;
		case 9:
			switch (id) {
			case ASIC_INTERNAL_SS_ON_TMDS:
				percentage = le16_to_cpu(igp_info->info_9.usDVISSPercentage);
				rate = le16_to_cpu(igp_info->info_9.usDVISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_HDMI:
				percentage = le16_to_cpu(igp_info->info_9.usHDMISSPercentage);
				rate = le16_to_cpu(igp_info->info_9.usHDMISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_LVDS:
				percentage = le16_to_cpu(igp_info->info_9.usLvdsSSPercentage);
				rate = le16_to_cpu(igp_info->info_9.usLvdsSSpreadRateIn10Hz);
				break;
			}
			break;
		default:
			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
			break;
		}
		if (percentage)
			ss->percentage = percentage;
		if (rate)
			ss->rate = rate;
	}
}

union asic_ss_info {
	struct _ATOM_ASIC_INTERNAL_SS_INFO info;
	struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2;
	struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
};

union asic_ss_assignment {
	struct _ATOM_ASIC_SS_ASSIGNMENT v1;
	struct _ATOM_ASIC_SS_ASSIGNMENT_V2 v2;
	struct _ATOM_ASIC_SS_ASSIGNMENT_V3 v3;
};

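/*
 * Find spread spectrum parameters for the given clock type and target
 * clock in the ASIC_InternalSS_Info table; returns true and fills *ss
 * on a match.
 */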
bool amdgpu_atombios_get_asic_ss_info(struct amdgpu_device *adev,
				      struct amdgpu_atom_ss *ss,
				      int id, u32 clock)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
	uint16_t data_offset, size;
	union asic_ss_info *ss_info;
	union asic_ss_assignment *ss_assign;
	uint8_t frev, crev;
	int i, num_indices;

	if (id == ASIC_INTERNAL_MEMORY_SS) {
		if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT))
			return false;
	}
	if (id == ASIC_INTERNAL_ENGINE_SS) {
		if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT))
			return false;
	}

	memset(ss, 0, sizeof(struct amdgpu_atom_ss));
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
					  &frev, &crev, &data_offset)) {

		ss_info =
			(union asic_ss_info *)((char *)mode_info->atom_context->bios + data_offset);

		switch (frev) {
		case 1:
			num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
				sizeof(ATOM_ASIC_SS_ASSIGNMENT);

			ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info.asSpreadSpectrum[0]);
			for (i = 0; i < num_indices; i++) {
				if ((ss_assign->v1.ucClockIndication == id) &&
				    (clock <= le32_to_cpu(ss_assign->v1.ulTargetClockRange))) {
					ss->percentage =
						le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage);
					ss->type = ss_assign->v1.ucSpreadSpectrumMode;
					ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz);
					ss->percentage_divider = 100;
					return true;
				}
				ss_assign = (union asic_ss_assignment *)
					((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT));
			}
			break;
		case 2:
			num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
				sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
			ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_2.asSpreadSpectrum[0]);
			for (i = 0; i < num_indices; i++) {
				if ((ss_assign->v2.ucClockIndication == id) &&
				    (clock <= le32_to_cpu(ss_assign->v2.ulTargetClockRange))) {
					ss->percentage =
						le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage);
					ss->type = ss_assign->v2.ucSpreadSpectrumMode;
					ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz);
					ss->percentage_divider = 100;
					if ((crev == 2) &&
					    ((id == ASIC_INTERNAL_ENGINE_SS) ||
					     (id == ASIC_INTERNAL_MEMORY_SS)))
						ss->rate /= 100;
					return true;
				}
				ss_assign = (union asic_ss_assignment *)
					((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2));
			}
			break;
		case 3:
			num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
				sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
			ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_3.asSpreadSpectrum[0]);
			for (i = 0; i < num_indices; i++) {
				if ((ss_assign->v3.ucClockIndication == id) &&
				    (clock <= le32_to_cpu(ss_assign->v3.ulTargetClockRange))) {
					ss->percentage =
						le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage);
					ss->type = ss_assign->v3.ucSpreadSpectrumMode;
					ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz);
					if (ss_assign->v3.ucSpreadSpectrumMode &
					    SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK)
						ss->percentage_divider = 1000;
					else
						ss->percentage_divider = 100;
					if ((id == ASIC_INTERNAL_ENGINE_SS) ||
					    (id == ASIC_INTERNAL_MEMORY_SS))
						ss->rate /= 100;
					if (adev->flags & AMD_IS_APU)
						amdgpu_atombios_get_igp_ss_overrides(adev, ss, id);
					return true;
				}
				ss_assign = (union asic_ss_assignment *)
					((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3));
			}
			break;
		default:
			DRM_ERROR("Unsupported ASIC_InternalSS_Info table: %d %d\n", frev, crev);
			break;
		}

	}
	return false;
}

union get_clock_dividers {
	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS v1;
	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 v2;
	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 v3;
	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 v4;
	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5 v5;
	struct _COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_6 v6_in;
	struct _COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 v6_out;
};

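/*
 * Run the ComputeMemoryEnginePLL command table to compute PLL dividers
 * for the requested clock (in 10 kHz units).
 */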
int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
				       u8 clock_type,
				       u32 clock,
				       bool strobe_mode,
				       struct atom_clock_dividers *dividers)
{
	union get_clock_dividers args;
	int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL);
	u8 frev, crev;

	memset(&args, 0, sizeof(args));
	memset(dividers, 0, sizeof(struct atom_clock_dividers));

	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
		return -EINVAL;

	switch (crev) {
	case 4:
		/* fusion */
		args.v4.ulClock = cpu_to_le32(clock);	/* 10 khz */

		amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);

		dividers->post_divider = dividers->post_div = args.v4.ucPostDiv;
		dividers->real_clock = le32_to_cpu(args.v4.ulClock);
		break;
	case 6:
		/* CI */
		/* COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, COMPUTE_GPUCLK_INPUT_FLAG_SCLK */
		args.v6_in.ulClock.ulComputeClockFlag = clock_type;
		args.v6_in.ulClock.ulClockFreq = cpu_to_le32(clock);	/* 10 khz */

		amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);

		dividers->whole_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDiv);
		dividers->frac_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDivFrac);
		dividers->ref_div = args.v6_out.ucPllRefDiv;
		dividers->post_div = args.v6_out.ucPllPostDiv;
		dividers->flags = args.v6_out.ucPllCntlFlag;
		dividers->real_clock = le32_to_cpu(args.v6_out.ulClock.ulClock);
		dividers->post_divider = args.v6_out.ulClock.ucPostDiv;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

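/*
 * Run the ComputeMemoryClockParam command table to compute memory PLL
 * parameters for the requested clock (10 kHz units), optionally in
 * strobe mode.
 */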
int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
					    u32 clock,
					    bool strobe_mode,
					    struct atom_mpll_param *mpll_param)
{
	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 args;
	int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam);
	u8 frev, crev;

	memset(&args, 0, sizeof(args));
	memset(mpll_param, 0, sizeof(struct atom_mpll_param));

	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
		return -EINVAL;

	switch (frev) {
	case 2:
		switch (crev) {
		case 1:
			/* SI */
			args.ulClock = cpu_to_le32(clock);	/* 10 khz */
			args.ucInputFlag = 0;
			if (strobe_mode)
				args.ucInputFlag |= MPLL_INPUT_FLAG_STROBE_MODE_EN;

			amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);

			mpll_param->clkfrac = le16_to_cpu(args.ulFbDiv.usFbDivFrac);
			mpll_param->clkf = le16_to_cpu(args.ulFbDiv.usFbDiv);
			mpll_param->post_div = args.ucPostDiv;
			mpll_param->dll_speed = args.ucDllSpeed;
			mpll_param->bwcntl = args.ucBWCntl;
			mpll_param->vco_mode =
				(args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK);
			mpll_param->yclk_sel =
				(args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0;
			mpll_param->qdr =
				(args.ucPllCntlFlag & MPLL_CNTL_FLAG_QDR_ENABLE) ? 1 : 0;
			mpll_param->half_rate =
				(args.ucPllCntlFlag & MPLL_CNTL_FLAG_AD_HALF_RATE) ? 1 : 0;
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev)
{
	GET_ENGINE_CLOCK_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
	return le32_to_cpu(args.ulReturnEngineClock);
}

uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev)
{
	GET_MEMORY_CLOCK_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
	return le32_to_cpu(args.ulReturnMemoryClock);
}

void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev,
				      uint32_t eng_clock)
{
	SET_ENGINE_CLOCK_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);

	args.ulTargetEngineClock = cpu_to_le32(eng_clock);	/* 10 khz */

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}

void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
				      uint32_t mem_clock)
{
	SET_MEMORY_CLOCK_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);

	if (adev->flags & AMD_IS_APU)
		return;

	args.ulTargetMemoryClock = cpu_to_le32(mem_clock);	/* 10 khz */

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}

void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
					     u32 eng_clock, u32 mem_clock)
{
	SET_ENGINE_CLOCK_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings);
	u32 tmp;

	memset(&args, 0, sizeof(args));

	tmp = eng_clock & SET_CLOCK_FREQ_MASK;
	tmp |= (COMPUTE_ENGINE_PLL_PARAM << 24);

	args.ulTargetEngineClock = cpu_to_le32(tmp);
	if (mem_clock)
		args.sReserved.ulClock = cpu_to_le32(mem_clock & SET_CLOCK_FREQ_MASK);

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}

union set_voltage {
	struct _SET_VOLTAGE_PS_ALLOCATION alloc;
	struct _SET_VOLTAGE_PARAMETERS v1;
	struct _SET_VOLTAGE_PARAMETERS_V2 v2;
	struct _SET_VOLTAGE_PARAMETERS_V1_3 v3;
};

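/*
 * Program a voltage rail through the SetVoltage command table; the
 * argument layout depends on the table revision.
 */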
void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
				 u16 voltage_level,
				 u8 voltage_type)
{
	union set_voltage args;
	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
	u8 frev, crev, volt_index = voltage_level;

	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
		return;

	/* 0xff01 is a flag rather than an actual voltage */
	if (voltage_level == 0xff01)
		return;

	switch (crev) {
	case 1:
		args.v1.ucVoltageType = voltage_type;
		args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
		args.v1.ucVoltageIndex = volt_index;
		break;
	case 2:
		args.v2.ucVoltageType = voltage_type;
		args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
		args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
		break;
	case 3:
		args.v3.ucVoltageType = voltage_type;
		args.v3.ucVoltageMode = ATOM_SET_VOLTAGE;
		args.v3.usVoltageLevel = cpu_to_le16(voltage_level);
		break;
	default:
		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
		return;
	}

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}

int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev,
					      u16 *leakage_id)
{
	union set_voltage args;
	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
	u8 frev, crev;

	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
		return -EINVAL;

	switch (crev) {
	case 3:
	case 4:
		args.v3.ucVoltageType = 0;
		args.v3.ucVoltageMode = ATOM_GET_LEAKAGE_ID;
		args.v3.usVoltageLevel = 0;

		amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);

		*leakage_id = le16_to_cpu(args.v3.usVoltageLevel);
		break;
	default:
		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
		return -EINVAL;
	}

	return 0;
}

int amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(struct amdgpu_device *adev,
							     u16 *vddc, u16 *vddci,
							     u16 virtual_voltage_id,
							     u16 vbios_voltage_id)
{
	int index = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);
	u8 frev, crev;
	u16 data_offset, size;
	int i, j;
	ATOM_ASIC_PROFILING_INFO_V2_1 *profile;
	u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf;

	*vddc = 0;
	*vddci = 0;

	if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
					   &frev, &crev, &data_offset))
		return -EINVAL;

	profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
		((char *)adev->mode_info.atom_context->bios + data_offset);

	switch (frev) {
	case 1:
		return -EINVAL;
	case 2:
		switch (crev) {
		case 1:
			if (size < sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))
				return -EINVAL;
			leakage_bin = (u16 *)
				((char *)adev->mode_info.atom_context->bios + data_offset +
				 le16_to_cpu(profile->usLeakageBinArrayOffset));
			vddc_id_buf = (u16 *)
				((char *)adev->mode_info.atom_context->bios + data_offset +
				 le16_to_cpu(profile->usElbVDDC_IdArrayOffset));
			vddc_buf = (u16 *)
				((char *)adev->mode_info.atom_context->bios + data_offset +
				 le16_to_cpu(profile->usElbVDDC_LevelArrayOffset));
			vddci_id_buf = (u16 *)
				((char *)adev->mode_info.atom_context->bios + data_offset +
				 le16_to_cpu(profile->usElbVDDCI_IdArrayOffset));
			vddci_buf = (u16 *)
				((char *)adev->mode_info.atom_context->bios + data_offset +
				 le16_to_cpu(profile->usElbVDDCI_LevelArrayOffset));

			if (profile->ucElbVDDC_Num > 0) {
				for (i = 0; i < profile->ucElbVDDC_Num; i++) {
					if (vddc_id_buf[i] == virtual_voltage_id) {
						for (j = 0; j < profile->ucLeakageBinNum; j++) {
							if (vbios_voltage_id <= leakage_bin[j]) {
								*vddc = vddc_buf[j * profile->ucElbVDDC_Num + i];
								break;
							}
						}
						break;
					}
				}
			}
			if (profile->ucElbVDDCI_Num > 0) {
				for (i = 0; i < profile->ucElbVDDCI_Num; i++) {
					if (vddci_id_buf[i] == virtual_voltage_id) {
						for (j = 0; j < profile->ucLeakageBinNum; j++) {
							if (vbios_voltage_id <= leakage_bin[j]) {
								*vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
								break;
							}
						}
						break;
					}
				}
			}
			break;
		default:
			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
			return -EINVAL;
		}
		break;
	default:
		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
		return -EINVAL;
	}

	return 0;
}

union get_voltage_info {
	struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 in;
	struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 evv_out;
};

int amdgpu_atombios_get_voltage_evv(struct amdgpu_device *adev,
				    u16 virtual_voltage_id,
				    u16 *voltage)
{
	int index = GetIndexIntoMasterTable(COMMAND, GetVoltageInfo);
	u32 entry_id;
	u32 count = adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count;
	union get_voltage_info args;

	for (entry_id = 0; entry_id < count; entry_id++) {
		if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].v ==
		    virtual_voltage_id)
			break;
	}

	if (entry_id >= count)
		return -EINVAL;

	args.in.ucVoltageType = VOLTAGE_TYPE_VDDC;
	args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
	args.in.usVoltageLevel = cpu_to_le16(virtual_voltage_id);
	args.in.ulSCLKFreq =
		cpu_to_le32(adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);

	*voltage = le16_to_cpu(args.evv_out.usVoltageLevel);

	return 0;
}

union voltage_object_info {
	struct _ATOM_VOLTAGE_OBJECT_INFO v1;
	struct _ATOM_VOLTAGE_OBJECT_INFO_V2 v2;
	struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1 v3;
};

union voltage_object {
	struct _ATOM_VOLTAGE_OBJECT v1;
	struct _ATOM_VOLTAGE_OBJECT_V2 v2;
	union _ATOM_VOLTAGE_OBJECT_V3 v3;
};

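/*
 * Scan a v3 VoltageObjectInfo table for the object matching the given
 * voltage type and mode.  Entries are variable-sized, so advance by
 * each object's own usSize.  Returns NULL if nothing matches.
 */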
static ATOM_VOLTAGE_OBJECT_V3 *amdgpu_atombios_lookup_voltage_object_v3(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *v3,
									u8 voltage_type, u8 voltage_mode)
{
	u32 size = le16_to_cpu(v3->sHeader.usStructureSize);
	u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V3_1, asVoltageObj[0]);
	u8 *start = (u8 *)v3;

	while (offset < size) {
		ATOM_VOLTAGE_OBJECT_V3 *vo = (ATOM_VOLTAGE_OBJECT_V3 *)(start + offset);
		if ((vo->asGpioVoltageObj.sHeader.ucVoltageType == voltage_type) &&
		    (vo->asGpioVoltageObj.sHeader.ucVoltageMode == voltage_mode))
			return vo;
		offset += le16_to_cpu(vo->asGpioVoltageObj.sHeader.usSize);
	}
	return NULL;
}

bool
amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev,
				u8 voltage_type, u8 voltage_mode)
{
	int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
	u8 frev, crev;
	u16 data_offset, size;
	union voltage_object_info *voltage_info;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
					  &frev, &crev, &data_offset)) {
		voltage_info = (union voltage_object_info *)
			((char *)adev->mode_info.atom_context->bios + data_offset);

		switch (frev) {
		case 3:
			switch (crev) {
			case 1:
				if (amdgpu_atombios_lookup_voltage_object_v3(&voltage_info->v3,
									     voltage_type, voltage_mode))
					return true;
				break;
			default:
				DRM_ERROR("unknown voltage object table\n");
				return false;
			}
			break;
		default:
			DRM_ERROR("unknown voltage object table\n");
			return false;
		}

	}
	return false;
}

int amdgpu_atombios_get_voltage_table(struct amdgpu_device *adev,
				      u8 voltage_type, u8 voltage_mode,
				      struct atom_voltage_table *voltage_table)
{
	int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
	u8 frev, crev;
	u16 data_offset, size;
	int i;
	union voltage_object_info *voltage_info;
	union voltage_object *voltage_object = NULL;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
					  &frev, &crev, &data_offset)) {
		voltage_info = (union voltage_object_info *)
			((char *)adev->mode_info.atom_context->bios + data_offset);

		switch (frev) {
		case 3:
			switch (crev) {
			case 1:
				voltage_object = (union voltage_object *)
					amdgpu_atombios_lookup_voltage_object_v3(&voltage_info->v3,
										 voltage_type, voltage_mode);
				if (voltage_object) {
					ATOM_GPIO_VOLTAGE_OBJECT_V3 *gpio =
						&voltage_object->v3.asGpioVoltageObj;
					VOLTAGE_LUT_ENTRY_V2 *lut;
					if (gpio->ucGpioEntryNum > MAX_VOLTAGE_ENTRIES)
						return -EINVAL;
					lut = &gpio->asVolGpioLut[0];
					for (i = 0; i < gpio->ucGpioEntryNum; i++) {
						voltage_table->entries[i].value =
							le16_to_cpu(lut->usVoltageValue);
						voltage_table->entries[i].smio_low =
							le32_to_cpu(lut->ulVoltageId);
						lut = (VOLTAGE_LUT_ENTRY_V2 *)
							((u8 *)lut + sizeof(VOLTAGE_LUT_ENTRY_V2));
					}
					voltage_table->mask_low = le32_to_cpu(gpio->ulGpioMaskVal);
					voltage_table->count = gpio->ucGpioEntryNum;
					voltage_table->phase_delay = gpio->ucPhaseDelay;
					return 0;
				}
				break;
			default:
				DRM_ERROR("unknown voltage object table\n");
				return -EINVAL;
			}
			break;
		default:
			DRM_ERROR("unknown voltage object table\n");
			return -EINVAL;
		}
	}
	return -EINVAL;
}

union vram_info {
	struct _ATOM_VRAM_INFO_V3 v1_3;
	struct _ATOM_VRAM_INFO_V4 v1_4;
	struct _ATOM_VRAM_INFO_HEADER_V2_1 v2_1;
};

#define MEM_ID_MASK		0xff000000
#define MEM_ID_SHIFT		24
#define CLOCK_RANGE_MASK	0x00ffffff
#define CLOCK_RANGE_SHIFT	0
#define LOW_NIBBLE_MASK		0xf
#define DATA_EQU_PREV		0
#define DATA_FROM_TABLE		4

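/*
 * Build the memory controller register table for one VRAM module from
 * the VRAM_Info table: first the register index list, then one data
 * block per memory clock range until END_OF_REG_DATA_BLOCK.
 */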
int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
				      u8 module_index,
				      struct atom_mc_reg_table *reg_table)
{
	int index = GetIndexIntoMasterTable(DATA, VRAM_Info);
	u8 frev, crev, num_entries, t_mem_id, num_ranges = 0;
	u32 i = 0, j;
	u16 data_offset, size;
	union vram_info *vram_info;

	memset(reg_table, 0, sizeof(struct atom_mc_reg_table));

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
					  &frev, &crev, &data_offset)) {
		vram_info = (union vram_info *)
			((char *)adev->mode_info.atom_context->bios + data_offset);
		switch (frev) {
		case 1:
			DRM_ERROR("old table version %d, %d\n", frev, crev);
			return -EINVAL;
		case 2:
			switch (crev) {
			case 1:
				if (module_index < vram_info->v2_1.ucNumOfVRAMModule) {
					ATOM_INIT_REG_BLOCK *reg_block =
						(ATOM_INIT_REG_BLOCK *)
						((u8 *)vram_info + le16_to_cpu(vram_info->v2_1.usMemClkPatchTblOffset));
					ATOM_MEMORY_SETTING_DATA_BLOCK *reg_data =
						(ATOM_MEMORY_SETTING_DATA_BLOCK *)
						((u8 *)reg_block + (2 * sizeof(u16)) +
						 le16_to_cpu(reg_block->usRegIndexTblSize));
					ATOM_INIT_REG_INDEX_FORMAT *format = &reg_block->asRegIndexBuf[0];
					num_entries = (u8)((le16_to_cpu(reg_block->usRegIndexTblSize)) /
							   sizeof(ATOM_INIT_REG_INDEX_FORMAT)) - 1;
					if (num_entries > VBIOS_MC_REGISTER_ARRAY_SIZE)
						return -EINVAL;
					while (i < num_entries) {
						if (format->ucPreRegDataLength & ACCESS_PLACEHOLDER)
							break;
						reg_table->mc_reg_address[i].s1 =
							(u16)(le16_to_cpu(format->usRegIndex));
						reg_table->mc_reg_address[i].pre_reg_data =
							(u8)(format->ucPreRegDataLength);
						i++;
						format = (ATOM_INIT_REG_INDEX_FORMAT *)
							((u8 *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT));
					}
					reg_table->last = i;
					while ((le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK) &&
					       (num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES)) {
						t_mem_id = (u8)((le32_to_cpu(*(u32 *)reg_data) & MEM_ID_MASK)
								>> MEM_ID_SHIFT);
						if (module_index == t_mem_id) {
							reg_table->mc_reg_table_entry[num_ranges].mclk_max =
								(u32)((le32_to_cpu(*(u32 *)reg_data) & CLOCK_RANGE_MASK)
								      >> CLOCK_RANGE_SHIFT);
							for (i = 0, j = 1; i < reg_table->last; i++) {
								if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_FROM_TABLE) {
									reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
										(u32)le32_to_cpu(*((u32 *)reg_data + j));
									j++;
								} else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
									reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
										reg_table->mc_reg_table_entry[num_ranges].mc_data[i - 1];
								}
							}
							num_ranges++;
						}
						reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
							((u8 *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize));
					}
					if (le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK)
						return -EINVAL;
					reg_table->num_entries = num_ranges;
				} else
					return -EINVAL;
				break;
			default:
				DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
				return -EINVAL;
			}
			break;
		default:
			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
			return -EINVAL;
		}
		return 0;
	}
	return -EINVAL;
}

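/*
 * BIOS scratch register helpers: BIOS_SCRATCH_6 arbitrates hardware
 * access between the driver and the VBIOS, so the driver flags the
 * critical state while it owns the hardware.
 */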
void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock)
{
	uint32_t bios_6_scratch;

	bios_6_scratch = RREG32(mmBIOS_SCRATCH_6);

	if (lock) {
		bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
		bios_6_scratch &= ~ATOM_S6_ACC_MODE;
	} else {
		bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE;
		bios_6_scratch |= ATOM_S6_ACC_MODE;
	}

	WREG32(mmBIOS_SCRATCH_6, bios_6_scratch);
}

void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev)
{
	uint32_t bios_2_scratch, bios_6_scratch;

	bios_2_scratch = RREG32(mmBIOS_SCRATCH_2);
	bios_6_scratch = RREG32(mmBIOS_SCRATCH_6);

	/* let the bios control the backlight */
	bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;

	/* tell the bios not to handle mode switching */
	bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;

	/* clear the vbios dpms state */
	bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE;

	WREG32(mmBIOS_SCRATCH_2, bios_2_scratch);
	WREG32(mmBIOS_SCRATCH_6, bios_6_scratch);
}

void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
		adev->bios_scratch[i] = RREG32(mmBIOS_SCRATCH_0 + i);
}

void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
		WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]);
}

/* Atom needs data in little endian format so swap as appropriate when copying
 * data to or from atom. Note that atom operates on dw units.
 *
 * Use to_le=true when sending data to atom and provide at least
 * ALIGN(num_bytes,4) bytes in the dst buffer.
 *
 * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4)
 * bytes in the src buffer.
 */
void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
{
#ifdef __BIG_ENDIAN
	u32 src_tmp[5], dst_tmp[5];
	int i;
	u8 align_num_bytes = ALIGN(num_bytes, 4);

	if (to_le) {
		memcpy(src_tmp, src, num_bytes);
		for (i = 0; i < align_num_bytes / 4; i++)
			dst_tmp[i] = cpu_to_le32(src_tmp[i]);
		memcpy(dst, dst_tmp, align_num_bytes);
	} else {
		memcpy(src_tmp, src, align_num_bytes);
		for (i = 0; i < align_num_bytes / 4; i++)
			dst_tmp[i] = le32_to_cpu(src_tmp[i]);
		memcpy(dst, dst_tmp, num_bytes);
	}
#else
	memcpy(dst, src, num_bytes);
#endif
}