/*	$NetBSD: amdgpu_atom.c,v 1.1.1.1 2021/12/18 20:11:04 riastradh Exp $	*/

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Stanislaw Skowronek
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_atom.c,v 1.1.1.1 2021/12/18 20:11:04 riastradh Exp $");

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/unaligned.h>

#include <drm/drm_util.h>

#define ATOM_DEBUG

#include "atom.h"
#include "atom-names.h"
#include "atom-bits.h"
#include "amdgpu.h"

#define ATOM_COND_ABOVE		0
#define ATOM_COND_ABOVEOREQUAL	1
#define ATOM_COND_ALWAYS	2
#define ATOM_COND_BELOW		3
#define ATOM_COND_BELOWOREQUAL	4
#define ATOM_COND_EQUAL		5
#define ATOM_COND_NOTEQUAL	6

#define ATOM_PORT_ATI	0
#define ATOM_PORT_PCI	1
#define ATOM_PORT_SYSIO	2

#define ATOM_UNIT_MICROSEC	0
#define ATOM_UNIT_MILLISEC	1

#define PLL_INDEX	2
#define PLL_DATA	3

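/*
 * Per-invocation state for the AtomBIOS bytecode interpreter.  Each call to
 * amdgpu_atom_execute_table() builds one of these: "ps" points at the
 * caller-supplied parameter space, "ws" at a scratch workspace allocated for
 * the table, and "last_jump"/"last_jump_jiffies" back the watchdog that
 * aborts tables stuck re-taking the same jump.
 */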
typedef struct {
	struct atom_context *ctx;
	uint32_t *ps, *ws;
	int ps_shift;
	uint16_t start;
	unsigned last_jump;
	unsigned long last_jump_jiffies;
	bool abort;
} atom_exec_context;

int amdgpu_atom_debug = 0;
static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);

static uint32_t atom_arg_mask[8] =
	{ 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
	  0xFF000000 };
static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };

static int atom_dst_to_src[8][4] = {
	/* translate destination alignment field to the source alignment encoding */
	{0, 0, 0, 0},
	{1, 2, 3, 0},
	{1, 2, 3, 0},
	{1, 2, 3, 0},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
};
static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };
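
/*
 * Operands carry a 3-bit alignment code (dword, word at bit 0/8/16, byte at
 * bit 0/8/16/24).  atom_arg_mask[] and atom_arg_shift[] extract the addressed
 * field from a 32-bit value, e.g. code 3 (word at bit 16) masks with
 * 0xFFFF0000 and shifts right by 16.  atom_dst_to_src[] maps a destination
 * alignment plus size field onto the equivalent source encoding.
 */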

static int debug_depth = 0;
#ifdef ATOM_DEBUG
static void debug_print_spaces(int n)
{
	while (n--)
		printk(" ");
}

#define DEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0)
#define SDEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
#else
#define DEBUG(...) do { } while (0)
#define SDEBUG(...) do { } while (0)
#endif

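/*
 * Indirect IO (IIO): some registers are reached through small helper programs
 * embedded in the BIOS image rather than by a direct MMIO access.
 * atom_iio_execute() interprets one such program, starting at "base" in the
 * BIOS, against the card's ioreg_read/ioreg_write callbacks; "index" and
 * "data" are the values the surrounding opcode routes through it.
 */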
static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
				 uint32_t index, uint32_t data)
{
	uint32_t temp = 0xCDCDCDCD;

	while (1)
		switch (CU8(base)) {
		case ATOM_IIO_NOP:
			base++;
			break;
		case ATOM_IIO_READ:
			temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1));
			base += 3;
			break;
		case ATOM_IIO_WRITE:
			ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
			base += 3;
			break;
		case ATOM_IIO_CLEAR:
			temp &=
			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
			      CU8(base + 2));
			base += 3;
			break;
		case ATOM_IIO_SET:
			temp |=
			    (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2);
			base += 3;
			break;
		case ATOM_IIO_MOVE_INDEX:
			temp &=
			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
			      CU8(base + 3));
			temp |=
			    ((index >> CU8(base + 2)) &
			     (0xFFFFFFFF >> (32 - CU8(base + 1)))) <<
			    CU8(base + 3);
			base += 4;
			break;
		case ATOM_IIO_MOVE_DATA:
			temp &=
			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
			      CU8(base + 3));
			temp |=
			    ((data >> CU8(base + 2)) &
			     (0xFFFFFFFF >> (32 - CU8(base + 1)))) <<
			    CU8(base + 3);
			base += 4;
			break;
		case ATOM_IIO_MOVE_ATTR:
			temp &=
			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
			      CU8(base + 3));
			temp |=
			    ((ctx->io_attr >> CU8(base + 2)) &
			     (0xFFFFFFFF >> (32 - CU8(base + 1)))) <<
			    CU8(base + 3);
			base += 4;
			break;
		case ATOM_IIO_END:
			return temp;
		default:
			pr_info("Unknown IIO opcode\n");
			return 0;
		}
}

static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
				 int *ptr, uint32_t *saved, int print)
{
	uint32_t idx, val = 0xCDCDCDCD, align, arg;
	struct atom_context *gctx = ctx->ctx;
	arg = attr & 7;
	align = (attr >> 3) & 7;
	switch (arg) {
	case ATOM_ARG_REG:
		idx = U16(*ptr);
		(*ptr) += 2;
		if (print)
			DEBUG("REG[0x%04X]", idx);
		idx += gctx->reg_block;
		switch (gctx->io_mode) {
		case ATOM_IO_MM:
			val = gctx->card->reg_read(gctx->card, idx);
			break;
		case ATOM_IO_PCI:
			pr_info("PCI registers are not implemented\n");
			return 0;
		case ATOM_IO_SYSIO:
			pr_info("SYSIO registers are not implemented\n");
			return 0;
		default:
			if (!(gctx->io_mode & 0x80)) {
				pr_info("Bad IO mode\n");
				return 0;
			}
			if (!gctx->iio[gctx->io_mode & 0x7F]) {
				pr_info("Undefined indirect IO read method %d\n",
					gctx->io_mode & 0x7F);
				return 0;
			}
			val = atom_iio_execute(gctx,
					       gctx->iio[gctx->io_mode & 0x7F],
					       idx, 0);
		}
		break;
	case ATOM_ARG_PS:
		idx = U8(*ptr);
		(*ptr)++;
		/* get_unaligned_le32 avoids unaligned accesses from atombios
		 * tables, noticed on a DEC Alpha. */
		val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
		if (print)
			DEBUG("PS[0x%02X,0x%04X]", idx, val);
		break;
	case ATOM_ARG_WS:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("WS[0x%02X]", idx);
		switch (idx) {
		case ATOM_WS_QUOTIENT:
			val = gctx->divmul[0];
			break;
		case ATOM_WS_REMAINDER:
			val = gctx->divmul[1];
			break;
		case ATOM_WS_DATAPTR:
			val = gctx->data_block;
			break;
		case ATOM_WS_SHIFT:
			val = gctx->shift;
			break;
		case ATOM_WS_OR_MASK:
			val = 1 << gctx->shift;
			break;
		case ATOM_WS_AND_MASK:
			val = ~(1 << gctx->shift);
			break;
		case ATOM_WS_FB_WINDOW:
			val = gctx->fb_base;
			break;
		case ATOM_WS_ATTRIBUTES:
			val = gctx->io_attr;
			break;
		case ATOM_WS_REGPTR:
			val = gctx->reg_block;
			break;
		default:
			val = ctx->ws[idx];
		}
		break;
	case ATOM_ARG_ID:
		idx = U16(*ptr);
		(*ptr) += 2;
		if (print) {
			if (gctx->data_block)
				DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
			else
				DEBUG("ID[0x%04X]", idx);
		}
		val = U32(idx + gctx->data_block);
		break;
	case ATOM_ARG_FB:
		idx = U8(*ptr);
		(*ptr)++;
		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
			DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
			val = 0;
		} else
			val = gctx->scratch[(gctx->fb_base / 4) + idx];
		if (print)
			DEBUG("FB[0x%02X]", idx);
		break;
	case ATOM_ARG_IMM:
		switch (align) {
		case ATOM_SRC_DWORD:
			val = U32(*ptr);
			(*ptr) += 4;
			if (print)
				DEBUG("IMM 0x%08X\n", val);
			return val;
		case ATOM_SRC_WORD0:
		case ATOM_SRC_WORD8:
		case ATOM_SRC_WORD16:
			val = U16(*ptr);
			(*ptr) += 2;
			if (print)
				DEBUG("IMM 0x%04X\n", val);
			return val;
		case ATOM_SRC_BYTE0:
		case ATOM_SRC_BYTE8:
		case ATOM_SRC_BYTE16:
		case ATOM_SRC_BYTE24:
			val = U8(*ptr);
			(*ptr)++;
			if (print)
				DEBUG("IMM 0x%02X\n", val);
			return val;
		}
		return 0;
	case ATOM_ARG_PLL:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("PLL[0x%02X]", idx);
		val = gctx->card->pll_read(gctx->card, idx);
		break;
	case ATOM_ARG_MC:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("MC[0x%02X]", idx);
		val = gctx->card->mc_read(gctx->card, idx);
		break;
	}
	if (saved)
		*saved = val;
	val &= atom_arg_mask[align];
	val >>= atom_arg_shift[align];
	if (print)
		switch (align) {
		case ATOM_SRC_DWORD:
			DEBUG(".[31:0] -> 0x%08X\n", val);
			break;
		case ATOM_SRC_WORD0:
			DEBUG(".[15:0] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_WORD8:
			DEBUG(".[23:8] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_WORD16:
			DEBUG(".[31:16] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_BYTE0:
			DEBUG(".[7:0] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE8:
			DEBUG(".[15:8] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE16:
			DEBUG(".[23:16] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE24:
			DEBUG(".[31:24] -> 0x%02X\n", val);
			break;
		}
	return val;
}

static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
{
	uint32_t align = (attr >> 3) & 7, arg = attr & 7;
	switch (arg) {
	case ATOM_ARG_REG:
	case ATOM_ARG_ID:
		(*ptr) += 2;
		break;
	case ATOM_ARG_PLL:
	case ATOM_ARG_MC:
	case ATOM_ARG_PS:
	case ATOM_ARG_WS:
	case ATOM_ARG_FB:
		(*ptr)++;
		break;
	case ATOM_ARG_IMM:
		switch (align) {
		case ATOM_SRC_DWORD:
			(*ptr) += 4;
			return;
		case ATOM_SRC_WORD0:
		case ATOM_SRC_WORD8:
		case ATOM_SRC_WORD16:
			(*ptr) += 2;
			return;
		case ATOM_SRC_BYTE0:
		case ATOM_SRC_BYTE8:
		case ATOM_SRC_BYTE16:
		case ATOM_SRC_BYTE24:
			(*ptr)++;
			return;
		}
		return;
	}
}

static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
{
	return atom_get_src_int(ctx, attr, ptr, NULL, 1);
}

static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
{
	uint32_t val = 0xCDCDCDCD;

	switch (align) {
	case ATOM_SRC_DWORD:
		val = U32(*ptr);
		(*ptr) += 4;
		break;
	case ATOM_SRC_WORD0:
	case ATOM_SRC_WORD8:
	case ATOM_SRC_WORD16:
		val = U16(*ptr);
		(*ptr) += 2;
		break;
	case ATOM_SRC_BYTE0:
	case ATOM_SRC_BYTE8:
	case ATOM_SRC_BYTE16:
	case ATOM_SRC_BYTE24:
		val = U8(*ptr);
		(*ptr)++;
		break;
	}
	return val;
}

static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
			     int *ptr, uint32_t *saved, int print)
{
	return atom_get_src_int(ctx,
				arg | atom_dst_to_src[(attr >> 3) & 7]
						     [(attr >> 6) & 3] << 3,
				ptr, saved, print);
}

static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
{
	atom_skip_src_int(ctx,
			  arg | atom_dst_to_src[(attr >> 3) & 7]
					       [(attr >> 6) & 3] << 3, ptr);
}

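/*
 * Destination writes are read-modify-write on the containing 32-bit word:
 * atom_get_dst() returns the addressed field and, via "saved", the original
 * full value; atom_put_dst() shifts and masks the new field back in while
 * keeping the bits of "saved" that lie outside the addressed byte or word.
 */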
static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
			 int *ptr, uint32_t val, uint32_t saved)
{
	uint32_t align =
	    atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3], old_val =
	    val, idx;
	struct atom_context *gctx = ctx->ctx;
	old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
	val <<= atom_arg_shift[align];
	val &= atom_arg_mask[align];
	saved &= ~atom_arg_mask[align];
	val |= saved;
	switch (arg) {
	case ATOM_ARG_REG:
		idx = U16(*ptr);
		(*ptr) += 2;
		DEBUG("REG[0x%04X]", idx);
		idx += gctx->reg_block;
		switch (gctx->io_mode) {
		case ATOM_IO_MM:
			if (idx == 0)
				gctx->card->reg_write(gctx->card, idx,
						      val << 2);
			else
				gctx->card->reg_write(gctx->card, idx, val);
			break;
		case ATOM_IO_PCI:
			pr_info("PCI registers are not implemented\n");
			return;
		case ATOM_IO_SYSIO:
			pr_info("SYSIO registers are not implemented\n");
			return;
		default:
			if (!(gctx->io_mode & 0x80)) {
				pr_info("Bad IO mode\n");
				return;
			}
			if (!gctx->iio[gctx->io_mode & 0xFF]) {
				pr_info("Undefined indirect IO write method %d\n",
					gctx->io_mode & 0x7F);
				return;
			}
			atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF],
					 idx, val);
		}
		break;
	case ATOM_ARG_PS:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("PS[0x%02X]", idx);
		ctx->ps[idx] = cpu_to_le32(val);
		break;
	case ATOM_ARG_WS:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("WS[0x%02X]", idx);
		switch (idx) {
		case ATOM_WS_QUOTIENT:
			gctx->divmul[0] = val;
			break;
		case ATOM_WS_REMAINDER:
			gctx->divmul[1] = val;
			break;
		case ATOM_WS_DATAPTR:
			gctx->data_block = val;
			break;
		case ATOM_WS_SHIFT:
			gctx->shift = val;
			break;
		case ATOM_WS_OR_MASK:
		case ATOM_WS_AND_MASK:
			break;
		case ATOM_WS_FB_WINDOW:
			gctx->fb_base = val;
			break;
		case ATOM_WS_ATTRIBUTES:
			gctx->io_attr = val;
			break;
		case ATOM_WS_REGPTR:
			gctx->reg_block = val;
			break;
		default:
			ctx->ws[idx] = val;
		}
		break;
	case ATOM_ARG_FB:
		idx = U8(*ptr);
		(*ptr)++;
		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
			DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
		} else
			gctx->scratch[(gctx->fb_base / 4) + idx] = val;
		DEBUG("FB[0x%02X]", idx);
		break;
	case ATOM_ARG_PLL:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("PLL[0x%02X]", idx);
		gctx->card->pll_write(gctx->card, idx, val);
		break;
	case ATOM_ARG_MC:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("MC[0x%02X]", idx);
		gctx->card->mc_write(gctx->card, idx, val);
		return;
	}
	switch (align) {
	case ATOM_SRC_DWORD:
		DEBUG(".[31:0] <- 0x%08X\n", old_val);
		break;
	case ATOM_SRC_WORD0:
		DEBUG(".[15:0] <- 0x%04X\n", old_val);
		break;
	case ATOM_SRC_WORD8:
		DEBUG(".[23:8] <- 0x%04X\n", old_val);
		break;
	case ATOM_SRC_WORD16:
		DEBUG(".[31:16] <- 0x%04X\n", old_val);
		break;
	case ATOM_SRC_BYTE0:
		DEBUG(".[7:0] <- 0x%02X\n", old_val);
		break;
	case ATOM_SRC_BYTE8:
		DEBUG(".[15:8] <- 0x%02X\n", old_val);
		break;
	case ATOM_SRC_BYTE16:
		DEBUG(".[23:16] <- 0x%02X\n", old_val);
		break;
	case ATOM_SRC_BYTE24:
		DEBUG(".[31:24] <- 0x%02X\n", old_val);
		break;
	}
}

static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst += src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst &= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
{
	printk("ATOM BIOS beeped!\n");
}

static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
{
	int idx = U8((*ptr)++);
	int r = 0;

	if (idx < ATOM_TABLE_NAMES_CNT)
		SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
	else
		SDEBUG(" table: %d\n", idx);
	if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
		r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
	if (r) {
		ctx->abort = true;
	}
}

static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t saved;
	int dptr = *ptr;
	attr &= 0x38;
	attr |= atom_def_dst[attr >> 3] << 6;
	atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
}

static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;
	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	ctx->ctx->cs_equal = (dst == src);
	ctx->ctx->cs_above = (dst > src);
	SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
	       ctx->ctx->cs_above ? "GT" : "LE");
}

static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
{
	unsigned count = U8((*ptr)++);
	SDEBUG(" count: %d\n", count);
	if (arg == ATOM_UNIT_MICROSEC)
		udelay(count);
	else if (!drm_can_sleep())
		mdelay(count);
	else
		msleep(count);
}

static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;
	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	if (src != 0) {
		ctx->ctx->divmul[0] = dst / src;
		ctx->ctx->divmul[1] = dst % src;
	} else {
		ctx->ctx->divmul[0] = 0;
		ctx->ctx->divmul[1] = 0;
	}
}

static void atom_op_div32(atom_exec_context *ctx, int *ptr, int arg)
{
	uint64_t val64;
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;
	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	if (src != 0) {
		val64 = dst;
		val64 |= ((uint64_t)ctx->ctx->divmul[1]) << 32;
		do_div(val64, src);
		ctx->ctx->divmul[0] = lower_32_bits(val64);
		ctx->ctx->divmul[1] = upper_32_bits(val64);
	} else {
		ctx->ctx->divmul[0] = 0;
		ctx->ctx->divmul[1] = 0;
	}
}

static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
{
	/* functionally, a nop */
}

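/*
 * Conditional and unconditional jumps.  Re-taking the same backward jump for
 * more than five seconds (tracked via last_jump/last_jump_jiffies) is treated
 * as a stuck table and aborts execution rather than hanging the kernel.
 */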
static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
{
	int execute = 0, target = U16(*ptr);
	unsigned long cjiffies;

	(*ptr) += 2;
	switch (arg) {
	case ATOM_COND_ABOVE:
		execute = ctx->ctx->cs_above;
		break;
	case ATOM_COND_ABOVEOREQUAL:
		execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
		break;
	case ATOM_COND_ALWAYS:
		execute = 1;
		break;
	case ATOM_COND_BELOW:
		execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
		break;
	case ATOM_COND_BELOWOREQUAL:
		execute = !ctx->ctx->cs_above;
		break;
	case ATOM_COND_EQUAL:
		execute = ctx->ctx->cs_equal;
		break;
	case ATOM_COND_NOTEQUAL:
		execute = !ctx->ctx->cs_equal;
		break;
	}
	if (arg != ATOM_COND_ALWAYS)
		SDEBUG(" taken: %s\n", execute ? "yes" : "no");
	SDEBUG(" target: 0x%04X\n", target);
	if (execute) {
		if (ctx->last_jump == (ctx->start + target)) {
			cjiffies = jiffies;
			if (time_after(cjiffies, ctx->last_jump_jiffies)) {
				cjiffies -= ctx->last_jump_jiffies;
				if (jiffies_to_msecs(cjiffies) > 5000) {
					DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n");
					ctx->abort = true;
				}
			} else {
				/* jiffies wrapped around; just wait a little longer */
				ctx->last_jump_jiffies = jiffies;
			}
		} else {
			ctx->last_jump = ctx->start + target;
			ctx->last_jump_jiffies = jiffies;
		}
		*ptr = ctx->start + target;
	}
}

static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, mask, src, saved;
	int dptr = *ptr;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
	SDEBUG(" mask: 0x%08x", mask);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst &= mask;
	dst |= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t src, saved;
	int dptr = *ptr;
	if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
		atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
	else {
		atom_skip_dst(ctx, arg, attr, ptr);
		saved = 0xCDCDCDCD;
	}
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, src, saved);
}

static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;
	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	ctx->ctx->divmul[0] = dst * src;
}

static void atom_op_mul32(atom_exec_context *ctx, int *ptr, int arg)
{
	uint64_t val64;
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;
	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	val64 = (uint64_t)dst * (uint64_t)src;
	ctx->ctx->divmul[0] = lower_32_bits(val64);
	ctx->ctx->divmul[1] = upper_32_bits(val64);
}

static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
{
	/* nothing */
}

static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst |= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t val = U8((*ptr)++);
	SDEBUG("POST card output: 0x%02X\n", val);
}

static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
{
	pr_info("unimplemented!\n");
}

static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
{
	pr_info("unimplemented!\n");
}

static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
{
	pr_info("unimplemented!\n");
}

static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
{
	int idx = U8(*ptr);
	(*ptr)++;
	SDEBUG(" block: %d\n", idx);
	if (!idx)
		ctx->ctx->data_block = 0;
	else if (idx == 255)
		ctx->ctx->data_block = ctx->start;
	else
		ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
	SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block);
}

static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	SDEBUG(" fb_base: ");
	ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
}

static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
{
	int port;
	switch (arg) {
	case ATOM_PORT_ATI:
		port = U16(*ptr);
		if (port < ATOM_IO_NAMES_CNT)
			SDEBUG(" port: %d (%s)\n", port, atom_io_names[port]);
		else
			SDEBUG(" port: %d\n", port);
		if (!port)
			ctx->ctx->io_mode = ATOM_IO_MM;
		else
			ctx->ctx->io_mode = ATOM_IO_IIO | port;
		(*ptr) += 2;
		break;
	case ATOM_PORT_PCI:
		ctx->ctx->io_mode = ATOM_IO_PCI;
		(*ptr)++;
		break;
	case ATOM_PORT_SYSIO:
		ctx->ctx->io_mode = ATOM_IO_SYSIO;
		(*ptr)++;
		break;
	}
}

static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
{
	ctx->ctx->reg_block = U16(*ptr);
	(*ptr) += 2;
	SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
}

static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	attr &= 0x38;
	attr |= atom_def_dst[attr >> 3] << 6;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst <<= shift;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	attr &= 0x38;
	attr |= atom_def_dst[attr >> 3] << 6;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst >>= shift;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	/* op needs the full dst value */
	dst = saved;
	shift = atom_get_src(ctx, attr, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst <<= shift;
	dst &= atom_arg_mask[dst_align];
	dst >>= atom_arg_shift[dst_align];
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	/* op needs the full dst value */
	dst = saved;
	shift = atom_get_src(ctx, attr, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst >>= shift;
	dst &= atom_arg_mask[dst_align];
	dst >>= atom_arg_shift[dst_align];
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst -= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t src, val, target;
	SDEBUG(" switch: ");
	src = atom_get_src(ctx, attr, ptr);
	while (U16(*ptr) != ATOM_CASE_END)
		if (U8(*ptr) == ATOM_CASE_MAGIC) {
			(*ptr)++;
			SDEBUG(" case: ");
			val = atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM,
					   ptr);
			target = U16(*ptr);
			if (val == src) {
				SDEBUG(" target: %04X\n", target);
				*ptr = ctx->start + target;
				return;
			}
			(*ptr) += 2;
		} else {
			pr_info("Bad case\n");
			return;
		}
	(*ptr) += 2;
}

static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;
	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	ctx->ctx->cs_equal = ((dst & src) == 0);
	SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
}

static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst ^= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t val = U8((*ptr)++);
	SDEBUG("DEBUG output: 0x%02X\n", val);
}

static void atom_op_processds(atom_exec_context *ctx, int *ptr, int arg)
{
	uint16_t val = U16(*ptr);
	(*ptr) += val + 2;
	SDEBUG("PROCESSDS output: 0x%02X\n", val);
}

static struct {
	void (*func) (atom_exec_context *, int *, int);
	int arg;
} opcode_table[ATOM_OP_CNT] = {
	{ NULL, 0 },
	{ atom_op_move, ATOM_ARG_REG },
	{ atom_op_move, ATOM_ARG_PS },
	{ atom_op_move, ATOM_ARG_WS },
	{ atom_op_move, ATOM_ARG_FB },
	{ atom_op_move, ATOM_ARG_PLL },
	{ atom_op_move, ATOM_ARG_MC },
	{ atom_op_and, ATOM_ARG_REG },
	{ atom_op_and, ATOM_ARG_PS },
	{ atom_op_and, ATOM_ARG_WS },
	{ atom_op_and, ATOM_ARG_FB },
	{ atom_op_and, ATOM_ARG_PLL },
	{ atom_op_and, ATOM_ARG_MC },
	{ atom_op_or, ATOM_ARG_REG },
	{ atom_op_or, ATOM_ARG_PS },
	{ atom_op_or, ATOM_ARG_WS },
	{ atom_op_or, ATOM_ARG_FB },
	{ atom_op_or, ATOM_ARG_PLL },
	{ atom_op_or, ATOM_ARG_MC },
	{ atom_op_shift_left, ATOM_ARG_REG },
	{ atom_op_shift_left, ATOM_ARG_PS },
	{ atom_op_shift_left, ATOM_ARG_WS },
	{ atom_op_shift_left, ATOM_ARG_FB },
	{ atom_op_shift_left, ATOM_ARG_PLL },
	{ atom_op_shift_left, ATOM_ARG_MC },
	{ atom_op_shift_right, ATOM_ARG_REG },
	{ atom_op_shift_right, ATOM_ARG_PS },
	{ atom_op_shift_right, ATOM_ARG_WS },
	{ atom_op_shift_right, ATOM_ARG_FB },
	{ atom_op_shift_right, ATOM_ARG_PLL },
	{ atom_op_shift_right, ATOM_ARG_MC },
	{ atom_op_mul, ATOM_ARG_REG },
	{ atom_op_mul, ATOM_ARG_PS },
	{ atom_op_mul, ATOM_ARG_WS },
	{ atom_op_mul, ATOM_ARG_FB },
	{ atom_op_mul, ATOM_ARG_PLL },
	{ atom_op_mul, ATOM_ARG_MC },
	{ atom_op_div, ATOM_ARG_REG },
	{ atom_op_div, ATOM_ARG_PS },
	{ atom_op_div, ATOM_ARG_WS },
	{ atom_op_div, ATOM_ARG_FB },
	{ atom_op_div, ATOM_ARG_PLL },
	{ atom_op_div, ATOM_ARG_MC },
	{ atom_op_add, ATOM_ARG_REG },
	{ atom_op_add, ATOM_ARG_PS },
	{ atom_op_add, ATOM_ARG_WS },
	{ atom_op_add, ATOM_ARG_FB },
	{ atom_op_add, ATOM_ARG_PLL },
	{ atom_op_add, ATOM_ARG_MC },
	{ atom_op_sub, ATOM_ARG_REG },
	{ atom_op_sub, ATOM_ARG_PS },
	{ atom_op_sub, ATOM_ARG_WS },
	{ atom_op_sub, ATOM_ARG_FB },
	{ atom_op_sub, ATOM_ARG_PLL },
	{ atom_op_sub, ATOM_ARG_MC },
	{ atom_op_setport, ATOM_PORT_ATI },
	{ atom_op_setport, ATOM_PORT_PCI },
	{ atom_op_setport, ATOM_PORT_SYSIO },
	{ atom_op_setregblock, 0 },
	{ atom_op_setfbbase, 0 },
	{ atom_op_compare, ATOM_ARG_REG },
	{ atom_op_compare, ATOM_ARG_PS },
	{ atom_op_compare, ATOM_ARG_WS },
	{ atom_op_compare, ATOM_ARG_FB },
	{ atom_op_compare, ATOM_ARG_PLL },
	{ atom_op_compare, ATOM_ARG_MC },
	{ atom_op_switch, 0 },
	{ atom_op_jump, ATOM_COND_ALWAYS },
	{ atom_op_jump, ATOM_COND_EQUAL },
	{ atom_op_jump, ATOM_COND_BELOW },
	{ atom_op_jump, ATOM_COND_ABOVE },
	{ atom_op_jump, ATOM_COND_BELOWOREQUAL },
	{ atom_op_jump, ATOM_COND_ABOVEOREQUAL },
	{ atom_op_jump, ATOM_COND_NOTEQUAL },
	{ atom_op_test, ATOM_ARG_REG },
	{ atom_op_test, ATOM_ARG_PS },
	{ atom_op_test, ATOM_ARG_WS },
	{ atom_op_test, ATOM_ARG_FB },
	{ atom_op_test, ATOM_ARG_PLL },
	{ atom_op_test, ATOM_ARG_MC },
	{ atom_op_delay, ATOM_UNIT_MILLISEC },
	{ atom_op_delay, ATOM_UNIT_MICROSEC },
	{ atom_op_calltable, 0 },
	{ atom_op_repeat, 0 },
	{ atom_op_clear, ATOM_ARG_REG },
	{ atom_op_clear, ATOM_ARG_PS },
	{ atom_op_clear, ATOM_ARG_WS },
	{ atom_op_clear, ATOM_ARG_FB },
	{ atom_op_clear, ATOM_ARG_PLL },
	{ atom_op_clear, ATOM_ARG_MC },
	{ atom_op_nop, 0 },
	{ atom_op_eot, 0 },
	{ atom_op_mask, ATOM_ARG_REG },
	{ atom_op_mask, ATOM_ARG_PS },
	{ atom_op_mask, ATOM_ARG_WS },
	{ atom_op_mask, ATOM_ARG_FB },
	{ atom_op_mask, ATOM_ARG_PLL },
	{ atom_op_mask, ATOM_ARG_MC },
	{ atom_op_postcard, 0 },
	{ atom_op_beep, 0 },
	{ atom_op_savereg, 0 },
	{ atom_op_restorereg, 0 },
	{ atom_op_setdatablock, 0 },
	{ atom_op_xor, ATOM_ARG_REG },
	{ atom_op_xor, ATOM_ARG_PS },
	{ atom_op_xor, ATOM_ARG_WS },
	{ atom_op_xor, ATOM_ARG_FB },
	{ atom_op_xor, ATOM_ARG_PLL },
	{ atom_op_xor, ATOM_ARG_MC },
	{ atom_op_shl, ATOM_ARG_REG },
	{ atom_op_shl, ATOM_ARG_PS },
	{ atom_op_shl, ATOM_ARG_WS },
	{ atom_op_shl, ATOM_ARG_FB },
	{ atom_op_shl, ATOM_ARG_PLL },
	{ atom_op_shl, ATOM_ARG_MC },
	{ atom_op_shr, ATOM_ARG_REG },
	{ atom_op_shr, ATOM_ARG_PS },
	{ atom_op_shr, ATOM_ARG_WS },
	{ atom_op_shr, ATOM_ARG_FB },
	{ atom_op_shr, ATOM_ARG_PLL },
	{ atom_op_shr, ATOM_ARG_MC },
	{ atom_op_debug, 0 },
	{ atom_op_processds, 0 },
	{ atom_op_mul32, ATOM_ARG_PS },
	{ atom_op_mul32, ATOM_ARG_WS },
	{ atom_op_div32, ATOM_ARG_PS },
	{ atom_op_div32, ATOM_ARG_WS },
};
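
/*
 * Each instruction in a command-table byte stream starts with an opcode byte
 * that indexes this table; entry 0 is deliberately NULL, and
 * amdgpu_atom_execute_table_locked() stops on opcode 0, on any value
 * >= ATOM_OP_CNT, and after ATOM_OP_EOT.
 */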

static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
{
	int base = CU16(ctx->cmd_table + 4 + 2 * index);
	int len, ws, ps, ptr;
	unsigned char op;
	atom_exec_context ectx;
	int ret = 0;

	if (!base)
		return -EINVAL;

	len = CU16(base + ATOM_CT_SIZE_PTR);
	ws = CU8(base + ATOM_CT_WS_PTR);
	ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
	ptr = base + ATOM_CT_CODE_PTR;

	SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);

	ectx.ctx = ctx;
	ectx.ps_shift = ps / 4;
	ectx.start = base;
	ectx.ps = params;
	ectx.abort = false;
	ectx.last_jump = 0;
	if (ws)
		ectx.ws = kcalloc(4, ws, GFP_KERNEL);
	else
		ectx.ws = NULL;

	debug_depth++;
	while (1) {
		op = CU8(ptr++);
		if (op < ATOM_OP_NAMES_CNT)
			SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
		else
			SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
		if (ectx.abort) {
			DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
				  base, len, ws, ps, ptr - 1);
			ret = -EINVAL;
			goto free;
		}

		if (op < ATOM_OP_CNT && op > 0)
			opcode_table[op].func(&ectx, &ptr,
					      opcode_table[op].arg);
		else
			break;

		if (op == ATOM_OP_EOT)
			break;
	}
	debug_depth--;
	SDEBUG("<<\n");

free:
	if (ws)
		kfree(ectx.ws);
	return ret;
}

int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
{
	int r;

	mutex_lock(&ctx->mutex);
	/* reset data block */
	ctx->data_block = 0;
	/* reset reg block */
	ctx->reg_block = 0;
	/* reset fb window */
	ctx->fb_base = 0;
	/* reset io mode */
	ctx->io_mode = ATOM_IO_MM;
	/* reset divmul */
	ctx->divmul[0] = 0;
	ctx->divmul[1] = 0;
	r = amdgpu_atom_execute_table_locked(ctx, index, params);
	mutex_unlock(&ctx->mutex);
	return r;
}
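
/*
 * Minimal usage sketch (illustrative only; the table index and parameter
 * layout below are hypothetical, real callers take both from the atombios
 * headers of the table they invoke):
 *
 *	uint32_t ps[4] = { cpu_to_le32(arg0), 0, 0, 0 };
 *
 *	if (amdgpu_atom_execute_table(ctx, index, ps))
 *		DRM_ERROR("atombios table %d failed\n", index);
 *
 * The same "ps" array doubles as the table's parameter space and its return
 * area, so results are read back out of it after the call.
 */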
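/*
 * Byte length of each IIO opcode; atom_index_iio() uses it to step through
 * the IIO programs in the BIOS and record where each indirect IO port's
 * program starts.
 */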
static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };

static void atom_index_iio(struct atom_context *ctx, int base)
{
	ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
	if (!ctx->iio)
		return;
	while (CU8(base) == ATOM_IIO_START) {
		ctx->iio[CU8(base + 1)] = base + 2;
		base += 2;
		while (CU8(base) != ATOM_IIO_END)
			base += atom_iio_len[CU8(base)];
		base += 3;
	}
}

struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
{
	int base;
	struct atom_context *ctx =
	    kzalloc(sizeof(struct atom_context), GFP_KERNEL);
	char *str;
	u16 idx;

	if (!ctx)
		return NULL;

	ctx->card = card;
	ctx->bios = bios;

	if (CU16(0) != ATOM_BIOS_MAGIC) {
		pr_info("Invalid BIOS magic\n");
		kfree(ctx);
		return NULL;
	}
	if (strncmp
	    (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
	     strlen(ATOM_ATI_MAGIC))) {
		pr_info("Invalid ATI magic\n");
		kfree(ctx);
		return NULL;
	}

	base = CU16(ATOM_ROM_TABLE_PTR);
	if (strncmp
	    (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
	     strlen(ATOM_ROM_MAGIC))) {
		pr_info("Invalid ATOM magic\n");
		kfree(ctx);
		return NULL;
	}

	ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
	ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
	atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
	if (!ctx->iio) {
		amdgpu_atom_destroy(ctx);
		return NULL;
	}

	idx = CU16(ATOM_ROM_PART_NUMBER_PTR);
	if (idx == 0)
		idx = 0x80;

	str = CSTR(idx);
	if (*str != '\0') {
		pr_info("ATOM BIOS: %s\n", str);
		strlcpy(ctx->vbios_version, str, sizeof(ctx->vbios_version));
	}

	return ctx;
}

int amdgpu_atom_asic_init(struct atom_context *ctx)
{
	int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
	uint32_t ps[16];
	int ret;

	memset(ps, 0, 64);

	ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
	ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
	if (!ps[0] || !ps[1])
		return 1;

	if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
		return 1;
	ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps);
	if (ret)
		return ret;

	memset(ps, 0, 64);

	return ret;
}

void amdgpu_atom_destroy(struct atom_context *ctx)
{
	kfree(ctx->iio);
	kfree(ctx);
}

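/*
 * Both the master data table and the master command table are arrays of
 * 16-bit offsets starting 4 bytes past the table header; a zero offset means
 * the entry is absent.  The helpers below look an entry up by index and
 * return its format/content revision (and, for data tables, size and start).
 */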
bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index,
				   uint16_t * size, uint8_t * frev, uint8_t * crev,
				   uint16_t * data_start)
{
	int offset = index * 2 + 4;
	int idx = CU16(ctx->data_table + offset);
	u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);

	if (!mdt[index])
		return false;

	if (size)
		*size = CU16(idx);
	if (frev)
		*frev = CU8(idx + 2);
	if (crev)
		*crev = CU8(idx + 3);
	*data_start = idx;
	return true;
}

bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
				  uint8_t * crev)
{
	int offset = index * 2 + 4;
	int idx = CU16(ctx->cmd_table + offset);
	u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);

	if (!mct[index])
		return false;

	if (frev)
		*frev = CU8(idx + 2);
	if (crev)
		*crev = CU8(idx + 3);
	return true;
}