amdgpu_atom.c revision 1.2 1 /* $NetBSD: amdgpu_atom.c,v 1.2 2020/02/14 04:30:04 riastradh Exp $ */
2
3 /*
4 * Copyright 2008 Advanced Micro Devices, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Author: Stanislaw Skowronek
25 */
26
27 #include <sys/cdefs.h>
28 __KERNEL_RCSID(0, "$NetBSD: amdgpu_atom.c,v 1.2 2020/02/14 04:30:04 riastradh Exp $");
29
30 #include <linux/module.h>
31 #include <linux/sched.h>
32 #include <linux/slab.h>
33 #include <asm/unaligned.h>
34 #include <asm/byteorder.h>
35
36 #define ATOM_DEBUG
37
38 #include "atom.h"
39 #include "atom-names.h"
40 #include "atom-bits.h"
41 #include "amdgpu.h"
42
/* Condition codes for the conditional JUMP opcodes; they test the
 * cs_equal/cs_above flags set by the COMPARE and TEST handlers. */
#define ATOM_COND_ABOVE		0
#define ATOM_COND_ABOVEOREQUAL	1
#define ATOM_COND_ALWAYS	2
#define ATOM_COND_BELOW		3
#define ATOM_COND_BELOWOREQUAL	4
#define ATOM_COND_EQUAL		5
#define ATOM_COND_NOTEQUAL	6

/* Port kinds for the SET_ATI_PORT/SET_PCI_PORT/SET_SYSIO_PORT opcodes. */
#define ATOM_PORT_ATI	0
#define ATOM_PORT_PCI	1
#define ATOM_PORT_SYSIO	2

/* Units for the two DELAY opcodes. */
#define ATOM_UNIT_MICROSEC	0
#define ATOM_UNIT_MILLISEC	1

/* Legacy PLL index/data register pair. */
#define PLL_INDEX	2
#define PLL_DATA	3
60
/*
 * Per-invocation state for executing one AtomBIOS command table.
 * A fresh context is created for each table call; nested CALL_TABLE
 * ops run in their own context (see atom_op_calltable).
 */
typedef struct {
	struct atom_context *ctx;	/* shared per-card context */
	uint32_t *ps, *ws;		/* parameter space / workspace arrays */
	int ps_shift;			/* ps offset handed to nested table calls */
	uint16_t start;			/* byte offset of this table's bytecode */
	unsigned last_jump;		/* target of the last jump taken (loop watchdog) */
	unsigned long last_jump_jiffies;	/* when that jump was first taken */
	bool abort;			/* set to terminate execution with an error */
} atom_exec_context;
70
/* Runtime switch: nonzero enables the DEBUG/SDEBUG opcode trace. */
int amdgpu_atom_debug = 0;
static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);

/* Bit mask selecting the sub-dword field named by an ATOM_SRC_* code
 * (dword, the three word positions, then the four byte positions). */
static uint32_t atom_arg_mask[8] =
    { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
	0xFF000000 };
/* Right shift that brings the masked field above down to bit 0. */
static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };

static int atom_dst_to_src[8][4] = {
	/* translate destination alignment field to the source alignment encoding */
	{0, 0, 0, 0},
	{1, 2, 3, 0},
	{1, 2, 3, 0},
	{1, 2, 3, 0},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
};
/* Default destination sub-field index for each destination alignment. */
static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };
92
/* Current table-call nesting depth, used to indent SDEBUG trace lines. */
static int debug_depth = 0;
#undef DEBUG /* XXX NetBSD kludge */
#ifdef ATOM_DEBUG
/* Emit n spaces so nested table traces are visually indented. */
static void debug_print_spaces(int n)
{
	while (n--)
		printk("   ");
}

/* DEBUG: raw trace output; SDEBUG: trace output indented by call depth.
 * Both are no-ops unless amdgpu_atom_debug is set at runtime. */
#define DEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0)
#define SDEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
#else
#define DEBUG(...) do { } while (0)
#define SDEBUG(...) do { } while (0)
#endif
108
109 static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
110 uint32_t index, uint32_t data)
111 {
112 uint32_t temp = 0xCDCDCDCD;
113
114 while (1)
115 switch (CU8(base)) {
116 case ATOM_IIO_NOP:
117 base++;
118 break;
119 case ATOM_IIO_READ:
120 temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1));
121 base += 3;
122 break;
123 case ATOM_IIO_WRITE:
124 ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
125 base += 3;
126 break;
127 case ATOM_IIO_CLEAR:
128 temp &=
129 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
130 CU8(base + 2));
131 base += 3;
132 break;
133 case ATOM_IIO_SET:
134 temp |=
135 (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base +
136 2);
137 base += 3;
138 break;
139 case ATOM_IIO_MOVE_INDEX:
140 temp &=
141 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
142 CU8(base + 3));
143 temp |=
144 ((index >> CU8(base + 2)) &
145 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
146 3);
147 base += 4;
148 break;
149 case ATOM_IIO_MOVE_DATA:
150 temp &=
151 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
152 CU8(base + 3));
153 temp |=
154 ((data >> CU8(base + 2)) &
155 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
156 3);
157 base += 4;
158 break;
159 case ATOM_IIO_MOVE_ATTR:
160 temp &=
161 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
162 CU8(base + 3));
163 temp |=
164 ((ctx->
165 io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 -
166 CU8
167 (base
168 +
169 1))))
170 << CU8(base + 3);
171 base += 4;
172 break;
173 case ATOM_IIO_END:
174 return temp;
175 default:
176 printk(KERN_INFO "Unknown IIO opcode.\n");
177 return 0;
178 }
179 }
180
/*
 * Decode and read one source operand of the current opcode.
 *
 * @attr:  operand attribute byte; bits 0-2 select the argument type
 *         (ATOM_ARG_*), bits 3-5 the sub-dword alignment (ATOM_SRC_*).
 * @ptr:   in/out instruction pointer, advanced past the operand bytes.
 * @saved: if non-NULL, receives the raw 32-bit value before masking
 *         (callers use it for read-modify-write via atom_put_dst).
 * @print: nonzero to emit debug trace output.
 *
 * Returns the operand value, masked and shifted down to bit 0
 * according to the alignment code (immediates return early, unmasked).
 */
static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
				 int *ptr, uint32_t *saved, int print)
{
	uint32_t idx, val = 0xCDCDCDCD, align, arg;
	struct atom_context *gctx = ctx->ctx;
	arg = attr & 7;
	align = (attr >> 3) & 7;
	switch (arg) {
	case ATOM_ARG_REG:
		/* Card register, offset by the current register block. */
		idx = U16(*ptr);
		(*ptr) += 2;
		if (print)
			DEBUG("REG[0x%04X]", idx);
		idx += gctx->reg_block;
		switch (gctx->io_mode) {
		case ATOM_IO_MM:
			val = gctx->card->reg_read(gctx->card, idx);
			break;
		case ATOM_IO_PCI:
			printk(KERN_INFO
			       "PCI registers are not implemented.\n");
			return 0;
		case ATOM_IO_SYSIO:
			printk(KERN_INFO
			       "SYSIO registers are not implemented.\n");
			return 0;
		default:
			/* ATOM_IO_IIO | port: run the indirect-IO program. */
			if (!(gctx->io_mode & 0x80)) {
				printk(KERN_INFO "Bad IO mode.\n");
				return 0;
			}
			if (!gctx->iio[gctx->io_mode & 0x7F]) {
				printk(KERN_INFO
				       "Undefined indirect IO read method %d.\n",
				       gctx->io_mode & 0x7F);
				return 0;
			}
			val =
			    atom_iio_execute(gctx,
					     gctx->iio[gctx->io_mode & 0x7F],
					     idx, 0);
		}
		break;
	case ATOM_ARG_PS:
		/* Caller-supplied parameter space (little-endian dwords). */
		idx = U8(*ptr);
		(*ptr)++;
		/* get_unaligned_le32 avoids unaligned accesses from atombios
		 * tables, noticed on a DEC Alpha. */
		val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
		if (print)
			DEBUG("PS[0x%02X,0x%04X]", idx, val);
		break;
	case ATOM_ARG_WS:
		/* Workspace slot; high indices are special pseudo-registers. */
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("WS[0x%02X]", idx);
		switch (idx) {
		case ATOM_WS_QUOTIENT:
			val = gctx->divmul[0];
			break;
		case ATOM_WS_REMAINDER:
			val = gctx->divmul[1];
			break;
		case ATOM_WS_DATAPTR:
			val = gctx->data_block;
			break;
		case ATOM_WS_SHIFT:
			val = gctx->shift;
			break;
		case ATOM_WS_OR_MASK:
			/* Single bit set at the current shift position. */
			val = 1 << gctx->shift;
			break;
		case ATOM_WS_AND_MASK:
			/* All bits set except the current shift position. */
			val = ~(1 << gctx->shift);
			break;
		case ATOM_WS_FB_WINDOW:
			val = gctx->fb_base;
			break;
		case ATOM_WS_ATTRIBUTES:
			val = gctx->io_attr;
			break;
		case ATOM_WS_REGPTR:
			val = gctx->reg_block;
			break;
		default:
			val = ctx->ws[idx];
		}
		break;
	case ATOM_ARG_ID:
		/* BIOS image offset, relative to the current data block. */
		idx = U16(*ptr);
		(*ptr) += 2;
		if (print) {
			if (gctx->data_block)
				DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
			else
				DEBUG("ID[0x%04X]", idx);
		}
		val = U32(idx + gctx->data_block);
		break;
	case ATOM_ARG_FB:
		/* Scratch ("framebuffer window") dword, bounds-checked. */
		idx = U8(*ptr);
		(*ptr)++;
		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
			DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
			val = 0;
		} else
			val = gctx->scratch[(gctx->fb_base / 4) + idx];
		if (print)
			DEBUG("FB[0x%02X]", idx);
		break;
	case ATOM_ARG_IMM:
		/* Immediate: size from alignment; returned unmasked. */
		switch (align) {
		case ATOM_SRC_DWORD:
			val = U32(*ptr);
			(*ptr) += 4;
			if (print)
				DEBUG("IMM 0x%08X\n", val);
			return val;
		case ATOM_SRC_WORD0:
		case ATOM_SRC_WORD8:
		case ATOM_SRC_WORD16:
			val = U16(*ptr);
			(*ptr) += 2;
			if (print)
				DEBUG("IMM 0x%04X\n", val);
			return val;
		case ATOM_SRC_BYTE0:
		case ATOM_SRC_BYTE8:
		case ATOM_SRC_BYTE16:
		case ATOM_SRC_BYTE24:
			val = U8(*ptr);
			(*ptr)++;
			if (print)
				DEBUG("IMM 0x%02X\n", val);
			return val;
		}
		return 0;
	case ATOM_ARG_PLL:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("PLL[0x%02X]", idx);
		val = gctx->card->pll_read(gctx->card, idx);
		break;
	case ATOM_ARG_MC:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("MC[0x%02X]", idx);
		val = gctx->card->mc_read(gctx->card, idx);
		break;
	}
	if (saved)
		*saved = val;
	/* Extract the requested sub-field and normalize it to bit 0. */
	val &= atom_arg_mask[align];
	val >>= atom_arg_shift[align];
	if (print)
		switch (align) {
		case ATOM_SRC_DWORD:
			DEBUG(".[31:0] -> 0x%08X\n", val);
			break;
		case ATOM_SRC_WORD0:
			DEBUG(".[15:0] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_WORD8:
			DEBUG(".[23:8] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_WORD16:
			DEBUG(".[31:16] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_BYTE0:
			DEBUG(".[7:0] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE8:
			DEBUG(".[15:8] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE16:
			DEBUG(".[23:16] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE24:
			DEBUG(".[31:24] -> 0x%02X\n", val);
			break;
		}
	return val;
}
368
369 static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
370 {
371 uint32_t align = (attr >> 3) & 7, arg = attr & 7;
372 switch (arg) {
373 case ATOM_ARG_REG:
374 case ATOM_ARG_ID:
375 (*ptr) += 2;
376 break;
377 case ATOM_ARG_PLL:
378 case ATOM_ARG_MC:
379 case ATOM_ARG_PS:
380 case ATOM_ARG_WS:
381 case ATOM_ARG_FB:
382 (*ptr)++;
383 break;
384 case ATOM_ARG_IMM:
385 switch (align) {
386 case ATOM_SRC_DWORD:
387 (*ptr) += 4;
388 return;
389 case ATOM_SRC_WORD0:
390 case ATOM_SRC_WORD8:
391 case ATOM_SRC_WORD16:
392 (*ptr) += 2;
393 return;
394 case ATOM_SRC_BYTE0:
395 case ATOM_SRC_BYTE8:
396 case ATOM_SRC_BYTE16:
397 case ATOM_SRC_BYTE24:
398 (*ptr)++;
399 return;
400 }
401 return;
402 }
403 }
404
405 static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
406 {
407 return atom_get_src_int(ctx, attr, ptr, NULL, 1);
408 }
409
410 static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
411 {
412 uint32_t val = 0xCDCDCDCD;
413
414 switch (align) {
415 case ATOM_SRC_DWORD:
416 val = U32(*ptr);
417 (*ptr) += 4;
418 break;
419 case ATOM_SRC_WORD0:
420 case ATOM_SRC_WORD8:
421 case ATOM_SRC_WORD16:
422 val = U16(*ptr);
423 (*ptr) += 2;
424 break;
425 case ATOM_SRC_BYTE0:
426 case ATOM_SRC_BYTE8:
427 case ATOM_SRC_BYTE16:
428 case ATOM_SRC_BYTE24:
429 val = U8(*ptr);
430 (*ptr)++;
431 break;
432 }
433 return val;
434 }
435
436 static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
437 int *ptr, uint32_t *saved, int print)
438 {
439 return atom_get_src_int(ctx,
440 arg | atom_dst_to_src[(attr >> 3) &
441 7][(attr >> 6) & 3] << 3,
442 ptr, saved, print);
443 }
444
445 static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
446 {
447 atom_skip_src_int(ctx,
448 arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) &
449 3] << 3, ptr);
450 }
451
/*
 * Write @val back into the destination operand located at *ptr.
 * @saved is the full original 32-bit value previously returned by
 * atom_get_dst(); only the bits of the destination sub-field are
 * replaced, all other bits are preserved (read-modify-write).
 */
static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
			 int *ptr, uint32_t val, uint32_t saved)
{
	uint32_t align =
	    atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3], old_val =
	    val, idx;
	struct atom_context *gctx = ctx->ctx;
	/* old_val keeps the in-field value for the trace output below. */
	old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
	/* Position the new field and merge it into the preserved bits. */
	val <<= atom_arg_shift[align];
	val &= atom_arg_mask[align];
	saved &= ~atom_arg_mask[align];
	val |= saved;
	switch (arg) {
	case ATOM_ARG_REG:
		idx = U16(*ptr);
		(*ptr) += 2;
		DEBUG("REG[0x%04X]", idx);
		idx += gctx->reg_block;
		switch (gctx->io_mode) {
		case ATOM_IO_MM:
			if (idx == 0)
				gctx->card->reg_write(gctx->card, idx,
						      val << 2);
			else
				gctx->card->reg_write(gctx->card, idx, val);
			break;
		case ATOM_IO_PCI:
			printk(KERN_INFO
			       "PCI registers are not implemented.\n");
			return;
		case ATOM_IO_SYSIO:
			printk(KERN_INFO
			       "SYSIO registers are not implemented.\n");
			return;
		default:
			if (!(gctx->io_mode & 0x80)) {
				printk(KERN_INFO "Bad IO mode.\n");
				return;
			}
			/* NOTE(review): the read path (atom_get_src_int)
			 * masks the IIO index with 0x7F, while this write
			 * path uses 0xFF even though bit 0x80 is known to
			 * be set here -- looks inconsistent; confirm against
			 * upstream before changing. */
			if (!gctx->iio[gctx->io_mode & 0xFF]) {
				printk(KERN_INFO
				       "Undefined indirect IO write method %d.\n",
				       gctx->io_mode & 0x7F);
				return;
			}
			atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF],
					 idx, val);
		}
		break;
	case ATOM_ARG_PS:
		/* Parameter space is stored little-endian. */
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("PS[0x%02X]", idx);
		ctx->ps[idx] = cpu_to_le32(val);
		break;
	case ATOM_ARG_WS:
		/* Workspace slot; high indices map to context state. */
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("WS[0x%02X]", idx);
		switch (idx) {
		case ATOM_WS_QUOTIENT:
			gctx->divmul[0] = val;
			break;
		case ATOM_WS_REMAINDER:
			gctx->divmul[1] = val;
			break;
		case ATOM_WS_DATAPTR:
			gctx->data_block = val;
			break;
		case ATOM_WS_SHIFT:
			gctx->shift = val;
			break;
		case ATOM_WS_OR_MASK:
		case ATOM_WS_AND_MASK:
			/* Derived read-only pseudo-registers; writes ignored. */
			break;
		case ATOM_WS_FB_WINDOW:
			gctx->fb_base = val;
			break;
		case ATOM_WS_ATTRIBUTES:
			gctx->io_attr = val;
			break;
		case ATOM_WS_REGPTR:
			gctx->reg_block = val;
			break;
		default:
			ctx->ws[idx] = val;
		}
		break;
	case ATOM_ARG_FB:
		/* Scratch dword, bounds-checked like the read path. */
		idx = U8(*ptr);
		(*ptr)++;
		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
			DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
		} else
			gctx->scratch[(gctx->fb_base / 4) + idx] = val;
		DEBUG("FB[0x%02X]", idx);
		break;
	case ATOM_ARG_PLL:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("PLL[0x%02X]", idx);
		gctx->card->pll_write(gctx->card, idx, val);
		break;
	case ATOM_ARG_MC:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("MC[0x%02X]", idx);
		gctx->card->mc_write(gctx->card, idx, val);
		return;
	}
	/* Trace the in-field value that was written. */
	switch (align) {
	case ATOM_SRC_DWORD:
		DEBUG(".[31:0] <- 0x%08X\n", old_val);
		break;
	case ATOM_SRC_WORD0:
		DEBUG(".[15:0] <- 0x%04X\n", old_val);
		break;
	case ATOM_SRC_WORD8:
		DEBUG(".[23:8] <- 0x%04X\n", old_val);
		break;
	case ATOM_SRC_WORD16:
		DEBUG(".[31:16] <- 0x%04X\n", old_val);
		break;
	case ATOM_SRC_BYTE0:
		DEBUG(".[7:0] <- 0x%02X\n", old_val);
		break;
	case ATOM_SRC_BYTE8:
		DEBUG(".[15:8] <- 0x%02X\n", old_val);
		break;
	case ATOM_SRC_BYTE16:
		DEBUG(".[23:16] <- 0x%02X\n", old_val);
		break;
	case ATOM_SRC_BYTE24:
		DEBUG(".[31:24] <- 0x%02X\n", old_val);
		break;
	}
}
590
591 static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
592 {
593 uint8_t attr = U8((*ptr)++);
594 uint32_t dst, src, saved;
595 int dptr = *ptr;
596 SDEBUG(" dst: ");
597 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
598 SDEBUG(" src: ");
599 src = atom_get_src(ctx, attr, ptr);
600 dst += src;
601 SDEBUG(" dst: ");
602 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
603 }
604
605 static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
606 {
607 uint8_t attr = U8((*ptr)++);
608 uint32_t dst, src, saved;
609 int dptr = *ptr;
610 SDEBUG(" dst: ");
611 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
612 SDEBUG(" src: ");
613 src = atom_get_src(ctx, attr, ptr);
614 dst &= src;
615 SDEBUG(" dst: ");
616 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
617 }
618
/* BEEP opcode: the BIOS requested an audible beep; just log it. */
static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
{
	printk("ATOM BIOS beeped!\n");
}
623
624 static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
625 {
626 int idx = U8((*ptr)++);
627 int r = 0;
628
629 if (idx < ATOM_TABLE_NAMES_CNT)
630 SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
631 else
632 SDEBUG(" table: %d\n", idx);
633 if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
634 r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
635 if (r) {
636 ctx->abort = true;
637 }
638 }
639
640 static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
641 {
642 uint8_t attr = U8((*ptr)++);
643 uint32_t saved;
644 int dptr = *ptr;
645 attr &= 0x38;
646 attr |= atom_def_dst[attr >> 3] << 6;
647 atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
648 SDEBUG(" dst: ");
649 atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
650 }
651
652 static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
653 {
654 uint8_t attr = U8((*ptr)++);
655 uint32_t dst, src;
656 SDEBUG(" src1: ");
657 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
658 SDEBUG(" src2: ");
659 src = atom_get_src(ctx, attr, ptr);
660 ctx->ctx->cs_equal = (dst == src);
661 ctx->ctx->cs_above = (dst > src);
662 SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
663 ctx->ctx->cs_above ? "GT" : "LE");
664 }
665
666 static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
667 {
668 unsigned count = U8((*ptr)++);
669 SDEBUG(" count: %d\n", count);
670 if (arg == ATOM_UNIT_MICROSEC)
671 udelay(count);
672 else if (!drm_can_sleep())
673 mdelay(count);
674 else
675 msleep(count);
676 }
677
678 static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
679 {
680 uint8_t attr = U8((*ptr)++);
681 uint32_t dst, src;
682 SDEBUG(" src1: ");
683 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
684 SDEBUG(" src2: ");
685 src = atom_get_src(ctx, attr, ptr);
686 if (src != 0) {
687 ctx->ctx->divmul[0] = dst / src;
688 ctx->ctx->divmul[1] = dst % src;
689 } else {
690 ctx->ctx->divmul[0] = 0;
691 ctx->ctx->divmul[1] = 0;
692 }
693 }
694
695 static void atom_op_div32(atom_exec_context *ctx, int *ptr, int arg)
696 {
697 uint64_t val64;
698 uint8_t attr = U8((*ptr)++);
699 uint32_t dst, src;
700 SDEBUG(" src1: ");
701 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
702 SDEBUG(" src2: ");
703 src = atom_get_src(ctx, attr, ptr);
704 if (src != 0) {
705 val64 = dst;
706 val64 |= ((uint64_t)ctx->ctx->divmul[1]) << 32;
707 do_div(val64, src);
708 ctx->ctx->divmul[0] = lower_32_bits(val64);
709 ctx->ctx->divmul[1] = upper_32_bits(val64);
710 } else {
711 ctx->ctx->divmul[0] = 0;
712 ctx->ctx->divmul[1] = 0;
713 }
714 }
715
/* EOT (end of table): execution termination is handled by the main
 * loop, so this handler is a no-op. */
static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
{
	/* functionally, a nop */
}
720
/*
 * JUMP opcode family.  @arg is an ATOM_COND_* code tested against the
 * cs_equal/cs_above flags set by COMPARE/TEST.  A watchdog aborts the
 * table when it keeps jumping to the same target for over 5 seconds,
 * which catches BIOS tables stuck polling a condition that will never
 * come true.
 */
static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
{
	int execute = 0, target = U16(*ptr);
	unsigned long cjiffies;

	(*ptr) += 2;
	switch (arg) {
	case ATOM_COND_ABOVE:
		execute = ctx->ctx->cs_above;
		break;
	case ATOM_COND_ABOVEOREQUAL:
		execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
		break;
	case ATOM_COND_ALWAYS:
		execute = 1;
		break;
	case ATOM_COND_BELOW:
		execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
		break;
	case ATOM_COND_BELOWOREQUAL:
		execute = !ctx->ctx->cs_above;
		break;
	case ATOM_COND_EQUAL:
		execute = ctx->ctx->cs_equal;
		break;
	case ATOM_COND_NOTEQUAL:
		execute = !ctx->ctx->cs_equal;
		break;
	}
	if (arg != ATOM_COND_ALWAYS)
		SDEBUG("   taken: %s\n", execute ? "yes" : "no");
	SDEBUG("   target: 0x%04X\n", target);
	if (execute) {
		if (ctx->last_jump == (ctx->start + target)) {
			/* Repeated jump to the same target: check the watchdog. */
			cjiffies = jiffies;
			if (time_after(cjiffies, ctx->last_jump_jiffies)) {
				cjiffies -= ctx->last_jump_jiffies;
				if ((jiffies_to_msecs(cjiffies) > 5000)) {
					DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n");
					ctx->abort = true;
				}
			} else {
				/* jiffies wrap around we will just wait a little longer */
				ctx->last_jump_jiffies = jiffies;
			}
		} else {
			/* New jump target: restart the watchdog timer. */
			ctx->last_jump = ctx->start + target;
			ctx->last_jump_jiffies = jiffies;
		}
		*ptr = ctx->start + target;
	}
}
773
774 static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
775 {
776 uint8_t attr = U8((*ptr)++);
777 uint32_t dst, mask, src, saved;
778 int dptr = *ptr;
779 SDEBUG(" dst: ");
780 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
781 mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
782 SDEBUG(" mask: 0x%08x", mask);
783 SDEBUG(" src: ");
784 src = atom_get_src(ctx, attr, ptr);
785 dst &= mask;
786 dst |= src;
787 SDEBUG(" dst: ");
788 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
789 }
790
791 static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
792 {
793 uint8_t attr = U8((*ptr)++);
794 uint32_t src, saved;
795 int dptr = *ptr;
796 if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
797 atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
798 else {
799 atom_skip_dst(ctx, arg, attr, ptr);
800 saved = 0xCDCDCDCD;
801 }
802 SDEBUG(" src: ");
803 src = atom_get_src(ctx, attr, ptr);
804 SDEBUG(" dst: ");
805 atom_put_dst(ctx, arg, attr, &dptr, src, saved);
806 }
807
808 static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
809 {
810 uint8_t attr = U8((*ptr)++);
811 uint32_t dst, src;
812 SDEBUG(" src1: ");
813 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
814 SDEBUG(" src2: ");
815 src = atom_get_src(ctx, attr, ptr);
816 ctx->ctx->divmul[0] = dst * src;
817 }
818
819 static void atom_op_mul32(atom_exec_context *ctx, int *ptr, int arg)
820 {
821 uint64_t val64;
822 uint8_t attr = U8((*ptr)++);
823 uint32_t dst, src;
824 SDEBUG(" src1: ");
825 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
826 SDEBUG(" src2: ");
827 src = atom_get_src(ctx, attr, ptr);
828 val64 = (uint64_t)dst * (uint64_t)src;
829 ctx->ctx->divmul[0] = lower_32_bits(val64);
830 ctx->ctx->divmul[1] = upper_32_bits(val64);
831 }
832
/* NOP opcode: intentionally does nothing. */
static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
{
	/* nothing */
}
837
838 static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
839 {
840 uint8_t attr = U8((*ptr)++);
841 uint32_t dst, src, saved;
842 int dptr = *ptr;
843 SDEBUG(" dst: ");
844 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
845 SDEBUG(" src: ");
846 src = atom_get_src(ctx, attr, ptr);
847 dst |= src;
848 SDEBUG(" dst: ");
849 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
850 }
851
/* POST_CARD opcode: the BIOS emits a POST diagnostic code; only traced. */
static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t val = U8((*ptr)++);
	SDEBUG("POST card output: 0x%02X\n", val);
}
857
/* REPEAT opcode: not implemented by this interpreter; logged and ignored. */
static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
{
	printk(KERN_INFO "unimplemented!\n");
}
862
/* RESTORE_REG opcode: not implemented by this interpreter; logged and ignored. */
static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
{
	printk(KERN_INFO "unimplemented!\n");
}
867
/* SAVE_REG opcode: not implemented by this interpreter; logged and ignored. */
static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
{
	printk(KERN_INFO "unimplemented!\n");
}
872
873 static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
874 {
875 int idx = U8(*ptr);
876 (*ptr)++;
877 SDEBUG(" block: %d\n", idx);
878 if (!idx)
879 ctx->ctx->data_block = 0;
880 else if (idx == 255)
881 ctx->ctx->data_block = ctx->start;
882 else
883 ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
884 SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block);
885 }
886
887 static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
888 {
889 uint8_t attr = U8((*ptr)++);
890 SDEBUG(" fb_base: ");
891 ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
892 }
893
894 static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
895 {
896 int port;
897 switch (arg) {
898 case ATOM_PORT_ATI:
899 port = U16(*ptr);
900 if (port < ATOM_IO_NAMES_CNT)
901 SDEBUG(" port: %d (%s)\n", port, atom_io_names[port]);
902 else
903 SDEBUG(" port: %d\n", port);
904 if (!port)
905 ctx->ctx->io_mode = ATOM_IO_MM;
906 else
907 ctx->ctx->io_mode = ATOM_IO_IIO | port;
908 (*ptr) += 2;
909 break;
910 case ATOM_PORT_PCI:
911 ctx->ctx->io_mode = ATOM_IO_PCI;
912 (*ptr)++;
913 break;
914 case ATOM_PORT_SYSIO:
915 ctx->ctx->io_mode = ATOM_IO_SYSIO;
916 (*ptr)++;
917 break;
918 }
919 }
920
921 static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
922 {
923 ctx->ctx->reg_block = U16(*ptr);
924 (*ptr) += 2;
925 SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
926 }
927
928 static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
929 {
930 uint8_t attr = U8((*ptr)++), shift;
931 uint32_t saved, dst;
932 int dptr = *ptr;
933 attr &= 0x38;
934 attr |= atom_def_dst[attr >> 3] << 6;
935 SDEBUG(" dst: ");
936 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
937 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
938 SDEBUG(" shift: %d\n", shift);
939 dst <<= shift;
940 SDEBUG(" dst: ");
941 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
942 }
943
944 static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
945 {
946 uint8_t attr = U8((*ptr)++), shift;
947 uint32_t saved, dst;
948 int dptr = *ptr;
949 attr &= 0x38;
950 attr |= atom_def_dst[attr >> 3] << 6;
951 SDEBUG(" dst: ");
952 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
953 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
954 SDEBUG(" shift: %d\n", shift);
955 dst >>= shift;
956 SDEBUG(" dst: ");
957 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
958 }
959
960 static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
961 {
962 uint8_t attr = U8((*ptr)++), shift;
963 uint32_t saved, dst;
964 int dptr = *ptr;
965 uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
966 SDEBUG(" dst: ");
967 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
968 /* op needs to full dst value */
969 dst = saved;
970 shift = atom_get_src(ctx, attr, ptr);
971 SDEBUG(" shift: %d\n", shift);
972 dst <<= shift;
973 dst &= atom_arg_mask[dst_align];
974 dst >>= atom_arg_shift[dst_align];
975 SDEBUG(" dst: ");
976 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
977 }
978
979 static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
980 {
981 uint8_t attr = U8((*ptr)++), shift;
982 uint32_t saved, dst;
983 int dptr = *ptr;
984 uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
985 SDEBUG(" dst: ");
986 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
987 /* op needs to full dst value */
988 dst = saved;
989 shift = atom_get_src(ctx, attr, ptr);
990 SDEBUG(" shift: %d\n", shift);
991 dst >>= shift;
992 dst &= atom_arg_mask[dst_align];
993 dst >>= atom_arg_shift[dst_align];
994 SDEBUG(" dst: ");
995 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
996 }
997
998 static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
999 {
1000 uint8_t attr = U8((*ptr)++);
1001 uint32_t dst, src, saved;
1002 int dptr = *ptr;
1003 SDEBUG(" dst: ");
1004 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
1005 SDEBUG(" src: ");
1006 src = atom_get_src(ctx, attr, ptr);
1007 dst -= src;
1008 SDEBUG(" dst: ");
1009 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
1010 }
1011
/*
 * SWITCH opcode: walk a list of cases embedded in the bytecode.  Each
 * case starts with the ATOM_CASE_MAGIC byte followed by an immediate
 * of the source's alignment and a 16-bit jump target; the list ends
 * with the ATOM_CASE_END word.  On a match, jump to the target;
 * otherwise fall through past the end marker.
 */
static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t src, val, target;
	SDEBUG("   switch: ");
	src = atom_get_src(ctx, attr, ptr);
	while (U16(*ptr) != ATOM_CASE_END)
		if (U8(*ptr) == ATOM_CASE_MAGIC) {
			(*ptr)++;
			SDEBUG("   case: ");
			val =
			    atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM,
					 ptr);
			target = U16(*ptr);
			if (val == src) {
				SDEBUG("   target: %04X\n", target);
				*ptr = ctx->start + target;
				return;
			}
			(*ptr) += 2;
		} else {
			printk(KERN_INFO "Bad case.\n");
			return;
		}
	/* Skip the ATOM_CASE_END marker. */
	(*ptr) += 2;
}
1038
1039 static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
1040 {
1041 uint8_t attr = U8((*ptr)++);
1042 uint32_t dst, src;
1043 SDEBUG(" src1: ");
1044 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
1045 SDEBUG(" src2: ");
1046 src = atom_get_src(ctx, attr, ptr);
1047 ctx->ctx->cs_equal = ((dst & src) == 0);
1048 SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
1049 }
1050
1051 static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
1052 {
1053 uint8_t attr = U8((*ptr)++);
1054 uint32_t dst, src, saved;
1055 int dptr = *ptr;
1056 SDEBUG(" dst: ");
1057 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
1058 SDEBUG(" src: ");
1059 src = atom_get_src(ctx, attr, ptr);
1060 dst ^= src;
1061 SDEBUG(" dst: ");
1062 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
1063 }
1064
/* DEBUG opcode: the BIOS emits a debug byte; only traced. */
static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t val = U8((*ptr)++);
	SDEBUG("DEBUG output: 0x%02X\n", val);
}
1070
/* PROCESS_DS opcode: skip over an inline data segment of the given
 * length (the 16-bit length word plus that many payload bytes). */
static void atom_op_processds(atom_exec_context *ctx, int *ptr, int arg)
{
	uint16_t val = U16(*ptr);
	(*ptr) += val + 2;
	SDEBUG("PROCESSDS output: 0x%02X\n", val);
}
1077
1078 static struct {
1079 void (*func) (atom_exec_context *, int *, int);
1080 int arg;
1081 } opcode_table[ATOM_OP_CNT] = {
1082 {
1083 NULL, 0}, {
1084 atom_op_move, ATOM_ARG_REG}, {
1085 atom_op_move, ATOM_ARG_PS}, {
1086 atom_op_move, ATOM_ARG_WS}, {
1087 atom_op_move, ATOM_ARG_FB}, {
1088 atom_op_move, ATOM_ARG_PLL}, {
1089 atom_op_move, ATOM_ARG_MC}, {
1090 atom_op_and, ATOM_ARG_REG}, {
1091 atom_op_and, ATOM_ARG_PS}, {
1092 atom_op_and, ATOM_ARG_WS}, {
1093 atom_op_and, ATOM_ARG_FB}, {
1094 atom_op_and, ATOM_ARG_PLL}, {
1095 atom_op_and, ATOM_ARG_MC}, {
1096 atom_op_or, ATOM_ARG_REG}, {
1097 atom_op_or, ATOM_ARG_PS}, {
1098 atom_op_or, ATOM_ARG_WS}, {
1099 atom_op_or, ATOM_ARG_FB}, {
1100 atom_op_or, ATOM_ARG_PLL}, {
1101 atom_op_or, ATOM_ARG_MC}, {
1102 atom_op_shift_left, ATOM_ARG_REG}, {
1103 atom_op_shift_left, ATOM_ARG_PS}, {
1104 atom_op_shift_left, ATOM_ARG_WS}, {
1105 atom_op_shift_left, ATOM_ARG_FB}, {
1106 atom_op_shift_left, ATOM_ARG_PLL}, {
1107 atom_op_shift_left, ATOM_ARG_MC}, {
1108 atom_op_shift_right, ATOM_ARG_REG}, {
1109 atom_op_shift_right, ATOM_ARG_PS}, {
1110 atom_op_shift_right, ATOM_ARG_WS}, {
1111 atom_op_shift_right, ATOM_ARG_FB}, {
1112 atom_op_shift_right, ATOM_ARG_PLL}, {
1113 atom_op_shift_right, ATOM_ARG_MC}, {
1114 atom_op_mul, ATOM_ARG_REG}, {
1115 atom_op_mul, ATOM_ARG_PS}, {
1116 atom_op_mul, ATOM_ARG_WS}, {
1117 atom_op_mul, ATOM_ARG_FB}, {
1118 atom_op_mul, ATOM_ARG_PLL}, {
1119 atom_op_mul, ATOM_ARG_MC}, {
1120 atom_op_div, ATOM_ARG_REG}, {
1121 atom_op_div, ATOM_ARG_PS}, {
1122 atom_op_div, ATOM_ARG_WS}, {
1123 atom_op_div, ATOM_ARG_FB}, {
1124 atom_op_div, ATOM_ARG_PLL}, {
1125 atom_op_div, ATOM_ARG_MC}, {
1126 atom_op_add, ATOM_ARG_REG}, {
1127 atom_op_add, ATOM_ARG_PS}, {
1128 atom_op_add, ATOM_ARG_WS}, {
1129 atom_op_add, ATOM_ARG_FB}, {
1130 atom_op_add, ATOM_ARG_PLL}, {
1131 atom_op_add, ATOM_ARG_MC}, {
1132 atom_op_sub, ATOM_ARG_REG}, {
1133 atom_op_sub, ATOM_ARG_PS}, {
1134 atom_op_sub, ATOM_ARG_WS}, {
1135 atom_op_sub, ATOM_ARG_FB}, {
1136 atom_op_sub, ATOM_ARG_PLL}, {
1137 atom_op_sub, ATOM_ARG_MC}, {
1138 atom_op_setport, ATOM_PORT_ATI}, {
1139 atom_op_setport, ATOM_PORT_PCI}, {
1140 atom_op_setport, ATOM_PORT_SYSIO}, {
1141 atom_op_setregblock, 0}, {
1142 atom_op_setfbbase, 0}, {
1143 atom_op_compare, ATOM_ARG_REG}, {
1144 atom_op_compare, ATOM_ARG_PS}, {
1145 atom_op_compare, ATOM_ARG_WS}, {
1146 atom_op_compare, ATOM_ARG_FB}, {
1147 atom_op_compare, ATOM_ARG_PLL}, {
1148 atom_op_compare, ATOM_ARG_MC}, {
1149 atom_op_switch, 0}, {
1150 atom_op_jump, ATOM_COND_ALWAYS}, {
1151 atom_op_jump, ATOM_COND_EQUAL}, {
1152 atom_op_jump, ATOM_COND_BELOW}, {
1153 atom_op_jump, ATOM_COND_ABOVE}, {
1154 atom_op_jump, ATOM_COND_BELOWOREQUAL}, {
1155 atom_op_jump, ATOM_COND_ABOVEOREQUAL}, {
1156 atom_op_jump, ATOM_COND_NOTEQUAL}, {
1157 atom_op_test, ATOM_ARG_REG}, {
1158 atom_op_test, ATOM_ARG_PS}, {
1159 atom_op_test, ATOM_ARG_WS}, {
1160 atom_op_test, ATOM_ARG_FB}, {
1161 atom_op_test, ATOM_ARG_PLL}, {
1162 atom_op_test, ATOM_ARG_MC}, {
1163 atom_op_delay, ATOM_UNIT_MILLISEC}, {
1164 atom_op_delay, ATOM_UNIT_MICROSEC}, {
1165 atom_op_calltable, 0}, {
1166 atom_op_repeat, 0}, {
1167 atom_op_clear, ATOM_ARG_REG}, {
1168 atom_op_clear, ATOM_ARG_PS}, {
1169 atom_op_clear, ATOM_ARG_WS}, {
1170 atom_op_clear, ATOM_ARG_FB}, {
1171 atom_op_clear, ATOM_ARG_PLL}, {
1172 atom_op_clear, ATOM_ARG_MC}, {
1173 atom_op_nop, 0}, {
1174 atom_op_eot, 0}, {
1175 atom_op_mask, ATOM_ARG_REG}, {
1176 atom_op_mask, ATOM_ARG_PS}, {
1177 atom_op_mask, ATOM_ARG_WS}, {
1178 atom_op_mask, ATOM_ARG_FB}, {
1179 atom_op_mask, ATOM_ARG_PLL}, {
1180 atom_op_mask, ATOM_ARG_MC}, {
1181 atom_op_postcard, 0}, {
1182 atom_op_beep, 0}, {
1183 atom_op_savereg, 0}, {
1184 atom_op_restorereg, 0}, {
1185 atom_op_setdatablock, 0}, {
1186 atom_op_xor, ATOM_ARG_REG}, {
1187 atom_op_xor, ATOM_ARG_PS}, {
1188 atom_op_xor, ATOM_ARG_WS}, {
1189 atom_op_xor, ATOM_ARG_FB}, {
1190 atom_op_xor, ATOM_ARG_PLL}, {
1191 atom_op_xor, ATOM_ARG_MC}, {
1192 atom_op_shl, ATOM_ARG_REG}, {
1193 atom_op_shl, ATOM_ARG_PS}, {
1194 atom_op_shl, ATOM_ARG_WS}, {
1195 atom_op_shl, ATOM_ARG_FB}, {
1196 atom_op_shl, ATOM_ARG_PLL}, {
1197 atom_op_shl, ATOM_ARG_MC}, {
1198 atom_op_shr, ATOM_ARG_REG}, {
1199 atom_op_shr, ATOM_ARG_PS}, {
1200 atom_op_shr, ATOM_ARG_WS}, {
1201 atom_op_shr, ATOM_ARG_FB}, {
1202 atom_op_shr, ATOM_ARG_PLL}, {
1203 atom_op_shr, ATOM_ARG_MC}, {
1204 atom_op_debug, 0}, {
1205 atom_op_processds, 0}, {
1206 atom_op_mul32, ATOM_ARG_PS}, {
1207 atom_op_mul32, ATOM_ARG_WS}, {
1208 atom_op_div32, ATOM_ARG_PS}, {
1209 atom_op_div32, ATOM_ARG_WS},
1210 };
1211
1212 static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
1213 {
1214 int base = CU16(ctx->cmd_table + 4 + 2 * index);
1215 int len, ws, ps, ptr;
1216 unsigned char op;
1217 atom_exec_context ectx;
1218 int ret = 0;
1219
1220 if (!base)
1221 return -EINVAL;
1222
1223 len = CU16(base + ATOM_CT_SIZE_PTR);
1224 ws = CU8(base + ATOM_CT_WS_PTR);
1225 ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
1226 ptr = base + ATOM_CT_CODE_PTR;
1227
1228 SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
1229
1230 ectx.ctx = ctx;
1231 ectx.ps_shift = ps / 4;
1232 ectx.start = base;
1233 ectx.ps = params;
1234 ectx.abort = false;
1235 ectx.last_jump = 0;
1236 if (ws)
1237 ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
1238 else
1239 ectx.ws = NULL;
1240
1241 debug_depth++;
1242 while (1) {
1243 op = CU8(ptr++);
1244 if (op < ATOM_OP_NAMES_CNT)
1245 SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
1246 else
1247 SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
1248 if (ectx.abort) {
1249 DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
1250 base, len, ws, ps, ptr - 1);
1251 ret = -EINVAL;
1252 goto free;
1253 }
1254
1255 if (op < ATOM_OP_CNT && op > 0)
1256 opcode_table[op].func(&ectx, &ptr,
1257 opcode_table[op].arg);
1258 else
1259 break;
1260
1261 if (op == ATOM_OP_EOT)
1262 break;
1263 }
1264 debug_depth--;
1265 SDEBUG("<<\n");
1266
1267 free:
1268 if (ws)
1269 kfree(ectx.ws);
1270 return ret;
1271 }
1272
1273 int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
1274 {
1275 int r;
1276
1277 mutex_lock(&ctx->mutex);
1278 /* reset data block */
1279 ctx->data_block = 0;
1280 /* reset reg block */
1281 ctx->reg_block = 0;
1282 /* reset fb window */
1283 ctx->fb_base = 0;
1284 /* reset io mode */
1285 ctx->io_mode = ATOM_IO_MM;
1286 /* reset divmul */
1287 ctx->divmul[0] = 0;
1288 ctx->divmul[1] = 0;
1289 r = amdgpu_atom_execute_table_locked(ctx, index, params);
1290 mutex_unlock(&ctx->mutex);
1291 return r;
1292 }
1293
/* Encoded length in bytes of each indirect-I/O (IIO) instruction,
 * indexed by IIO opcode; used by atom_index_iio() to step over the
 * instructions of an IIO program. */
static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
1295
1296 static void atom_index_iio(struct atom_context *ctx, int base)
1297 {
1298 ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
1299 if (!ctx->iio)
1300 return;
1301 while (CU8(base) == ATOM_IIO_START) {
1302 ctx->iio[CU8(base + 1)] = base + 2;
1303 base += 2;
1304 while (CU8(base) != ATOM_IIO_END)
1305 base += atom_iio_len[CU8(base)];
1306 base += 3;
1307 }
1308 }
1309
1310 struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
1311 {
1312 int base;
1313 struct atom_context *ctx =
1314 kzalloc(sizeof(struct atom_context), GFP_KERNEL);
1315 char *str;
1316 char name[512];
1317 int i;
1318
1319 if (!ctx)
1320 return NULL;
1321
1322 ctx->card = card;
1323 ctx->bios = bios;
1324
1325 if (CU16(0) != ATOM_BIOS_MAGIC) {
1326 printk(KERN_INFO "Invalid BIOS magic.\n");
1327 kfree(ctx);
1328 return NULL;
1329 }
1330 if (strncmp
1331 (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
1332 strlen(ATOM_ATI_MAGIC))) {
1333 printk(KERN_INFO "Invalid ATI magic.\n");
1334 kfree(ctx);
1335 return NULL;
1336 }
1337
1338 base = CU16(ATOM_ROM_TABLE_PTR);
1339 if (strncmp
1340 (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
1341 strlen(ATOM_ROM_MAGIC))) {
1342 printk(KERN_INFO "Invalid ATOM magic.\n");
1343 kfree(ctx);
1344 return NULL;
1345 }
1346
1347 ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
1348 ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
1349 atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
1350 if (!ctx->iio) {
1351 amdgpu_atom_destroy(ctx);
1352 return NULL;
1353 }
1354
1355 str = CSTR(CU16(base + ATOM_ROM_MSG_PTR));
1356 while (*str && ((*str == '\n') || (*str == '\r')))
1357 str++;
1358 /* name string isn't always 0 terminated */
1359 for (i = 0; i < 511; i++) {
1360 name[i] = str[i];
1361 if (name[i] < '.' || name[i] > 'z') {
1362 name[i] = 0;
1363 break;
1364 }
1365 }
1366 printk(KERN_INFO "ATOM BIOS: %s\n", name);
1367
1368 return ctx;
1369 }
1370
1371 int amdgpu_atom_asic_init(struct atom_context *ctx)
1372 {
1373 int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
1374 uint32_t ps[16];
1375 int ret;
1376
1377 memset(ps, 0, 64);
1378
1379 ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
1380 ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
1381 if (!ps[0] || !ps[1])
1382 return 1;
1383
1384 if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
1385 return 1;
1386 ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps);
1387 if (ret)
1388 return ret;
1389
1390 memset(ps, 0, 64);
1391
1392 return ret;
1393 }
1394
/* Free a context created by amdgpu_atom_parse(), including its
 * indirect-I/O index table.  The BIOS image and card info passed to
 * amdgpu_atom_parse() are caller-owned and are not freed here. */
void amdgpu_atom_destroy(struct atom_context *ctx)
{
	kfree(ctx->iio);
	kfree(ctx);
}
1400
1401 bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index,
1402 uint16_t * size, uint8_t * frev, uint8_t * crev,
1403 uint16_t * data_start)
1404 {
1405 int offset = index * 2 + 4;
1406 int idx = CU16(ctx->data_table + offset);
1407 u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
1408
1409 if (!mdt[index])
1410 return false;
1411
1412 if (size)
1413 *size = CU16(idx);
1414 if (frev)
1415 *frev = CU8(idx + 2);
1416 if (crev)
1417 *crev = CU8(idx + 3);
1418 *data_start = idx;
1419 return true;
1420 }
1421
1422 bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
1423 uint8_t * crev)
1424 {
1425 int offset = index * 2 + 4;
1426 int idx = CU16(ctx->cmd_table + offset);
1427 u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
1428
1429 if (!mct[index])
1430 return false;
1431
1432 if (frev)
1433 *frev = CU8(idx + 2);
1434 if (crev)
1435 *crev = CU8(idx + 3);
1436 return true;
1437 }
1438
1439 int amdgpu_atom_allocate_fb_scratch(struct atom_context *ctx)
1440 {
1441 int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
1442 uint16_t data_offset;
1443 int usage_bytes = 0;
1444 struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
1445
1446 if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
1447 firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
1448
1449 DRM_DEBUG("atom firmware requested %08x %dkb\n",
1450 le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
1451 le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
1452
1453 usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
1454 }
1455 ctx->scratch_size_bytes = 0;
1456 if (usage_bytes == 0)
1457 usage_bytes = 20 * 1024;
1458 /* allocate some scratch memory */
1459 ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
1460 if (!ctx->scratch)
1461 return -ENOMEM;
1462 ctx->scratch_size_bytes = usage_bytes;
1463 return 0;
1464 }
1465