1 /* $NetBSD: radeon_atom.c,v 1.2 2021/12/18 23:45:43 riastradh Exp $ */
2
3 /*
4 * Copyright 2008 Advanced Micro Devices, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Author: Stanislaw Skowronek
25 */
26
27 #include <sys/cdefs.h>
28 __KERNEL_RCSID(0, "$NetBSD: radeon_atom.c,v 1.2 2021/12/18 23:45:43 riastradh Exp $");
29
30 #include <linux/module.h>
31 #include <linux/sched.h>
32 #include <linux/slab.h>
33
34 #include <asm/unaligned.h>
35
36 #include <drm/drm_device.h>
37 #include <drm/drm_util.h>
38
39 #define ATOM_DEBUG
40
41 #include "atom.h"
42 #include "atom-names.h"
43 #include "atom-bits.h"
44 #include "radeon.h"
45
46 #define ATOM_COND_ABOVE 0
47 #define ATOM_COND_ABOVEOREQUAL 1
48 #define ATOM_COND_ALWAYS 2
49 #define ATOM_COND_BELOW 3
50 #define ATOM_COND_BELOWOREQUAL 4
51 #define ATOM_COND_EQUAL 5
52 #define ATOM_COND_NOTEQUAL 6
53
54 #define ATOM_PORT_ATI 0
55 #define ATOM_PORT_PCI 1
56 #define ATOM_PORT_SYSIO 2
57
58 #define ATOM_UNIT_MICROSEC 0
59 #define ATOM_UNIT_MILLISEC 1
60
61 #define PLL_INDEX 2
62 #define PLL_DATA 3
63
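/*
 * Per-invocation interpreter state: the shared atom_context, the
 * caller-supplied parameter space (ps), a per-table scratch workspace
 * (ws), the table's start offset, and loop-watchdog bookkeeping used
 * to abort scripts that appear stuck.
 */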
64 typedef struct {
65 struct atom_context *ctx;
66 uint32_t *ps, *ws;
67 int ps_shift;
68 uint16_t start;
69 unsigned last_jump;
70 unsigned long last_jump_jiffies;
71 bool abort;
72 } atom_exec_context;
73
74 int atom_debug = 0;
75 static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
76 int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
77
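/*
 * Operand alignment handling: atom_arg_mask selects the bits of a
 * 32-bit value covered by each alignment encoding (dword, the three
 * word positions, the four byte positions), and atom_arg_shift
 * normalizes the selected field down to bit 0.
 */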
78 static uint32_t atom_arg_mask[8] = {
79 0xFFFFFFFF, 0x0000FFFF, 0x00FFFF00, 0xFFFF0000,
80 0x000000FF, 0x0000FF00, 0x00FF0000, 0xFF000000
81 };
82 static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
83
84 static int atom_dst_to_src[8][4] = {
85 /* translate destination alignment field to the source alignment encoding */
86 {0, 0, 0, 0},
87 {1, 2, 3, 0},
88 {1, 2, 3, 0},
89 {1, 2, 3, 0},
90 {4, 5, 6, 7},
91 {4, 5, 6, 7},
92 {4, 5, 6, 7},
93 {4, 5, 6, 7},
94 };
95 static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };
96
97 static int debug_depth = 0;
98 #ifdef ATOM_DEBUG
static void debug_print_spaces(int n)
100 {
101 while (n--)
102 printk(" ");
103 }
104
105 #ifdef __NetBSD__ /* XXX */
106 /*
107 * Kludge: NetBSD defines DEBUG to mean debugging is enabled. Since
108 * we're not going to include any more header files, it's OK for it to
109 * be defined unconditionally after this.
110 */
111 #undef DEBUG
112 #endif
113
114 #define DEBUG(...) do if (atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0)
115 #define SDEBUG(...) do if (atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
116 #else
117 #define DEBUG(...) do { } while (0)
118 #define SDEBUG(...) do { } while (0)
119 #endif
120
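/*
 * Execute one indirect-IO (IIO) program from the BIOS.  These small
 * byte-coded routines describe how to access indexed registers
 * through an index/data pair; 'temp' accumulates the value being
 * assembled.  The extra read before ATOM_IIO_WRITE on RV515 looks
 * like a chip-specific workaround inherited from the vendor tables.
 */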
static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
				 uint32_t index, uint32_t data)
123 {
124 struct radeon_device *rdev = ctx->card->dev->dev_private;
125 uint32_t temp = 0xCDCDCDCD;
126
127 while (1)
128 switch (CU8(base)) {
129 case ATOM_IIO_NOP:
130 base++;
131 break;
132 case ATOM_IIO_READ:
133 temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1));
134 base += 3;
135 break;
136 case ATOM_IIO_WRITE:
137 if (rdev->family == CHIP_RV515)
138 (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
139 ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
140 base += 3;
141 break;
142 case ATOM_IIO_CLEAR:
143 temp &=
144 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
145 CU8(base + 2));
146 base += 3;
147 break;
148 case ATOM_IIO_SET:
149 temp |=
150 (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base +
151 2);
152 base += 3;
153 break;
154 case ATOM_IIO_MOVE_INDEX:
155 temp &=
156 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
157 CU8(base + 3));
158 temp |=
159 ((index >> CU8(base + 2)) &
160 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
161 3);
162 base += 4;
163 break;
164 case ATOM_IIO_MOVE_DATA:
165 temp &=
166 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
167 CU8(base + 3));
168 temp |=
169 ((data >> CU8(base + 2)) &
170 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
171 3);
172 base += 4;
173 break;
case ATOM_IIO_MOVE_ATTR:
temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
    CU8(base + 3));
temp |= ((ctx->io_attr >> CU8(base + 2)) &
    (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
base += 4;
break;
188 case ATOM_IIO_END:
189 return temp;
190 default:
191 pr_info("Unknown IIO opcode\n");
192 return 0;
193 }
194 }
195
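/*
 * Decode and fetch a source operand.  The low three bits of 'attr'
 * select the operand class (register, parameter space, workspace,
 * data table, frame-buffer scratch, immediate, PLL or MC register);
 * bits 3..5 select the alignment, applied via atom_arg_mask and
 * atom_arg_shift before the value is returned.  If 'saved' is
 * non-NULL, the unmasked value is stored there so the caller can
 * later merge a result back in with atom_put_dst().
 */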
static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
				 int *ptr, uint32_t *saved, int print)
198 {
199 uint32_t idx, val = 0xCDCDCDCD, align, arg;
200 struct atom_context *gctx = ctx->ctx;
201 arg = attr & 7;
202 align = (attr >> 3) & 7;
203 switch (arg) {
204 case ATOM_ARG_REG:
205 idx = U16(*ptr);
206 (*ptr) += 2;
207 if (print)
208 DEBUG("REG[0x%04X]", idx);
209 idx += gctx->reg_block;
210 switch (gctx->io_mode) {
211 case ATOM_IO_MM:
212 val = gctx->card->reg_read(gctx->card, idx);
213 break;
214 case ATOM_IO_PCI:
215 pr_info("PCI registers are not implemented\n");
216 return 0;
217 case ATOM_IO_SYSIO:
218 pr_info("SYSIO registers are not implemented\n");
219 return 0;
220 default:
221 if (!(gctx->io_mode & 0x80)) {
222 pr_info("Bad IO mode\n");
223 return 0;
224 }
225 if (!gctx->iio[gctx->io_mode & 0x7F]) {
226 pr_info("Undefined indirect IO read method %d\n",
227 gctx->io_mode & 0x7F);
228 return 0;
229 }
230 val =
231 atom_iio_execute(gctx,
232 gctx->iio[gctx->io_mode & 0x7F],
233 idx, 0);
234 }
235 break;
236 case ATOM_ARG_PS:
237 idx = U8(*ptr);
238 (*ptr)++;
239 /* get_unaligned_le32 avoids unaligned accesses from atombios
240 * tables, noticed on a DEC Alpha. */
241 val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
242 if (print)
243 DEBUG("PS[0x%02X,0x%04X]", idx, val);
244 break;
245 case ATOM_ARG_WS:
246 idx = U8(*ptr);
247 (*ptr)++;
248 if (print)
249 DEBUG("WS[0x%02X]", idx);
250 switch (idx) {
251 case ATOM_WS_QUOTIENT:
252 val = gctx->divmul[0];
253 break;
254 case ATOM_WS_REMAINDER:
255 val = gctx->divmul[1];
256 break;
257 case ATOM_WS_DATAPTR:
258 val = gctx->data_block;
259 break;
260 case ATOM_WS_SHIFT:
261 val = gctx->shift;
262 break;
263 case ATOM_WS_OR_MASK:
264 val = 1 << gctx->shift;
265 break;
266 case ATOM_WS_AND_MASK:
267 val = ~(1 << gctx->shift);
268 break;
269 case ATOM_WS_FB_WINDOW:
270 val = gctx->fb_base;
271 break;
272 case ATOM_WS_ATTRIBUTES:
273 val = gctx->io_attr;
274 break;
275 case ATOM_WS_REGPTR:
276 val = gctx->reg_block;
277 break;
278 default:
279 val = ctx->ws[idx];
280 }
281 break;
282 case ATOM_ARG_ID:
283 idx = U16(*ptr);
284 (*ptr) += 2;
285 if (print) {
286 if (gctx->data_block)
287 DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
288 else
289 DEBUG("ID[0x%04X]", idx);
290 }
291 val = U32(idx + gctx->data_block);
292 break;
293 case ATOM_ARG_FB:
294 idx = U8(*ptr);
295 (*ptr)++;
296 if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
297 DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
298 gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
299 val = 0;
300 } else
301 val = gctx->scratch[(gctx->fb_base / 4) + idx];
302 if (print)
303 DEBUG("FB[0x%02X]", idx);
304 break;
305 case ATOM_ARG_IMM:
306 switch (align) {
307 case ATOM_SRC_DWORD:
308 val = U32(*ptr);
309 (*ptr) += 4;
310 if (print)
311 DEBUG("IMM 0x%08X\n", val);
312 return val;
313 case ATOM_SRC_WORD0:
314 case ATOM_SRC_WORD8:
315 case ATOM_SRC_WORD16:
316 val = U16(*ptr);
317 (*ptr) += 2;
318 if (print)
319 DEBUG("IMM 0x%04X\n", val);
320 return val;
321 case ATOM_SRC_BYTE0:
322 case ATOM_SRC_BYTE8:
323 case ATOM_SRC_BYTE16:
324 case ATOM_SRC_BYTE24:
325 val = U8(*ptr);
326 (*ptr)++;
327 if (print)
328 DEBUG("IMM 0x%02X\n", val);
329 return val;
330 }
331 return 0;
332 case ATOM_ARG_PLL:
333 idx = U8(*ptr);
334 (*ptr)++;
335 if (print)
336 DEBUG("PLL[0x%02X]", idx);
337 val = gctx->card->pll_read(gctx->card, idx);
338 break;
339 case ATOM_ARG_MC:
340 idx = U8(*ptr);
341 (*ptr)++;
342 if (print)
343 DEBUG("MC[0x%02X]", idx);
344 val = gctx->card->mc_read(gctx->card, idx);
345 break;
346 }
347 if (saved)
348 *saved = val;
349 val &= atom_arg_mask[align];
350 val >>= atom_arg_shift[align];
351 if (print)
352 switch (align) {
353 case ATOM_SRC_DWORD:
354 DEBUG(".[31:0] -> 0x%08X\n", val);
355 break;
356 case ATOM_SRC_WORD0:
357 DEBUG(".[15:0] -> 0x%04X\n", val);
358 break;
359 case ATOM_SRC_WORD8:
360 DEBUG(".[23:8] -> 0x%04X\n", val);
361 break;
362 case ATOM_SRC_WORD16:
363 DEBUG(".[31:16] -> 0x%04X\n", val);
364 break;
365 case ATOM_SRC_BYTE0:
366 DEBUG(".[7:0] -> 0x%02X\n", val);
367 break;
368 case ATOM_SRC_BYTE8:
369 DEBUG(".[15:8] -> 0x%02X\n", val);
370 break;
371 case ATOM_SRC_BYTE16:
372 DEBUG(".[23:16] -> 0x%02X\n", val);
373 break;
374 case ATOM_SRC_BYTE24:
375 DEBUG(".[31:24] -> 0x%02X\n", val);
376 break;
377 }
378 return val;
379 }
380
static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
382 {
383 uint32_t align = (attr >> 3) & 7, arg = attr & 7;
384 switch (arg) {
385 case ATOM_ARG_REG:
386 case ATOM_ARG_ID:
387 (*ptr) += 2;
388 break;
389 case ATOM_ARG_PLL:
390 case ATOM_ARG_MC:
391 case ATOM_ARG_PS:
392 case ATOM_ARG_WS:
393 case ATOM_ARG_FB:
394 (*ptr)++;
395 break;
396 case ATOM_ARG_IMM:
397 switch (align) {
398 case ATOM_SRC_DWORD:
399 (*ptr) += 4;
400 return;
401 case ATOM_SRC_WORD0:
402 case ATOM_SRC_WORD8:
403 case ATOM_SRC_WORD16:
404 (*ptr) += 2;
405 return;
406 case ATOM_SRC_BYTE0:
407 case ATOM_SRC_BYTE8:
408 case ATOM_SRC_BYTE16:
409 case ATOM_SRC_BYTE24:
410 (*ptr)++;
411 return;
412 }
413 return;
414 }
415 }
416
static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
418 {
419 return atom_get_src_int(ctx, attr, ptr, NULL, 1);
420 }
421
static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
423 {
424 uint32_t val = 0xCDCDCDCD;
425
426 switch (align) {
427 case ATOM_SRC_DWORD:
428 val = U32(*ptr);
429 (*ptr) += 4;
430 break;
431 case ATOM_SRC_WORD0:
432 case ATOM_SRC_WORD8:
433 case ATOM_SRC_WORD16:
434 val = U16(*ptr);
435 (*ptr) += 2;
436 break;
437 case ATOM_SRC_BYTE0:
438 case ATOM_SRC_BYTE8:
439 case ATOM_SRC_BYTE16:
440 case ATOM_SRC_BYTE24:
441 val = U8(*ptr);
442 (*ptr)++;
443 break;
444 }
445 return val;
446 }
447
static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
			     int *ptr, uint32_t *saved, int print)
450 {
451 return atom_get_src_int(ctx,
452 arg | atom_dst_to_src[(attr >> 3) &
453 7][(attr >> 6) & 3] << 3,
454 ptr, saved, print);
455 }
456
static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
458 {
459 atom_skip_src_int(ctx,
460 arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) &
461 3] << 3, ptr);
462 }
463
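/*
 * Store 'val' into a destination operand.  Only the bits covered by
 * the destination's alignment are updated; the remaining bits come
 * from 'saved', the unmasked value captured by the matching
 * atom_get_dst() call.
 */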
static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
			 int *ptr, uint32_t val, uint32_t saved)
466 {
467 uint32_t align =
468 atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3], old_val =
469 val, idx;
470 struct atom_context *gctx = ctx->ctx;
471 old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
472 val <<= atom_arg_shift[align];
473 val &= atom_arg_mask[align];
474 saved &= ~atom_arg_mask[align];
475 val |= saved;
476 switch (arg) {
477 case ATOM_ARG_REG:
478 idx = U16(*ptr);
479 (*ptr) += 2;
480 DEBUG("REG[0x%04X]", idx);
481 idx += gctx->reg_block;
482 switch (gctx->io_mode) {
483 case ATOM_IO_MM:
484 if (idx == 0)
485 gctx->card->reg_write(gctx->card, idx,
486 val << 2);
487 else
488 gctx->card->reg_write(gctx->card, idx, val);
489 break;
490 case ATOM_IO_PCI:
491 pr_info("PCI registers are not implemented\n");
492 return;
493 case ATOM_IO_SYSIO:
494 pr_info("SYSIO registers are not implemented\n");
495 return;
496 default:
497 if (!(gctx->io_mode & 0x80)) {
498 pr_info("Bad IO mode\n");
499 return;
500 }
501 if (!gctx->iio[gctx->io_mode & 0xFF]) {
502 pr_info("Undefined indirect IO write method %d\n",
503 gctx->io_mode & 0x7F);
504 return;
505 }
506 atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF],
507 idx, val);
508 }
509 break;
510 case ATOM_ARG_PS:
511 idx = U8(*ptr);
512 (*ptr)++;
513 DEBUG("PS[0x%02X]", idx);
514 ctx->ps[idx] = cpu_to_le32(val);
515 break;
516 case ATOM_ARG_WS:
517 idx = U8(*ptr);
518 (*ptr)++;
519 DEBUG("WS[0x%02X]", idx);
520 switch (idx) {
521 case ATOM_WS_QUOTIENT:
522 gctx->divmul[0] = val;
523 break;
524 case ATOM_WS_REMAINDER:
525 gctx->divmul[1] = val;
526 break;
527 case ATOM_WS_DATAPTR:
528 gctx->data_block = val;
529 break;
530 case ATOM_WS_SHIFT:
531 gctx->shift = val;
532 break;
533 case ATOM_WS_OR_MASK:
534 case ATOM_WS_AND_MASK:
535 break;
536 case ATOM_WS_FB_WINDOW:
537 gctx->fb_base = val;
538 break;
539 case ATOM_WS_ATTRIBUTES:
540 gctx->io_attr = val;
541 break;
542 case ATOM_WS_REGPTR:
543 gctx->reg_block = val;
544 break;
545 default:
546 ctx->ws[idx] = val;
547 }
548 break;
549 case ATOM_ARG_FB:
550 idx = U8(*ptr);
551 (*ptr)++;
552 if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
553 DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
554 gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
555 } else
556 gctx->scratch[(gctx->fb_base / 4) + idx] = val;
557 DEBUG("FB[0x%02X]", idx);
558 break;
559 case ATOM_ARG_PLL:
560 idx = U8(*ptr);
561 (*ptr)++;
562 DEBUG("PLL[0x%02X]", idx);
563 gctx->card->pll_write(gctx->card, idx, val);
564 break;
565 case ATOM_ARG_MC:
566 idx = U8(*ptr);
567 (*ptr)++;
568 DEBUG("MC[0x%02X]", idx);
569 gctx->card->mc_write(gctx->card, idx, val);
570 return;
571 }
572 switch (align) {
573 case ATOM_SRC_DWORD:
574 DEBUG(".[31:0] <- 0x%08X\n", old_val);
575 break;
576 case ATOM_SRC_WORD0:
577 DEBUG(".[15:0] <- 0x%04X\n", old_val);
578 break;
579 case ATOM_SRC_WORD8:
580 DEBUG(".[23:8] <- 0x%04X\n", old_val);
581 break;
582 case ATOM_SRC_WORD16:
583 DEBUG(".[31:16] <- 0x%04X\n", old_val);
584 break;
585 case ATOM_SRC_BYTE0:
586 DEBUG(".[7:0] <- 0x%02X\n", old_val);
587 break;
588 case ATOM_SRC_BYTE8:
589 DEBUG(".[15:8] <- 0x%02X\n", old_val);
590 break;
591 case ATOM_SRC_BYTE16:
592 DEBUG(".[23:16] <- 0x%02X\n", old_val);
593 break;
594 case ATOM_SRC_BYTE24:
595 DEBUG(".[31:24] <- 0x%02X\n", old_val);
596 break;
597 }
598 }
599
static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
601 {
602 uint8_t attr = U8((*ptr)++);
603 uint32_t dst, src, saved;
604 int dptr = *ptr;
605 SDEBUG(" dst: ");
606 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
607 SDEBUG(" src: ");
608 src = atom_get_src(ctx, attr, ptr);
609 dst += src;
610 SDEBUG(" dst: ");
611 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
612 }
613
static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
615 {
616 uint8_t attr = U8((*ptr)++);
617 uint32_t dst, src, saved;
618 int dptr = *ptr;
619 SDEBUG(" dst: ");
620 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
621 SDEBUG(" src: ");
622 src = atom_get_src(ctx, attr, ptr);
623 dst &= src;
624 SDEBUG(" dst: ");
625 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
626 }
627
static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
629 {
630 printk("ATOM BIOS beeped!\n");
631 }
632
static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
634 {
635 int idx = U8((*ptr)++);
636 int r = 0;
637
638 if (idx < ATOM_TABLE_NAMES_CNT)
639 SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
640 else
641 SDEBUG(" table: %d\n", idx);
642 if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
643 r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
644 if (r) {
645 ctx->abort = true;
646 }
647 }
648
static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
650 {
651 uint8_t attr = U8((*ptr)++);
652 uint32_t saved;
653 int dptr = *ptr;
654 attr &= 0x38;
655 attr |= atom_def_dst[attr >> 3] << 6;
656 atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
657 SDEBUG(" dst: ");
658 atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
659 }
660
static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
662 {
663 uint8_t attr = U8((*ptr)++);
664 uint32_t dst, src;
665 SDEBUG(" src1: ");
666 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
667 SDEBUG(" src2: ");
668 src = atom_get_src(ctx, attr, ptr);
669 ctx->ctx->cs_equal = (dst == src);
670 ctx->ctx->cs_above = (dst > src);
671 SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
672 ctx->ctx->cs_above ? "GT" : "LE");
673 }
674
static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
676 {
677 unsigned count = U8((*ptr)++);
678 SDEBUG(" count: %d\n", count);
679 if (arg == ATOM_UNIT_MICROSEC)
680 udelay(count);
681 else if (!drm_can_sleep())
682 mdelay(count);
683 else
684 msleep(count);
685 }
686
static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
688 {
689 uint8_t attr = U8((*ptr)++);
690 uint32_t dst, src;
691 SDEBUG(" src1: ");
692 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
693 SDEBUG(" src2: ");
694 src = atom_get_src(ctx, attr, ptr);
695 if (src != 0) {
696 ctx->ctx->divmul[0] = dst / src;
697 ctx->ctx->divmul[1] = dst % src;
698 } else {
699 ctx->ctx->divmul[0] = 0;
700 ctx->ctx->divmul[1] = 0;
701 }
702 }
703
static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
705 {
706 /* functionally, a nop */
707 }
708
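/*
 * Conditional and unconditional jumps.  A watchdog tracks repeated
 * jumps to the same target and aborts the table if the script has
 * been looping on it for more than about five seconds.
 */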
static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
710 {
711 int execute = 0, target = U16(*ptr);
712 unsigned long cjiffies;
713
714 (*ptr) += 2;
715 switch (arg) {
716 case ATOM_COND_ABOVE:
717 execute = ctx->ctx->cs_above;
718 break;
719 case ATOM_COND_ABOVEOREQUAL:
720 execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
721 break;
722 case ATOM_COND_ALWAYS:
723 execute = 1;
724 break;
725 case ATOM_COND_BELOW:
726 execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
727 break;
728 case ATOM_COND_BELOWOREQUAL:
729 execute = !ctx->ctx->cs_above;
730 break;
731 case ATOM_COND_EQUAL:
732 execute = ctx->ctx->cs_equal;
733 break;
734 case ATOM_COND_NOTEQUAL:
735 execute = !ctx->ctx->cs_equal;
736 break;
737 }
738 if (arg != ATOM_COND_ALWAYS)
739 SDEBUG(" taken: %s\n", execute ? "yes" : "no");
740 SDEBUG(" target: 0x%04X\n", target);
741 if (execute) {
742 if (ctx->last_jump == (ctx->start + target)) {
743 cjiffies = jiffies;
744 if (time_after(cjiffies, ctx->last_jump_jiffies)) {
745 cjiffies -= ctx->last_jump_jiffies;
746 if ((jiffies_to_msecs(cjiffies) > 5000)) {
747 DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n");
748 ctx->abort = true;
749 }
750 } else {
/* jiffies wrapped around; just wait a little longer */
752 ctx->last_jump_jiffies = jiffies;
753 }
754 } else {
755 ctx->last_jump = ctx->start + target;
756 ctx->last_jump_jiffies = jiffies;
757 }
758 *ptr = ctx->start + target;
759 }
760 }
761
static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
763 {
764 uint8_t attr = U8((*ptr)++);
765 uint32_t dst, mask, src, saved;
766 int dptr = *ptr;
767 SDEBUG(" dst: ");
768 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
769 mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
770 SDEBUG(" mask: 0x%08x", mask);
771 SDEBUG(" src: ");
772 src = atom_get_src(ctx, attr, ptr);
773 dst &= mask;
774 dst |= src;
775 SDEBUG(" dst: ");
776 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
777 }
778
static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
780 {
781 uint8_t attr = U8((*ptr)++);
782 uint32_t src, saved;
783 int dptr = *ptr;
784 if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
785 atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
786 else {
787 atom_skip_dst(ctx, arg, attr, ptr);
788 saved = 0xCDCDCDCD;
789 }
790 SDEBUG(" src: ");
791 src = atom_get_src(ctx, attr, ptr);
792 SDEBUG(" dst: ");
793 atom_put_dst(ctx, arg, attr, &dptr, src, saved);
794 }
795
static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
797 {
798 uint8_t attr = U8((*ptr)++);
799 uint32_t dst, src;
800 SDEBUG(" src1: ");
801 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
802 SDEBUG(" src2: ");
803 src = atom_get_src(ctx, attr, ptr);
804 ctx->ctx->divmul[0] = dst * src;
805 }
806
static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
808 {
809 /* nothing */
810 }
811
static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
813 {
814 uint8_t attr = U8((*ptr)++);
815 uint32_t dst, src, saved;
816 int dptr = *ptr;
817 SDEBUG(" dst: ");
818 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
819 SDEBUG(" src: ");
820 src = atom_get_src(ctx, attr, ptr);
821 dst |= src;
822 SDEBUG(" dst: ");
823 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
824 }
825
static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
827 {
828 uint8_t val = U8((*ptr)++);
829 SDEBUG("POST card output: 0x%02X\n", val);
830 }
831
static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
833 {
834 pr_info("unimplemented!\n");
835 }
836
static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
838 {
839 pr_info("unimplemented!\n");
840 }
841
static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
843 {
844 pr_info("unimplemented!\n");
845 }
846
static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
848 {
849 int idx = U8(*ptr);
850 (*ptr)++;
851 SDEBUG(" block: %d\n", idx);
852 if (!idx)
853 ctx->ctx->data_block = 0;
854 else if (idx == 255)
855 ctx->ctx->data_block = ctx->start;
856 else
857 ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
858 SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block);
859 }
860
static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
862 {
863 uint8_t attr = U8((*ptr)++);
864 SDEBUG(" fb_base: ");
865 ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
866 }
867
static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
869 {
870 int port;
871 switch (arg) {
872 case ATOM_PORT_ATI:
873 port = U16(*ptr);
874 if (port < ATOM_IO_NAMES_CNT)
875 SDEBUG(" port: %d (%s)\n", port, atom_io_names[port]);
876 else
877 SDEBUG(" port: %d\n", port);
878 if (!port)
879 ctx->ctx->io_mode = ATOM_IO_MM;
880 else
881 ctx->ctx->io_mode = ATOM_IO_IIO | port;
882 (*ptr) += 2;
883 break;
884 case ATOM_PORT_PCI:
885 ctx->ctx->io_mode = ATOM_IO_PCI;
886 (*ptr)++;
887 break;
888 case ATOM_PORT_SYSIO:
889 ctx->ctx->io_mode = ATOM_IO_SYSIO;
890 (*ptr)++;
891 break;
892 }
893 }
894
static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
896 {
897 ctx->ctx->reg_block = U16(*ptr);
898 (*ptr) += 2;
899 SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
900 }
901
static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
903 {
904 uint8_t attr = U8((*ptr)++), shift;
905 uint32_t saved, dst;
906 int dptr = *ptr;
907 attr &= 0x38;
908 attr |= atom_def_dst[attr >> 3] << 6;
909 SDEBUG(" dst: ");
910 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
911 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
912 SDEBUG(" shift: %d\n", shift);
913 dst <<= shift;
914 SDEBUG(" dst: ");
915 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
916 }
917
static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
919 {
920 uint8_t attr = U8((*ptr)++), shift;
921 uint32_t saved, dst;
922 int dptr = *ptr;
923 attr &= 0x38;
924 attr |= atom_def_dst[attr >> 3] << 6;
925 SDEBUG(" dst: ");
926 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
927 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
928 SDEBUG(" shift: %d\n", shift);
929 dst >>= shift;
930 SDEBUG(" dst: ");
931 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
932 }
933
static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
935 {
936 uint8_t attr = U8((*ptr)++), shift;
937 uint32_t saved, dst;
938 int dptr = *ptr;
939 uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
940 SDEBUG(" dst: ");
941 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
/* op needs the full dst value */
943 dst = saved;
944 shift = atom_get_src(ctx, attr, ptr);
945 SDEBUG(" shift: %d\n", shift);
946 dst <<= shift;
947 dst &= atom_arg_mask[dst_align];
948 dst >>= atom_arg_shift[dst_align];
949 SDEBUG(" dst: ");
950 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
951 }
952
static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
954 {
955 uint8_t attr = U8((*ptr)++), shift;
956 uint32_t saved, dst;
957 int dptr = *ptr;
958 uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
959 SDEBUG(" dst: ");
960 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
/* op needs the full dst value */
962 dst = saved;
963 shift = atom_get_src(ctx, attr, ptr);
964 SDEBUG(" shift: %d\n", shift);
965 dst >>= shift;
966 dst &= atom_arg_mask[dst_align];
967 dst >>= atom_arg_shift[dst_align];
968 SDEBUG(" dst: ");
969 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
970 }
971
static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
973 {
974 uint8_t attr = U8((*ptr)++);
975 uint32_t dst, src, saved;
976 int dptr = *ptr;
977 SDEBUG(" dst: ");
978 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
979 SDEBUG(" src: ");
980 src = atom_get_src(ctx, attr, ptr);
981 dst -= src;
982 SDEBUG(" dst: ");
983 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
984 }
985
static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
987 {
988 uint8_t attr = U8((*ptr)++);
989 uint32_t src, val, target;
990 SDEBUG(" switch: ");
991 src = atom_get_src(ctx, attr, ptr);
992 while (U16(*ptr) != ATOM_CASE_END)
993 if (U8(*ptr) == ATOM_CASE_MAGIC) {
994 (*ptr)++;
995 SDEBUG(" case: ");
996 val =
997 atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM,
998 ptr);
999 target = U16(*ptr);
1000 if (val == src) {
1001 SDEBUG(" target: %04X\n", target);
1002 *ptr = ctx->start + target;
1003 return;
1004 }
1005 (*ptr) += 2;
1006 } else {
1007 pr_info("Bad case\n");
1008 return;
1009 }
1010 (*ptr) += 2;
1011 }
1012
static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
1014 {
1015 uint8_t attr = U8((*ptr)++);
1016 uint32_t dst, src;
1017 SDEBUG(" src1: ");
1018 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
1019 SDEBUG(" src2: ");
1020 src = atom_get_src(ctx, attr, ptr);
1021 ctx->ctx->cs_equal = ((dst & src) == 0);
1022 SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
1023 }
1024
static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
1026 {
1027 uint8_t attr = U8((*ptr)++);
1028 uint32_t dst, src, saved;
1029 int dptr = *ptr;
1030 SDEBUG(" dst: ");
1031 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
1032 SDEBUG(" src: ");
1033 src = atom_get_src(ctx, attr, ptr);
1034 dst ^= src;
1035 SDEBUG(" dst: ");
1036 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
1037 }
1038
static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
1040 {
1041 pr_info("unimplemented!\n");
1042 }
1043
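/*
 * Opcode dispatch table, indexed by the opcode byte.  Each handler is
 * paired with a fixed argument: the destination operand class for
 * ALU-style ops, the condition code for jumps, the unit for delays,
 * or the port type for the set-port ops.
 */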
1044 static struct {
1045 void (*func) (atom_exec_context *, int *, int);
1046 int arg;
1047 } opcode_table[ATOM_OP_CNT] = {
1048 {
1049 NULL, 0}, {
1050 atom_op_move, ATOM_ARG_REG}, {
1051 atom_op_move, ATOM_ARG_PS}, {
1052 atom_op_move, ATOM_ARG_WS}, {
1053 atom_op_move, ATOM_ARG_FB}, {
1054 atom_op_move, ATOM_ARG_PLL}, {
1055 atom_op_move, ATOM_ARG_MC}, {
1056 atom_op_and, ATOM_ARG_REG}, {
1057 atom_op_and, ATOM_ARG_PS}, {
1058 atom_op_and, ATOM_ARG_WS}, {
1059 atom_op_and, ATOM_ARG_FB}, {
1060 atom_op_and, ATOM_ARG_PLL}, {
1061 atom_op_and, ATOM_ARG_MC}, {
1062 atom_op_or, ATOM_ARG_REG}, {
1063 atom_op_or, ATOM_ARG_PS}, {
1064 atom_op_or, ATOM_ARG_WS}, {
1065 atom_op_or, ATOM_ARG_FB}, {
1066 atom_op_or, ATOM_ARG_PLL}, {
1067 atom_op_or, ATOM_ARG_MC}, {
1068 atom_op_shift_left, ATOM_ARG_REG}, {
1069 atom_op_shift_left, ATOM_ARG_PS}, {
1070 atom_op_shift_left, ATOM_ARG_WS}, {
1071 atom_op_shift_left, ATOM_ARG_FB}, {
1072 atom_op_shift_left, ATOM_ARG_PLL}, {
1073 atom_op_shift_left, ATOM_ARG_MC}, {
1074 atom_op_shift_right, ATOM_ARG_REG}, {
1075 atom_op_shift_right, ATOM_ARG_PS}, {
1076 atom_op_shift_right, ATOM_ARG_WS}, {
1077 atom_op_shift_right, ATOM_ARG_FB}, {
1078 atom_op_shift_right, ATOM_ARG_PLL}, {
1079 atom_op_shift_right, ATOM_ARG_MC}, {
1080 atom_op_mul, ATOM_ARG_REG}, {
1081 atom_op_mul, ATOM_ARG_PS}, {
1082 atom_op_mul, ATOM_ARG_WS}, {
1083 atom_op_mul, ATOM_ARG_FB}, {
1084 atom_op_mul, ATOM_ARG_PLL}, {
1085 atom_op_mul, ATOM_ARG_MC}, {
1086 atom_op_div, ATOM_ARG_REG}, {
1087 atom_op_div, ATOM_ARG_PS}, {
1088 atom_op_div, ATOM_ARG_WS}, {
1089 atom_op_div, ATOM_ARG_FB}, {
1090 atom_op_div, ATOM_ARG_PLL}, {
1091 atom_op_div, ATOM_ARG_MC}, {
1092 atom_op_add, ATOM_ARG_REG}, {
1093 atom_op_add, ATOM_ARG_PS}, {
1094 atom_op_add, ATOM_ARG_WS}, {
1095 atom_op_add, ATOM_ARG_FB}, {
1096 atom_op_add, ATOM_ARG_PLL}, {
1097 atom_op_add, ATOM_ARG_MC}, {
1098 atom_op_sub, ATOM_ARG_REG}, {
1099 atom_op_sub, ATOM_ARG_PS}, {
1100 atom_op_sub, ATOM_ARG_WS}, {
1101 atom_op_sub, ATOM_ARG_FB}, {
1102 atom_op_sub, ATOM_ARG_PLL}, {
1103 atom_op_sub, ATOM_ARG_MC}, {
1104 atom_op_setport, ATOM_PORT_ATI}, {
1105 atom_op_setport, ATOM_PORT_PCI}, {
1106 atom_op_setport, ATOM_PORT_SYSIO}, {
1107 atom_op_setregblock, 0}, {
1108 atom_op_setfbbase, 0}, {
1109 atom_op_compare, ATOM_ARG_REG}, {
1110 atom_op_compare, ATOM_ARG_PS}, {
1111 atom_op_compare, ATOM_ARG_WS}, {
1112 atom_op_compare, ATOM_ARG_FB}, {
1113 atom_op_compare, ATOM_ARG_PLL}, {
1114 atom_op_compare, ATOM_ARG_MC}, {
1115 atom_op_switch, 0}, {
1116 atom_op_jump, ATOM_COND_ALWAYS}, {
1117 atom_op_jump, ATOM_COND_EQUAL}, {
1118 atom_op_jump, ATOM_COND_BELOW}, {
1119 atom_op_jump, ATOM_COND_ABOVE}, {
1120 atom_op_jump, ATOM_COND_BELOWOREQUAL}, {
1121 atom_op_jump, ATOM_COND_ABOVEOREQUAL}, {
1122 atom_op_jump, ATOM_COND_NOTEQUAL}, {
1123 atom_op_test, ATOM_ARG_REG}, {
1124 atom_op_test, ATOM_ARG_PS}, {
1125 atom_op_test, ATOM_ARG_WS}, {
1126 atom_op_test, ATOM_ARG_FB}, {
1127 atom_op_test, ATOM_ARG_PLL}, {
1128 atom_op_test, ATOM_ARG_MC}, {
1129 atom_op_delay, ATOM_UNIT_MILLISEC}, {
1130 atom_op_delay, ATOM_UNIT_MICROSEC}, {
1131 atom_op_calltable, 0}, {
1132 atom_op_repeat, 0}, {
1133 atom_op_clear, ATOM_ARG_REG}, {
1134 atom_op_clear, ATOM_ARG_PS}, {
1135 atom_op_clear, ATOM_ARG_WS}, {
1136 atom_op_clear, ATOM_ARG_FB}, {
1137 atom_op_clear, ATOM_ARG_PLL}, {
1138 atom_op_clear, ATOM_ARG_MC}, {
1139 atom_op_nop, 0}, {
1140 atom_op_eot, 0}, {
1141 atom_op_mask, ATOM_ARG_REG}, {
1142 atom_op_mask, ATOM_ARG_PS}, {
1143 atom_op_mask, ATOM_ARG_WS}, {
1144 atom_op_mask, ATOM_ARG_FB}, {
1145 atom_op_mask, ATOM_ARG_PLL}, {
1146 atom_op_mask, ATOM_ARG_MC}, {
1147 atom_op_postcard, 0}, {
1148 atom_op_beep, 0}, {
1149 atom_op_savereg, 0}, {
1150 atom_op_restorereg, 0}, {
1151 atom_op_setdatablock, 0}, {
1152 atom_op_xor, ATOM_ARG_REG}, {
1153 atom_op_xor, ATOM_ARG_PS}, {
1154 atom_op_xor, ATOM_ARG_WS}, {
1155 atom_op_xor, ATOM_ARG_FB}, {
1156 atom_op_xor, ATOM_ARG_PLL}, {
1157 atom_op_xor, ATOM_ARG_MC}, {
1158 atom_op_shl, ATOM_ARG_REG}, {
1159 atom_op_shl, ATOM_ARG_PS}, {
1160 atom_op_shl, ATOM_ARG_WS}, {
1161 atom_op_shl, ATOM_ARG_FB}, {
1162 atom_op_shl, ATOM_ARG_PLL}, {
1163 atom_op_shl, ATOM_ARG_MC}, {
1164 atom_op_shr, ATOM_ARG_REG}, {
1165 atom_op_shr, ATOM_ARG_PS}, {
1166 atom_op_shr, ATOM_ARG_WS}, {
1167 atom_op_shr, ATOM_ARG_FB}, {
1168 atom_op_shr, ATOM_ARG_PLL}, {
1169 atom_op_shr, ATOM_ARG_MC}, {
1170 atom_op_debug, 0},};
1171
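/*
 * Core interpreter loop: look up the command table, allocate its
 * workspace, then fetch and dispatch opcodes until ATOM_OP_EOT, an
 * out-of-range opcode, or an abort requested by a handler (stuck
 * loop or failed nested CALL_TABLE).
 */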
static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
1173 {
1174 int base = CU16(ctx->cmd_table + 4 + 2 * index);
1175 int len, ws, ps, ptr;
1176 unsigned char op;
1177 atom_exec_context ectx;
1178 int ret = 0;
1179
1180 if (!base)
1181 return -EINVAL;
1182
1183 len = CU16(base + ATOM_CT_SIZE_PTR);
1184 ws = CU8(base + ATOM_CT_WS_PTR);
1185 ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
1186 ptr = base + ATOM_CT_CODE_PTR;
1187
1188 SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
1189
1190 ectx.ctx = ctx;
1191 ectx.ps_shift = ps / 4;
1192 ectx.start = base;
1193 ectx.ps = params;
1194 ectx.abort = false;
1195 ectx.last_jump = 0;
1196 if (ws)
1197 ectx.ws = kcalloc(4, ws, GFP_KERNEL);
1198 else
1199 ectx.ws = NULL;
1200
1201 debug_depth++;
1202 while (1) {
1203 op = CU8(ptr++);
1204 if (op < ATOM_OP_NAMES_CNT)
1205 SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
1206 else
1207 SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
1208 if (ectx.abort) {
1209 DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
1210 base, len, ws, ps, ptr - 1);
1211 ret = -EINVAL;
1212 goto free;
1213 }
1214
1215 if (op < ATOM_OP_CNT && op > 0)
1216 opcode_table[op].func(&ectx, &ptr,
1217 opcode_table[op].arg);
1218 else
1219 break;
1220
1221 if (op == ATOM_OP_EOT)
1222 break;
1223 }
1224 debug_depth--;
1225 SDEBUG("<<\n");
1226
1227 free:
1228 if (ws)
1229 kfree(ectx.ws);
1230 return ret;
1231 }
1232
int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uint32_t * params)
1234 {
1235 int r;
1236
1237 mutex_lock(&ctx->mutex);
1238 /* reset data block */
1239 ctx->data_block = 0;
1240 /* reset reg block */
1241 ctx->reg_block = 0;
1242 /* reset fb window */
1243 ctx->fb_base = 0;
1244 /* reset io mode */
1245 ctx->io_mode = ATOM_IO_MM;
1246 /* reset divmul */
1247 ctx->divmul[0] = 0;
1248 ctx->divmul[1] = 0;
1249 r = atom_execute_table_locked(ctx, index, params);
1250 mutex_unlock(&ctx->mutex);
1251 return r;
1252 }
1253
int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
1255 {
1256 int r;
1257 mutex_lock(&ctx->scratch_mutex);
1258 r = atom_execute_table_scratch_unlocked(ctx, index, params);
1259 mutex_unlock(&ctx->scratch_mutex);
1260 return r;
1261 }
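
/*
 * Typical caller pattern (sketch only; the index macro and argument
 * struct come from the companion atombios headers, and the context
 * pointer is the one stored by the radeon driver): fill a parameter
 * structure, then execute the table by its master-table index, e.g.
 *
 *	int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
 *	atom_execute_table(rdev->mode_info.atom_context, index,
 *			   (uint32_t *)&args);
 */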
1262
1263 static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
1264
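/*
 * Walk the IIO area of the data table and record, per port number,
 * the offset of its IIO program so atom_iio_execute() can find it
 * later.  atom_iio_len[] gives the length of each IIO opcode.
 */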
static void atom_index_iio(struct atom_context *ctx, int base)
1266 {
1267 ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
1268 if (!ctx->iio)
1269 return;
1270 while (CU8(base) == ATOM_IIO_START) {
1271 ctx->iio[CU8(base + 1)] = base + 2;
1272 base += 2;
1273 while (CU8(base) != ATOM_IIO_END)
1274 base += atom_iio_len[CU8(base)];
1275 base += 3;
1276 }
1277 }
1278
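/*
 * Validate the BIOS image (BIOS, ATI and ATOM magic numbers), record
 * the command and data table offsets, index the IIO programs and log
 * the BIOS name string.  Returns NULL on any validation or
 * allocation failure.
 */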
struct atom_context *atom_parse(struct card_info *card, void *bios)
1280 {
1281 int base;
1282 struct atom_context *ctx =
1283 kzalloc(sizeof(struct atom_context), GFP_KERNEL);
1284 char *str;
1285 char name[512];
1286 int i;
1287
1288 if (!ctx)
1289 return NULL;
1290
1291 ctx->card = card;
1292 ctx->bios = bios;
1293
1294 if (CU16(0) != ATOM_BIOS_MAGIC) {
1295 pr_info("Invalid BIOS magic\n");
1296 kfree(ctx);
1297 return NULL;
1298 }
1299 if (strncmp
1300 (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
1301 strlen(ATOM_ATI_MAGIC))) {
1302 pr_info("Invalid ATI magic\n");
1303 kfree(ctx);
1304 return NULL;
1305 }
1306
1307 base = CU16(ATOM_ROM_TABLE_PTR);
1308 if (strncmp
1309 (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
1310 strlen(ATOM_ROM_MAGIC))) {
1311 pr_info("Invalid ATOM magic\n");
1312 kfree(ctx);
1313 return NULL;
1314 }
1315
1316 ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
1317 ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
1318 atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
1319 if (!ctx->iio) {
1320 atom_destroy(ctx);
1321 return NULL;
1322 }
1323
1324 str = CSTR(CU16(base + ATOM_ROM_MSG_PTR));
1325 while (*str && ((*str == '\n') || (*str == '\r')))
1326 str++;
1327 /* name string isn't always 0 terminated */
1328 for (i = 0; i < 511; i++) {
1329 name[i] = str[i];
1330 if (name[i] < '.' || name[i] > 'z') {
1331 name[i] = 0;
1332 break;
1333 }
1334 }
1335 pr_info("ATOM BIOS: %s\n", name);
1336
1337 return ctx;
1338 }
1339
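/*
 * Run the ASIC_Init command table with the default engine and memory
 * clocks taken from the firmware info data table.  On pre-R600 parts
 * the speed-fan-control table is run afterwards when it exists.
 */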
int atom_asic_init(struct atom_context *ctx)
1341 {
1342 struct radeon_device *rdev = ctx->card->dev->dev_private;
1343 int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
1344 uint32_t ps[16];
1345 int ret;
1346
1347 memset(ps, 0, 64);
1348
1349 ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
1350 ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
1351 if (!ps[0] || !ps[1])
1352 return 1;
1353
1354 if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
1355 return 1;
1356 ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps);
1357 if (ret)
1358 return ret;
1359
1360 memset(ps, 0, 64);
1361
1362 if (rdev->family < CHIP_R600) {
1363 if (CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_SPDFANCNTL))
1364 atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps);
1365 }
1366 return ret;
1367 }
1368
void atom_destroy(struct atom_context *ctx)
1370 {
1371 kfree(ctx->iio);
1372 kfree(ctx);
1373 }
1374
bool atom_parse_data_header(struct atom_context *ctx, int index,
			    uint16_t * size, uint8_t * frev, uint8_t * crev,
			    uint16_t * data_start)
1378 {
1379 int offset = index * 2 + 4;
1380 int idx = CU16(ctx->data_table + offset);
1381 u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
1382
1383 if (!mdt[index])
1384 return false;
1385
1386 if (size)
1387 *size = CU16(idx);
1388 if (frev)
1389 *frev = CU8(idx + 2);
1390 if (crev)
1391 *crev = CU8(idx + 3);
1392 *data_start = idx;
1393 return true;
1394 }
1395
bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
			   uint8_t * crev)
1398 {
1399 int offset = index * 2 + 4;
1400 int idx = CU16(ctx->cmd_table + offset);
1401 u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
1402
1403 if (!mct[index])
1404 return false;
1405
1406 if (frev)
1407 *frev = CU8(idx + 2);
1408 if (crev)
1409 *crev = CU8(idx + 3);
1410 return true;
1411 }
1412
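/*
 * Size the scratch buffer from the VRAM_UsageByFirmware data table
 * when present, falling back to 20KB, and allocate it.  The
 * ATOM_ARG_FB paths in atom_get_src_int()/atom_put_dst() read and
 * write this buffer.
 */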
int atom_allocate_fb_scratch(struct atom_context *ctx)
1414 {
1415 int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
1416 uint16_t data_offset;
1417 int usage_bytes = 0;
1418 struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
1419
1420 if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
1421 firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
1422
1423 DRM_DEBUG("atom firmware requested %08x %dkb\n",
1424 le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
1425 le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
1426
1427 usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
1428 }
1429 ctx->scratch_size_bytes = 0;
1430 if (usage_bytes == 0)
1431 usage_bytes = 20 * 1024;
1432 /* allocate some scratch memory */
1433 ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
1434 if (!ctx->scratch)
1435 return -ENOMEM;
1436 ctx->scratch_size_bytes = usage_bytes;
1437 return 0;
1438 }
1439