1 | /* |
2 | * Copyright 2008 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | * Author: Stanislaw Skowronek |
23 | */ |
24 | |
25 | #include <linux/module.h> |
26 | #include <linux/sched.h> |
27 | #include <linux/slab.h> |
28 | #include <asm/unaligned.h> |
29 | |
30 | #define ATOM_DEBUG |
31 | |
32 | #include "atom.h" |
33 | #include "atom-names.h" |
34 | #include "atom-bits.h" |
35 | #include "radeon.h" |
36 | |
37 | #define ATOM_COND_ABOVE 0 |
38 | #define ATOM_COND_ABOVEOREQUAL 1 |
39 | #define ATOM_COND_ALWAYS 2 |
40 | #define ATOM_COND_BELOW 3 |
41 | #define ATOM_COND_BELOWOREQUAL 4 |
42 | #define ATOM_COND_EQUAL 5 |
43 | #define ATOM_COND_NOTEQUAL 6 |
44 | |
45 | #define ATOM_PORT_ATI 0 |
46 | #define ATOM_PORT_PCI 1 |
47 | #define ATOM_PORT_SYSIO 2 |
48 | |
49 | #define ATOM_UNIT_MICROSEC 0 |
50 | #define ATOM_UNIT_MILLISEC 1 |
51 | |
52 | #define PLL_INDEX 2 |
53 | #define PLL_DATA 3 |
54 | |
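/*
 * Per-invocation interpreter state: the shared atom_context, the
 * caller-supplied parameter space (ps) and a scratch workspace (ws),
 * plus bookkeeping used to detect scripts stuck in a jump loop.
 */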
55 | typedef struct { |
56 | struct atom_context *ctx; |
57 | uint32_t *ps, *ws; |
58 | int ps_shift; |
59 | uint16_t start; |
60 | unsigned last_jump; |
61 | unsigned long last_jump_jiffies; |
62 | bool abort; |
63 | } atom_exec_context; |
64 | |
65 | int atom_debug = 0; |
66 | static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params); |
67 | int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params); |
68 | |
69 | static uint32_t atom_arg_mask[8] = |
70 | { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000, |
71 | 0xFF000000 }; |
72 | static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 }; |
73 | |
74 | static int atom_dst_to_src[8][4] = { |
75 | /* translate destination alignment field to the source alignment encoding */ |
76 | {0, 0, 0, 0}, |
77 | {1, 2, 3, 0}, |
78 | {1, 2, 3, 0}, |
79 | {1, 2, 3, 0}, |
80 | {4, 5, 6, 7}, |
81 | {4, 5, 6, 7}, |
82 | {4, 5, 6, 7}, |
83 | {4, 5, 6, 7}, |
84 | }; |
85 | static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 }; |
86 | |
87 | static int debug_depth = 0; |
88 | #ifdef ATOM_DEBUG |
89 | static void debug_print_spaces(int n) |
90 | { |
91 | while (n--) |
92 | printk(" " ); |
93 | } |
94 | |
95 | #ifdef __NetBSD__ /* XXX */ |
96 | /* |
97 | * Kludge: NetBSD defines DEBUG to mean debugging is enabled. Since |
98 | * we're not going to include any more header files, it's OK for it to |
99 | * be defined unconditionally after this. |
100 | */ |
101 | #undef DEBUG |
102 | #endif |
103 | |
104 | #define DEBUG(...) do if (atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0) |
105 | #define SDEBUG(...) do if (atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0) |
106 | #else |
107 | #define DEBUG(...) do { } while (0) |
108 | #define SDEBUG(...) do { } while (0) |
109 | #endif |
110 | |
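/*
 * Execute an indirect IO (IIO) program embedded in the BIOS image,
 * starting at 'base'.  The program reads and writes IO registers and
 * assembles a result in 'temp' from the index/data/attribute inputs.
 */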
111 | static uint32_t atom_iio_execute(struct atom_context *ctx, int base, |
112 | uint32_t index, uint32_t data) |
113 | { |
114 | struct radeon_device *rdev = ctx->card->dev->dev_private; |
115 | uint32_t temp = 0xCDCDCDCD; |
116 | |
117 | while (1) |
118 | switch (CU8(base)) { |
119 | case ATOM_IIO_NOP: |
120 | base++; |
121 | break; |
122 | case ATOM_IIO_READ: |
123 | temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1)); |
124 | base += 3; |
125 | break; |
126 | case ATOM_IIO_WRITE: |
127 | if (rdev->family == CHIP_RV515) |
128 | (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1)); |
129 | ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp); |
130 | base += 3; |
131 | break; |
132 | case ATOM_IIO_CLEAR: |
133 | temp &= |
134 | ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << |
135 | CU8(base + 2)); |
136 | base += 3; |
137 | break; |
	case ATOM_IIO_SET:
		temp |=
		    (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2);
		base += 3;
		break;
	case ATOM_IIO_MOVE_INDEX:
		temp &=
		    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3));
		temp |=
		    ((index >> CU8(base + 2)) &
		     (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
		base += 4;
		break;
	case ATOM_IIO_MOVE_DATA:
		temp &=
		    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3));
		temp |=
		    ((data >> CU8(base + 2)) &
		     (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
		base += 4;
		break;
	case ATOM_IIO_MOVE_ATTR:
		temp &=
		    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3));
		temp |=
		    ((ctx->io_attr >> CU8(base + 2)) &
		     (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
		base += 4;
		break;
178 | case ATOM_IIO_END: |
179 | return temp; |
180 | default: |
181 | printk(KERN_INFO "Unknown IIO opcode.\n" ); |
182 | return 0; |
183 | } |
184 | } |
185 | |
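/*
 * Decode and fetch a source operand.  'attr' selects the argument type
 * (register, parameter space, workspace, data table, frame buffer
 * scratch, immediate, PLL or MC register) and the byte/word alignment
 * within the 32-bit value.  If 'saved' is non-NULL the unshifted value
 * is stored there so the caller can later reassemble a partial write.
 */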
186 | static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr, |
187 | int *ptr, uint32_t *saved, int print) |
188 | { |
189 | uint32_t idx, val = 0xCDCDCDCD, align, arg; |
190 | struct atom_context *gctx = ctx->ctx; |
191 | arg = attr & 7; |
192 | align = (attr >> 3) & 7; |
193 | switch (arg) { |
194 | case ATOM_ARG_REG: |
195 | idx = U16(*ptr); |
196 | (*ptr) += 2; |
197 | if (print) |
198 | DEBUG("REG[0x%04X]" , idx); |
199 | idx += gctx->reg_block; |
200 | switch (gctx->io_mode) { |
201 | case ATOM_IO_MM: |
202 | val = gctx->card->reg_read(gctx->card, idx); |
203 | break; |
204 | case ATOM_IO_PCI: |
205 | printk(KERN_INFO |
206 | "PCI registers are not implemented.\n" ); |
207 | return 0; |
208 | case ATOM_IO_SYSIO: |
209 | printk(KERN_INFO |
210 | "SYSIO registers are not implemented.\n" ); |
211 | return 0; |
212 | default: |
213 | if (!(gctx->io_mode & 0x80)) { |
214 | printk(KERN_INFO "Bad IO mode.\n" ); |
215 | return 0; |
216 | } |
217 | if (!gctx->iio[gctx->io_mode & 0x7F]) { |
218 | printk(KERN_INFO |
219 | "Undefined indirect IO read method %d.\n" , |
220 | gctx->io_mode & 0x7F); |
221 | return 0; |
222 | } |
223 | val = |
224 | atom_iio_execute(gctx, |
225 | gctx->iio[gctx->io_mode & 0x7F], |
226 | idx, 0); |
227 | } |
228 | break; |
229 | case ATOM_ARG_PS: |
230 | idx = U8(*ptr); |
231 | (*ptr)++; |
232 | /* get_unaligned_le32 avoids unaligned accesses from atombios |
233 | * tables, noticed on a DEC Alpha. */ |
234 | val = get_unaligned_le32((u32 *)&ctx->ps[idx]); |
235 | if (print) |
236 | DEBUG("PS[0x%02X,0x%04X]" , idx, val); |
237 | break; |
238 | case ATOM_ARG_WS: |
239 | idx = U8(*ptr); |
240 | (*ptr)++; |
241 | if (print) |
242 | DEBUG("WS[0x%02X]" , idx); |
243 | switch (idx) { |
244 | case ATOM_WS_QUOTIENT: |
245 | val = gctx->divmul[0]; |
246 | break; |
247 | case ATOM_WS_REMAINDER: |
248 | val = gctx->divmul[1]; |
249 | break; |
250 | case ATOM_WS_DATAPTR: |
251 | val = gctx->data_block; |
252 | break; |
253 | case ATOM_WS_SHIFT: |
254 | val = gctx->shift; |
255 | break; |
256 | case ATOM_WS_OR_MASK: |
257 | val = 1 << gctx->shift; |
258 | break; |
259 | case ATOM_WS_AND_MASK: |
260 | val = ~(1 << gctx->shift); |
261 | break; |
262 | case ATOM_WS_FB_WINDOW: |
263 | val = gctx->fb_base; |
264 | break; |
265 | case ATOM_WS_ATTRIBUTES: |
266 | val = gctx->io_attr; |
267 | break; |
268 | case ATOM_WS_REGPTR: |
269 | val = gctx->reg_block; |
270 | break; |
271 | default: |
272 | val = ctx->ws[idx]; |
273 | } |
274 | break; |
275 | case ATOM_ARG_ID: |
276 | idx = U16(*ptr); |
277 | (*ptr) += 2; |
278 | if (print) { |
279 | if (gctx->data_block) |
280 | DEBUG("ID[0x%04X+%04X]" , idx, gctx->data_block); |
281 | else |
282 | DEBUG("ID[0x%04X]" , idx); |
283 | } |
284 | val = U32(idx + gctx->data_block); |
285 | break; |
286 | case ATOM_ARG_FB: |
287 | idx = U8(*ptr); |
288 | (*ptr)++; |
289 | if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) { |
290 | DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n" , |
291 | gctx->fb_base + (idx * 4), gctx->scratch_size_bytes); |
292 | val = 0; |
293 | } else |
294 | val = gctx->scratch[(gctx->fb_base / 4) + idx]; |
295 | if (print) |
296 | DEBUG("FB[0x%02X]" , idx); |
297 | break; |
298 | case ATOM_ARG_IMM: |
299 | switch (align) { |
300 | case ATOM_SRC_DWORD: |
301 | val = U32(*ptr); |
302 | (*ptr) += 4; |
303 | if (print) |
304 | DEBUG("IMM 0x%08X\n" , val); |
305 | return val; |
306 | case ATOM_SRC_WORD0: |
307 | case ATOM_SRC_WORD8: |
308 | case ATOM_SRC_WORD16: |
309 | val = U16(*ptr); |
310 | (*ptr) += 2; |
311 | if (print) |
312 | DEBUG("IMM 0x%04X\n" , val); |
313 | return val; |
314 | case ATOM_SRC_BYTE0: |
315 | case ATOM_SRC_BYTE8: |
316 | case ATOM_SRC_BYTE16: |
317 | case ATOM_SRC_BYTE24: |
318 | val = U8(*ptr); |
319 | (*ptr)++; |
320 | if (print) |
321 | DEBUG("IMM 0x%02X\n" , val); |
322 | return val; |
323 | } |
324 | return 0; |
325 | case ATOM_ARG_PLL: |
326 | idx = U8(*ptr); |
327 | (*ptr)++; |
328 | if (print) |
329 | DEBUG("PLL[0x%02X]" , idx); |
330 | val = gctx->card->pll_read(gctx->card, idx); |
331 | break; |
332 | case ATOM_ARG_MC: |
333 | idx = U8(*ptr); |
334 | (*ptr)++; |
335 | if (print) |
336 | DEBUG("MC[0x%02X]" , idx); |
337 | val = gctx->card->mc_read(gctx->card, idx); |
338 | break; |
339 | } |
340 | if (saved) |
341 | *saved = val; |
342 | val &= atom_arg_mask[align]; |
343 | val >>= atom_arg_shift[align]; |
344 | if (print) |
345 | switch (align) { |
346 | case ATOM_SRC_DWORD: |
347 | DEBUG(".[31:0] -> 0x%08X\n" , val); |
348 | break; |
349 | case ATOM_SRC_WORD0: |
350 | DEBUG(".[15:0] -> 0x%04X\n" , val); |
351 | break; |
352 | case ATOM_SRC_WORD8: |
353 | DEBUG(".[23:8] -> 0x%04X\n" , val); |
354 | break; |
355 | case ATOM_SRC_WORD16: |
356 | DEBUG(".[31:16] -> 0x%04X\n" , val); |
357 | break; |
358 | case ATOM_SRC_BYTE0: |
359 | DEBUG(".[7:0] -> 0x%02X\n" , val); |
360 | break; |
361 | case ATOM_SRC_BYTE8: |
362 | DEBUG(".[15:8] -> 0x%02X\n" , val); |
363 | break; |
364 | case ATOM_SRC_BYTE16: |
365 | DEBUG(".[23:16] -> 0x%02X\n" , val); |
366 | break; |
367 | case ATOM_SRC_BYTE24: |
368 | DEBUG(".[31:24] -> 0x%02X\n" , val); |
369 | break; |
370 | } |
371 | return val; |
372 | } |
373 | |
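/* Advance *ptr past a source operand without evaluating it. */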
374 | static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr) |
375 | { |
376 | uint32_t align = (attr >> 3) & 7, arg = attr & 7; |
377 | switch (arg) { |
378 | case ATOM_ARG_REG: |
379 | case ATOM_ARG_ID: |
380 | (*ptr) += 2; |
381 | break; |
382 | case ATOM_ARG_PLL: |
383 | case ATOM_ARG_MC: |
384 | case ATOM_ARG_PS: |
385 | case ATOM_ARG_WS: |
386 | case ATOM_ARG_FB: |
387 | (*ptr)++; |
388 | break; |
389 | case ATOM_ARG_IMM: |
390 | switch (align) { |
391 | case ATOM_SRC_DWORD: |
392 | (*ptr) += 4; |
393 | return; |
394 | case ATOM_SRC_WORD0: |
395 | case ATOM_SRC_WORD8: |
396 | case ATOM_SRC_WORD16: |
397 | (*ptr) += 2; |
398 | return; |
399 | case ATOM_SRC_BYTE0: |
400 | case ATOM_SRC_BYTE8: |
401 | case ATOM_SRC_BYTE16: |
402 | case ATOM_SRC_BYTE24: |
403 | (*ptr)++; |
404 | return; |
405 | } |
406 | return; |
407 | } |
408 | } |
409 | |
410 | static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr) |
411 | { |
412 | return atom_get_src_int(ctx, attr, ptr, NULL, 1); |
413 | } |
414 | |
415 | static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr) |
416 | { |
417 | uint32_t val = 0xCDCDCDCD; |
418 | |
419 | switch (align) { |
420 | case ATOM_SRC_DWORD: |
421 | val = U32(*ptr); |
422 | (*ptr) += 4; |
423 | break; |
424 | case ATOM_SRC_WORD0: |
425 | case ATOM_SRC_WORD8: |
426 | case ATOM_SRC_WORD16: |
427 | val = U16(*ptr); |
428 | (*ptr) += 2; |
429 | break; |
430 | case ATOM_SRC_BYTE0: |
431 | case ATOM_SRC_BYTE8: |
432 | case ATOM_SRC_BYTE16: |
433 | case ATOM_SRC_BYTE24: |
434 | val = U8(*ptr); |
435 | (*ptr)++; |
436 | break; |
437 | } |
438 | return val; |
439 | } |
440 | |
static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
			     int *ptr, uint32_t *saved, int print)
{
	int align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];

	return atom_get_src_int(ctx, arg | (align << 3), ptr, saved, print);
}
449 | |
static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
{
	int align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];

	atom_skip_src_int(ctx, arg | (align << 3), ptr);
}
456 | |
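/*
 * Write a destination operand.  The new value is shifted and masked
 * into the bit field selected by 'attr', merged with the previously
 * saved contents ('saved'), and written back to the register, PS, WS,
 * frame buffer scratch, PLL or MC location addressed at *ptr.
 */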
457 | static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr, |
458 | int *ptr, uint32_t val, uint32_t saved) |
459 | { |
	uint32_t align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
	uint32_t old_val = val, idx;
463 | struct atom_context *gctx = ctx->ctx; |
464 | old_val &= atom_arg_mask[align] >> atom_arg_shift[align]; |
465 | val <<= atom_arg_shift[align]; |
466 | val &= atom_arg_mask[align]; |
467 | saved &= ~atom_arg_mask[align]; |
468 | val |= saved; |
469 | switch (arg) { |
470 | case ATOM_ARG_REG: |
471 | idx = U16(*ptr); |
472 | (*ptr) += 2; |
473 | DEBUG("REG[0x%04X]" , idx); |
474 | idx += gctx->reg_block; |
475 | switch (gctx->io_mode) { |
476 | case ATOM_IO_MM: |
477 | if (idx == 0) |
478 | gctx->card->reg_write(gctx->card, idx, |
479 | val << 2); |
480 | else |
481 | gctx->card->reg_write(gctx->card, idx, val); |
482 | break; |
483 | case ATOM_IO_PCI: |
484 | printk(KERN_INFO |
485 | "PCI registers are not implemented.\n" ); |
486 | return; |
487 | case ATOM_IO_SYSIO: |
488 | printk(KERN_INFO |
489 | "SYSIO registers are not implemented.\n" ); |
490 | return; |
491 | default: |
492 | if (!(gctx->io_mode & 0x80)) { |
493 | printk(KERN_INFO "Bad IO mode.\n" ); |
494 | return; |
495 | } |
496 | if (!gctx->iio[gctx->io_mode & 0xFF]) { |
497 | printk(KERN_INFO |
498 | "Undefined indirect IO write method %d.\n" , |
499 | gctx->io_mode & 0x7F); |
500 | return; |
501 | } |
502 | atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF], |
503 | idx, val); |
504 | } |
505 | break; |
506 | case ATOM_ARG_PS: |
507 | idx = U8(*ptr); |
508 | (*ptr)++; |
509 | DEBUG("PS[0x%02X]" , idx); |
510 | ctx->ps[idx] = cpu_to_le32(val); |
511 | break; |
512 | case ATOM_ARG_WS: |
513 | idx = U8(*ptr); |
514 | (*ptr)++; |
515 | DEBUG("WS[0x%02X]" , idx); |
516 | switch (idx) { |
517 | case ATOM_WS_QUOTIENT: |
518 | gctx->divmul[0] = val; |
519 | break; |
520 | case ATOM_WS_REMAINDER: |
521 | gctx->divmul[1] = val; |
522 | break; |
523 | case ATOM_WS_DATAPTR: |
524 | gctx->data_block = val; |
525 | break; |
526 | case ATOM_WS_SHIFT: |
527 | gctx->shift = val; |
528 | break; |
529 | case ATOM_WS_OR_MASK: |
530 | case ATOM_WS_AND_MASK: |
531 | break; |
532 | case ATOM_WS_FB_WINDOW: |
533 | gctx->fb_base = val; |
534 | break; |
535 | case ATOM_WS_ATTRIBUTES: |
536 | gctx->io_attr = val; |
537 | break; |
538 | case ATOM_WS_REGPTR: |
539 | gctx->reg_block = val; |
540 | break; |
541 | default: |
542 | ctx->ws[idx] = val; |
543 | } |
544 | break; |
545 | case ATOM_ARG_FB: |
546 | idx = U8(*ptr); |
547 | (*ptr)++; |
548 | if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) { |
549 | DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n" , |
550 | gctx->fb_base + (idx * 4), gctx->scratch_size_bytes); |
551 | } else |
552 | gctx->scratch[(gctx->fb_base / 4) + idx] = val; |
553 | DEBUG("FB[0x%02X]" , idx); |
554 | break; |
555 | case ATOM_ARG_PLL: |
556 | idx = U8(*ptr); |
557 | (*ptr)++; |
558 | DEBUG("PLL[0x%02X]" , idx); |
559 | gctx->card->pll_write(gctx->card, idx, val); |
560 | break; |
561 | case ATOM_ARG_MC: |
562 | idx = U8(*ptr); |
563 | (*ptr)++; |
564 | DEBUG("MC[0x%02X]" , idx); |
565 | gctx->card->mc_write(gctx->card, idx, val); |
566 | return; |
567 | } |
568 | switch (align) { |
569 | case ATOM_SRC_DWORD: |
570 | DEBUG(".[31:0] <- 0x%08X\n" , old_val); |
571 | break; |
572 | case ATOM_SRC_WORD0: |
573 | DEBUG(".[15:0] <- 0x%04X\n" , old_val); |
574 | break; |
575 | case ATOM_SRC_WORD8: |
576 | DEBUG(".[23:8] <- 0x%04X\n" , old_val); |
577 | break; |
578 | case ATOM_SRC_WORD16: |
579 | DEBUG(".[31:16] <- 0x%04X\n" , old_val); |
580 | break; |
581 | case ATOM_SRC_BYTE0: |
582 | DEBUG(".[7:0] <- 0x%02X\n" , old_val); |
583 | break; |
584 | case ATOM_SRC_BYTE8: |
585 | DEBUG(".[15:8] <- 0x%02X\n" , old_val); |
586 | break; |
587 | case ATOM_SRC_BYTE16: |
588 | DEBUG(".[23:16] <- 0x%02X\n" , old_val); |
589 | break; |
590 | case ATOM_SRC_BYTE24: |
591 | DEBUG(".[31:24] <- 0x%02X\n" , old_val); |
592 | break; |
593 | } |
594 | } |
595 | |
596 | static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg) |
597 | { |
598 | uint8_t attr = U8((*ptr)++); |
599 | uint32_t dst, src, saved; |
600 | int dptr = *ptr; |
601 | SDEBUG(" dst: " ); |
602 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); |
603 | SDEBUG(" src: " ); |
604 | src = atom_get_src(ctx, attr, ptr); |
605 | dst += src; |
606 | SDEBUG(" dst: " ); |
607 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); |
608 | } |
609 | |
610 | static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg) |
611 | { |
612 | uint8_t attr = U8((*ptr)++); |
613 | uint32_t dst, src, saved; |
614 | int dptr = *ptr; |
615 | SDEBUG(" dst: " ); |
616 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); |
617 | SDEBUG(" src: " ); |
618 | src = atom_get_src(ctx, attr, ptr); |
619 | dst &= src; |
620 | SDEBUG(" dst: " ); |
621 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); |
622 | } |
623 | |
624 | static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg) |
625 | { |
626 | printk("ATOM BIOS beeped!\n" ); |
627 | } |
628 | |
629 | static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg) |
630 | { |
631 | int idx = U8((*ptr)++); |
632 | int r = 0; |
633 | |
634 | if (idx < ATOM_TABLE_NAMES_CNT) |
635 | SDEBUG(" table: %d (%s)\n" , idx, atom_table_names[idx]); |
636 | else |
637 | SDEBUG(" table: %d\n" , idx); |
638 | if (U16(ctx->ctx->cmd_table + 4 + 2 * idx)) |
639 | r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift); |
640 | if (r) { |
641 | ctx->abort = true; |
642 | } |
643 | } |
644 | |
645 | static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg) |
646 | { |
647 | uint8_t attr = U8((*ptr)++); |
648 | uint32_t saved; |
649 | int dptr = *ptr; |
650 | attr &= 0x38; |
651 | attr |= atom_def_dst[attr >> 3] << 6; |
652 | atom_get_dst(ctx, arg, attr, ptr, &saved, 0); |
653 | SDEBUG(" dst: " ); |
654 | atom_put_dst(ctx, arg, attr, &dptr, 0, saved); |
655 | } |
656 | |
657 | static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg) |
658 | { |
659 | uint8_t attr = U8((*ptr)++); |
660 | uint32_t dst, src; |
661 | SDEBUG(" src1: " ); |
662 | dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); |
663 | SDEBUG(" src2: " ); |
664 | src = atom_get_src(ctx, attr, ptr); |
665 | ctx->ctx->cs_equal = (dst == src); |
666 | ctx->ctx->cs_above = (dst > src); |
667 | SDEBUG(" result: %s %s\n" , ctx->ctx->cs_equal ? "EQ" : "NE" , |
668 | ctx->ctx->cs_above ? "GT" : "LE" ); |
669 | } |
670 | |
671 | static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg) |
672 | { |
673 | unsigned count = U8((*ptr)++); |
674 | SDEBUG(" count: %d\n" , count); |
675 | if (arg == ATOM_UNIT_MICROSEC) |
676 | udelay(count); |
677 | else if (!drm_can_sleep()) |
678 | mdelay(count); |
679 | else |
680 | msleep(count); |
681 | } |
682 | |
683 | static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg) |
684 | { |
685 | uint8_t attr = U8((*ptr)++); |
686 | uint32_t dst, src; |
687 | SDEBUG(" src1: " ); |
688 | dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); |
689 | SDEBUG(" src2: " ); |
690 | src = atom_get_src(ctx, attr, ptr); |
691 | if (src != 0) { |
692 | ctx->ctx->divmul[0] = dst / src; |
693 | ctx->ctx->divmul[1] = dst % src; |
694 | } else { |
695 | ctx->ctx->divmul[0] = 0; |
696 | ctx->ctx->divmul[1] = 0; |
697 | } |
698 | } |
699 | |
700 | static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg) |
701 | { |
702 | /* functionally, a nop */ |
703 | } |
704 | |
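/*
 * Conditional jump.  Repeated jumps to the same target are timed so
 * that a script spinning on a condition for more than 5 seconds is
 * aborted instead of hanging the driver.
 */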
705 | static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) |
706 | { |
707 | int execute = 0, target = U16(*ptr); |
708 | unsigned long cjiffies; |
709 | |
710 | (*ptr) += 2; |
711 | switch (arg) { |
712 | case ATOM_COND_ABOVE: |
713 | execute = ctx->ctx->cs_above; |
714 | break; |
715 | case ATOM_COND_ABOVEOREQUAL: |
716 | execute = ctx->ctx->cs_above || ctx->ctx->cs_equal; |
717 | break; |
718 | case ATOM_COND_ALWAYS: |
719 | execute = 1; |
720 | break; |
721 | case ATOM_COND_BELOW: |
722 | execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal); |
723 | break; |
724 | case ATOM_COND_BELOWOREQUAL: |
725 | execute = !ctx->ctx->cs_above; |
726 | break; |
727 | case ATOM_COND_EQUAL: |
728 | execute = ctx->ctx->cs_equal; |
729 | break; |
730 | case ATOM_COND_NOTEQUAL: |
731 | execute = !ctx->ctx->cs_equal; |
732 | break; |
733 | } |
734 | if (arg != ATOM_COND_ALWAYS) |
735 | SDEBUG(" taken: %s\n" , execute ? "yes" : "no" ); |
736 | SDEBUG(" target: 0x%04X\n" , target); |
737 | if (execute) { |
738 | if (ctx->last_jump == (ctx->start + target)) { |
739 | cjiffies = jiffies; |
740 | if (time_after(cjiffies, ctx->last_jump_jiffies)) { |
741 | cjiffies -= ctx->last_jump_jiffies; |
742 | if ((jiffies_to_msecs(cjiffies) > 5000)) { |
743 | DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n" ); |
744 | ctx->abort = true; |
745 | } |
746 | } else { |
				/* jiffies wrapped around; just wait a little longer */
748 | ctx->last_jump_jiffies = jiffies; |
749 | } |
750 | } else { |
751 | ctx->last_jump = ctx->start + target; |
752 | ctx->last_jump_jiffies = jiffies; |
753 | } |
754 | *ptr = ctx->start + target; |
755 | } |
756 | } |
757 | |
758 | static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg) |
759 | { |
760 | uint8_t attr = U8((*ptr)++); |
761 | uint32_t dst, mask, src, saved; |
762 | int dptr = *ptr; |
763 | SDEBUG(" dst: " ); |
764 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); |
765 | mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr); |
766 | SDEBUG(" mask: 0x%08x" , mask); |
767 | SDEBUG(" src: " ); |
768 | src = atom_get_src(ctx, attr, ptr); |
769 | dst &= mask; |
770 | dst |= src; |
771 | SDEBUG(" dst: " ); |
772 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); |
773 | } |
774 | |
775 | static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg) |
776 | { |
777 | uint8_t attr = U8((*ptr)++); |
778 | uint32_t src, saved; |
779 | int dptr = *ptr; |
780 | if (((attr >> 3) & 7) != ATOM_SRC_DWORD) |
781 | atom_get_dst(ctx, arg, attr, ptr, &saved, 0); |
782 | else { |
783 | atom_skip_dst(ctx, arg, attr, ptr); |
784 | saved = 0xCDCDCDCD; |
785 | } |
786 | SDEBUG(" src: " ); |
787 | src = atom_get_src(ctx, attr, ptr); |
788 | SDEBUG(" dst: " ); |
789 | atom_put_dst(ctx, arg, attr, &dptr, src, saved); |
790 | } |
791 | |
792 | static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg) |
793 | { |
794 | uint8_t attr = U8((*ptr)++); |
795 | uint32_t dst, src; |
796 | SDEBUG(" src1: " ); |
797 | dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); |
798 | SDEBUG(" src2: " ); |
799 | src = atom_get_src(ctx, attr, ptr); |
800 | ctx->ctx->divmul[0] = dst * src; |
801 | } |
802 | |
803 | static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg) |
804 | { |
805 | /* nothing */ |
806 | } |
807 | |
808 | static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg) |
809 | { |
810 | uint8_t attr = U8((*ptr)++); |
811 | uint32_t dst, src, saved; |
812 | int dptr = *ptr; |
813 | SDEBUG(" dst: " ); |
814 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); |
815 | SDEBUG(" src: " ); |
816 | src = atom_get_src(ctx, attr, ptr); |
817 | dst |= src; |
818 | SDEBUG(" dst: " ); |
819 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); |
820 | } |
821 | |
822 | static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg) |
823 | { |
824 | uint8_t val = U8((*ptr)++); |
825 | SDEBUG("POST card output: 0x%02X\n" , val); |
826 | } |
827 | |
828 | static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg) |
829 | { |
830 | printk(KERN_INFO "unimplemented!\n" ); |
831 | } |
832 | |
833 | static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg) |
834 | { |
835 | printk(KERN_INFO "unimplemented!\n" ); |
836 | } |
837 | |
838 | static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg) |
839 | { |
840 | printk(KERN_INFO "unimplemented!\n" ); |
841 | } |
842 | |
843 | static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg) |
844 | { |
845 | int idx = U8(*ptr); |
846 | (*ptr)++; |
847 | SDEBUG(" block: %d\n" , idx); |
848 | if (!idx) |
849 | ctx->ctx->data_block = 0; |
850 | else if (idx == 255) |
851 | ctx->ctx->data_block = ctx->start; |
852 | else |
853 | ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx); |
854 | SDEBUG(" base: 0x%04X\n" , ctx->ctx->data_block); |
855 | } |
856 | |
857 | static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg) |
858 | { |
859 | uint8_t attr = U8((*ptr)++); |
860 | SDEBUG(" fb_base: " ); |
861 | ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr); |
862 | } |
863 | |
864 | static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg) |
865 | { |
866 | int port; |
867 | switch (arg) { |
868 | case ATOM_PORT_ATI: |
869 | port = U16(*ptr); |
870 | if (port < ATOM_IO_NAMES_CNT) |
871 | SDEBUG(" port: %d (%s)\n" , port, atom_io_names[port]); |
872 | else |
873 | SDEBUG(" port: %d\n" , port); |
874 | if (!port) |
875 | ctx->ctx->io_mode = ATOM_IO_MM; |
876 | else |
877 | ctx->ctx->io_mode = ATOM_IO_IIO | port; |
878 | (*ptr) += 2; |
879 | break; |
880 | case ATOM_PORT_PCI: |
881 | ctx->ctx->io_mode = ATOM_IO_PCI; |
882 | (*ptr)++; |
883 | break; |
884 | case ATOM_PORT_SYSIO: |
885 | ctx->ctx->io_mode = ATOM_IO_SYSIO; |
886 | (*ptr)++; |
887 | break; |
888 | } |
889 | } |
890 | |
891 | static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg) |
892 | { |
893 | ctx->ctx->reg_block = U16(*ptr); |
894 | (*ptr) += 2; |
895 | SDEBUG(" base: 0x%04X\n" , ctx->ctx->reg_block); |
896 | } |
897 | |
898 | static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg) |
899 | { |
900 | uint8_t attr = U8((*ptr)++), shift; |
901 | uint32_t saved, dst; |
902 | int dptr = *ptr; |
903 | attr &= 0x38; |
904 | attr |= atom_def_dst[attr >> 3] << 6; |
905 | SDEBUG(" dst: " ); |
906 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); |
907 | shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr); |
908 | SDEBUG(" shift: %d\n" , shift); |
909 | dst <<= shift; |
910 | SDEBUG(" dst: " ); |
911 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); |
912 | } |
913 | |
914 | static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg) |
915 | { |
916 | uint8_t attr = U8((*ptr)++), shift; |
917 | uint32_t saved, dst; |
918 | int dptr = *ptr; |
919 | attr &= 0x38; |
920 | attr |= atom_def_dst[attr >> 3] << 6; |
921 | SDEBUG(" dst: " ); |
922 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); |
923 | shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr); |
924 | SDEBUG(" shift: %d\n" , shift); |
925 | dst >>= shift; |
926 | SDEBUG(" dst: " ); |
927 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); |
928 | } |
929 | |
930 | static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) |
931 | { |
932 | uint8_t attr = U8((*ptr)++), shift; |
933 | uint32_t saved, dst; |
934 | int dptr = *ptr; |
935 | uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3]; |
936 | SDEBUG(" dst: " ); |
937 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); |
	/* the op needs the full dst value */
939 | dst = saved; |
940 | shift = atom_get_src(ctx, attr, ptr); |
941 | SDEBUG(" shift: %d\n" , shift); |
942 | dst <<= shift; |
943 | dst &= atom_arg_mask[dst_align]; |
944 | dst >>= atom_arg_shift[dst_align]; |
945 | SDEBUG(" dst: " ); |
946 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); |
947 | } |
948 | |
949 | static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg) |
950 | { |
951 | uint8_t attr = U8((*ptr)++), shift; |
952 | uint32_t saved, dst; |
953 | int dptr = *ptr; |
954 | uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3]; |
955 | SDEBUG(" dst: " ); |
956 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); |
	/* the op needs the full dst value */
958 | dst = saved; |
959 | shift = atom_get_src(ctx, attr, ptr); |
960 | SDEBUG(" shift: %d\n" , shift); |
961 | dst >>= shift; |
962 | dst &= atom_arg_mask[dst_align]; |
963 | dst >>= atom_arg_shift[dst_align]; |
964 | SDEBUG(" dst: " ); |
965 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); |
966 | } |
967 | |
968 | static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg) |
969 | { |
970 | uint8_t attr = U8((*ptr)++); |
971 | uint32_t dst, src, saved; |
972 | int dptr = *ptr; |
973 | SDEBUG(" dst: " ); |
974 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); |
975 | SDEBUG(" src: " ); |
976 | src = atom_get_src(ctx, attr, ptr); |
977 | dst -= src; |
978 | SDEBUG(" dst: " ); |
979 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); |
980 | } |
981 | |
982 | static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg) |
983 | { |
984 | uint8_t attr = U8((*ptr)++); |
985 | uint32_t src, val, target; |
986 | SDEBUG(" switch: " ); |
987 | src = atom_get_src(ctx, attr, ptr); |
988 | while (U16(*ptr) != ATOM_CASE_END) |
989 | if (U8(*ptr) == ATOM_CASE_MAGIC) { |
990 | (*ptr)++; |
991 | SDEBUG(" case: " ); |
			val = atom_get_src(ctx,
					   (attr & 0x38) | ATOM_ARG_IMM, ptr);
995 | target = U16(*ptr); |
996 | if (val == src) { |
997 | SDEBUG(" target: %04X\n" , target); |
998 | *ptr = ctx->start + target; |
999 | return; |
1000 | } |
1001 | (*ptr) += 2; |
1002 | } else { |
1003 | printk(KERN_INFO "Bad case.\n" ); |
1004 | return; |
1005 | } |
1006 | (*ptr) += 2; |
1007 | } |
1008 | |
1009 | static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg) |
1010 | { |
1011 | uint8_t attr = U8((*ptr)++); |
1012 | uint32_t dst, src; |
1013 | SDEBUG(" src1: " ); |
1014 | dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); |
1015 | SDEBUG(" src2: " ); |
1016 | src = atom_get_src(ctx, attr, ptr); |
1017 | ctx->ctx->cs_equal = ((dst & src) == 0); |
1018 | SDEBUG(" result: %s\n" , ctx->ctx->cs_equal ? "EQ" : "NE" ); |
1019 | } |
1020 | |
1021 | static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg) |
1022 | { |
1023 | uint8_t attr = U8((*ptr)++); |
1024 | uint32_t dst, src, saved; |
1025 | int dptr = *ptr; |
1026 | SDEBUG(" dst: " ); |
1027 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); |
1028 | SDEBUG(" src: " ); |
1029 | src = atom_get_src(ctx, attr, ptr); |
1030 | dst ^= src; |
1031 | SDEBUG(" dst: " ); |
1032 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); |
1033 | } |
1034 | |
1035 | static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg) |
1036 | { |
1037 | printk(KERN_INFO "unimplemented!\n" ); |
1038 | } |
1039 | |
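/*
 * Opcode dispatch table, indexed by the opcode byte.  Each entry pairs
 * a handler with the argument type or condition code it operates on.
 */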
1040 | static struct { |
1041 | void (*func) (atom_exec_context *, int *, int); |
1042 | int arg; |
1043 | } opcode_table[ATOM_OP_CNT] = { |
1044 | { |
1045 | NULL, 0}, { |
1046 | atom_op_move, ATOM_ARG_REG}, { |
1047 | atom_op_move, ATOM_ARG_PS}, { |
1048 | atom_op_move, ATOM_ARG_WS}, { |
1049 | atom_op_move, ATOM_ARG_FB}, { |
1050 | atom_op_move, ATOM_ARG_PLL}, { |
1051 | atom_op_move, ATOM_ARG_MC}, { |
1052 | atom_op_and, ATOM_ARG_REG}, { |
1053 | atom_op_and, ATOM_ARG_PS}, { |
1054 | atom_op_and, ATOM_ARG_WS}, { |
1055 | atom_op_and, ATOM_ARG_FB}, { |
1056 | atom_op_and, ATOM_ARG_PLL}, { |
1057 | atom_op_and, ATOM_ARG_MC}, { |
1058 | atom_op_or, ATOM_ARG_REG}, { |
1059 | atom_op_or, ATOM_ARG_PS}, { |
1060 | atom_op_or, ATOM_ARG_WS}, { |
1061 | atom_op_or, ATOM_ARG_FB}, { |
1062 | atom_op_or, ATOM_ARG_PLL}, { |
1063 | atom_op_or, ATOM_ARG_MC}, { |
1064 | atom_op_shift_left, ATOM_ARG_REG}, { |
1065 | atom_op_shift_left, ATOM_ARG_PS}, { |
1066 | atom_op_shift_left, ATOM_ARG_WS}, { |
1067 | atom_op_shift_left, ATOM_ARG_FB}, { |
1068 | atom_op_shift_left, ATOM_ARG_PLL}, { |
1069 | atom_op_shift_left, ATOM_ARG_MC}, { |
1070 | atom_op_shift_right, ATOM_ARG_REG}, { |
1071 | atom_op_shift_right, ATOM_ARG_PS}, { |
1072 | atom_op_shift_right, ATOM_ARG_WS}, { |
1073 | atom_op_shift_right, ATOM_ARG_FB}, { |
1074 | atom_op_shift_right, ATOM_ARG_PLL}, { |
1075 | atom_op_shift_right, ATOM_ARG_MC}, { |
1076 | atom_op_mul, ATOM_ARG_REG}, { |
1077 | atom_op_mul, ATOM_ARG_PS}, { |
1078 | atom_op_mul, ATOM_ARG_WS}, { |
1079 | atom_op_mul, ATOM_ARG_FB}, { |
1080 | atom_op_mul, ATOM_ARG_PLL}, { |
1081 | atom_op_mul, ATOM_ARG_MC}, { |
1082 | atom_op_div, ATOM_ARG_REG}, { |
1083 | atom_op_div, ATOM_ARG_PS}, { |
1084 | atom_op_div, ATOM_ARG_WS}, { |
1085 | atom_op_div, ATOM_ARG_FB}, { |
1086 | atom_op_div, ATOM_ARG_PLL}, { |
1087 | atom_op_div, ATOM_ARG_MC}, { |
1088 | atom_op_add, ATOM_ARG_REG}, { |
1089 | atom_op_add, ATOM_ARG_PS}, { |
1090 | atom_op_add, ATOM_ARG_WS}, { |
1091 | atom_op_add, ATOM_ARG_FB}, { |
1092 | atom_op_add, ATOM_ARG_PLL}, { |
1093 | atom_op_add, ATOM_ARG_MC}, { |
1094 | atom_op_sub, ATOM_ARG_REG}, { |
1095 | atom_op_sub, ATOM_ARG_PS}, { |
1096 | atom_op_sub, ATOM_ARG_WS}, { |
1097 | atom_op_sub, ATOM_ARG_FB}, { |
1098 | atom_op_sub, ATOM_ARG_PLL}, { |
1099 | atom_op_sub, ATOM_ARG_MC}, { |
1100 | atom_op_setport, ATOM_PORT_ATI}, { |
1101 | atom_op_setport, ATOM_PORT_PCI}, { |
1102 | atom_op_setport, ATOM_PORT_SYSIO}, { |
1103 | atom_op_setregblock, 0}, { |
1104 | atom_op_setfbbase, 0}, { |
1105 | atom_op_compare, ATOM_ARG_REG}, { |
1106 | atom_op_compare, ATOM_ARG_PS}, { |
1107 | atom_op_compare, ATOM_ARG_WS}, { |
1108 | atom_op_compare, ATOM_ARG_FB}, { |
1109 | atom_op_compare, ATOM_ARG_PLL}, { |
1110 | atom_op_compare, ATOM_ARG_MC}, { |
1111 | atom_op_switch, 0}, { |
1112 | atom_op_jump, ATOM_COND_ALWAYS}, { |
1113 | atom_op_jump, ATOM_COND_EQUAL}, { |
1114 | atom_op_jump, ATOM_COND_BELOW}, { |
1115 | atom_op_jump, ATOM_COND_ABOVE}, { |
1116 | atom_op_jump, ATOM_COND_BELOWOREQUAL}, { |
1117 | atom_op_jump, ATOM_COND_ABOVEOREQUAL}, { |
1118 | atom_op_jump, ATOM_COND_NOTEQUAL}, { |
1119 | atom_op_test, ATOM_ARG_REG}, { |
1120 | atom_op_test, ATOM_ARG_PS}, { |
1121 | atom_op_test, ATOM_ARG_WS}, { |
1122 | atom_op_test, ATOM_ARG_FB}, { |
1123 | atom_op_test, ATOM_ARG_PLL}, { |
1124 | atom_op_test, ATOM_ARG_MC}, { |
1125 | atom_op_delay, ATOM_UNIT_MILLISEC}, { |
1126 | atom_op_delay, ATOM_UNIT_MICROSEC}, { |
1127 | atom_op_calltable, 0}, { |
1128 | atom_op_repeat, 0}, { |
1129 | atom_op_clear, ATOM_ARG_REG}, { |
1130 | atom_op_clear, ATOM_ARG_PS}, { |
1131 | atom_op_clear, ATOM_ARG_WS}, { |
1132 | atom_op_clear, ATOM_ARG_FB}, { |
1133 | atom_op_clear, ATOM_ARG_PLL}, { |
1134 | atom_op_clear, ATOM_ARG_MC}, { |
1135 | atom_op_nop, 0}, { |
1136 | atom_op_eot, 0}, { |
1137 | atom_op_mask, ATOM_ARG_REG}, { |
1138 | atom_op_mask, ATOM_ARG_PS}, { |
1139 | atom_op_mask, ATOM_ARG_WS}, { |
1140 | atom_op_mask, ATOM_ARG_FB}, { |
1141 | atom_op_mask, ATOM_ARG_PLL}, { |
1142 | atom_op_mask, ATOM_ARG_MC}, { |
1143 | atom_op_postcard, 0}, { |
1144 | atom_op_beep, 0}, { |
1145 | atom_op_savereg, 0}, { |
1146 | atom_op_restorereg, 0}, { |
1147 | atom_op_setdatablock, 0}, { |
1148 | atom_op_xor, ATOM_ARG_REG}, { |
1149 | atom_op_xor, ATOM_ARG_PS}, { |
1150 | atom_op_xor, ATOM_ARG_WS}, { |
1151 | atom_op_xor, ATOM_ARG_FB}, { |
1152 | atom_op_xor, ATOM_ARG_PLL}, { |
1153 | atom_op_xor, ATOM_ARG_MC}, { |
1154 | atom_op_shl, ATOM_ARG_REG}, { |
1155 | atom_op_shl, ATOM_ARG_PS}, { |
1156 | atom_op_shl, ATOM_ARG_WS}, { |
1157 | atom_op_shl, ATOM_ARG_FB}, { |
1158 | atom_op_shl, ATOM_ARG_PLL}, { |
1159 | atom_op_shl, ATOM_ARG_MC}, { |
1160 | atom_op_shr, ATOM_ARG_REG}, { |
1161 | atom_op_shr, ATOM_ARG_PS}, { |
1162 | atom_op_shr, ATOM_ARG_WS}, { |
1163 | atom_op_shr, ATOM_ARG_FB}, { |
1164 | atom_op_shr, ATOM_ARG_PLL}, { |
1165 | atom_op_shr, ATOM_ARG_MC}, { |
1166 | atom_op_debug, 0},}; |
1167 | |
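/*
 * Core interpreter loop: look up the command table entry for 'index',
 * allocate its workspace, then fetch and dispatch opcodes until an
 * end-of-table or out-of-range opcode is hit, or until a nested table
 * or a stuck jump flags an abort.
 */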
1168 | static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params) |
1169 | { |
1170 | int base = CU16(ctx->cmd_table + 4 + 2 * index); |
1171 | int len, ws, ps, ptr; |
1172 | unsigned char op; |
1173 | atom_exec_context ectx; |
1174 | int ret = 0; |
1175 | |
1176 | if (!base) |
1177 | return -EINVAL; |
1178 | |
1179 | len = CU16(base + ATOM_CT_SIZE_PTR); |
1180 | ws = CU8(base + ATOM_CT_WS_PTR); |
1181 | ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK; |
1182 | ptr = base + ATOM_CT_CODE_PTR; |
1183 | |
1184 | SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n" , base, len, ws, ps); |
1185 | |
1186 | ectx.ctx = ctx; |
1187 | ectx.ps_shift = ps / 4; |
1188 | ectx.start = base; |
1189 | ectx.ps = params; |
1190 | ectx.abort = false; |
1191 | ectx.last_jump = 0; |
	if (ws) {
		ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
		if (!ectx.ws)
			return -ENOMEM;
	} else {
		ectx.ws = NULL;
	}
1196 | |
1197 | debug_depth++; |
1198 | while (1) { |
1199 | op = CU8(ptr++); |
1200 | if (op < ATOM_OP_NAMES_CNT) |
1201 | SDEBUG("%s @ 0x%04X\n" , atom_op_names[op], ptr - 1); |
1202 | else |
1203 | SDEBUG("[%d] @ 0x%04X\n" , op, ptr - 1); |
1204 | if (ectx.abort) { |
1205 | DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n" , |
1206 | base, len, ws, ps, ptr - 1); |
1207 | ret = -EINVAL; |
1208 | goto free; |
1209 | } |
1210 | |
1211 | if (op < ATOM_OP_CNT && op > 0) |
1212 | opcode_table[op].func(&ectx, &ptr, |
1213 | opcode_table[op].arg); |
1214 | else |
1215 | break; |
1216 | |
1217 | if (op == ATOM_OP_EOT) |
1218 | break; |
1219 | } |
1220 | debug_depth--; |
1221 | SDEBUG("<<\n" ); |
1222 | |
1223 | free: |
1224 | if (ws) |
1225 | kfree(ectx.ws); |
1226 | return ret; |
1227 | } |
1228 | |
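/*
 * Public entry point: serialize table execution on ctx->mutex and reset
 * the interpreter state (data block, register block, FB window, IO mode
 * and div/mul results) before running the table.
 */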
1229 | int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) |
1230 | { |
1231 | int r; |
1232 | |
1233 | mutex_lock(&ctx->mutex); |
1234 | /* reset data block */ |
1235 | ctx->data_block = 0; |
1236 | /* reset reg block */ |
1237 | ctx->reg_block = 0; |
1238 | /* reset fb window */ |
1239 | ctx->fb_base = 0; |
1240 | /* reset io mode */ |
1241 | ctx->io_mode = ATOM_IO_MM; |
1242 | /* reset divmul */ |
1243 | ctx->divmul[0] = 0; |
1244 | ctx->divmul[1] = 0; |
1245 | r = atom_execute_table_locked(ctx, index, params); |
1246 | mutex_unlock(&ctx->mutex); |
1247 | return r; |
1248 | } |
1249 | |
1250 | static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 }; |
1251 | |
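/*
 * Scan the IIO programs in the data table and record the start offset
 * of each, indexed by its port id, so atom_iio_execute() can find them.
 */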
1252 | static void atom_index_iio(struct atom_context *ctx, int base) |
1253 | { |
1254 | ctx->iio = kzalloc(2 * 256, GFP_KERNEL); |
1255 | if (!ctx->iio) |
1256 | return; |
1257 | while (CU8(base) == ATOM_IIO_START) { |
1258 | ctx->iio[CU8(base + 1)] = base + 2; |
1259 | base += 2; |
1260 | while (CU8(base) != ATOM_IIO_END) |
1261 | base += atom_iio_len[CU8(base)]; |
1262 | base += 3; |
1263 | } |
1264 | } |
1265 | |
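/*
 * Validate the BIOS image (BIOS, ATI and ATOM magic numbers), locate
 * the command and data tables, index the IIO programs and report the
 * BIOS name string.
 */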
1266 | struct atom_context *atom_parse(struct card_info *card, void *bios) |
1267 | { |
1268 | int base; |
1269 | struct atom_context *ctx = |
1270 | kzalloc(sizeof(struct atom_context), GFP_KERNEL); |
1271 | char *str; |
1272 | char name[512]; |
1273 | int i; |
1274 | |
1275 | if (!ctx) |
1276 | return NULL; |
1277 | |
1278 | ctx->card = card; |
1279 | ctx->bios = bios; |
1280 | |
1281 | if (CU16(0) != ATOM_BIOS_MAGIC) { |
1282 | printk(KERN_INFO "Invalid BIOS magic.\n" ); |
1283 | kfree(ctx); |
1284 | return NULL; |
1285 | } |
1286 | if (strncmp |
1287 | (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC, |
1288 | strlen(ATOM_ATI_MAGIC))) { |
1289 | printk(KERN_INFO "Invalid ATI magic.\n" ); |
1290 | kfree(ctx); |
1291 | return NULL; |
1292 | } |
1293 | |
1294 | base = CU16(ATOM_ROM_TABLE_PTR); |
1295 | if (strncmp |
1296 | (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC, |
1297 | strlen(ATOM_ROM_MAGIC))) { |
1298 | printk(KERN_INFO "Invalid ATOM magic.\n" ); |
1299 | kfree(ctx); |
1300 | return NULL; |
1301 | } |
1302 | |
1303 | ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR); |
1304 | ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR); |
1305 | atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4); |
1306 | if (!ctx->iio) { |
1307 | atom_destroy(ctx); |
1308 | return NULL; |
1309 | } |
1310 | |
1311 | str = CSTR(CU16(base + ATOM_ROM_MSG_PTR)); |
1312 | while (*str && ((*str == '\n') || (*str == '\r'))) |
1313 | str++; |
1314 | /* name string isn't always 0 terminated */ |
1315 | for (i = 0; i < 511; i++) { |
1316 | name[i] = str[i]; |
1317 | if (name[i] < '.' || name[i] > 'z') { |
1318 | name[i] = 0; |
1319 | break; |
1320 | } |
1321 | } |
1322 | printk(KERN_INFO "ATOM BIOS: %s\n" , name); |
1323 | |
1324 | return ctx; |
1325 | } |
1326 | |
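/*
 * Run the ASIC_Init command table with the default SCLK/MCLK from the
 * firmware info table, then (on pre-R600 parts) the fan control table
 * if it is present.
 */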
1327 | int atom_asic_init(struct atom_context *ctx) |
1328 | { |
1329 | struct radeon_device *rdev = ctx->card->dev->dev_private; |
1330 | int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR); |
1331 | uint32_t ps[16]; |
1332 | int ret; |
1333 | |
1334 | memset(ps, 0, 64); |
1335 | |
1336 | ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR)); |
1337 | ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR)); |
1338 | if (!ps[0] || !ps[1]) |
1339 | return 1; |
1340 | |
1341 | if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT)) |
1342 | return 1; |
1343 | ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps); |
1344 | if (ret) |
1345 | return ret; |
1346 | |
1347 | memset(ps, 0, 64); |
1348 | |
1349 | if (rdev->family < CHIP_R600) { |
1350 | if (CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_SPDFANCNTL)) |
1351 | atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps); |
1352 | } |
1353 | return ret; |
1354 | } |
1355 | |
1356 | void atom_destroy(struct atom_context *ctx) |
1357 | { |
1358 | kfree(ctx->iio); |
1359 | kfree(ctx); |
1360 | } |
1361 | |
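/*
 * Look up entry 'index' in the master data table.  Returns false if the
 * table is not present; otherwise reports its size, format/content
 * revisions and start offset.
 */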
bool atom_parse_data_header(struct atom_context *ctx, int index,
			    uint16_t * size, uint8_t * frev, uint8_t * crev,
			    uint16_t * data_start)
1365 | { |
1366 | int offset = index * 2 + 4; |
1367 | int idx = CU16(ctx->data_table + offset); |
1368 | u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4); |
1369 | |
1370 | if (!mdt[index]) |
1371 | return false; |
1372 | |
1373 | if (size) |
1374 | *size = CU16(idx); |
1375 | if (frev) |
1376 | *frev = CU8(idx + 2); |
1377 | if (crev) |
1378 | *crev = CU8(idx + 3); |
1379 | *data_start = idx; |
1380 | return true; |
1381 | } |
1382 | |
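/*
 * Look up entry 'index' in the master command table and report its
 * format/content revisions.  Returns false if the table is not present.
 */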
bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
			   uint8_t * crev)
1385 | { |
1386 | int offset = index * 2 + 4; |
1387 | int idx = CU16(ctx->cmd_table + offset); |
1388 | u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4); |
1389 | |
1390 | if (!mct[index]) |
1391 | return false; |
1392 | |
1393 | if (frev) |
1394 | *frev = CU8(idx + 2); |
1395 | if (crev) |
1396 | *crev = CU8(idx + 3); |
1397 | return true; |
1398 | } |
1399 | |
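/*
 * Allocate the scratch buffer that stands in for the firmware-reserved
 * VRAM area; the size comes from the VRAM_UsageByFirmware data table,
 * with a 20kb fallback when the table is absent.
 */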
1400 | int atom_allocate_fb_scratch(struct atom_context *ctx) |
1401 | { |
1402 | int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware); |
1403 | uint16_t data_offset; |
1404 | int usage_bytes = 0; |
1405 | struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage; |
1406 | |
1407 | if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { |
1408 | firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset); |
1409 | |
1410 | DRM_DEBUG("atom firmware requested %08x %dkb\n" , |
1411 | le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware), |
1412 | le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb)); |
1413 | |
1414 | usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024; |
1415 | } |
1416 | ctx->scratch_size_bytes = 0; |
1417 | if (usage_bytes == 0) |
1418 | usage_bytes = 20 * 1024; |
1419 | /* allocate some scratch memory */ |
1420 | ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL); |
1421 | if (!ctx->scratch) |
1422 | return -ENOMEM; |
1423 | ctx->scratch_size_bytes = usage_bytes; |
1424 | return 0; |
1425 | } |
1426 | |