1 | /* |
2 | * Copyright 2008 Advanced Micro Devices, Inc. |
3 | * Copyright 2008 Red Hat Inc. |
4 | * Copyright 2009 Jerome Glisse. |
5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), |
8 | * to deal in the Software without restriction, including without limitation |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
10 | * and/or sell copies of the Software, and to permit persons to whom the |
11 | * Software is furnished to do so, subject to the following conditions: |
12 | * |
13 | * The above copyright notice and this permission notice shall be included in |
14 | * all copies or substantial portions of the Software. |
15 | * |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
22 | * OTHER DEALINGS IN THE SOFTWARE. |
23 | * |
24 | * Authors: Dave Airlie |
25 | * Alex Deucher |
26 | * Jerome Glisse |
27 | */ |
28 | #include <linux/seq_file.h> |
29 | #include <linux/slab.h> |
30 | #include <drm/drmP.h> |
31 | #include <drm/radeon_drm.h> |
32 | #include "radeon_reg.h" |
33 | #include "radeon.h" |
34 | #include "radeon_asic.h" |
35 | #include "r100d.h" |
36 | #include "rs100d.h" |
37 | #include "rv200d.h" |
38 | #include "rv250d.h" |
39 | #include "atom.h" |
40 | |
41 | #include <linux/firmware.h> |
42 | #include <linux/module.h> |
43 | |
44 | #include "r100_reg_safe.h" |
45 | #include "rn50_reg_safe.h" |
46 | |
47 | /* Firmware Names */ |
48 | #define FIRMWARE_R100 "radeon/R100_cp.bin" |
49 | #define FIRMWARE_R200 "radeon/R200_cp.bin" |
50 | #define FIRMWARE_R300 "radeon/R300_cp.bin" |
51 | #define FIRMWARE_R420 "radeon/R420_cp.bin" |
52 | #define FIRMWARE_RS690 "radeon/RS690_cp.bin" |
53 | #define FIRMWARE_RS600 "radeon/RS600_cp.bin" |
54 | #define FIRMWARE_R520 "radeon/R520_cp.bin" |
55 | |
56 | MODULE_FIRMWARE(FIRMWARE_R100); |
57 | MODULE_FIRMWARE(FIRMWARE_R200); |
58 | MODULE_FIRMWARE(FIRMWARE_R300); |
59 | MODULE_FIRMWARE(FIRMWARE_R420); |
60 | MODULE_FIRMWARE(FIRMWARE_RS690); |
61 | MODULE_FIRMWARE(FIRMWARE_RS600); |
62 | MODULE_FIRMWARE(FIRMWARE_R520); |
63 | |
64 | #include "r100_track.h" |
65 | |
/* This file gathers functions specific to:
67 | * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 |
68 | * and others in some cases. |
69 | */ |
70 | |
71 | static bool r100_is_in_vblank(struct radeon_device *rdev, int crtc) |
72 | { |
73 | if (crtc == 0) { |
74 | if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR) |
75 | return true; |
76 | else |
77 | return false; |
78 | } else { |
79 | if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR) |
80 | return true; |
81 | else |
82 | return false; |
83 | } |
84 | } |
85 | |
86 | static bool r100_is_counter_moving(struct radeon_device *rdev, int crtc) |
87 | { |
88 | u32 vline1, vline2; |
89 | |
90 | if (crtc == 0) { |
91 | vline1 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; |
92 | vline2 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; |
93 | } else { |
94 | vline1 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; |
95 | vline2 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; |
96 | } |
97 | if (vline1 != vline2) |
98 | return true; |
99 | else |
100 | return false; |
101 | } |
102 | |
103 | /** |
104 | * r100_wait_for_vblank - vblank wait asic callback. |
105 | * |
106 | * @rdev: radeon_device pointer |
107 | * @crtc: crtc to wait for vblank on |
108 | * |
109 | * Wait for vblank on the requested crtc (r1xx-r4xx). |
110 | */ |
111 | void r100_wait_for_vblank(struct radeon_device *rdev, int crtc) |
112 | { |
113 | unsigned i = 0; |
114 | |
115 | if (crtc >= rdev->num_crtc) |
116 | return; |
117 | |
118 | if (crtc == 0) { |
119 | if (!(RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN)) |
120 | return; |
121 | } else { |
122 | if (!(RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN)) |
123 | return; |
124 | } |
125 | |
126 | /* depending on when we hit vblank, we may be close to active; if so, |
127 | * wait for another frame. |
128 | */ |
129 | while (r100_is_in_vblank(rdev, crtc)) { |
130 | if (i++ % 100 == 0) { |
131 | if (!r100_is_counter_moving(rdev, crtc)) |
132 | break; |
133 | } |
134 | } |
135 | |
136 | while (!r100_is_in_vblank(rdev, crtc)) { |
137 | if (i++ % 100 == 0) { |
138 | if (!r100_is_counter_moving(rdev, crtc)) |
139 | break; |
140 | } |
141 | } |
142 | } |
143 | |
144 | /** |
145 | * r100_pre_page_flip - pre-pageflip callback. |
146 | * |
147 | * @rdev: radeon_device pointer |
148 | * @crtc: crtc to prepare for pageflip on |
149 | * |
150 | * Pre-pageflip callback (r1xx-r4xx). |
151 | * Enables the pageflip irq (vblank irq). |
152 | */ |
153 | void r100_pre_page_flip(struct radeon_device *rdev, int crtc) |
154 | { |
155 | /* enable the pflip int */ |
156 | radeon_irq_kms_pflip_irq_get(rdev, crtc); |
157 | } |
158 | |
159 | /** |
 * r100_post_page_flip - post-pageflip callback.
161 | * |
162 | * @rdev: radeon_device pointer |
163 | * @crtc: crtc to cleanup pageflip on |
164 | * |
165 | * Post-pageflip callback (r1xx-r4xx). |
166 | * Disables the pageflip irq (vblank irq). |
167 | */ |
168 | void r100_post_page_flip(struct radeon_device *rdev, int crtc) |
169 | { |
170 | /* disable the pflip int */ |
171 | radeon_irq_kms_pflip_irq_put(rdev, crtc); |
172 | } |
173 | |
174 | /** |
175 | * r100_page_flip - pageflip callback. |
176 | * |
177 | * @rdev: radeon_device pointer |
178 | * @crtc_id: crtc to cleanup pageflip on |
179 | * @crtc_base: new address of the crtc (GPU MC address) |
180 | * |
181 | * Does the actual pageflip (r1xx-r4xx). |
 * During vblank we take the crtc lock and wait for the update_pending
 * bit to go high; when it does, we release the lock and allow the
 * double-buffered update to take place.
185 | * Returns the current update pending status. |
186 | */ |
187 | u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) |
188 | { |
189 | struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; |
190 | u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK; |
191 | int i; |
192 | |
193 | /* Lock the graphics update lock */ |
194 | /* update the scanout addresses */ |
195 | WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp); |
196 | |
197 | /* Wait for update_pending to go high. */ |
198 | for (i = 0; i < rdev->usec_timeout; i++) { |
199 | if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET) |
200 | break; |
201 | udelay(1); |
202 | } |
203 | DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n" ); |
204 | |
205 | /* Unlock the lock, so double-buffering can take place inside vblank */ |
206 | tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK; |
207 | WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp); |
208 | |
209 | /* Return current update_pending status: */ |
210 | return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET; |
211 | } |
212 | |
213 | /** |
214 | * r100_pm_get_dynpm_state - look up dynpm power state callback. |
215 | * |
216 | * @rdev: radeon_device pointer |
217 | * |
218 | * Look up the optimal power state based on the |
219 | * current state of the GPU (r1xx-r5xx). |
220 | * Used for dynpm only. |
221 | */ |
222 | void r100_pm_get_dynpm_state(struct radeon_device *rdev) |
223 | { |
224 | int i; |
225 | rdev->pm.dynpm_can_upclock = true; |
226 | rdev->pm.dynpm_can_downclock = true; |
227 | |
228 | switch (rdev->pm.dynpm_planned_action) { |
229 | case DYNPM_ACTION_MINIMUM: |
230 | rdev->pm.requested_power_state_index = 0; |
231 | rdev->pm.dynpm_can_downclock = false; |
232 | break; |
233 | case DYNPM_ACTION_DOWNCLOCK: |
234 | if (rdev->pm.current_power_state_index == 0) { |
235 | rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; |
236 | rdev->pm.dynpm_can_downclock = false; |
237 | } else { |
238 | if (rdev->pm.active_crtc_count > 1) { |
239 | for (i = 0; i < rdev->pm.num_power_states; i++) { |
240 | if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) |
241 | continue; |
242 | else if (i >= rdev->pm.current_power_state_index) { |
243 | rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; |
244 | break; |
245 | } else { |
246 | rdev->pm.requested_power_state_index = i; |
247 | break; |
248 | } |
249 | } |
250 | } else |
251 | rdev->pm.requested_power_state_index = |
252 | rdev->pm.current_power_state_index - 1; |
253 | } |
254 | /* don't use the power state if crtcs are active and no display flag is set */ |
255 | if ((rdev->pm.active_crtc_count > 0) && |
256 | (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags & |
257 | RADEON_PM_MODE_NO_DISPLAY)) { |
258 | rdev->pm.requested_power_state_index++; |
259 | } |
260 | break; |
261 | case DYNPM_ACTION_UPCLOCK: |
262 | if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) { |
263 | rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; |
264 | rdev->pm.dynpm_can_upclock = false; |
265 | } else { |
266 | if (rdev->pm.active_crtc_count > 1) { |
267 | for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) { |
268 | if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) |
269 | continue; |
270 | else if (i <= rdev->pm.current_power_state_index) { |
271 | rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; |
272 | break; |
273 | } else { |
274 | rdev->pm.requested_power_state_index = i; |
275 | break; |
276 | } |
277 | } |
278 | } else |
279 | rdev->pm.requested_power_state_index = |
280 | rdev->pm.current_power_state_index + 1; |
281 | } |
282 | break; |
283 | case DYNPM_ACTION_DEFAULT: |
284 | rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index; |
285 | rdev->pm.dynpm_can_upclock = false; |
286 | break; |
287 | case DYNPM_ACTION_NONE: |
288 | default: |
289 | DRM_ERROR("Requested mode for not defined action\n" ); |
290 | return; |
291 | } |
292 | /* only one clock mode per power state */ |
293 | rdev->pm.requested_clock_mode_index = 0; |
294 | |
295 | DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n" , |
296 | rdev->pm.power_state[rdev->pm.requested_power_state_index]. |
297 | clock_info[rdev->pm.requested_clock_mode_index].sclk, |
298 | rdev->pm.power_state[rdev->pm.requested_power_state_index]. |
299 | clock_info[rdev->pm.requested_clock_mode_index].mclk, |
300 | rdev->pm.power_state[rdev->pm.requested_power_state_index]. |
301 | pcie_lanes); |
302 | } |
303 | |
304 | /** |
305 | * r100_pm_init_profile - Initialize power profiles callback. |
306 | * |
307 | * @rdev: radeon_device pointer |
308 | * |
309 | * Initialize the power states used in profile mode |
310 | * (r1xx-r3xx). |
311 | * Used for profile mode only. |
312 | */ |
313 | void r100_pm_init_profile(struct radeon_device *rdev) |
314 | { |
315 | /* default */ |
316 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; |
317 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; |
318 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; |
319 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; |
320 | /* low sh */ |
321 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0; |
322 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0; |
323 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; |
324 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; |
325 | /* mid sh */ |
326 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0; |
327 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0; |
328 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; |
329 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; |
330 | /* high sh */ |
331 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0; |
332 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; |
333 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; |
334 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; |
335 | /* low mh */ |
336 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0; |
337 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; |
338 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; |
339 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; |
340 | /* mid mh */ |
341 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0; |
342 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; |
343 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; |
344 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; |
345 | /* high mh */ |
346 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0; |
347 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; |
348 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; |
349 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; |
350 | } |
351 | |
352 | /** |
353 | * r100_pm_misc - set additional pm hw parameters callback. |
354 | * |
355 | * @rdev: radeon_device pointer |
356 | * |
357 | * Set non-clock parameters associated with a power state |
358 | * (voltage, pcie lanes, etc.) (r1xx-r4xx). |
359 | */ |
360 | void r100_pm_misc(struct radeon_device *rdev) |
361 | { |
362 | int requested_index = rdev->pm.requested_power_state_index; |
363 | struct radeon_power_state *ps = &rdev->pm.power_state[requested_index]; |
364 | struct radeon_voltage *voltage = &ps->clock_info[0].voltage; |
365 | u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl; |
366 | |
367 | if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) { |
368 | if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { |
369 | tmp = RREG32(voltage->gpio.reg); |
370 | if (voltage->active_high) |
371 | tmp |= voltage->gpio.mask; |
372 | else |
373 | tmp &= ~(voltage->gpio.mask); |
374 | WREG32(voltage->gpio.reg, tmp); |
375 | if (voltage->delay) |
376 | udelay(voltage->delay); |
377 | } else { |
378 | tmp = RREG32(voltage->gpio.reg); |
379 | if (voltage->active_high) |
380 | tmp &= ~voltage->gpio.mask; |
381 | else |
382 | tmp |= voltage->gpio.mask; |
383 | WREG32(voltage->gpio.reg, tmp); |
384 | if (voltage->delay) |
385 | udelay(voltage->delay); |
386 | } |
387 | } |
388 | |
389 | sclk_cntl = RREG32_PLL(SCLK_CNTL); |
390 | sclk_cntl2 = RREG32_PLL(SCLK_CNTL2); |
391 | sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3); |
392 | sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL); |
393 | sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3); |
394 | if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) { |
395 | sclk_more_cntl |= REDUCED_SPEED_SCLK_EN; |
396 | if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE) |
397 | sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE; |
398 | else |
399 | sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE; |
400 | if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) |
401 | sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0); |
402 | else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) |
403 | sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2); |
404 | } else |
405 | sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN; |
406 | |
407 | if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) { |
408 | sclk_more_cntl |= IO_CG_VOLTAGE_DROP; |
409 | if (voltage->delay) { |
410 | sclk_more_cntl |= VOLTAGE_DROP_SYNC; |
411 | switch (voltage->delay) { |
412 | case 33: |
413 | sclk_more_cntl |= VOLTAGE_DELAY_SEL(0); |
414 | break; |
415 | case 66: |
416 | sclk_more_cntl |= VOLTAGE_DELAY_SEL(1); |
417 | break; |
418 | case 99: |
419 | sclk_more_cntl |= VOLTAGE_DELAY_SEL(2); |
420 | break; |
421 | case 132: |
422 | sclk_more_cntl |= VOLTAGE_DELAY_SEL(3); |
423 | break; |
424 | } |
425 | } else |
426 | sclk_more_cntl &= ~VOLTAGE_DROP_SYNC; |
427 | } else |
428 | sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP; |
429 | |
430 | if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN) |
431 | sclk_cntl &= ~FORCE_HDP; |
432 | else |
433 | sclk_cntl |= FORCE_HDP; |
434 | |
435 | WREG32_PLL(SCLK_CNTL, sclk_cntl); |
436 | WREG32_PLL(SCLK_CNTL2, sclk_cntl2); |
437 | WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl); |
438 | |
439 | /* set pcie lanes */ |
440 | if ((rdev->flags & RADEON_IS_PCIE) && |
441 | !(rdev->flags & RADEON_IS_IGP) && |
442 | rdev->asic->pm.set_pcie_lanes && |
443 | (ps->pcie_lanes != |
444 | rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) { |
445 | radeon_set_pcie_lanes(rdev, |
446 | ps->pcie_lanes); |
447 | DRM_DEBUG_DRIVER("Setting: p: %d\n" , ps->pcie_lanes); |
448 | } |
449 | } |
450 | |
451 | /** |
452 | * r100_pm_prepare - pre-power state change callback. |
453 | * |
454 | * @rdev: radeon_device pointer |
455 | * |
456 | * Prepare for a power state change (r1xx-r4xx). |
457 | */ |
458 | void r100_pm_prepare(struct radeon_device *rdev) |
459 | { |
460 | struct drm_device *ddev = rdev->ddev; |
461 | struct drm_crtc *crtc; |
462 | struct radeon_crtc *radeon_crtc; |
463 | u32 tmp; |
464 | |
465 | /* disable any active CRTCs */ |
466 | list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { |
467 | radeon_crtc = to_radeon_crtc(crtc); |
468 | if (radeon_crtc->enabled) { |
469 | if (radeon_crtc->crtc_id) { |
470 | tmp = RREG32(RADEON_CRTC2_GEN_CNTL); |
471 | tmp |= RADEON_CRTC2_DISP_REQ_EN_B; |
472 | WREG32(RADEON_CRTC2_GEN_CNTL, tmp); |
473 | } else { |
474 | tmp = RREG32(RADEON_CRTC_GEN_CNTL); |
475 | tmp |= RADEON_CRTC_DISP_REQ_EN_B; |
476 | WREG32(RADEON_CRTC_GEN_CNTL, tmp); |
477 | } |
478 | } |
479 | } |
480 | } |
481 | |
482 | /** |
483 | * r100_pm_finish - post-power state change callback. |
484 | * |
485 | * @rdev: radeon_device pointer |
486 | * |
487 | * Clean up after a power state change (r1xx-r4xx). |
488 | */ |
489 | void r100_pm_finish(struct radeon_device *rdev) |
490 | { |
491 | struct drm_device *ddev = rdev->ddev; |
492 | struct drm_crtc *crtc; |
493 | struct radeon_crtc *radeon_crtc; |
494 | u32 tmp; |
495 | |
496 | /* enable any active CRTCs */ |
497 | list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { |
498 | radeon_crtc = to_radeon_crtc(crtc); |
499 | if (radeon_crtc->enabled) { |
500 | if (radeon_crtc->crtc_id) { |
501 | tmp = RREG32(RADEON_CRTC2_GEN_CNTL); |
502 | tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B; |
503 | WREG32(RADEON_CRTC2_GEN_CNTL, tmp); |
504 | } else { |
505 | tmp = RREG32(RADEON_CRTC_GEN_CNTL); |
506 | tmp &= ~RADEON_CRTC_DISP_REQ_EN_B; |
507 | WREG32(RADEON_CRTC_GEN_CNTL, tmp); |
508 | } |
509 | } |
510 | } |
511 | } |
512 | |
513 | /** |
514 | * r100_gui_idle - gui idle callback. |
515 | * |
516 | * @rdev: radeon_device pointer |
517 | * |
 * Check if the GUI (2D/3D engines) are idle (r1xx-r5xx).
519 | * Returns true if idle, false if not. |
520 | */ |
521 | bool r100_gui_idle(struct radeon_device *rdev) |
522 | { |
523 | if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE) |
524 | return false; |
525 | else |
526 | return true; |
527 | } |
528 | |
529 | /* hpd for digital panel detect/disconnect */ |
530 | /** |
531 | * r100_hpd_sense - hpd sense callback. |
532 | * |
533 | * @rdev: radeon_device pointer |
534 | * @hpd: hpd (hotplug detect) pin |
535 | * |
536 | * Checks if a digital monitor is connected (r1xx-r4xx). |
537 | * Returns true if connected, false if not connected. |
538 | */ |
539 | bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) |
540 | { |
541 | bool connected = false; |
542 | |
543 | switch (hpd) { |
544 | case RADEON_HPD_1: |
545 | if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE) |
546 | connected = true; |
547 | break; |
548 | case RADEON_HPD_2: |
549 | if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE) |
550 | connected = true; |
551 | break; |
552 | default: |
553 | break; |
554 | } |
555 | return connected; |
556 | } |
557 | |
558 | /** |
559 | * r100_hpd_set_polarity - hpd set polarity callback. |
560 | * |
561 | * @rdev: radeon_device pointer |
562 | * @hpd: hpd (hotplug detect) pin |
563 | * |
564 | * Set the polarity of the hpd pin (r1xx-r4xx). |
565 | */ |
566 | void r100_hpd_set_polarity(struct radeon_device *rdev, |
567 | enum radeon_hpd_id hpd) |
568 | { |
569 | u32 tmp; |
570 | bool connected = r100_hpd_sense(rdev, hpd); |
571 | |
572 | switch (hpd) { |
573 | case RADEON_HPD_1: |
574 | tmp = RREG32(RADEON_FP_GEN_CNTL); |
575 | if (connected) |
576 | tmp &= ~RADEON_FP_DETECT_INT_POL; |
577 | else |
578 | tmp |= RADEON_FP_DETECT_INT_POL; |
579 | WREG32(RADEON_FP_GEN_CNTL, tmp); |
580 | break; |
581 | case RADEON_HPD_2: |
582 | tmp = RREG32(RADEON_FP2_GEN_CNTL); |
583 | if (connected) |
584 | tmp &= ~RADEON_FP2_DETECT_INT_POL; |
585 | else |
586 | tmp |= RADEON_FP2_DETECT_INT_POL; |
587 | WREG32(RADEON_FP2_GEN_CNTL, tmp); |
588 | break; |
589 | default: |
590 | break; |
591 | } |
592 | } |
593 | |
594 | /** |
595 | * r100_hpd_init - hpd setup callback. |
596 | * |
597 | * @rdev: radeon_device pointer |
598 | * |
 * Set up the hpd pins used by the card (r1xx-r4xx).
600 | * Set the polarity, and enable the hpd interrupts. |
601 | */ |
602 | void r100_hpd_init(struct radeon_device *rdev) |
603 | { |
604 | struct drm_device *dev = rdev->ddev; |
605 | struct drm_connector *connector; |
606 | unsigned enable = 0; |
607 | |
608 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
609 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
610 | enable |= 1 << radeon_connector->hpd.hpd; |
611 | radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); |
612 | } |
613 | radeon_irq_kms_enable_hpd(rdev, enable); |
614 | } |
615 | |
616 | /** |
617 | * r100_hpd_fini - hpd tear down callback. |
618 | * |
619 | * @rdev: radeon_device pointer |
620 | * |
621 | * Tear down the hpd pins used by the card (r1xx-r4xx). |
622 | * Disable the hpd interrupts. |
623 | */ |
624 | void r100_hpd_fini(struct radeon_device *rdev) |
625 | { |
626 | struct drm_device *dev = rdev->ddev; |
627 | struct drm_connector *connector; |
628 | unsigned disable = 0; |
629 | |
630 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
631 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
632 | disable |= 1 << radeon_connector->hpd.hpd; |
633 | } |
634 | radeon_irq_kms_disable_hpd(rdev, disable); |
635 | } |
636 | |
637 | /* |
638 | * PCI GART |
639 | */ |
640 | void r100_pci_gart_tlb_flush(struct radeon_device *rdev) |
641 | { |
	/* TODO: can we do something here ? */
	/* It seems the hw only caches one entry, so we should discard
	 * this entry; otherwise, if the first GPU GART read hits it,
	 * the access could end up at the wrong address. */
646 | } |
647 | |
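/**
 * r100_pci_gart_init - PCI GART init callback.
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the common GART structure, size the page table
 * (4 bytes per GPU page) and allocate it in system RAM.
 * Returns 0 on success, negative error code on failure.
 */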
648 | int r100_pci_gart_init(struct radeon_device *rdev) |
649 | { |
650 | int r; |
651 | |
652 | if (rdev->gart.ptr) { |
653 | WARN(1, "R100 PCI GART already initialized\n" ); |
654 | return 0; |
655 | } |
656 | /* Initialize common gart structure */ |
657 | r = radeon_gart_init(rdev); |
658 | if (r) |
659 | return r; |
660 | rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; |
661 | rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; |
662 | rdev->asic->gart.set_page = &r100_pci_gart_set_page; |
663 | return radeon_gart_table_ram_alloc(rdev); |
664 | } |
665 | |
666 | int r100_pci_gart_enable(struct radeon_device *rdev) |
667 | { |
668 | uint32_t tmp; |
669 | |
670 | radeon_gart_restore(rdev); |
671 | /* discard memory request outside of configured range */ |
672 | tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS; |
673 | WREG32(RADEON_AIC_CNTL, tmp); |
674 | /* set address range for PCI address translate */ |
675 | WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start); |
676 | WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end); |
677 | /* set PCI GART page-table base address */ |
678 | WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr); |
679 | tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN; |
680 | WREG32(RADEON_AIC_CNTL, tmp); |
681 | r100_pci_gart_tlb_flush(rdev); |
682 | DRM_INFO("PCI GART of %uM enabled (table at 0x%016llX).\n" , |
683 | (unsigned)(rdev->mc.gtt_size >> 20), |
684 | (unsigned long long)rdev->gart.table_addr); |
685 | rdev->gart.ready = true; |
686 | return 0; |
687 | } |
688 | |
689 | void r100_pci_gart_disable(struct radeon_device *rdev) |
690 | { |
691 | uint32_t tmp; |
692 | |
693 | /* discard memory request outside of configured range */ |
694 | tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS; |
695 | WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN); |
696 | WREG32(RADEON_AIC_LO_ADDR, 0); |
697 | WREG32(RADEON_AIC_HI_ADDR, 0); |
698 | } |
699 | |
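/**
 * r100_pci_gart_set_page - update one PCI GART page table entry.
 *
 * @rdev: radeon_device pointer
 * @i: index of the entry to update
 * @addr: system address to point the entry at
 *
 * The PCI GART table is a simple array of little-endian 32-bit
 * page addresses in system RAM.
 * Returns 0 on success, -EINVAL if the index is out of range.
 */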
700 | int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) |
701 | { |
702 | u32 *gtt = rdev->gart.ptr; |
703 | |
704 | if (i < 0 || i > rdev->gart.num_gpu_pages) { |
705 | return -EINVAL; |
706 | } |
707 | gtt[i] = cpu_to_le32(lower_32_bits(addr)); |
708 | return 0; |
709 | } |
710 | |
711 | void r100_pci_gart_fini(struct radeon_device *rdev) |
712 | { |
713 | radeon_gart_fini(rdev); |
714 | r100_pci_gart_disable(rdev); |
715 | radeon_gart_table_ram_free(rdev); |
716 | } |
717 | |
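/**
 * r100_irq_set - enable requested interrupt sources.
 *
 * @rdev: radeon_device pointer
 *
 * Program GEN_INT_CNTL with the currently requested interrupt
 * sources (SW interrupt, vblank/pflip on both crtcs, hpd).
 * Returns 0 on success, -EINVAL if no irq handler is installed.
 */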
718 | int r100_irq_set(struct radeon_device *rdev) |
719 | { |
720 | uint32_t tmp = 0; |
721 | |
722 | if (!rdev->irq.installed) { |
723 | WARN(1, "Can't enable IRQ/MSI because no handler is installed\n" ); |
724 | WREG32(R_000040_GEN_INT_CNTL, 0); |
725 | return -EINVAL; |
726 | } |
727 | if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { |
728 | tmp |= RADEON_SW_INT_ENABLE; |
729 | } |
730 | if (rdev->irq.crtc_vblank_int[0] || |
731 | atomic_read(&rdev->irq.pflip[0])) { |
732 | tmp |= RADEON_CRTC_VBLANK_MASK; |
733 | } |
734 | if (rdev->irq.crtc_vblank_int[1] || |
735 | atomic_read(&rdev->irq.pflip[1])) { |
736 | tmp |= RADEON_CRTC2_VBLANK_MASK; |
737 | } |
738 | if (rdev->irq.hpd[0]) { |
739 | tmp |= RADEON_FP_DETECT_MASK; |
740 | } |
741 | if (rdev->irq.hpd[1]) { |
742 | tmp |= RADEON_FP2_DETECT_MASK; |
743 | } |
744 | WREG32(RADEON_GEN_INT_CNTL, tmp); |
745 | return 0; |
746 | } |
747 | |
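/**
 * r100_irq_disable - disable all interrupt sources.
 *
 * @rdev: radeon_device pointer
 *
 * Clear GEN_INT_CNTL, then wait briefly and acknowledge any
 * interrupts that were already latched in GEN_INT_STATUS.
 */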
748 | void r100_irq_disable(struct radeon_device *rdev) |
749 | { |
750 | u32 tmp; |
751 | |
752 | WREG32(R_000040_GEN_INT_CNTL, 0); |
753 | /* Wait and acknowledge irq */ |
754 | mdelay(1); |
755 | tmp = RREG32(R_000044_GEN_INT_STATUS); |
756 | WREG32(R_000044_GEN_INT_STATUS, tmp); |
757 | } |
758 | |
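/* Acknowledge any pending interrupts and return the masked status bits. */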
759 | static uint32_t r100_irq_ack(struct radeon_device *rdev) |
760 | { |
761 | uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS); |
762 | uint32_t irq_mask = RADEON_SW_INT_TEST | |
763 | RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT | |
764 | RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT; |
765 | |
766 | if (irqs) { |
767 | WREG32(RADEON_GEN_INT_STATUS, irqs); |
768 | } |
769 | return irqs & irq_mask; |
770 | } |
771 | |
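/**
 * r100_irq_process - interrupt handler callback.
 *
 * @rdev: radeon_device pointer
 *
 * Ack and dispatch interrupts (fence, vblank/pflip, hotplug)
 * until the status register reads clear, then re-arm MSIs if
 * they are enabled.
 * Returns IRQ_HANDLED if any interrupt was serviced, IRQ_NONE otherwise.
 */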
772 | int r100_irq_process(struct radeon_device *rdev) |
773 | { |
774 | uint32_t status, msi_rearm; |
775 | bool queue_hotplug = false; |
776 | |
777 | status = r100_irq_ack(rdev); |
778 | if (!status) { |
779 | return IRQ_NONE; |
780 | } |
781 | if (rdev->shutdown) { |
782 | return IRQ_NONE; |
783 | } |
784 | while (status) { |
785 | /* SW interrupt */ |
786 | if (status & RADEON_SW_INT_TEST) { |
787 | radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); |
788 | } |
789 | /* Vertical blank interrupts */ |
790 | if (status & RADEON_CRTC_VBLANK_STAT) { |
791 | if (rdev->irq.crtc_vblank_int[0]) { |
792 | drm_handle_vblank(rdev->ddev, 0); |
793 | #ifdef __NetBSD__ |
794 | spin_lock(&rdev->irq.vblank_lock); |
795 | rdev->pm.vblank_sync = true; |
796 | DRM_SPIN_WAKEUP_ONE(&rdev->irq.vblank_queue, &rdev->irq.vblank_lock); |
797 | spin_unlock(&rdev->irq.vblank_lock); |
798 | #else |
799 | rdev->pm.vblank_sync = true; |
800 | wake_up(&rdev->irq.vblank_queue); |
801 | #endif |
802 | } |
803 | if (atomic_read(&rdev->irq.pflip[0])) |
804 | radeon_crtc_handle_flip(rdev, 0); |
805 | } |
806 | if (status & RADEON_CRTC2_VBLANK_STAT) { |
807 | if (rdev->irq.crtc_vblank_int[1]) { |
808 | drm_handle_vblank(rdev->ddev, 1); |
809 | #ifdef __NetBSD__ |
810 | spin_lock(&rdev->irq.vblank_lock); |
811 | rdev->pm.vblank_sync = true; |
812 | DRM_SPIN_WAKEUP_ONE(&rdev->irq.vblank_queue, &rdev->irq.vblank_lock); |
813 | spin_unlock(&rdev->irq.vblank_lock); |
814 | #else |
815 | rdev->pm.vblank_sync = true; |
816 | wake_up(&rdev->irq.vblank_queue); |
817 | #endif |
818 | } |
819 | if (atomic_read(&rdev->irq.pflip[1])) |
820 | radeon_crtc_handle_flip(rdev, 1); |
821 | } |
822 | if (status & RADEON_FP_DETECT_STAT) { |
823 | queue_hotplug = true; |
824 | DRM_DEBUG("HPD1\n" ); |
825 | } |
826 | if (status & RADEON_FP2_DETECT_STAT) { |
827 | queue_hotplug = true; |
828 | DRM_DEBUG("HPD2\n" ); |
829 | } |
830 | status = r100_irq_ack(rdev); |
831 | } |
832 | if (queue_hotplug) |
833 | schedule_work(&rdev->hotplug_work); |
834 | if (rdev->msi_enabled) { |
835 | switch (rdev->family) { |
836 | case CHIP_RS400: |
837 | case CHIP_RS480: |
838 | msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM; |
839 | WREG32(RADEON_AIC_CNTL, msi_rearm); |
840 | WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM); |
841 | break; |
842 | default: |
843 | WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN); |
844 | break; |
845 | } |
846 | } |
847 | return IRQ_HANDLED; |
848 | } |
849 | |
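/**
 * r100_get_vblank_counter - get vblank counter callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to read the frame counter for
 *
 * Returns the hardware frame counter of the requested crtc.
 */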
850 | u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc) |
851 | { |
852 | if (crtc == 0) |
853 | return RREG32(RADEON_CRTC_CRNT_FRAME); |
854 | else |
855 | return RREG32(RADEON_CRTC2_CRNT_FRAME); |
856 | } |
857 | |
/* Whoever calls radeon_fence_emit should call ring_lock and ask
 * for enough space (today the callers are the ib scheduler and buffer moves) */
860 | void r100_fence_ring_emit(struct radeon_device *rdev, |
861 | struct radeon_fence *fence) |
862 | { |
863 | struct radeon_ring *ring = &rdev->ring[fence->ring]; |
864 | |
	/* We have to make sure that caches are flushed before
	 * the CPU might read something from VRAM. */
867 | radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); |
868 | radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL); |
869 | radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); |
870 | radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL); |
871 | /* Wait until IDLE & CLEAN */ |
872 | radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); |
873 | radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN); |
874 | radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0)); |
875 | radeon_ring_write(ring, rdev->config.r100.hdp_cntl | |
876 | RADEON_HDP_READ_BUFFER_INVALIDATE); |
877 | radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0)); |
878 | radeon_ring_write(ring, rdev->config.r100.hdp_cntl); |
879 | /* Emit fence sequence & fire IRQ */ |
880 | radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0)); |
881 | radeon_ring_write(ring, fence->seq); |
882 | radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0)); |
883 | radeon_ring_write(ring, RADEON_SW_INT_FIRE); |
884 | } |
885 | |
886 | bool r100_semaphore_ring_emit(struct radeon_device *rdev, |
887 | struct radeon_ring *ring, |
888 | struct radeon_semaphore *semaphore, |
889 | bool emit_wait) |
890 | { |
891 | /* Unused on older asics, since we don't have semaphores or multiple rings */ |
892 | BUG(); |
893 | return false; |
894 | } |
895 | |
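/**
 * r100_copy_blit - copy pages using the 2D blitter.
 *
 * @rdev: radeon_device pointer
 * @src_offset: source GPU MC address
 * @dst_offset: destination GPU MC address
 * @num_gpu_pages: number of GPU pages to copy
 * @fence: optional fence emitted once the copy is done
 *
 * Copies the pages in chunks of up to 8191 GPU pages (one page
 * per scanline) per BITBLT_MULTI packet, then flushes the 2D
 * cache and waits for engine idle before emitting the fence.
 * Returns 0 on success, negative error code on failure.
 */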
896 | int r100_copy_blit(struct radeon_device *rdev, |
897 | uint64_t src_offset, |
898 | uint64_t dst_offset, |
899 | unsigned num_gpu_pages, |
900 | struct radeon_fence **fence) |
901 | { |
902 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
903 | uint32_t cur_pages; |
904 | uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE; |
905 | uint32_t pitch; |
906 | uint32_t stride_pixels; |
907 | unsigned ndw; |
908 | int num_loops; |
909 | int r = 0; |
910 | |
911 | /* radeon limited to 16k stride */ |
912 | stride_bytes &= 0x3fff; |
913 | /* radeon pitch is /64 */ |
914 | pitch = stride_bytes / 64; |
915 | stride_pixels = stride_bytes / 4; |
916 | num_loops = DIV_ROUND_UP(num_gpu_pages, 8191); |
917 | |
918 | /* Ask for enough room for blit + flush + fence */ |
919 | ndw = 64 + (10 * num_loops); |
920 | r = radeon_ring_lock(rdev, ring, ndw); |
921 | if (r) { |
922 | DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n" , r, ndw); |
923 | return -EINVAL; |
924 | } |
925 | while (num_gpu_pages > 0) { |
926 | cur_pages = num_gpu_pages; |
927 | if (cur_pages > 8191) { |
928 | cur_pages = 8191; |
929 | } |
930 | num_gpu_pages -= cur_pages; |
931 | |
		/* pages are in the Y direction (height);
		   page width is in the X direction (width) */
934 | radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8)); |
935 | radeon_ring_write(ring, |
936 | RADEON_GMC_SRC_PITCH_OFFSET_CNTL | |
937 | RADEON_GMC_DST_PITCH_OFFSET_CNTL | |
938 | RADEON_GMC_SRC_CLIPPING | |
939 | RADEON_GMC_DST_CLIPPING | |
940 | RADEON_GMC_BRUSH_NONE | |
941 | (RADEON_COLOR_FORMAT_ARGB8888 << 8) | |
942 | RADEON_GMC_SRC_DATATYPE_COLOR | |
943 | RADEON_ROP3_S | |
944 | RADEON_DP_SRC_SOURCE_MEMORY | |
945 | RADEON_GMC_CLR_CMP_CNTL_DIS | |
946 | RADEON_GMC_WR_MSK_DIS); |
947 | radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10)); |
948 | radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10)); |
949 | radeon_ring_write(ring, (0x1fff) | (0x1fff << 16)); |
950 | radeon_ring_write(ring, 0); |
951 | radeon_ring_write(ring, (0x1fff) | (0x1fff << 16)); |
952 | radeon_ring_write(ring, num_gpu_pages); |
953 | radeon_ring_write(ring, num_gpu_pages); |
954 | radeon_ring_write(ring, cur_pages | (stride_pixels << 16)); |
955 | } |
956 | radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0)); |
957 | radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL); |
958 | radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); |
959 | radeon_ring_write(ring, |
960 | RADEON_WAIT_2D_IDLECLEAN | |
961 | RADEON_WAIT_HOST_IDLECLEAN | |
962 | RADEON_WAIT_DMA_GUI_IDLE); |
963 | if (fence) { |
964 | r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX); |
965 | } |
966 | radeon_ring_unlock_commit(rdev, ring); |
967 | return r; |
968 | } |
969 | |
970 | static int r100_cp_wait_for_idle(struct radeon_device *rdev) |
971 | { |
972 | unsigned i; |
973 | u32 tmp; |
974 | |
975 | for (i = 0; i < rdev->usec_timeout; i++) { |
976 | tmp = RREG32(R_000E40_RBBM_STATUS); |
977 | if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) { |
978 | return 0; |
979 | } |
980 | udelay(1); |
981 | } |
982 | return -1; |
983 | } |
984 | |
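/**
 * r100_ring_start - emit the ring's initial sync packet.
 *
 * @rdev: radeon_device pointer
 * @ring: ring to start
 *
 * Emit an ISYNC_CNTL packet so the 2D and 3D engines idle each
 * other and CP scratch writes wait for GUI idle.
 */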
985 | void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring) |
986 | { |
987 | int r; |
988 | |
989 | r = radeon_ring_lock(rdev, ring, 2); |
990 | if (r) { |
991 | return; |
992 | } |
993 | radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0)); |
994 | radeon_ring_write(ring, |
995 | RADEON_ISYNC_ANY2D_IDLE3D | |
996 | RADEON_ISYNC_ANY3D_IDLE2D | |
997 | RADEON_ISYNC_WAIT_IDLEGUI | |
998 | RADEON_ISYNC_CPSCRATCH_IDLEGUI); |
999 | radeon_ring_unlock_commit(rdev, ring); |
1000 | } |
1001 | |
1002 | |
1003 | /* Load the microcode for the CP */ |
1004 | static int r100_cp_init_microcode(struct radeon_device *rdev) |
1005 | { |
1006 | const char *fw_name = NULL; |
1007 | int err; |
1008 | |
1009 | DRM_DEBUG_KMS("\n" ); |
1010 | |
1011 | if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) || |
1012 | (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) || |
1013 | (rdev->family == CHIP_RS200)) { |
1014 | DRM_INFO("Loading R100 Microcode\n" ); |
1015 | fw_name = FIRMWARE_R100; |
1016 | } else if ((rdev->family == CHIP_R200) || |
1017 | (rdev->family == CHIP_RV250) || |
1018 | (rdev->family == CHIP_RV280) || |
1019 | (rdev->family == CHIP_RS300)) { |
1020 | DRM_INFO("Loading R200 Microcode\n" ); |
1021 | fw_name = FIRMWARE_R200; |
1022 | } else if ((rdev->family == CHIP_R300) || |
1023 | (rdev->family == CHIP_R350) || |
1024 | (rdev->family == CHIP_RV350) || |
1025 | (rdev->family == CHIP_RV380) || |
1026 | (rdev->family == CHIP_RS400) || |
1027 | (rdev->family == CHIP_RS480)) { |
1028 | DRM_INFO("Loading R300 Microcode\n" ); |
1029 | fw_name = FIRMWARE_R300; |
1030 | } else if ((rdev->family == CHIP_R420) || |
1031 | (rdev->family == CHIP_R423) || |
1032 | (rdev->family == CHIP_RV410)) { |
1033 | DRM_INFO("Loading R400 Microcode\n" ); |
1034 | fw_name = FIRMWARE_R420; |
1035 | } else if ((rdev->family == CHIP_RS690) || |
1036 | (rdev->family == CHIP_RS740)) { |
1037 | DRM_INFO("Loading RS690/RS740 Microcode\n" ); |
1038 | fw_name = FIRMWARE_RS690; |
1039 | } else if (rdev->family == CHIP_RS600) { |
1040 | DRM_INFO("Loading RS600 Microcode\n" ); |
1041 | fw_name = FIRMWARE_RS600; |
1042 | } else if ((rdev->family == CHIP_RV515) || |
1043 | (rdev->family == CHIP_R520) || |
1044 | (rdev->family == CHIP_RV530) || |
1045 | (rdev->family == CHIP_R580) || |
1046 | (rdev->family == CHIP_RV560) || |
1047 | (rdev->family == CHIP_RV570)) { |
1048 | DRM_INFO("Loading R500 Microcode\n" ); |
1049 | fw_name = FIRMWARE_R520; |
1050 | } |
1051 | |
1052 | err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); |
1053 | if (err) { |
1054 | printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n" , |
1055 | fw_name); |
1056 | } else if (rdev->me_fw->size % 8) { |
1057 | printk(KERN_ERR |
1058 | "radeon_cp: Bogus length %zu in firmware \"%s\"\n" , |
1059 | rdev->me_fw->size, fw_name); |
1060 | err = -EINVAL; |
1061 | release_firmware(rdev->me_fw); |
1062 | rdev->me_fw = NULL; |
1063 | } |
1064 | return err; |
1065 | } |
1066 | |
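/**
 * r100_gfx_get_rptr - get the current read pointer of the gfx ring.
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 *
 * Reads the rptr from the writeback page if writeback is enabled,
 * otherwise from the CP_RB_RPTR register.
 */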
1067 | u32 r100_gfx_get_rptr(struct radeon_device *rdev, |
1068 | struct radeon_ring *ring) |
1069 | { |
1070 | u32 rptr; |
1071 | |
1072 | if (rdev->wb.enabled) |
1073 | rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]); |
1074 | else |
1075 | rptr = RREG32(RADEON_CP_RB_RPTR); |
1076 | |
1077 | return rptr; |
1078 | } |
1079 | |
1080 | u32 r100_gfx_get_wptr(struct radeon_device *rdev, |
1081 | struct radeon_ring *ring) |
1082 | { |
1083 | u32 wptr; |
1084 | |
1085 | wptr = RREG32(RADEON_CP_RB_WPTR); |
1086 | |
1087 | return wptr; |
1088 | } |
1089 | |
1090 | void r100_gfx_set_wptr(struct radeon_device *rdev, |
1091 | struct radeon_ring *ring) |
1092 | { |
1093 | WREG32(RADEON_CP_RB_WPTR, ring->wptr); |
1094 | (void)RREG32(RADEON_CP_RB_WPTR); |
1095 | } |
1096 | |
1097 | static void r100_cp_load_microcode(struct radeon_device *rdev) |
1098 | { |
1099 | const __be32 *fw_data; |
1100 | int i, size; |
1101 | |
1102 | if (r100_gui_wait_for_idle(rdev)) { |
1103 | printk(KERN_WARNING "Failed to wait GUI idle while " |
1104 | "programming pipes. Bad things might happen.\n" ); |
1105 | } |
1106 | |
1107 | if (rdev->me_fw) { |
1108 | size = rdev->me_fw->size / 4; |
1109 | fw_data = (const __be32 *)rdev->me_fw->data; |
1110 | WREG32(RADEON_CP_ME_RAM_ADDR, 0); |
1111 | for (i = 0; i < size; i += 2) { |
1112 | WREG32(RADEON_CP_ME_RAM_DATAH, |
1113 | be32_to_cpup(&fw_data[i])); |
1114 | WREG32(RADEON_CP_ME_RAM_DATAL, |
1115 | be32_to_cpup(&fw_data[i + 1])); |
1116 | } |
1117 | } |
1118 | } |
1119 | |
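/**
 * r100_cp_init - initialize the command processor.
 *
 * @rdev: radeon_device pointer
 * @ring_size: requested ring buffer size in bytes
 *
 * Load the CP microcode if needed, allocate and program the ring
 * buffer, set up rptr writeback and indirect buffer caching, and
 * test that the ring is working.
 * Returns 0 on success, negative error code on failure.
 */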
1120 | int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) |
1121 | { |
1122 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
1123 | unsigned rb_bufsz; |
1124 | unsigned rb_blksz; |
1125 | unsigned max_fetch; |
1126 | unsigned pre_write_timer; |
1127 | unsigned pre_write_limit; |
1128 | unsigned indirect2_start; |
1129 | unsigned indirect1_start; |
1130 | uint32_t tmp; |
1131 | int r; |
1132 | |
1133 | if (r100_debugfs_cp_init(rdev)) { |
1134 | DRM_ERROR("Failed to register debugfs file for CP !\n" ); |
1135 | } |
1136 | if (!rdev->me_fw) { |
1137 | r = r100_cp_init_microcode(rdev); |
1138 | if (r) { |
1139 | DRM_ERROR("Failed to load firmware!\n" ); |
1140 | return r; |
1141 | } |
1142 | } |
1143 | |
1144 | /* Align ring size */ |
1145 | rb_bufsz = order_base_2(ring_size / 8); |
1146 | ring_size = (1 << (rb_bufsz + 1)) * 4; |
1147 | r100_cp_load_microcode(rdev); |
1148 | r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET, |
1149 | RADEON_CP_PACKET2); |
1150 | if (r) { |
1151 | return r; |
1152 | } |
	/* Each time the cp reads 1024 bytes (16 dword/quadword), update
	 * the rptr copy in system ram */
1155 | rb_blksz = 9; |
	/* cp will read 128 bytes at a time (4 dwords) */
1157 | max_fetch = 1; |
1158 | ring->align_mask = 16 - 1; |
1159 | /* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */ |
1160 | pre_write_timer = 64; |
1161 | /* Force CP_RB_WPTR write if written more than one time before the |
1162 | * delay expire |
1163 | */ |
1164 | pre_write_limit = 0; |
	/* Set up the cp cache like this (cache size is 96 dwords):
	 *	RING		0  to 15
	 *	INDIRECT1	16 to 79
	 *	INDIRECT2	80 to 95
	 * So the ring cache size is 16 dwords (> (2 * max_fetch = 2 * 4 dwords)),
	 *    the indirect1 cache size is 64 dwords (> (2 * max_fetch = 2 * 4 dwords)),
	 *    the indirect2 cache size is 16 dwords (> (2 * max_fetch = 2 * 4 dwords)).
	 * The idea is that most gpu commands go through the indirect1 buffer,
	 * so it gets the bigger cache.
	 */
1175 | indirect2_start = 80; |
1176 | indirect1_start = 16; |
1177 | /* cp setup */ |
1178 | WREG32(0x718, pre_write_timer | (pre_write_limit << 28)); |
1179 | tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) | |
1180 | REG_SET(RADEON_RB_BLKSZ, rb_blksz) | |
1181 | REG_SET(RADEON_MAX_FETCH, max_fetch)); |
1182 | #ifdef __BIG_ENDIAN |
1183 | tmp |= RADEON_BUF_SWAP_32BIT; |
1184 | #endif |
1185 | WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE); |
1186 | |
1187 | /* Set ring address */ |
1188 | DRM_INFO("radeon: ring at 0x%016lX\n" , (unsigned long)ring->gpu_addr); |
1189 | WREG32(RADEON_CP_RB_BASE, ring->gpu_addr); |
1190 | /* Force read & write ptr to 0 */ |
1191 | WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE); |
1192 | WREG32(RADEON_CP_RB_RPTR_WR, 0); |
1193 | ring->wptr = 0; |
1194 | WREG32(RADEON_CP_RB_WPTR, ring->wptr); |
1195 | |
1196 | /* set the wb address whether it's enabled or not */ |
1197 | WREG32(R_00070C_CP_RB_RPTR_ADDR, |
1198 | S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2)); |
1199 | WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET); |
1200 | |
1201 | if (rdev->wb.enabled) |
1202 | WREG32(R_000770_SCRATCH_UMSK, 0xff); |
1203 | else { |
1204 | tmp |= RADEON_RB_NO_UPDATE; |
1205 | WREG32(R_000770_SCRATCH_UMSK, 0); |
1206 | } |
1207 | |
1208 | WREG32(RADEON_CP_RB_CNTL, tmp); |
1209 | udelay(10); |
	/* Set cp mode to bus mastering & enable cp */
1211 | WREG32(RADEON_CP_CSQ_MODE, |
1212 | REG_SET(RADEON_INDIRECT2_START, indirect2_start) | |
1213 | REG_SET(RADEON_INDIRECT1_START, indirect1_start)); |
1214 | WREG32(RADEON_CP_RB_WPTR_DELAY, 0); |
1215 | WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D); |
1216 | WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM); |
1217 | |
1218 | /* at this point everything should be setup correctly to enable master */ |
1219 | pci_set_master(rdev->pdev); |
1220 | |
1221 | radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); |
1222 | r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring); |
1223 | if (r) { |
1224 | DRM_ERROR("radeon: cp isn't working (%d).\n" , r); |
1225 | return r; |
1226 | } |
1227 | ring->ready = true; |
1228 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); |
1229 | |
1230 | if (!ring->rptr_save_reg /* not resuming from suspend */ |
1231 | && radeon_ring_supports_scratch_reg(rdev, ring)) { |
1232 | r = radeon_scratch_get(rdev, &ring->rptr_save_reg); |
1233 | if (r) { |
1234 | DRM_ERROR("failed to get scratch reg for rptr save (%d).\n" , r); |
1235 | ring->rptr_save_reg = 0; |
1236 | } |
1237 | } |
1238 | return 0; |
1239 | } |
1240 | |
1241 | void r100_cp_fini(struct radeon_device *rdev) |
1242 | { |
1243 | if (r100_cp_wait_for_idle(rdev)) { |
1244 | DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n" ); |
1245 | } |
1246 | /* Disable ring */ |
1247 | r100_cp_disable(rdev); |
1248 | radeon_scratch_free(rdev, rdev->ring[RADEON_RING_TYPE_GFX_INDEX].rptr_save_reg); |
1249 | radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); |
1250 | DRM_INFO("radeon: cp finalized\n" ); |
1251 | } |
1252 | |
1253 | void r100_cp_disable(struct radeon_device *rdev) |
1254 | { |
1255 | /* Disable ring */ |
1256 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
1257 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; |
1258 | WREG32(RADEON_CP_CSQ_MODE, 0); |
1259 | WREG32(RADEON_CP_CSQ_CNTL, 0); |
1260 | WREG32(R_000770_SCRATCH_UMSK, 0); |
1261 | if (r100_gui_wait_for_idle(rdev)) { |
1262 | printk(KERN_WARNING "Failed to wait GUI idle while " |
1263 | "programming pipes. Bad things might happen.\n" ); |
1264 | } |
1265 | } |
1266 | |
1267 | /* |
1268 | * CS functions |
1269 | */ |
1270 | int r100_reloc_pitch_offset(struct radeon_cs_parser *p, |
1271 | struct radeon_cs_packet *pkt, |
1272 | unsigned idx, |
1273 | unsigned reg) |
1274 | { |
1275 | int r; |
1276 | u32 tile_flags = 0; |
1277 | u32 tmp; |
1278 | struct radeon_cs_reloc *reloc; |
1279 | u32 value; |
1280 | |
1281 | r = radeon_cs_packet_next_reloc(p, &reloc, 0); |
1282 | if (r) { |
1283 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n" , |
1284 | idx, reg); |
1285 | radeon_cs_dump_packet(p, pkt); |
1286 | return r; |
1287 | } |
1288 | |
1289 | value = radeon_get_ib_value(p, idx); |
1290 | tmp = value & 0x003fffff; |
1291 | tmp += (((u32)reloc->gpu_offset) >> 10); |
1292 | |
1293 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { |
1294 | if (reloc->tiling_flags & RADEON_TILING_MACRO) |
1295 | tile_flags |= RADEON_DST_TILE_MACRO; |
1296 | if (reloc->tiling_flags & RADEON_TILING_MICRO) { |
1297 | if (reg == RADEON_SRC_PITCH_OFFSET) { |
1298 | DRM_ERROR("Cannot src blit from microtiled surface\n" ); |
1299 | radeon_cs_dump_packet(p, pkt); |
1300 | return -EINVAL; |
1301 | } |
1302 | tile_flags |= RADEON_DST_TILE_MICRO; |
1303 | } |
1304 | |
1305 | tmp |= tile_flags; |
1306 | p->ib.ptr[idx] = (value & 0x3fc00000) | tmp; |
1307 | } else |
1308 | p->ib.ptr[idx] = (value & 0xffc00000) | tmp; |
1309 | return 0; |
1310 | } |
1311 | |
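/* Check a 3D_LOAD_VBPNTR packet3: the first dword holds the vertex
 * array count (low 5 bits); arrays then come in pairs of three dwords
 * (a dword of packed element sizes followed by two relocated buffer
 * offsets), with a final two-dword entry if the count is odd. */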
1312 | int r100_packet3_load_vbpntr(struct radeon_cs_parser *p, |
1313 | struct radeon_cs_packet *pkt, |
1314 | int idx) |
1315 | { |
1316 | unsigned c, i; |
1317 | struct radeon_cs_reloc *reloc; |
1318 | struct r100_cs_track *track; |
1319 | int r = 0; |
1320 | volatile uint32_t *ib; |
1321 | u32 idx_value; |
1322 | |
1323 | ib = p->ib.ptr; |
1324 | track = (struct r100_cs_track *)p->track; |
1325 | c = radeon_get_ib_value(p, idx++) & 0x1F; |
1326 | if (c > 16) { |
1327 | DRM_ERROR("Only 16 vertex buffers are allowed %d\n" , |
1328 | pkt->opcode); |
1329 | radeon_cs_dump_packet(p, pkt); |
1330 | return -EINVAL; |
1331 | } |
1332 | track->num_arrays = c; |
1333 | for (i = 0; i < (c - 1); i+=2, idx+=3) { |
1334 | r = radeon_cs_packet_next_reloc(p, &reloc, 0); |
1335 | if (r) { |
1336 | DRM_ERROR("No reloc for packet3 %d\n" , |
1337 | pkt->opcode); |
1338 | radeon_cs_dump_packet(p, pkt); |
1339 | return r; |
1340 | } |
1341 | idx_value = radeon_get_ib_value(p, idx); |
1342 | ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset); |
1343 | |
1344 | track->arrays[i + 0].esize = idx_value >> 8; |
1345 | track->arrays[i + 0].robj = reloc->robj; |
1346 | track->arrays[i + 0].esize &= 0x7F; |
1347 | r = radeon_cs_packet_next_reloc(p, &reloc, 0); |
1348 | if (r) { |
1349 | DRM_ERROR("No reloc for packet3 %d\n" , |
1350 | pkt->opcode); |
1351 | radeon_cs_dump_packet(p, pkt); |
1352 | return r; |
1353 | } |
1354 | ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->gpu_offset); |
1355 | track->arrays[i + 1].robj = reloc->robj; |
1356 | track->arrays[i + 1].esize = idx_value >> 24; |
1357 | track->arrays[i + 1].esize &= 0x7F; |
1358 | } |
1359 | if (c & 1) { |
1360 | r = radeon_cs_packet_next_reloc(p, &reloc, 0); |
1361 | if (r) { |
1362 | DRM_ERROR("No reloc for packet3 %d\n" , |
1363 | pkt->opcode); |
1364 | radeon_cs_dump_packet(p, pkt); |
1365 | return r; |
1366 | } |
1367 | idx_value = radeon_get_ib_value(p, idx); |
1368 | ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset); |
1369 | track->arrays[i + 0].robj = reloc->robj; |
1370 | track->arrays[i + 0].esize = idx_value >> 8; |
1371 | track->arrays[i + 0].esize &= 0x7F; |
1372 | } |
1373 | return r; |
1374 | } |
1375 | |
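/* Walk a packet0 register write against the safe register bitmap:
 * bit ((reg >> 2) & 31) of word (reg >> 7) in auth marks registers
 * that need the per-register check handler before being accepted. */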
1376 | int r100_cs_parse_packet0(struct radeon_cs_parser *p, |
1377 | struct radeon_cs_packet *pkt, |
1378 | const unsigned *auth, unsigned n, |
1379 | radeon_packet0_check_t check) |
1380 | { |
1381 | unsigned reg; |
1382 | unsigned i, j, m; |
1383 | unsigned idx; |
1384 | int r; |
1385 | |
1386 | idx = pkt->idx + 1; |
1387 | reg = pkt->reg; |
	/* Check that the register falls into the register range
	 * determined by the number of entries (n) in the
	 * safe register bitmap.
	 */
1392 | if (pkt->one_reg_wr) { |
1393 | if ((reg >> 7) > n) { |
1394 | return -EINVAL; |
1395 | } |
1396 | } else { |
1397 | if (((reg + (pkt->count << 2)) >> 7) > n) { |
1398 | return -EINVAL; |
1399 | } |
1400 | } |
1401 | for (i = 0; i <= pkt->count; i++, idx++) { |
1402 | j = (reg >> 7); |
1403 | m = 1 << ((reg >> 2) & 31); |
1404 | if (auth[j] & m) { |
1405 | r = check(p, pkt, idx, reg); |
1406 | if (r) { |
1407 | return r; |
1408 | } |
1409 | } |
1410 | if (pkt->one_reg_wr) { |
1411 | if (!(auth[j] & m)) { |
1412 | break; |
1413 | } |
1414 | } else { |
1415 | reg += 4; |
1416 | } |
1417 | } |
1418 | return 0; |
1419 | } |
1420 | |
1421 | /** |
 * r100_cs_packet_parse_vline() - parse userspace VLINE packet
 * @p: parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET0 - WAIT_UNTIL + value
1428 | * RELOC (P3) - crtc_id in reloc. |
1429 | * |
1430 | * This function parses this and relocates the VLINE START END |
1431 | * and WAIT UNTIL packets to the correct crtc. |
1432 | * It also detects a switched off crtc and nulls out the |
1433 | * wait in that case. |
1434 | */ |
1435 | int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) |
1436 | { |
1437 | struct drm_mode_object *obj; |
1438 | struct drm_crtc *crtc; |
1439 | struct radeon_crtc *radeon_crtc; |
1440 | struct radeon_cs_packet p3reloc, waitreloc; |
1441 | int crtc_id; |
1442 | int r; |
	uint32_t header, h_idx, reg;
1444 | volatile uint32_t *ib; |
1445 | |
1446 | ib = p->ib.ptr; |
1447 | |
1448 | /* parse the wait until */ |
1449 | r = radeon_cs_packet_parse(p, &waitreloc, p->idx); |
1450 | if (r) |
1451 | return r; |
1452 | |
	/* check it's a wait until with only 1 count */
1454 | if (waitreloc.reg != RADEON_WAIT_UNTIL || |
1455 | waitreloc.count != 0) { |
1456 | DRM_ERROR("vline wait had illegal wait until segment\n" ); |
1457 | return -EINVAL; |
1458 | } |
1459 | |
1460 | if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) { |
1461 | DRM_ERROR("vline wait had illegal wait until\n" ); |
1462 | return -EINVAL; |
1463 | } |
1464 | |
1465 | /* jump over the NOP */ |
1466 | r = radeon_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2); |
1467 | if (r) |
1468 | return r; |
1469 | |
1470 | h_idx = p->idx - 2; |
1471 | p->idx += waitreloc.count + 2; |
1472 | p->idx += p3reloc.count + 2; |
1473 | |
1474 | header = radeon_get_ib_value(p, h_idx); |
1475 | crtc_id = radeon_get_ib_value(p, h_idx + 5); |
1476 | reg = R100_CP_PACKET0_GET_REG(header); |
1477 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); |
1478 | if (!obj) { |
1479 | DRM_ERROR("cannot find crtc %d\n" , crtc_id); |
1480 | return -ENOENT; |
1481 | } |
1482 | crtc = obj_to_crtc(obj); |
1483 | radeon_crtc = to_radeon_crtc(crtc); |
1484 | crtc_id = radeon_crtc->crtc_id; |
1485 | |
1486 | if (!crtc->enabled) { |
1487 | /* if the CRTC isn't enabled - we need to nop out the wait until */ |
1488 | ib[h_idx + 2] = PACKET2(0); |
1489 | ib[h_idx + 3] = PACKET2(0); |
1490 | } else if (crtc_id == 1) { |
1491 | switch (reg) { |
1492 | case AVIVO_D1MODE_VLINE_START_END: |
1493 | header &= ~R300_CP_PACKET0_REG_MASK; |
1494 | header |= AVIVO_D2MODE_VLINE_START_END >> 2; |
1495 | break; |
1496 | case RADEON_CRTC_GUI_TRIG_VLINE: |
1497 | header &= ~R300_CP_PACKET0_REG_MASK; |
1498 | header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2; |
1499 | break; |
1500 | default: |
1501 | DRM_ERROR("unknown crtc reloc\n" ); |
1502 | return -EINVAL; |
1503 | } |
1504 | ib[h_idx] = header; |
1505 | ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1; |
1506 | } |
1507 | |
1508 | return 0; |
1509 | } |
1510 | |
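/* Compute the per-vertex size in dwords implied by a SE_VTX_FMT value;
 * the base of 2 dwords covers the always-present X/Y coordinates. */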
1511 | static int r100_get_vtx_size(uint32_t vtx_fmt) |
1512 | { |
1513 | int vtx_size; |
1514 | vtx_size = 2; |
1515 | /* ordered according to bits in spec */ |
1516 | if (vtx_fmt & RADEON_SE_VTX_FMT_W0) |
1517 | vtx_size++; |
1518 | if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR) |
1519 | vtx_size += 3; |
1520 | if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA) |
1521 | vtx_size++; |
1522 | if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR) |
1523 | vtx_size++; |
1524 | if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC) |
1525 | vtx_size += 3; |
1526 | if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG) |
1527 | vtx_size++; |
1528 | if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC) |
1529 | vtx_size++; |
1530 | if (vtx_fmt & RADEON_SE_VTX_FMT_ST0) |
1531 | vtx_size += 2; |
1532 | if (vtx_fmt & RADEON_SE_VTX_FMT_ST1) |
1533 | vtx_size += 2; |
1534 | if (vtx_fmt & RADEON_SE_VTX_FMT_Q1) |
1535 | vtx_size++; |
1536 | if (vtx_fmt & RADEON_SE_VTX_FMT_ST2) |
1537 | vtx_size += 2; |
1538 | if (vtx_fmt & RADEON_SE_VTX_FMT_Q2) |
1539 | vtx_size++; |
1540 | if (vtx_fmt & RADEON_SE_VTX_FMT_ST3) |
1541 | vtx_size += 2; |
1542 | if (vtx_fmt & RADEON_SE_VTX_FMT_Q3) |
1543 | vtx_size++; |
1544 | if (vtx_fmt & RADEON_SE_VTX_FMT_Q0) |
1545 | vtx_size++; |
1546 | /* blend weight */ |
1547 | if (vtx_fmt & (0x7 << 15)) |
1548 | vtx_size += (vtx_fmt >> 15) & 0x7; |
1549 | if (vtx_fmt & RADEON_SE_VTX_FMT_N0) |
1550 | vtx_size += 3; |
1551 | if (vtx_fmt & RADEON_SE_VTX_FMT_XY1) |
1552 | vtx_size += 2; |
1553 | if (vtx_fmt & RADEON_SE_VTX_FMT_Z1) |
1554 | vtx_size++; |
1555 | if (vtx_fmt & RADEON_SE_VTX_FMT_W1) |
1556 | vtx_size++; |
1557 | if (vtx_fmt & RADEON_SE_VTX_FMT_N1) |
1558 | vtx_size++; |
1559 | if (vtx_fmt & RADEON_SE_VTX_FMT_Z) |
1560 | vtx_size++; |
1561 | return vtx_size; |
1562 | } |
1563 | |
1564 | static int r100_packet0_check(struct radeon_cs_parser *p, |
1565 | struct radeon_cs_packet *pkt, |
1566 | unsigned idx, unsigned reg) |
1567 | { |
1568 | struct radeon_cs_reloc *reloc; |
1569 | struct r100_cs_track *track; |
1570 | volatile uint32_t *ib; |
1571 | uint32_t tmp; |
1572 | int r; |
1573 | int i, face; |
1574 | u32 tile_flags = 0; |
1575 | u32 idx_value; |
1576 | |
1577 | ib = p->ib.ptr; |
1578 | track = (struct r100_cs_track *)p->track; |
1579 | |
1580 | idx_value = radeon_get_ib_value(p, idx); |
1581 | |
1582 | switch (reg) { |
1583 | case RADEON_CRTC_GUI_TRIG_VLINE: |
1584 | r = r100_cs_packet_parse_vline(p); |
1585 | if (r) { |
1586 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n" , |
1587 | idx, reg); |
1588 | radeon_cs_dump_packet(p, pkt); |
1589 | return r; |
1590 | } |
1591 | break; |
1592 | /* FIXME: only allow PACKET3 blit? easier to check for out of |
1593 | * range access */ |
1594 | case RADEON_DST_PITCH_OFFSET: |
1595 | case RADEON_SRC_PITCH_OFFSET: |
1596 | r = r100_reloc_pitch_offset(p, pkt, idx, reg); |
1597 | if (r) |
1598 | return r; |
1599 | break; |
1600 | case RADEON_RB3D_DEPTHOFFSET: |
1601 | r = radeon_cs_packet_next_reloc(p, &reloc, 0); |
1602 | if (r) { |
1603 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n" , |
1604 | idx, reg); |
1605 | radeon_cs_dump_packet(p, pkt); |
1606 | return r; |
1607 | } |
1608 | track->zb.robj = reloc->robj; |
1609 | track->zb.offset = idx_value; |
1610 | track->zb_dirty = true; |
1611 | ib[idx] = idx_value + ((u32)reloc->gpu_offset); |
1612 | break; |
1613 | case RADEON_RB3D_COLOROFFSET: |
1614 | r = radeon_cs_packet_next_reloc(p, &reloc, 0); |
1615 | if (r) { |
1616 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n" , |
1617 | idx, reg); |
1618 | radeon_cs_dump_packet(p, pkt); |
1619 | return r; |
1620 | } |
1621 | track->cb[0].robj = reloc->robj; |
1622 | track->cb[0].offset = idx_value; |
1623 | track->cb_dirty = true; |
1624 | ib[idx] = idx_value + ((u32)reloc->gpu_offset); |
1625 | break; |
1626 | case RADEON_PP_TXOFFSET_0: |
1627 | case RADEON_PP_TXOFFSET_1: |
1628 | case RADEON_PP_TXOFFSET_2: |
1629 | i = (reg - RADEON_PP_TXOFFSET_0) / 24; |
1630 | r = radeon_cs_packet_next_reloc(p, &reloc, 0); |
1631 | if (r) { |
1632 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n" , |
1633 | idx, reg); |
1634 | radeon_cs_dump_packet(p, pkt); |
1635 | return r; |
1636 | } |
1637 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { |
1638 | if (reloc->tiling_flags & RADEON_TILING_MACRO) |
1639 | tile_flags |= RADEON_TXO_MACRO_TILE; |
1640 | if (reloc->tiling_flags & RADEON_TILING_MICRO) |
1641 | tile_flags |= RADEON_TXO_MICRO_TILE_X2; |
1642 | |
1643 | tmp = idx_value & ~(0x7 << 2); |
1644 | tmp |= tile_flags; |
1645 | ib[idx] = tmp + ((u32)reloc->gpu_offset); |
		} else {
			ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		}
1648 | track->textures[i].robj = reloc->robj; |
1649 | track->tex_dirty = true; |
1650 | break; |
1651 | case RADEON_PP_CUBIC_OFFSET_T0_0: |
1652 | case RADEON_PP_CUBIC_OFFSET_T0_1: |
1653 | case RADEON_PP_CUBIC_OFFSET_T0_2: |
1654 | case RADEON_PP_CUBIC_OFFSET_T0_3: |
1655 | case RADEON_PP_CUBIC_OFFSET_T0_4: |
1656 | i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4; |
1657 | r = radeon_cs_packet_next_reloc(p, &reloc, 0); |
1658 | if (r) { |
1659 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n" , |
1660 | idx, reg); |
1661 | radeon_cs_dump_packet(p, pkt); |
1662 | return r; |
1663 | } |
1664 | track->textures[0].cube_info[i].offset = idx_value; |
1665 | ib[idx] = idx_value + ((u32)reloc->gpu_offset); |
1666 | track->textures[0].cube_info[i].robj = reloc->robj; |
1667 | track->tex_dirty = true; |
1668 | break; |
1669 | case RADEON_PP_CUBIC_OFFSET_T1_0: |
1670 | case RADEON_PP_CUBIC_OFFSET_T1_1: |
1671 | case RADEON_PP_CUBIC_OFFSET_T1_2: |
1672 | case RADEON_PP_CUBIC_OFFSET_T1_3: |
1673 | case RADEON_PP_CUBIC_OFFSET_T1_4: |
1674 | i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4; |
1675 | r = radeon_cs_packet_next_reloc(p, &reloc, 0); |
1676 | if (r) { |
1677 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n" , |
1678 | idx, reg); |
1679 | radeon_cs_dump_packet(p, pkt); |
1680 | return r; |
1681 | } |
1682 | track->textures[1].cube_info[i].offset = idx_value; |
1683 | ib[idx] = idx_value + ((u32)reloc->gpu_offset); |
1684 | track->textures[1].cube_info[i].robj = reloc->robj; |
1685 | track->tex_dirty = true; |
1686 | break; |
1687 | case RADEON_PP_CUBIC_OFFSET_T2_0: |
1688 | case RADEON_PP_CUBIC_OFFSET_T2_1: |
1689 | case RADEON_PP_CUBIC_OFFSET_T2_2: |
1690 | case RADEON_PP_CUBIC_OFFSET_T2_3: |
1691 | case RADEON_PP_CUBIC_OFFSET_T2_4: |
1692 | i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4; |
1693 | r = radeon_cs_packet_next_reloc(p, &reloc, 0); |
1694 | if (r) { |
1695 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n" , |
1696 | idx, reg); |
1697 | radeon_cs_dump_packet(p, pkt); |
1698 | return r; |
1699 | } |
1700 | track->textures[2].cube_info[i].offset = idx_value; |
1701 | ib[idx] = idx_value + ((u32)reloc->gpu_offset); |
1702 | track->textures[2].cube_info[i].robj = reloc->robj; |
1703 | track->tex_dirty = true; |
1704 | break; |
1705 | case RADEON_RE_WIDTH_HEIGHT: |
1706 | track->maxy = ((idx_value >> 16) & 0x7FF); |
1707 | track->cb_dirty = true; |
1708 | track->zb_dirty = true; |
1709 | break; |
1710 | case RADEON_RB3D_COLORPITCH: |
1711 | r = radeon_cs_packet_next_reloc(p, &reloc, 0); |
1712 | if (r) { |
1713 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n" , |
1714 | idx, reg); |
1715 | radeon_cs_dump_packet(p, pkt); |
1716 | return r; |
1717 | } |
1718 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { |
1719 | if (reloc->tiling_flags & RADEON_TILING_MACRO) |
1720 | tile_flags |= RADEON_COLOR_TILE_ENABLE; |
1721 | if (reloc->tiling_flags & RADEON_TILING_MICRO) |
1722 | tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; |
1723 | |
1724 | tmp = idx_value & ~(0x7 << 16); |
1725 | tmp |= tile_flags; |
1726 | ib[idx] = tmp; |
		} else {
			ib[idx] = idx_value;
		}
1729 | |
1730 | track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; |
1731 | track->cb_dirty = true; |
1732 | break; |
1733 | case RADEON_RB3D_DEPTHPITCH: |
1734 | track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; |
1735 | track->zb_dirty = true; |
1736 | break; |
1737 | case RADEON_RB3D_CNTL: |
1738 | switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { |
1739 | case 7: |
1740 | case 8: |
1741 | case 9: |
1742 | case 11: |
1743 | case 12: |
1744 | track->cb[0].cpp = 1; |
1745 | break; |
1746 | case 3: |
1747 | case 4: |
1748 | case 15: |
1749 | track->cb[0].cpp = 2; |
1750 | break; |
1751 | case 6: |
1752 | track->cb[0].cpp = 4; |
1753 | break; |
1754 | default: |
1755 | DRM_ERROR("Invalid color buffer format (%d) !\n" , |
1756 | ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); |
1757 | return -EINVAL; |
1758 | } |
1759 | track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); |
1760 | track->cb_dirty = true; |
1761 | track->zb_dirty = true; |
1762 | break; |
1763 | case RADEON_RB3D_ZSTENCILCNTL: |
1764 | switch (idx_value & 0xf) { |
1765 | case 0: |
1766 | track->zb.cpp = 2; |
1767 | break; |
1768 | case 2: |
1769 | case 3: |
1770 | case 4: |
1771 | case 5: |
1772 | case 9: |
1773 | case 11: |
1774 | track->zb.cpp = 4; |
1775 | break; |
1776 | default: |
1777 | break; |
1778 | } |
1779 | track->zb_dirty = true; |
1780 | break; |
1781 | case RADEON_RB3D_ZPASS_ADDR: |
1782 | r = radeon_cs_packet_next_reloc(p, &reloc, 0); |
1783 | if (r) { |
1784 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n" , |
1785 | idx, reg); |
1786 | radeon_cs_dump_packet(p, pkt); |
1787 | return r; |
1788 | } |
1789 | ib[idx] = idx_value + ((u32)reloc->gpu_offset); |
1790 | break; |
1791 | case RADEON_PP_CNTL: |
1792 | { |
1793 | uint32_t temp = idx_value >> 4; |
1794 | for (i = 0; i < track->num_texture; i++) |
1795 | track->textures[i].enabled = !!(temp & (1 << i)); |
1796 | track->tex_dirty = true; |
1797 | } |
1798 | break; |
1799 | case RADEON_SE_VF_CNTL: |
1800 | track->vap_vf_cntl = idx_value; |
1801 | break; |
1802 | case RADEON_SE_VTX_FMT: |
1803 | track->vtx_size = r100_get_vtx_size(idx_value); |
1804 | break; |
1805 | case RADEON_PP_TEX_SIZE_0: |
1806 | case RADEON_PP_TEX_SIZE_1: |
1807 | case RADEON_PP_TEX_SIZE_2: |
1808 | i = (reg - RADEON_PP_TEX_SIZE_0) / 8; |
1809 | track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1; |
1810 | track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; |
1811 | track->tex_dirty = true; |
1812 | break; |
1813 | case RADEON_PP_TEX_PITCH_0: |
1814 | case RADEON_PP_TEX_PITCH_1: |
1815 | case RADEON_PP_TEX_PITCH_2: |
1816 | i = (reg - RADEON_PP_TEX_PITCH_0) / 8; |
1817 | track->textures[i].pitch = idx_value + 32; |
1818 | track->tex_dirty = true; |
1819 | break; |
1820 | case RADEON_PP_TXFILTER_0: |
1821 | case RADEON_PP_TXFILTER_1: |
1822 | case RADEON_PP_TXFILTER_2: |
1823 | i = (reg - RADEON_PP_TXFILTER_0) / 24; |
1824 | track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK) |
1825 | >> RADEON_MAX_MIP_LEVEL_SHIFT); |
1826 | tmp = (idx_value >> 23) & 0x7; |
1827 | if (tmp == 2 || tmp == 6) |
1828 | track->textures[i].roundup_w = false; |
1829 | tmp = (idx_value >> 27) & 0x7; |
1830 | if (tmp == 2 || tmp == 6) |
1831 | track->textures[i].roundup_h = false; |
1832 | track->tex_dirty = true; |
1833 | break; |
1834 | case RADEON_PP_TXFORMAT_0: |
1835 | case RADEON_PP_TXFORMAT_1: |
1836 | case RADEON_PP_TXFORMAT_2: |
1837 | i = (reg - RADEON_PP_TXFORMAT_0) / 24; |
1838 | if (idx_value & RADEON_TXFORMAT_NON_POWER2) { |
1839 | track->textures[i].use_pitch = 1; |
1840 | } else { |
1841 | track->textures[i].use_pitch = 0; |
1842 | track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); |
1843 | track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); |
1844 | } |
1845 | if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE) |
1846 | track->textures[i].tex_coord_type = 2; |
1847 | switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) { |
1848 | case RADEON_TXFORMAT_I8: |
1849 | case RADEON_TXFORMAT_RGB332: |
1850 | case RADEON_TXFORMAT_Y8: |
1851 | track->textures[i].cpp = 1; |
1852 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; |
1853 | break; |
1854 | case RADEON_TXFORMAT_AI88: |
1855 | case RADEON_TXFORMAT_ARGB1555: |
1856 | case RADEON_TXFORMAT_RGB565: |
1857 | case RADEON_TXFORMAT_ARGB4444: |
1858 | case RADEON_TXFORMAT_VYUY422: |
1859 | case RADEON_TXFORMAT_YVYU422: |
1860 | case RADEON_TXFORMAT_SHADOW16: |
1861 | case RADEON_TXFORMAT_LDUDV655: |
1862 | case RADEON_TXFORMAT_DUDV88: |
1863 | track->textures[i].cpp = 2; |
1864 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; |
1865 | break; |
1866 | case RADEON_TXFORMAT_ARGB8888: |
1867 | case RADEON_TXFORMAT_RGBA8888: |
1868 | case RADEON_TXFORMAT_SHADOW32: |
1869 | case RADEON_TXFORMAT_LDUDUV8888: |
1870 | track->textures[i].cpp = 4; |
1871 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; |
1872 | break; |
1873 | case RADEON_TXFORMAT_DXT1: |
1874 | track->textures[i].cpp = 1; |
1875 | track->textures[i].compress_format = R100_TRACK_COMP_DXT1; |
1876 | break; |
1877 | case RADEON_TXFORMAT_DXT23: |
1878 | case RADEON_TXFORMAT_DXT45: |
1879 | track->textures[i].cpp = 1; |
1880 | track->textures[i].compress_format = R100_TRACK_COMP_DXT35; |
1881 | break; |
1882 | } |
1883 | track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); |
1884 | track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); |
1885 | track->tex_dirty = true; |
1886 | break; |
1887 | case RADEON_PP_CUBIC_FACES_0: |
1888 | case RADEON_PP_CUBIC_FACES_1: |
1889 | case RADEON_PP_CUBIC_FACES_2: |
1890 | tmp = idx_value; |
1891 | i = (reg - RADEON_PP_CUBIC_FACES_0) / 4; |
1892 | for (face = 0; face < 4; face++) { |
1893 | track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); |
1894 | track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); |
1895 | } |
1896 | track->tex_dirty = true; |
1897 | break; |
1898 | default: |
1899 | printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n" , |
1900 | reg, idx); |
1901 | return -EINVAL; |
1902 | } |
1903 | return 0; |
1904 | } |
1905 | |
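/* The dword at offset 2 in a PACKET3_INDX_BUFFER body is treated as
 * the highest byte offset the index fetch may touch; reject the
 * packet if that end offset does not fit inside the bound object.
 */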
1906 | int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, |
1907 | struct radeon_cs_packet *pkt, |
1908 | struct radeon_bo *robj) |
1909 | { |
1910 | unsigned idx; |
1911 | u32 value; |
1912 | idx = pkt->idx + 1; |
1913 | value = radeon_get_ib_value(p, idx + 2); |
1914 | if ((value + 1) > radeon_bo_size(robj)) { |
1915 | DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER " |
1916 | "(need %u have %lu) !\n" , |
1917 | value + 1, |
1918 | radeon_bo_size(robj)); |
1919 | return -EINVAL; |
1920 | } |
1921 | return 0; |
1922 | } |
1923 | |
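/* Validate a type-3 packet: patch relocations for the opcodes that
 * carry buffer addresses, and for each draw opcode snapshot
 * VAP_VF_CNTL (plus the immediate dword count where relevant) so
 * r100_cs_track_check() can verify the vertex fetches.
 */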
1924 | static int r100_packet3_check(struct radeon_cs_parser *p, |
1925 | struct radeon_cs_packet *pkt) |
1926 | { |
1927 | struct radeon_cs_reloc *reloc; |
1928 | struct r100_cs_track *track; |
1929 | unsigned idx; |
1930 | volatile uint32_t *ib; |
1931 | int r; |
1932 | |
1933 | ib = p->ib.ptr; |
1934 | idx = pkt->idx + 1; |
1935 | track = (struct r100_cs_track *)p->track; |
1936 | switch (pkt->opcode) { |
1937 | case PACKET3_3D_LOAD_VBPNTR: |
1938 | r = r100_packet3_load_vbpntr(p, pkt, idx); |
1939 | if (r) |
1940 | return r; |
1941 | break; |
1942 | case PACKET3_INDX_BUFFER: |
1943 | r = radeon_cs_packet_next_reloc(p, &reloc, 0); |
1944 | if (r) { |
1945 | DRM_ERROR("No reloc for packet3 %d\n" , pkt->opcode); |
1946 | radeon_cs_dump_packet(p, pkt); |
1947 | return r; |
1948 | } |
1949 | ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->gpu_offset); |
1950 | r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); |
1951 | if (r) { |
1952 | return r; |
1953 | } |
1954 | break; |
1955 | case 0x23: |
1956 | /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */ |
1957 | r = radeon_cs_packet_next_reloc(p, &reloc, 0); |
1958 | if (r) { |
1959 | DRM_ERROR("No reloc for packet3 %d\n" , pkt->opcode); |
1960 | radeon_cs_dump_packet(p, pkt); |
1961 | return r; |
1962 | } |
1963 | ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->gpu_offset); |
1964 | track->num_arrays = 1; |
1965 | track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2)); |
1966 | |
1967 | track->arrays[0].robj = reloc->robj; |
1968 | track->arrays[0].esize = track->vtx_size; |
1969 | |
1970 | track->max_indx = radeon_get_ib_value(p, idx+1); |
1971 | |
1972 | track->vap_vf_cntl = radeon_get_ib_value(p, idx+3); |
1973 | track->immd_dwords = pkt->count - 1; |
1974 | r = r100_cs_track_check(p->rdev, track); |
1975 | if (r) |
1976 | return r; |
1977 | break; |
1978 | case PACKET3_3D_DRAW_IMMD: |
1979 | if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) { |
1980 | DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n" ); |
1981 | return -EINVAL; |
1982 | } |
1983 | track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0)); |
1984 | track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); |
1985 | track->immd_dwords = pkt->count - 1; |
1986 | r = r100_cs_track_check(p->rdev, track); |
1987 | if (r) |
1988 | return r; |
1989 | break; |
1990 | /* triggers drawing using in-packet vertex data */ |
1991 | case PACKET3_3D_DRAW_IMMD_2: |
1992 | if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) { |
1993 | DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n" ); |
1994 | return -EINVAL; |
1995 | } |
1996 | track->vap_vf_cntl = radeon_get_ib_value(p, idx); |
1997 | track->immd_dwords = pkt->count; |
1998 | r = r100_cs_track_check(p->rdev, track); |
1999 | if (r) |
2000 | return r; |
2001 | break; |
2002 | /* triggers drawing using in-packet vertex data */ |
2003 | case PACKET3_3D_DRAW_VBUF_2: |
2004 | track->vap_vf_cntl = radeon_get_ib_value(p, idx); |
2005 | r = r100_cs_track_check(p->rdev, track); |
2006 | if (r) |
2007 | return r; |
2008 | break; |
2009 | /* triggers drawing of vertex buffers setup elsewhere */ |
2010 | case PACKET3_3D_DRAW_INDX_2: |
2011 | track->vap_vf_cntl = radeon_get_ib_value(p, idx); |
2012 | r = r100_cs_track_check(p->rdev, track); |
2013 | if (r) |
2014 | return r; |
2015 | break; |
2016 | /* triggers drawing using indices to vertex buffer */ |
2017 | case PACKET3_3D_DRAW_VBUF: |
2018 | track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); |
2019 | r = r100_cs_track_check(p->rdev, track); |
2020 | if (r) |
2021 | return r; |
2022 | break; |
2023 | /* triggers drawing of vertex buffers setup elsewhere */ |
2024 | case PACKET3_3D_DRAW_INDX: |
2025 | track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); |
2026 | r = r100_cs_track_check(p->rdev, track); |
2027 | if (r) |
2028 | return r; |
2029 | break; |
2030 | /* triggers drawing using indices to vertex buffer */ |
2031 | case PACKET3_3D_CLEAR_HIZ: |
2032 | case PACKET3_3D_CLEAR_ZMASK: |
2033 | if (p->rdev->hyperz_filp != p->filp) |
2034 | return -EINVAL; |
2035 | break; |
2036 | case PACKET3_NOP: |
2037 | break; |
2038 | default: |
2039 | DRM_ERROR("Packet3 opcode %x not supported\n" , pkt->opcode); |
2040 | return -EINVAL; |
2041 | } |
2042 | return 0; |
2043 | } |
2044 | |
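/* Top-level CS verifier for r1xx/r2xx.  Walks the IB packet by
 * packet: type-0 register writes go through the per-family checker
 * (r200_packet0_check() from CHIP_R200 on, r100_packet0_check()
 * otherwise) with the reg_safe bitmap filtering innocuous registers,
 * type-2 packets are padding, and type-3 packets go through
 * r100_packet3_check().
 */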
2045 | int r100_cs_parse(struct radeon_cs_parser *p) |
2046 | { |
2047 | struct radeon_cs_packet pkt; |
2048 | struct r100_cs_track *track; |
2049 | int r; |
2050 | |
2051 | track = kzalloc(sizeof(*track), GFP_KERNEL); |
2052 | if (!track) |
2053 | return -ENOMEM; |
2054 | r100_cs_track_clear(p->rdev, track); |
2055 | p->track = track; |
2056 | do { |
2057 | r = radeon_cs_packet_parse(p, &pkt, p->idx); |
2058 | if (r) { |
2059 | return r; |
2060 | } |
2061 | p->idx += pkt.count + 2; |
2062 | switch (pkt.type) { |
2063 | case RADEON_PACKET_TYPE0: |
2064 | if (p->rdev->family >= CHIP_R200) |
2065 | r = r100_cs_parse_packet0(p, &pkt, |
2066 | p->rdev->config.r100.reg_safe_bm, |
2067 | p->rdev->config.r100.reg_safe_bm_size, |
2068 | &r200_packet0_check); |
2069 | else |
2070 | r = r100_cs_parse_packet0(p, &pkt, |
2071 | p->rdev->config.r100.reg_safe_bm, |
2072 | p->rdev->config.r100.reg_safe_bm_size, |
2073 | &r100_packet0_check); |
2074 | break; |
2075 | case RADEON_PACKET_TYPE2: |
2076 | break; |
2077 | case RADEON_PACKET_TYPE3: |
2078 | r = r100_packet3_check(p, &pkt); |
2079 | break; |
2080 | default: |
2081 | DRM_ERROR("Unknown packet type %d !\n" , |
2082 | pkt.type); |
2083 | return -EINVAL; |
2084 | } |
2085 | if (r) |
2086 | return r; |
2087 | } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); |
2088 | return 0; |
2089 | } |
2090 | |
2091 | static void r100_cs_track_texture_print(struct r100_cs_track_texture *t) |
2092 | { |
2093 | DRM_ERROR("pitch %d\n" , t->pitch); |
2094 | DRM_ERROR("use_pitch %d\n" , t->use_pitch); |
2095 | DRM_ERROR("width %d\n" , t->width); |
2096 | DRM_ERROR("width_11 %d\n" , t->width_11); |
2097 | DRM_ERROR("height %d\n" , t->height); |
2098 | DRM_ERROR("height_11 %d\n" , t->height_11); |
2099 | DRM_ERROR("num levels %d\n" , t->num_levels); |
2100 | DRM_ERROR("depth %d\n" , t->txdepth); |
2101 | DRM_ERROR("bpp %d\n" , t->cpp); |
2102 | DRM_ERROR("coordinate type %d\n" , t->tex_coord_type); |
2103 | DRM_ERROR("width round to power of 2 %d\n" , t->roundup_w); |
2104 | DRM_ERROR("height round to power of 2 %d\n" , t->roundup_h); |
2105 | DRM_ERROR("compress format %d\n" , t->compress_format); |
2106 | } |
2107 | |
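/* DXT textures are stored as 4x4 texel blocks: 8 bytes per block for
 * DXT1, 16 bytes for DXT3/5, with a minimum number of blocks per row.
 * Worked example: a 64x64 DXT1 texture takes (64/4) * (64/4) * 8 =
 * 2048 bytes.
 */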
2108 | static int r100_track_compress_size(int compress_format, int w, int h) |
2109 | { |
2110 | int block_width, block_height, block_bytes; |
2111 | int wblocks, hblocks; |
2112 | int min_wblocks; |
2113 | int sz; |
2114 | |
2115 | block_width = 4; |
2116 | block_height = 4; |
2117 | |
2118 | switch (compress_format) { |
2119 | case R100_TRACK_COMP_DXT1: |
2120 | block_bytes = 8; |
2121 | min_wblocks = 4; |
2122 | break; |
2123 | default: |
2124 | case R100_TRACK_COMP_DXT35: |
2125 | block_bytes = 16; |
2126 | min_wblocks = 2; |
2127 | break; |
2128 | } |
2129 | |
2130 | hblocks = (h + block_height - 1) / block_height; |
2131 | wblocks = (w + block_width - 1) / block_width; |
2132 | if (wblocks < min_wblocks) |
2133 | wblocks = min_wblocks; |
2134 | sz = wblocks * hblocks * block_bytes; |
2135 | return sz; |
2136 | } |
2137 | |
2138 | static int r100_cs_track_cube(struct radeon_device *rdev, |
2139 | struct r100_cs_track *track, unsigned idx) |
2140 | { |
2141 | unsigned face, w, h; |
2142 | struct radeon_bo *cube_robj; |
2143 | unsigned long size; |
2144 | unsigned compress_format = track->textures[idx].compress_format; |
2145 | |
2146 | for (face = 0; face < 5; face++) { |
2147 | cube_robj = track->textures[idx].cube_info[face].robj; |
2148 | w = track->textures[idx].cube_info[face].width; |
2149 | h = track->textures[idx].cube_info[face].height; |
2150 | |
		if (compress_format) {
			size = r100_track_compress_size(compress_format, w, h);
		} else {
			size = w * h;
		}
2155 | size *= track->textures[idx].cpp; |
2156 | |
2157 | size += track->textures[idx].cube_info[face].offset; |
2158 | |
2159 | if (size > radeon_bo_size(cube_robj)) { |
2160 | DRM_ERROR("Cube texture offset greater than object size %lu %lu\n" , |
2161 | size, radeon_bo_size(cube_robj)); |
2162 | r100_cs_track_texture_print(&track->textures[idx]); |
2163 | return -1; |
2164 | } |
2165 | } |
2166 | return 0; |
2167 | } |
2168 | |
2169 | static int r100_cs_track_texture_check(struct radeon_device *rdev, |
2170 | struct r100_cs_track *track) |
2171 | { |
2172 | struct radeon_bo *robj; |
2173 | unsigned long size; |
2174 | unsigned u, i, w, h, d; |
2175 | int ret; |
2176 | |
2177 | for (u = 0; u < track->num_texture; u++) { |
2178 | if (!track->textures[u].enabled) |
2179 | continue; |
2180 | if (track->textures[u].lookup_disable) |
2181 | continue; |
2182 | robj = track->textures[u].robj; |
2183 | if (robj == NULL) { |
2184 | DRM_ERROR("No texture bound to unit %u\n" , u); |
2185 | return -EINVAL; |
2186 | } |
2187 | size = 0; |
2188 | for (i = 0; i <= track->textures[u].num_levels; i++) { |
2189 | if (track->textures[u].use_pitch) { |
2190 | if (rdev->family < CHIP_R300) |
2191 | w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i); |
2192 | else |
2193 | w = track->textures[u].pitch / (1 << i); |
2194 | } else { |
2195 | w = track->textures[u].width; |
2196 | if (rdev->family >= CHIP_RV515) |
2197 | w |= track->textures[u].width_11; |
2198 | w = w / (1 << i); |
2199 | if (track->textures[u].roundup_w) |
2200 | w = roundup_pow_of_two(w); |
2201 | } |
2202 | h = track->textures[u].height; |
2203 | if (rdev->family >= CHIP_RV515) |
2204 | h |= track->textures[u].height_11; |
2205 | h = h / (1 << i); |
2206 | if (track->textures[u].roundup_h) |
2207 | h = roundup_pow_of_two(h); |
2208 | if (track->textures[u].tex_coord_type == 1) { |
2209 | d = (1 << track->textures[u].txdepth) / (1 << i); |
2210 | if (!d) |
2211 | d = 1; |
2212 | } else { |
2213 | d = 1; |
2214 | } |
			if (track->textures[u].compress_format) {
				/* compressed textures are block based */
				size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
			} else {
				size += w * h * d;
			}
2221 | } |
2222 | size *= track->textures[u].cpp; |
2223 | |
2224 | switch (track->textures[u].tex_coord_type) { |
2225 | case 0: |
2226 | case 1: |
2227 | break; |
2228 | case 2: |
2229 | if (track->separate_cube) { |
2230 | ret = r100_cs_track_cube(rdev, track, u); |
2231 | if (ret) |
2232 | return ret; |
			} else {
				size *= 6;
			}
2235 | break; |
2236 | default: |
2237 | DRM_ERROR("Invalid texture coordinate type %u for unit " |
2238 | "%u\n" , track->textures[u].tex_coord_type, u); |
2239 | return -EINVAL; |
2240 | } |
2241 | if (size > radeon_bo_size(robj)) { |
2242 | DRM_ERROR("Texture of unit %u needs %lu bytes but is " |
2243 | "%lu\n" , u, size, radeon_bo_size(robj)); |
2244 | r100_cs_track_texture_print(&track->textures[u]); |
2245 | return -EINVAL; |
2246 | } |
2247 | } |
2248 | return 0; |
2249 | } |
2250 | |
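/* Cross-check the accumulated CS state against the bound buffers.
 * Color, depth and AA targets must hold pitch * cpp * maxy bytes past
 * their offset, and the vertex data is sized according to PRIM_WALK:
 * 1 fetches up to max_indx indexed elements, 2 fetches vertices
 * sequentially up to nverts, 3 expects the vertices inline in the
 * packet (immd_dwords).
 */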
2251 | int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) |
2252 | { |
2253 | unsigned i; |
2254 | unsigned long size; |
2255 | unsigned prim_walk; |
2256 | unsigned nverts; |
2257 | unsigned num_cb = track->cb_dirty ? track->num_cb : 0; |
2258 | |
2259 | if (num_cb && !track->zb_cb_clear && !track->color_channel_mask && |
2260 | !track->blend_read_enable) |
2261 | num_cb = 0; |
2262 | |
2263 | for (i = 0; i < num_cb; i++) { |
2264 | if (track->cb[i].robj == NULL) { |
2265 | DRM_ERROR("[drm] No buffer for color buffer %d !\n" , i); |
2266 | return -EINVAL; |
2267 | } |
2268 | size = track->cb[i].pitch * track->cb[i].cpp * track->maxy; |
2269 | size += track->cb[i].offset; |
2270 | if (size > radeon_bo_size(track->cb[i].robj)) { |
2271 | DRM_ERROR("[drm] Buffer too small for color buffer %d " |
2272 | "(need %lu have %lu) !\n" , i, size, |
2273 | radeon_bo_size(track->cb[i].robj)); |
2274 | DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n" , |
2275 | i, track->cb[i].pitch, track->cb[i].cpp, |
2276 | track->cb[i].offset, track->maxy); |
2277 | return -EINVAL; |
2278 | } |
2279 | } |
2280 | track->cb_dirty = false; |
2281 | |
2282 | if (track->zb_dirty && track->z_enabled) { |
2283 | if (track->zb.robj == NULL) { |
2284 | DRM_ERROR("[drm] No buffer for z buffer !\n" ); |
2285 | return -EINVAL; |
2286 | } |
2287 | size = track->zb.pitch * track->zb.cpp * track->maxy; |
2288 | size += track->zb.offset; |
2289 | if (size > radeon_bo_size(track->zb.robj)) { |
2290 | DRM_ERROR("[drm] Buffer too small for z buffer " |
2291 | "(need %lu have %lu) !\n" , size, |
2292 | radeon_bo_size(track->zb.robj)); |
2293 | DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n" , |
2294 | track->zb.pitch, track->zb.cpp, |
2295 | track->zb.offset, track->maxy); |
2296 | return -EINVAL; |
2297 | } |
2298 | } |
2299 | track->zb_dirty = false; |
2300 | |
2301 | if (track->aa_dirty && track->aaresolve) { |
2302 | if (track->aa.robj == NULL) { |
2303 | DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n" , i); |
2304 | return -EINVAL; |
2305 | } |
2306 | /* I believe the format comes from colorbuffer0. */ |
2307 | size = track->aa.pitch * track->cb[0].cpp * track->maxy; |
2308 | size += track->aa.offset; |
2309 | if (size > radeon_bo_size(track->aa.robj)) { |
2310 | DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d " |
2311 | "(need %lu have %lu) !\n" , i, size, |
2312 | radeon_bo_size(track->aa.robj)); |
2313 | DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n" , |
2314 | i, track->aa.pitch, track->cb[0].cpp, |
2315 | track->aa.offset, track->maxy); |
2316 | return -EINVAL; |
2317 | } |
2318 | } |
2319 | track->aa_dirty = false; |
2320 | |
2321 | prim_walk = (track->vap_vf_cntl >> 4) & 0x3; |
2322 | if (track->vap_vf_cntl & (1 << 14)) { |
2323 | nverts = track->vap_alt_nverts; |
2324 | } else { |
2325 | nverts = (track->vap_vf_cntl >> 16) & 0xFFFF; |
2326 | } |
2327 | switch (prim_walk) { |
2328 | case 1: |
2329 | for (i = 0; i < track->num_arrays; i++) { |
2330 | size = track->arrays[i].esize * track->max_indx * 4; |
2331 | if (track->arrays[i].robj == NULL) { |
2332 | DRM_ERROR("(PW %u) Vertex array %u no buffer " |
2333 | "bound\n" , prim_walk, i); |
2334 | return -EINVAL; |
2335 | } |
2336 | if (size > radeon_bo_size(track->arrays[i].robj)) { |
2337 | dev_err(rdev->dev, "(PW %u) Vertex array %u " |
2338 | "need %lu dwords have %lu dwords\n" , |
2339 | prim_walk, i, size >> 2, |
2340 | radeon_bo_size(track->arrays[i].robj) |
2341 | >> 2); |
2342 | DRM_ERROR("Max indices %u\n" , track->max_indx); |
2343 | return -EINVAL; |
2344 | } |
2345 | } |
2346 | break; |
2347 | case 2: |
2348 | for (i = 0; i < track->num_arrays; i++) { |
2349 | size = track->arrays[i].esize * (nverts - 1) * 4; |
2350 | if (track->arrays[i].robj == NULL) { |
2351 | DRM_ERROR("(PW %u) Vertex array %u no buffer " |
2352 | "bound\n" , prim_walk, i); |
2353 | return -EINVAL; |
2354 | } |
2355 | if (size > radeon_bo_size(track->arrays[i].robj)) { |
2356 | dev_err(rdev->dev, "(PW %u) Vertex array %u " |
2357 | "need %lu dwords have %lu dwords\n" , |
2358 | prim_walk, i, size >> 2, |
2359 | radeon_bo_size(track->arrays[i].robj) |
2360 | >> 2); |
2361 | return -EINVAL; |
2362 | } |
2363 | } |
2364 | break; |
2365 | case 3: |
2366 | size = track->vtx_size * nverts; |
2367 | if (size != track->immd_dwords) { |
2368 | DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n" , |
2369 | track->immd_dwords, size); |
2370 | DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n" , |
2371 | nverts, track->vtx_size); |
2372 | return -EINVAL; |
2373 | } |
2374 | break; |
2375 | default: |
2376 | DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n" , |
2377 | prim_walk); |
2378 | return -EINVAL; |
2379 | } |
2380 | |
2381 | if (track->tex_dirty) { |
2382 | track->tex_dirty = false; |
2383 | return r100_cs_track_texture_check(rdev, track); |
2384 | } |
2385 | return 0; |
2386 | } |
2387 | |
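/* Reset the tracker to conservative worst-case defaults (maximum
 * pitches, sizes and counts for the family) so that a CS which never
 * programs a register is still checked against the largest value the
 * field could hold.
 */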
2388 | void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track) |
2389 | { |
2390 | unsigned i, face; |
2391 | |
2392 | track->cb_dirty = true; |
2393 | track->zb_dirty = true; |
2394 | track->tex_dirty = true; |
2395 | track->aa_dirty = true; |
2396 | |
2397 | if (rdev->family < CHIP_R300) { |
2398 | track->num_cb = 1; |
2399 | if (rdev->family <= CHIP_RS200) |
2400 | track->num_texture = 3; |
2401 | else |
2402 | track->num_texture = 6; |
2403 | track->maxy = 2048; |
2404 | track->separate_cube = 1; |
2405 | } else { |
2406 | track->num_cb = 4; |
2407 | track->num_texture = 16; |
2408 | track->maxy = 4096; |
2409 | track->separate_cube = 0; |
2410 | track->aaresolve = false; |
2411 | track->aa.robj = NULL; |
2412 | } |
2413 | |
2414 | for (i = 0; i < track->num_cb; i++) { |
2415 | track->cb[i].robj = NULL; |
2416 | track->cb[i].pitch = 8192; |
2417 | track->cb[i].cpp = 16; |
2418 | track->cb[i].offset = 0; |
2419 | } |
2420 | track->z_enabled = true; |
2421 | track->zb.robj = NULL; |
2422 | track->zb.pitch = 8192; |
2423 | track->zb.cpp = 4; |
2424 | track->zb.offset = 0; |
2425 | track->vtx_size = 0x7F; |
2426 | track->immd_dwords = 0xFFFFFFFFUL; |
2427 | track->num_arrays = 11; |
2428 | track->max_indx = 0x00FFFFFFUL; |
2429 | for (i = 0; i < track->num_arrays; i++) { |
2430 | track->arrays[i].robj = NULL; |
2431 | track->arrays[i].esize = 0x7F; |
2432 | } |
2433 | for (i = 0; i < track->num_texture; i++) { |
2434 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; |
2435 | track->textures[i].pitch = 16536; |
2436 | track->textures[i].width = 16536; |
2437 | track->textures[i].height = 16536; |
2438 | track->textures[i].width_11 = 1 << 11; |
2439 | track->textures[i].height_11 = 1 << 11; |
2440 | track->textures[i].num_levels = 12; |
2441 | if (rdev->family <= CHIP_RS200) { |
2442 | track->textures[i].tex_coord_type = 0; |
2443 | track->textures[i].txdepth = 0; |
2444 | } else { |
2445 | track->textures[i].txdepth = 16; |
2446 | track->textures[i].tex_coord_type = 1; |
2447 | } |
2448 | track->textures[i].cpp = 64; |
2449 | track->textures[i].robj = NULL; |
2450 | /* CS IB emission code makes sure texture unit are disabled */ |
2451 | track->textures[i].enabled = false; |
2452 | track->textures[i].lookup_disable = false; |
2453 | track->textures[i].roundup_w = true; |
2454 | track->textures[i].roundup_h = true; |
2455 | if (track->separate_cube) |
2456 | for (face = 0; face < 5; face++) { |
2457 | track->textures[i].cube_info[face].robj = NULL; |
2458 | track->textures[i].cube_info[face].width = 16536; |
2459 | track->textures[i].cube_info[face].height = 16536; |
2460 | track->textures[i].cube_info[face].offset = 0; |
2461 | } |
2462 | } |
2463 | } |
2464 | |
2465 | /* |
2466 | * Global GPU functions |
2467 | */ |
2468 | static void r100_errata(struct radeon_device *rdev) |
2469 | { |
2470 | rdev->pll_errata = 0; |
2471 | |
2472 | if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) { |
2473 | rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS; |
2474 | } |
2475 | |
2476 | if (rdev->family == CHIP_RV100 || |
2477 | rdev->family == CHIP_RS100 || |
2478 | rdev->family == CHIP_RS200) { |
2479 | rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY; |
2480 | } |
2481 | } |
2482 | |
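/* Poll the RBBM command FIFO count until at least n entries are
 * reported available, giving up after rdev->usec_timeout iterations
 * of a 1us delay.
 */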
2483 | static int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n) |
2484 | { |
2485 | unsigned i; |
2486 | uint32_t tmp; |
2487 | |
2488 | for (i = 0; i < rdev->usec_timeout; i++) { |
2489 | tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK; |
2490 | if (tmp >= n) { |
2491 | return 0; |
2492 | } |
2493 | DRM_UDELAY(1); |
2494 | } |
2495 | return -1; |
2496 | } |
2497 | |
2498 | int r100_gui_wait_for_idle(struct radeon_device *rdev) |
2499 | { |
2500 | unsigned i; |
2501 | uint32_t tmp; |
2502 | |
2503 | if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) { |
2504 | printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !" |
2505 | " Bad things might happen.\n" ); |
2506 | } |
2507 | for (i = 0; i < rdev->usec_timeout; i++) { |
2508 | tmp = RREG32(RADEON_RBBM_STATUS); |
2509 | if (!(tmp & RADEON_RBBM_ACTIVE)) { |
2510 | return 0; |
2511 | } |
2512 | DRM_UDELAY(1); |
2513 | } |
2514 | return -1; |
2515 | } |
2516 | |
2517 | int r100_mc_wait_for_idle(struct radeon_device *rdev) |
2518 | { |
2519 | unsigned i; |
2520 | uint32_t tmp; |
2521 | |
2522 | for (i = 0; i < rdev->usec_timeout; i++) { |
2523 | /* read MC_STATUS */ |
2524 | tmp = RREG32(RADEON_MC_STATUS); |
2525 | if (tmp & RADEON_MC_IDLE) { |
2526 | return 0; |
2527 | } |
2528 | DRM_UDELAY(1); |
2529 | } |
2530 | return -1; |
2531 | } |
2532 | |
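/* Lockup check: if the GUI block is idle the ring cannot be stuck,
 * so refresh the lockup tracking data; otherwise defer to the generic
 * test that watches whether the ring makes progress over time.
 */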
2533 | bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) |
2534 | { |
2535 | u32 rbbm_status; |
2536 | |
2537 | rbbm_status = RREG32(R_000E40_RBBM_STATUS); |
2538 | if (!G_000E40_GUI_ACTIVE(rbbm_status)) { |
2539 | radeon_ring_lockup_update(rdev, ring); |
2540 | return false; |
2541 | } |
2542 | return radeon_ring_test_lockup(rdev, ring); |
2543 | } |
2544 | |
2545 | /* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */ |
2546 | void r100_enable_bm(struct radeon_device *rdev) |
2547 | { |
2548 | uint32_t tmp; |
2549 | /* Enable bus mastering */ |
2550 | tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; |
2551 | WREG32(RADEON_BUS_CNTL, tmp); |
2552 | } |
2553 | |
2554 | void r100_bm_disable(struct radeon_device *rdev) |
2555 | { |
2556 | u32 tmp; |
2557 | |
2558 | /* disable bus mastering */ |
2559 | tmp = RREG32(R_000030_BUS_CNTL); |
2560 | WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044); |
2561 | mdelay(1); |
2562 | WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042); |
2563 | mdelay(1); |
2564 | WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040); |
2565 | tmp = RREG32(RADEON_BUS_CNTL); |
2566 | mdelay(1); |
2567 | pci_clear_master(rdev->pdev); |
2568 | mdelay(1); |
2569 | } |
2570 | |
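/* Full soft reset sequence: quiesce the MC, stop the CP and force its
 * pointers to zero, save PCI state and disable bus mastering, then
 * pulse RBBM_SOFT_RESET twice - first for the 3D blocks (SE/RE/PP/RB),
 * then for the CP - before restoring PCI state and checking
 * RBBM_STATUS for busy bits that survived the reset.
 */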
2571 | int r100_asic_reset(struct radeon_device *rdev) |
2572 | { |
2573 | struct r100_mc_save save; |
2574 | u32 status, tmp; |
2575 | int ret = 0; |
2576 | |
2577 | status = RREG32(R_000E40_RBBM_STATUS); |
2578 | if (!G_000E40_GUI_ACTIVE(status)) { |
2579 | return 0; |
2580 | } |
2581 | r100_mc_stop(rdev, &save); |
2582 | status = RREG32(R_000E40_RBBM_STATUS); |
2583 | dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n" , __func__, __LINE__, status); |
2584 | /* stop CP */ |
2585 | WREG32(RADEON_CP_CSQ_CNTL, 0); |
2586 | tmp = RREG32(RADEON_CP_RB_CNTL); |
2587 | WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); |
2588 | WREG32(RADEON_CP_RB_RPTR_WR, 0); |
2589 | WREG32(RADEON_CP_RB_WPTR, 0); |
2590 | WREG32(RADEON_CP_RB_CNTL, tmp); |
2591 | /* save PCI state */ |
2592 | pci_save_state(rdev->pdev); |
2593 | /* disable bus mastering */ |
2594 | r100_bm_disable(rdev); |
2595 | WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) | |
2596 | S_0000F0_SOFT_RESET_RE(1) | |
2597 | S_0000F0_SOFT_RESET_PP(1) | |
2598 | S_0000F0_SOFT_RESET_RB(1)); |
2599 | RREG32(R_0000F0_RBBM_SOFT_RESET); |
2600 | mdelay(500); |
2601 | WREG32(R_0000F0_RBBM_SOFT_RESET, 0); |
2602 | mdelay(1); |
2603 | status = RREG32(R_000E40_RBBM_STATUS); |
2604 | dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n" , __func__, __LINE__, status); |
2605 | /* reset CP */ |
2606 | WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1)); |
2607 | RREG32(R_0000F0_RBBM_SOFT_RESET); |
2608 | mdelay(500); |
2609 | WREG32(R_0000F0_RBBM_SOFT_RESET, 0); |
2610 | mdelay(1); |
2611 | status = RREG32(R_000E40_RBBM_STATUS); |
2612 | dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n" , __func__, __LINE__, status); |
2613 | /* restore PCI & busmastering */ |
2614 | pci_restore_state(rdev->pdev); |
2615 | r100_enable_bm(rdev); |
2616 | /* Check if GPU is idle */ |
2617 | if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) || |
2618 | G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) { |
2619 | dev_err(rdev->dev, "failed to reset GPU\n" ); |
2620 | ret = -1; |
2621 | } else |
2622 | dev_info(rdev->dev, "GPU reset succeed\n" ); |
2623 | r100_mc_resume(rdev, &save); |
2624 | return ret; |
2625 | } |
2626 | |
2627 | void r100_set_common_regs(struct radeon_device *rdev) |
2628 | { |
2629 | struct drm_device *dev = rdev->ddev; |
2630 | bool force_dac2 = false; |
2631 | u32 tmp; |
2632 | |
2633 | /* set these so they don't interfere with anything */ |
2634 | WREG32(RADEON_OV0_SCALE_CNTL, 0); |
2635 | WREG32(RADEON_SUBPIC_CNTL, 0); |
2636 | WREG32(RADEON_VIPH_CONTROL, 0); |
2637 | WREG32(RADEON_I2C_CNTL_1, 0); |
2638 | WREG32(RADEON_DVI_I2C_CNTL_1, 0); |
2639 | WREG32(RADEON_CAP0_TRIG_CNTL, 0); |
2640 | WREG32(RADEON_CAP1_TRIG_CNTL, 0); |
2641 | |
2642 | /* always set up dac2 on rn50 and some rv100 as lots |
2643 | * of servers seem to wire it up to a VGA port but |
2644 | * don't report it in the bios connector |
2645 | * table. |
2646 | */ |
2647 | switch (dev->pdev->device) { |
2648 | /* RN50 */ |
2649 | case 0x515e: |
2650 | case 0x5969: |
2651 | force_dac2 = true; |
2652 | break; |
2653 | /* RV100*/ |
2654 | case 0x5159: |
2655 | case 0x515a: |
2656 | /* DELL triple head servers */ |
2657 | if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) && |
2658 | ((dev->pdev->subsystem_device == 0x016c) || |
2659 | (dev->pdev->subsystem_device == 0x016d) || |
2660 | (dev->pdev->subsystem_device == 0x016e) || |
2661 | (dev->pdev->subsystem_device == 0x016f) || |
2662 | (dev->pdev->subsystem_device == 0x0170) || |
2663 | (dev->pdev->subsystem_device == 0x017d) || |
2664 | (dev->pdev->subsystem_device == 0x017e) || |
2665 | (dev->pdev->subsystem_device == 0x0183) || |
2666 | (dev->pdev->subsystem_device == 0x018a) || |
2667 | (dev->pdev->subsystem_device == 0x019a))) |
2668 | force_dac2 = true; |
2669 | break; |
2670 | } |
2671 | |
2672 | if (force_dac2) { |
2673 | u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG); |
2674 | u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); |
2675 | u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2); |
2676 | |
		/* For CRT on DAC2, don't turn it on if the BIOS didn't
		 * enable it, even if it's detected.
		 */
2680 | |
2681 | /* force it to crtc0 */ |
2682 | dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL; |
2683 | dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL; |
2684 | disp_hw_debug |= RADEON_CRT2_DISP1_SEL; |
2685 | |
2686 | /* set up the TV DAC */ |
2687 | tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL | |
2688 | RADEON_TV_DAC_STD_MASK | |
2689 | RADEON_TV_DAC_RDACPD | |
2690 | RADEON_TV_DAC_GDACPD | |
2691 | RADEON_TV_DAC_BDACPD | |
2692 | RADEON_TV_DAC_BGADJ_MASK | |
2693 | RADEON_TV_DAC_DACADJ_MASK); |
2694 | tv_dac_cntl |= (RADEON_TV_DAC_NBLANK | |
2695 | RADEON_TV_DAC_NHOLD | |
2696 | RADEON_TV_DAC_STD_PS2 | |
2697 | (0x58 << 16)); |
2698 | |
2699 | WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); |
2700 | WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); |
2701 | WREG32(RADEON_DAC_CNTL2, dac2_cntl); |
2702 | } |
2703 | |
2704 | /* switch PM block to ACPI mode */ |
2705 | tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL); |
2706 | tmp &= ~RADEON_PM_MODE_SEL; |
2707 | WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp); |
2708 | |
2709 | } |
2710 | |
2711 | /* |
2712 | * VRAM info |
2713 | */ |
2714 | static void r100_vram_get_type(struct radeon_device *rdev) |
2715 | { |
2716 | uint32_t tmp; |
2717 | |
2718 | rdev->mc.vram_is_ddr = false; |
2719 | if (rdev->flags & RADEON_IS_IGP) |
2720 | rdev->mc.vram_is_ddr = true; |
2721 | else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR) |
2722 | rdev->mc.vram_is_ddr = true; |
2723 | if ((rdev->family == CHIP_RV100) || |
2724 | (rdev->family == CHIP_RS100) || |
2725 | (rdev->family == CHIP_RS200)) { |
2726 | tmp = RREG32(RADEON_MEM_CNTL); |
2727 | if (tmp & RV100_HALF_MODE) { |
2728 | rdev->mc.vram_width = 32; |
2729 | } else { |
2730 | rdev->mc.vram_width = 64; |
2731 | } |
2732 | if (rdev->flags & RADEON_SINGLE_CRTC) { |
2733 | rdev->mc.vram_width /= 4; |
2734 | rdev->mc.vram_is_ddr = true; |
2735 | } |
2736 | } else if (rdev->family <= CHIP_RV280) { |
2737 | tmp = RREG32(RADEON_MEM_CNTL); |
2738 | if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) { |
2739 | rdev->mc.vram_width = 128; |
2740 | } else { |
2741 | rdev->mc.vram_width = 64; |
2742 | } |
2743 | } else { |
2744 | /* newer IGPs */ |
2745 | rdev->mc.vram_width = 128; |
2746 | } |
2747 | } |
2748 | |
2749 | static u32 r100_get_accessible_vram(struct radeon_device *rdev) |
2750 | { |
2751 | u32 aper_size; |
2752 | u8 byte; |
2753 | |
2754 | aper_size = RREG32(RADEON_CONFIG_APER_SIZE); |
2755 | |
	/* Set HDP_APER_CNTL only on cards that are known not to be broken,
	 * that is, cards that have the 2nd generation multifunction PCI
	 * interface.
	 */
2759 | if (rdev->family == CHIP_RV280 || |
2760 | rdev->family >= CHIP_RV350) { |
2761 | WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL, |
2762 | ~RADEON_HDP_APER_CNTL); |
2763 | DRM_INFO("Generation 2 PCI interface, using max accessible memory\n" ); |
2764 | return aper_size * 2; |
2765 | } |
2766 | |
2767 | /* Older cards have all sorts of funny issues to deal with. First |
2768 | * check if it's a multifunction card by reading the PCI config |
2769 | * header type... Limit those to one aperture size |
2770 | */ |
2771 | pci_read_config_byte(rdev->pdev, 0xe, &byte); |
2772 | if (byte & 0x80) { |
2773 | DRM_INFO("Generation 1 PCI interface in multifunction mode\n" ); |
2774 | DRM_INFO("Limiting VRAM to one aperture\n" ); |
2775 | return aper_size; |
2776 | } |
2777 | |
	/* Single function older card.  We read HDP_APER_CNTL to see how the
	 * BIOS has set it up.  We don't write this as it's broken on some
	 * ASICs, but we expect the BIOS to have done the right thing (might
	 * be too optimistic...)
	 */
2782 | if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL) |
2783 | return aper_size * 2; |
2784 | return aper_size; |
2785 | } |
2786 | |
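/* On IGPs the amount of stolen system RAM comes from NB_TOM, which
 * packs the top and bottom of the range in 64KB units (top in bits
 * 31:16, bottom in bits 15:0); e.g. top = 0x00ff, bottom = 0x0000
 * decodes to (0xff - 0x0 + 1) << 16 = 16MB.  Discrete boards report
 * their VRAM size directly in CONFIG_MEMSIZE.
 */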
2787 | void r100_vram_init_sizes(struct radeon_device *rdev) |
2788 | { |
2789 | u64 config_aper_size; |
2790 | |
2791 | /* work out accessible VRAM */ |
2792 | rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); |
2793 | rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); |
2794 | rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev); |
2795 | /* FIXME we don't use the second aperture yet when we could use it */ |
2796 | if (rdev->mc.visible_vram_size > rdev->mc.aper_size) |
2797 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
2798 | config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); |
2799 | if (rdev->flags & RADEON_IS_IGP) { |
2800 | uint32_t tom; |
2801 | /* read NB_TOM to get the amount of ram stolen for the GPU */ |
2802 | tom = RREG32(RADEON_NB_TOM); |
2803 | rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); |
2804 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); |
2805 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; |
2806 | } else { |
2807 | rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); |
2808 | /* Some production boards of m6 will report 0 |
2809 | * if it's 8 MB |
2810 | */ |
2811 | if (rdev->mc.real_vram_size == 0) { |
2812 | rdev->mc.real_vram_size = 8192 * 1024; |
2813 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); |
2814 | } |
		/* Fix for RN50, M6, M7 with 8/16/32(??) MB of VRAM -
		 * Novell bug 204882, along with lots of Ubuntu ones
		 */
2818 | if (rdev->mc.aper_size > config_aper_size) |
2819 | config_aper_size = rdev->mc.aper_size; |
2820 | |
2821 | if (config_aper_size > rdev->mc.real_vram_size) |
2822 | rdev->mc.mc_vram_size = config_aper_size; |
2823 | else |
2824 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; |
2825 | } |
2826 | } |
2827 | |
2828 | void r100_vga_set_state(struct radeon_device *rdev, bool state) |
2829 | { |
2830 | uint32_t temp; |
2831 | |
2832 | temp = RREG32(RADEON_CONFIG_CNTL); |
	if (!state) {
2834 | temp &= ~RADEON_CFG_VGA_RAM_EN; |
2835 | temp |= RADEON_CFG_VGA_IO_DIS; |
2836 | } else { |
2837 | temp &= ~RADEON_CFG_VGA_IO_DIS; |
2838 | } |
2839 | WREG32(RADEON_CONFIG_CNTL, temp); |
2840 | } |
2841 | |
2842 | static void r100_mc_init(struct radeon_device *rdev) |
2843 | { |
2844 | u64 base; |
2845 | |
2846 | r100_vram_get_type(rdev); |
2847 | r100_vram_init_sizes(rdev); |
2848 | base = rdev->mc.aper_base; |
2849 | if (rdev->flags & RADEON_IS_IGP) |
2850 | base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; |
2851 | radeon_vram_location(rdev, &rdev->mc, base); |
2852 | rdev->mc.gtt_base_align = 0; |
2853 | if (!(rdev->flags & RADEON_IS_AGP)) |
2854 | radeon_gtt_location(rdev, &rdev->mc); |
2855 | radeon_update_bandwidth_info(rdev); |
2856 | } |
2857 | |
2858 | |
2859 | /* |
2860 | * Indirect registers accessor |
2861 | */ |
2862 | void r100_pll_errata_after_index(struct radeon_device *rdev) |
2863 | { |
2864 | if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) { |
2865 | (void)RREG32(RADEON_CLOCK_CNTL_DATA); |
2866 | (void)RREG32(RADEON_CRTC_GEN_CNTL); |
2867 | } |
2868 | } |
2869 | |
2870 | static void r100_pll_errata_after_data(struct radeon_device *rdev) |
2871 | { |
	/* This workaround is necessary on RV100, RS100 and RS200 chips,
	 * or the chip could hang on a subsequent access.
	 */
2875 | if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) { |
2876 | mdelay(5); |
2877 | } |
2878 | |
	/* This function is required to work around a hardware bug in some (all?)
2880 | * revisions of the R300. This workaround should be called after every |
2881 | * CLOCK_CNTL_INDEX register access. If not, register reads afterward |
2882 | * may not be correct. |
2883 | */ |
2884 | if (rdev->pll_errata & CHIP_ERRATA_R300_CG) { |
2885 | uint32_t save, tmp; |
2886 | |
2887 | save = RREG32(RADEON_CLOCK_CNTL_INDEX); |
2888 | tmp = save & ~(0x3f | RADEON_PLL_WR_EN); |
2889 | WREG32(RADEON_CLOCK_CNTL_INDEX, tmp); |
2890 | tmp = RREG32(RADEON_CLOCK_CNTL_DATA); |
2891 | WREG32(RADEON_CLOCK_CNTL_INDEX, save); |
2892 | } |
2893 | } |
2894 | |
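/* PLL registers are reached indirectly: write the 6-bit register
 * index to CLOCK_CNTL_INDEX (with PLL_WR_EN set for writes), then
 * access CLOCK_CNTL_DATA, applying the per-chip errata hooks around
 * each step.  pll_idx_lock keeps the index/data pair atomic against
 * concurrent accessors.
 */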
2895 | uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg) |
2896 | { |
2897 | unsigned long flags; |
2898 | uint32_t data; |
2899 | |
2900 | spin_lock_irqsave(&rdev->pll_idx_lock, flags); |
2901 | WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f); |
2902 | r100_pll_errata_after_index(rdev); |
2903 | data = RREG32(RADEON_CLOCK_CNTL_DATA); |
2904 | r100_pll_errata_after_data(rdev); |
2905 | spin_unlock_irqrestore(&rdev->pll_idx_lock, flags); |
2906 | return data; |
2907 | } |
2908 | |
2909 | void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
2910 | { |
2911 | unsigned long flags; |
2912 | |
2913 | spin_lock_irqsave(&rdev->pll_idx_lock, flags); |
2914 | WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN)); |
2915 | r100_pll_errata_after_index(rdev); |
2916 | WREG32(RADEON_CLOCK_CNTL_DATA, v); |
2917 | r100_pll_errata_after_data(rdev); |
2918 | spin_unlock_irqrestore(&rdev->pll_idx_lock, flags); |
2919 | } |
2920 | |
2921 | static void r100_set_safe_registers(struct radeon_device *rdev) |
2922 | { |
2923 | if (ASIC_IS_RN50(rdev)) { |
2924 | rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm; |
2925 | rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm); |
2926 | } else if (rdev->family < CHIP_R200) { |
2927 | rdev->config.r100.reg_safe_bm = r100_reg_safe_bm; |
2928 | rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm); |
2929 | } else { |
2930 | r200_set_safe_registers(rdev); |
2931 | } |
2932 | } |
2933 | |
2934 | /* |
2935 | * Debugfs info |
2936 | */ |
2937 | #if defined(CONFIG_DEBUG_FS) |
2938 | static int r100_debugfs_rbbm_info(struct seq_file *m, void *data) |
2939 | { |
2940 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
2941 | struct drm_device *dev = node->minor->dev; |
2942 | struct radeon_device *rdev = dev->dev_private; |
2943 | uint32_t reg, value; |
2944 | unsigned i; |
2945 | |
2946 | seq_printf(m, "RBBM_STATUS 0x%08x\n" , RREG32(RADEON_RBBM_STATUS)); |
2947 | seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n" , RREG32(0xE7C)); |
2948 | seq_printf(m, "CP_STAT 0x%08x\n" , RREG32(RADEON_CP_STAT)); |
2949 | for (i = 0; i < 64; i++) { |
2950 | WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100); |
2951 | reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2; |
2952 | WREG32(RADEON_RBBM_CMDFIFO_ADDR, i); |
2953 | value = RREG32(RADEON_RBBM_CMDFIFO_DATA); |
2954 | seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n" , i, reg, value); |
2955 | } |
2956 | return 0; |
2957 | } |
2958 | |
2959 | static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data) |
2960 | { |
2961 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
2962 | struct drm_device *dev = node->minor->dev; |
2963 | struct radeon_device *rdev = dev->dev_private; |
2964 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
2965 | uint32_t rdp, wdp; |
2966 | unsigned count, i, j; |
2967 | |
2968 | radeon_ring_free_size(rdev, ring); |
2969 | rdp = RREG32(RADEON_CP_RB_RPTR); |
2970 | wdp = RREG32(RADEON_CP_RB_WPTR); |
2971 | count = (rdp + ring->ring_size - wdp) & ring->ptr_mask; |
2972 | seq_printf(m, "CP_STAT 0x%08x\n" , RREG32(RADEON_CP_STAT)); |
2973 | seq_printf(m, "CP_RB_WPTR 0x%08x\n" , wdp); |
2974 | seq_printf(m, "CP_RB_RPTR 0x%08x\n" , rdp); |
2975 | seq_printf(m, "%u free dwords in ring\n" , ring->ring_free_dw); |
2976 | seq_printf(m, "%u dwords in ring\n" , count); |
2977 | if (ring->ready) { |
2978 | for (j = 0; j <= count; j++) { |
2979 | i = (rdp + j) & ring->ptr_mask; |
2980 | seq_printf(m, "r[%04d]=0x%08x\n" , i, ring->ring[i]); |
2981 | } |
2982 | } |
2983 | return 0; |
2984 | } |
2985 | |
2986 | |
2987 | static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data) |
2988 | { |
2989 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
2990 | struct drm_device *dev = node->minor->dev; |
2991 | struct radeon_device *rdev = dev->dev_private; |
2992 | uint32_t csq_stat, csq2_stat, tmp; |
2993 | unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr; |
2994 | unsigned i; |
2995 | |
2996 | seq_printf(m, "CP_STAT 0x%08x\n" , RREG32(RADEON_CP_STAT)); |
2997 | seq_printf(m, "CP_CSQ_MODE 0x%08x\n" , RREG32(RADEON_CP_CSQ_MODE)); |
2998 | csq_stat = RREG32(RADEON_CP_CSQ_STAT); |
2999 | csq2_stat = RREG32(RADEON_CP_CSQ2_STAT); |
3000 | r_rptr = (csq_stat >> 0) & 0x3ff; |
3001 | r_wptr = (csq_stat >> 10) & 0x3ff; |
3002 | ib1_rptr = (csq_stat >> 20) & 0x3ff; |
3003 | ib1_wptr = (csq2_stat >> 0) & 0x3ff; |
3004 | ib2_rptr = (csq2_stat >> 10) & 0x3ff; |
3005 | ib2_wptr = (csq2_stat >> 20) & 0x3ff; |
3006 | seq_printf(m, "CP_CSQ_STAT 0x%08x\n" , csq_stat); |
3007 | seq_printf(m, "CP_CSQ2_STAT 0x%08x\n" , csq2_stat); |
3008 | seq_printf(m, "Ring rptr %u\n" , r_rptr); |
3009 | seq_printf(m, "Ring wptr %u\n" , r_wptr); |
3010 | seq_printf(m, "Indirect1 rptr %u\n" , ib1_rptr); |
3011 | seq_printf(m, "Indirect1 wptr %u\n" , ib1_wptr); |
3012 | seq_printf(m, "Indirect2 rptr %u\n" , ib2_rptr); |
3013 | seq_printf(m, "Indirect2 wptr %u\n" , ib2_wptr); |
3014 | /* FIXME: 0, 128, 640 depends on fifo setup see cp_init_kms |
3015 | * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */ |
3016 | seq_printf(m, "Ring fifo:\n" ); |
3017 | for (i = 0; i < 256; i++) { |
3018 | WREG32(RADEON_CP_CSQ_ADDR, i << 2); |
3019 | tmp = RREG32(RADEON_CP_CSQ_DATA); |
3020 | seq_printf(m, "rfifo[%04d]=0x%08X\n" , i, tmp); |
3021 | } |
3022 | seq_printf(m, "Indirect1 fifo:\n" ); |
3023 | for (i = 256; i <= 512; i++) { |
3024 | WREG32(RADEON_CP_CSQ_ADDR, i << 2); |
3025 | tmp = RREG32(RADEON_CP_CSQ_DATA); |
3026 | seq_printf(m, "ib1fifo[%04d]=0x%08X\n" , i, tmp); |
3027 | } |
3028 | seq_printf(m, "Indirect2 fifo:\n" ); |
3029 | for (i = 640; i < ib1_wptr; i++) { |
3030 | WREG32(RADEON_CP_CSQ_ADDR, i << 2); |
3031 | tmp = RREG32(RADEON_CP_CSQ_DATA); |
3032 | seq_printf(m, "ib2fifo[%04d]=0x%08X\n" , i, tmp); |
3033 | } |
3034 | return 0; |
3035 | } |
3036 | |
3037 | static int r100_debugfs_mc_info(struct seq_file *m, void *data) |
3038 | { |
3039 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
3040 | struct drm_device *dev = node->minor->dev; |
3041 | struct radeon_device *rdev = dev->dev_private; |
3042 | uint32_t tmp; |
3043 | |
	tmp = RREG32(RADEON_CONFIG_MEMSIZE);
	seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
	tmp = RREG32(RADEON_MC_FB_LOCATION);
	seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
	tmp = RREG32(RADEON_BUS_CNTL);
	seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
	tmp = RREG32(RADEON_MC_AGP_LOCATION);
	seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
	tmp = RREG32(RADEON_AGP_BASE);
	seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
	tmp = RREG32(RADEON_HOST_PATH_CNTL);
	seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
	tmp = RREG32(0x01D0);
	seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
	tmp = RREG32(RADEON_AIC_LO_ADDR);
	seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
	tmp = RREG32(RADEON_AIC_HI_ADDR);
	seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
	tmp = RREG32(0x01E4);
	seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
3064 | return 0; |
3065 | } |
3066 | |
static struct drm_info_list r100_debugfs_rbbm_list[] = {
	{"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
};

static struct drm_info_list r100_debugfs_cp_list[] = {
	{"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
	{"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
};

static struct drm_info_list r100_debugfs_mc_info_list[] = {
	{"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
};
3079 | #endif |
3080 | |
3081 | int r100_debugfs_rbbm_init(struct radeon_device *rdev) |
3082 | { |
3083 | #if defined(CONFIG_DEBUG_FS) |
3084 | return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1); |
3085 | #else |
3086 | return 0; |
3087 | #endif |
3088 | } |
3089 | |
3090 | int r100_debugfs_cp_init(struct radeon_device *rdev) |
3091 | { |
3092 | #if defined(CONFIG_DEBUG_FS) |
3093 | return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2); |
3094 | #else |
3095 | return 0; |
3096 | #endif |
3097 | } |
3098 | |
3099 | int r100_debugfs_mc_info_init(struct radeon_device *rdev) |
3100 | { |
3101 | #if defined(CONFIG_DEBUG_FS) |
3102 | return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1); |
3103 | #else |
3104 | return 0; |
3105 | #endif |
3106 | } |
3107 | |
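/* Program one of the surface registers that apply tiling and CPU
 * byte swapping to a VRAM address range.  The pitch is folded into
 * the low bits of SURFACE*_INFO, divided by 16 on r100/r200 and by 8
 * on r300 and newer; e.g. a pitch of 2048 is encoded as 128 on r100.
 */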
3108 | int r100_set_surface_reg(struct radeon_device *rdev, int reg, |
3109 | uint32_t tiling_flags, uint32_t pitch, |
3110 | uint32_t offset, uint32_t obj_size) |
3111 | { |
3112 | int surf_index = reg * 16; |
3113 | int flags = 0; |
3114 | |
3115 | if (rdev->family <= CHIP_RS200) { |
3116 | if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) |
3117 | == (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) |
3118 | flags |= RADEON_SURF_TILE_COLOR_BOTH; |
3119 | if (tiling_flags & RADEON_TILING_MACRO) |
3120 | flags |= RADEON_SURF_TILE_COLOR_MACRO; |
3121 | /* setting pitch to 0 disables tiling */ |
3122 | if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) |
3123 | == 0) |
3124 | pitch = 0; |
3125 | } else if (rdev->family <= CHIP_RV280) { |
3126 | if (tiling_flags & (RADEON_TILING_MACRO)) |
3127 | flags |= R200_SURF_TILE_COLOR_MACRO; |
3128 | if (tiling_flags & RADEON_TILING_MICRO) |
3129 | flags |= R200_SURF_TILE_COLOR_MICRO; |
3130 | } else { |
3131 | if (tiling_flags & RADEON_TILING_MACRO) |
3132 | flags |= R300_SURF_TILE_MACRO; |
3133 | if (tiling_flags & RADEON_TILING_MICRO) |
3134 | flags |= R300_SURF_TILE_MICRO; |
3135 | } |
3136 | |
3137 | if (tiling_flags & RADEON_TILING_SWAP_16BIT) |
3138 | flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP; |
3139 | if (tiling_flags & RADEON_TILING_SWAP_32BIT) |
3140 | flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP; |
3141 | |
3142 | /* r100/r200 divide by 16 */ |
3143 | if (rdev->family < CHIP_R300) |
3144 | flags |= pitch / 16; |
3145 | else |
3146 | flags |= pitch / 8; |
3147 | |
3148 | |
3149 | DRM_DEBUG_KMS("writing surface %d %d %x %x\n" , reg, flags, offset, offset+obj_size-1); |
3150 | WREG32(RADEON_SURFACE0_INFO + surf_index, flags); |
3151 | WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset); |
3152 | WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1); |
3153 | return 0; |
3154 | } |
3155 | |
3156 | void r100_clear_surface_reg(struct radeon_device *rdev, int reg) |
3157 | { |
3158 | int surf_index = reg * 16; |
3159 | WREG32(RADEON_SURFACE0_INFO + surf_index, 0); |
3160 | } |
3161 | |
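/* Display watermark/bandwidth computation.  The arithmetic below uses
 * the fixed20_12 type (20.12 fixed point): dfixed_const(x) and
 * dfixed_init(x) store x << 12, and the *_half variants add 0.5.
 * Mode clocks arrive in kHz and are divided by 1000 first, so the
 * derived bandwidth figures are effectively MB/s.
 */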
3162 | void r100_bandwidth_update(struct radeon_device *rdev) |
3163 | { |
3164 | fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff; |
3165 | fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff; |
3166 | fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff; |
3167 | uint32_t temp, data, mem_trcd, mem_trp, mem_tras; |
3168 | fixed20_12 memtcas_ff[8] = { |
3169 | dfixed_init(1), |
3170 | dfixed_init(2), |
3171 | dfixed_init(3), |
3172 | dfixed_init(0), |
3173 | dfixed_init_half(1), |
3174 | dfixed_init_half(2), |
3175 | dfixed_init(0), |
3176 | }; |
3177 | fixed20_12 memtcas_rs480_ff[8] = { |
3178 | dfixed_init(0), |
3179 | dfixed_init(1), |
3180 | dfixed_init(2), |
3181 | dfixed_init(3), |
3182 | dfixed_init(0), |
3183 | dfixed_init_half(1), |
3184 | dfixed_init_half(2), |
3185 | dfixed_init_half(3), |
3186 | }; |
3187 | fixed20_12 memtcas2_ff[8] = { |
3188 | dfixed_init(0), |
3189 | dfixed_init(1), |
3190 | dfixed_init(2), |
3191 | dfixed_init(3), |
3192 | dfixed_init(4), |
3193 | dfixed_init(5), |
3194 | dfixed_init(6), |
3195 | dfixed_init(7), |
3196 | }; |
3197 | fixed20_12 memtrbs[8] = { |
3198 | dfixed_init(1), |
3199 | dfixed_init_half(1), |
3200 | dfixed_init(2), |
3201 | dfixed_init_half(2), |
3202 | dfixed_init(3), |
3203 | dfixed_init_half(3), |
3204 | dfixed_init(4), |
3205 | dfixed_init_half(4) |
3206 | }; |
3207 | fixed20_12 memtrbs_r4xx[8] = { |
3208 | dfixed_init(4), |
3209 | dfixed_init(5), |
3210 | dfixed_init(6), |
3211 | dfixed_init(7), |
3212 | dfixed_init(8), |
3213 | dfixed_init(9), |
3214 | dfixed_init(10), |
3215 | dfixed_init(11) |
3216 | }; |
3217 | fixed20_12 min_mem_eff; |
3218 | fixed20_12 mc_latency_sclk, mc_latency_mclk, k1; |
3219 | fixed20_12 cur_latency_mclk, cur_latency_sclk; |
3220 | fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate, |
3221 | disp_drain_rate2, read_return_rate; |
3222 | fixed20_12 time_disp1_drop_priority; |
3223 | int c; |
3224 | int cur_size = 16; /* in octawords */ |
3225 | int critical_point = 0, critical_point2; |
3226 | /* uint32_t read_return_rate, time_disp1_drop_priority; */ |
3227 | int stop_req, max_stop_req; |
3228 | struct drm_display_mode *mode1 = NULL; |
3229 | struct drm_display_mode *mode2 = NULL; |
3230 | uint32_t pixel_bytes1 = 0; |
3231 | uint32_t pixel_bytes2 = 0; |
3232 | |
3233 | crit_point_ff.full = 0; |
3234 | disp_drain_rate.full = 0; |
3235 | radeon_update_display_priority(rdev); |
3236 | |
3237 | if (rdev->mode_info.crtcs[0]->base.enabled) { |
3238 | mode1 = &rdev->mode_info.crtcs[0]->base.mode; |
3239 | pixel_bytes1 = rdev->mode_info.crtcs[0]->base.primary->fb->bits_per_pixel / 8; |
3240 | } |
3241 | if (!(rdev->flags & RADEON_SINGLE_CRTC)) { |
3242 | if (rdev->mode_info.crtcs[1]->base.enabled) { |
3243 | mode2 = &rdev->mode_info.crtcs[1]->base.mode; |
3244 | pixel_bytes2 = rdev->mode_info.crtcs[1]->base.primary->fb->bits_per_pixel / 8; |
3245 | } |
3246 | } |
3247 | |
3248 | min_mem_eff.full = dfixed_const_8(0); |
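	/* i.e. assume at best ~80% memory efficiency (0.8 in 20.12 fixed point) */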
	/* when display priority is forced high, program the MC init latency timers */
3250 | if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) { |
3251 | uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER); |
3252 | mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT); |
3253 | mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT); |
3254 | /* check crtc enables */ |
3255 | if (mode2) |
3256 | mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT); |
3257 | if (mode1) |
3258 | mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT); |
3259 | WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer); |
3260 | } |
3261 | |
3262 | /* |
	 * determine if there is enough bw for current mode
3264 | */ |
3265 | sclk_ff = rdev->pm.sclk; |
3266 | mclk_ff = rdev->pm.mclk; |
3267 | |
3268 | temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); |
3269 | temp_ff.full = dfixed_const(temp); |
3270 | mem_bw.full = dfixed_mul(mclk_ff, temp_ff); |
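	/* mem_bw = mclk * bus width in bytes, doubled for DDR */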
3271 | |
3272 | pix_clk.full = 0; |
3273 | pix_clk2.full = 0; |
3274 | peak_disp_bw.full = 0; |
3275 | if (mode1) { |
3276 | temp_ff.full = dfixed_const(1000); |
3277 | pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */ |
3278 | pix_clk.full = dfixed_div(pix_clk, temp_ff); |
3279 | temp_ff.full = dfixed_const(pixel_bytes1); |
3280 | peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff); |
3281 | } |
3282 | if (mode2) { |
3283 | temp_ff.full = dfixed_const(1000); |
3284 | pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */ |
3285 | pix_clk2.full = dfixed_div(pix_clk2, temp_ff); |
3286 | temp_ff.full = dfixed_const(pixel_bytes2); |
3287 | peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff); |
3288 | } |
3289 | |
3290 | mem_bw.full = dfixed_mul(mem_bw, min_mem_eff); |
3291 | if (peak_disp_bw.full >= mem_bw.full) { |
3292 | DRM_ERROR("You may not have enough display bandwidth for current mode\n" |
3293 | "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n" ); |
3294 | } |
3295 | |
	/* Get memory timing values from the MEM_TIMING_CNTL (EXT_MEM_CNTL) register. */
3297 | temp = RREG32(RADEON_MEM_TIMING_CNTL); |
3298 | if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */ |
3299 | mem_trcd = ((temp >> 2) & 0x3) + 1; |
3300 | mem_trp = ((temp & 0x3)) + 1; |
3301 | mem_tras = ((temp & 0x70) >> 4) + 1; |
3302 | } else if (rdev->family == CHIP_R300 || |
3303 | rdev->family == CHIP_R350) { /* r300, r350 */ |
3304 | mem_trcd = (temp & 0x7) + 1; |
3305 | mem_trp = ((temp >> 8) & 0x7) + 1; |
3306 | mem_tras = ((temp >> 11) & 0xf) + 4; |
3307 | } else if (rdev->family == CHIP_RV350 || |
3308 | rdev->family <= CHIP_RV380) { |
3309 | /* rv3x0 */ |
3310 | mem_trcd = (temp & 0x7) + 3; |
3311 | mem_trp = ((temp >> 8) & 0x7) + 3; |
3312 | mem_tras = ((temp >> 11) & 0xf) + 6; |
3313 | } else if (rdev->family == CHIP_R420 || |
3314 | rdev->family == CHIP_R423 || |
3315 | rdev->family == CHIP_RV410) { |
3316 | /* r4xx */ |
3317 | mem_trcd = (temp & 0xf) + 3; |
3318 | if (mem_trcd > 15) |
3319 | mem_trcd = 15; |
3320 | mem_trp = ((temp >> 8) & 0xf) + 3; |
3321 | if (mem_trp > 15) |
3322 | mem_trp = 15; |
3323 | mem_tras = ((temp >> 12) & 0x1f) + 6; |
3324 | if (mem_tras > 31) |
3325 | mem_tras = 31; |
3326 | } else { /* RV200, R200 */ |
3327 | mem_trcd = (temp & 0x7) + 1; |
3328 | mem_trp = ((temp >> 8) & 0x7) + 1; |
3329 | mem_tras = ((temp >> 12) & 0xf) + 4; |
3330 | } |
3331 | /* convert to FF */ |
3332 | trcd_ff.full = dfixed_const(mem_trcd); |
3333 | trp_ff.full = dfixed_const(mem_trp); |
3334 | tras_ff.full = dfixed_const(mem_tras); |
3335 | |
	/* Get values from the MEM_SDRAM_MODE_REG register...converting its contents. */
3337 | temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); |
3338 | data = (temp & (7 << 20)) >> 20; |
	if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->family == CHIP_RS480) /* don't think rs400 uses this table */
3341 | tcas_ff = memtcas_rs480_ff[data]; |
3342 | else |
3343 | tcas_ff = memtcas_ff[data]; |
3344 | } else |
3345 | tcas_ff = memtcas2_ff[data]; |
3346 | |
3347 | if (rdev->family == CHIP_RS400 || |
3348 | rdev->family == CHIP_RS480) { |
		/* extra cas latency stored in bits 23-25, 0-4 clocks */
3350 | data = (temp >> 23) & 0x7; |
3351 | if (data < 5) |
3352 | tcas_ff.full += dfixed_const(data); |
3353 | } |
3354 | |
3355 | if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { |
		/* on the R300, Tcas is included in Trbs */
3358 | temp = RREG32(RADEON_MEM_CNTL); |
3359 | data = (R300_MEM_NUM_CHANNELS_MASK & temp); |
3360 | if (data == 1) { |
3361 | if (R300_MEM_USE_CD_CH_ONLY & temp) { |
3362 | temp = RREG32(R300_MC_IND_INDEX); |
3363 | temp &= ~R300_MC_IND_ADDR_MASK; |
3364 | temp |= R300_MC_READ_CNTL_CD_mcind; |
3365 | WREG32(R300_MC_IND_INDEX, temp); |
3366 | temp = RREG32(R300_MC_IND_DATA); |
3367 | data = (R300_MEM_RBS_POSITION_C_MASK & temp); |
3368 | } else { |
3369 | temp = RREG32(R300_MC_READ_CNTL_AB); |
3370 | data = (R300_MEM_RBS_POSITION_A_MASK & temp); |
3371 | } |
3372 | } else { |
3373 | temp = RREG32(R300_MC_READ_CNTL_AB); |
3374 | data = (R300_MEM_RBS_POSITION_A_MASK & temp); |
3375 | } |
3376 | if (rdev->family == CHIP_RV410 || |
3377 | rdev->family == CHIP_R420 || |
3378 | rdev->family == CHIP_R423) |
3379 | trbs_ff = memtrbs_r4xx[data]; |
3380 | else |
3381 | trbs_ff = memtrbs[data]; |
3382 | tcas_ff.full += trbs_ff.full; |
3383 | } |
3384 | |
3385 | sclk_eff_ff.full = sclk_ff.full; |
3386 | |
3387 | if (rdev->flags & RADEON_IS_AGP) { |
3388 | fixed20_12 agpmode_ff; |
3389 | agpmode_ff.full = dfixed_const(radeon_agpmode); |
3390 | temp_ff.full = dfixed_const_666(16); |
3391 | sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff); |
3392 | } |
3393 | /* TODO PCIE lanes may affect this - agpmode == 16?? */ |
3394 | |
3395 | if (ASIC_IS_R300(rdev)) { |
3396 | sclk_delay_ff.full = dfixed_const(250); |
3397 | } else { |
		if ((rdev->family == CHIP_RV100) ||
		    (rdev->flags & RADEON_IS_IGP)) {
3400 | if (rdev->mc.vram_is_ddr) |
3401 | sclk_delay_ff.full = dfixed_const(41); |
3402 | else |
3403 | sclk_delay_ff.full = dfixed_const(33); |
3404 | } else { |
3405 | if (rdev->mc.vram_width == 128) |
3406 | sclk_delay_ff.full = dfixed_const(57); |
3407 | else |
3408 | sclk_delay_ff.full = dfixed_const(41); |
3409 | } |
3410 | } |
3411 | |
3412 | mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff); |
3413 | |
3414 | if (rdev->mc.vram_is_ddr) { |
3415 | if (rdev->mc.vram_width == 32) { |
3416 | k1.full = dfixed_const(40); |
3417 | c = 3; |
3418 | } else { |
3419 | k1.full = dfixed_const(20); |
3420 | c = 1; |
3421 | } |
3422 | } else { |
3423 | k1.full = dfixed_const(40); |
3424 | c = 3; |
3425 | } |
3426 | |
3427 | temp_ff.full = dfixed_const(2); |
3428 | mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff); |
3429 | temp_ff.full = dfixed_const(c); |
3430 | mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff); |
3431 | temp_ff.full = dfixed_const(4); |
3432 | mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff); |
3433 | mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff); |
3434 | mc_latency_mclk.full += k1.full; |
3435 | |
3436 | mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff); |
3437 | mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff); |
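	/* i.e. (2*tRCD + c*tCAS + 4*(tRAS + tRP) + k1) memory clocks of
	 * latency, plus 4 sclks of return latency (temp_ff is still 4 here)
	 */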
3438 | |
3439 | /* |
3440 | HW cursor time assuming worst case of full size colour cursor. |
3441 | */ |
3442 | temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); |
3443 | temp_ff.full += trcd_ff.full; |
3444 | if (temp_ff.full < tras_ff.full) |
3445 | temp_ff.full = tras_ff.full; |
3446 | cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff); |
3447 | |
3448 | temp_ff.full = dfixed_const(cur_size); |
3449 | cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff); |
3450 | /* |
3451 | Find the total latency for the display data. |
3452 | */ |
3453 | disp_latency_overhead.full = dfixed_const(8); |
3454 | disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff); |
3455 | mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; |
3456 | mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; |
3457 | |
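	/* display latency is bounded by the slower of the mclk and sclk paths */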
3458 | if (mc_latency_mclk.full > mc_latency_sclk.full) |
3459 | disp_latency.full = mc_latency_mclk.full; |
3460 | else |
3461 | disp_latency.full = mc_latency_sclk.full; |
3462 | |
3463 | /* setup Max GRPH_STOP_REQ default value */ |
3464 | if (ASIC_IS_RV100(rdev)) |
3465 | max_stop_req = 0x5c; |
3466 | else |
3467 | max_stop_req = 0x7c; |
3468 | |
3469 | if (mode1) { |
3470 | /* CRTC1 |
3471 | Set GRPH_BUFFER_CNTL register using h/w defined optimal values. |
3472 | GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ] |
3473 | */ |
3474 | stop_req = mode1->hdisplay * pixel_bytes1 / 16; |
3475 | |
3476 | if (stop_req > max_stop_req) |
3477 | stop_req = max_stop_req; |
3478 | |
3479 | /* |
3480 | Find the drain rate of the display buffer. |
3481 | */ |
3482 | temp_ff.full = dfixed_const((16/pixel_bytes1)); |
3483 | disp_drain_rate.full = dfixed_div(pix_clk, temp_ff); |
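		/* i.e. pix_clk * pixel_bytes1 / 16: FIFO drain rate in 128-bit words */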
3484 | |
3485 | /* |
3486 | Find the critical point of the display buffer. |
3487 | */ |
3488 | crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency); |
3489 | crit_point_ff.full += dfixed_const_half(0); |
3490 | |
3491 | critical_point = dfixed_trunc(crit_point_ff); |
3492 | |
3493 | if (rdev->disp_priority == 2) { |
3494 | critical_point = 0; |
3495 | } |
3496 | |
3497 | /* |
3498 | The critical point should never be above max_stop_req-4. Setting |
3499 | GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time. |
3500 | */ |
3501 | if (max_stop_req - critical_point < 4) |
3502 | critical_point = 0; |
3503 | |
3504 | if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) { |
			/* some R300 cards have problems with this set to 0 when CRTC2 is enabled */
3506 | critical_point = 0x10; |
3507 | } |
3508 | |
3509 | temp = RREG32(RADEON_GRPH_BUFFER_CNTL); |
3510 | temp &= ~(RADEON_GRPH_STOP_REQ_MASK); |
3511 | temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); |
3512 | temp &= ~(RADEON_GRPH_START_REQ_MASK); |
3513 | if ((rdev->family == CHIP_R350) && |
3514 | (stop_req > 0x15)) { |
3515 | stop_req -= 0x10; |
3516 | } |
3517 | temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); |
3518 | temp |= RADEON_GRPH_BUFFER_SIZE; |
3519 | temp &= ~(RADEON_GRPH_CRITICAL_CNTL | |
3520 | RADEON_GRPH_CRITICAL_AT_SOF | |
3521 | RADEON_GRPH_STOP_CNTL); |
3522 | /* |
3523 | Write the result into the register. |
3524 | */ |
3525 | WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) | |
3526 | (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT))); |
3527 | |
3528 | #if 0 |
3529 | if ((rdev->family == CHIP_RS400) || |
3530 | (rdev->family == CHIP_RS480)) { |
3531 | /* attempt to program RS400 disp regs correctly ??? */ |
3532 | temp = RREG32(RS400_DISP1_REG_CNTL); |
3533 | temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK | |
3534 | RS400_DISP1_STOP_REQ_LEVEL_MASK); |
3535 | WREG32(RS400_DISP1_REQ_CNTL1, (temp | |
3536 | (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) | |
3537 | (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); |
3538 | temp = RREG32(RS400_DMIF_MEM_CNTL1); |
3539 | temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK | |
3540 | RS400_DISP1_CRITICAL_POINT_STOP_MASK); |
3541 | WREG32(RS400_DMIF_MEM_CNTL1, (temp | |
3542 | (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) | |
3543 | (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT))); |
3544 | } |
3545 | #endif |
3546 | |
3547 | DRM_DEBUG_KMS("GRPH_BUFFER_CNTL from to %x\n" , |
3548 | /* (unsigned int)info->SavedReg->grph_buffer_cntl, */ |
3549 | (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL)); |
3550 | } |
3551 | |
3552 | if (mode2) { |
3553 | u32 grph2_cntl; |
3554 | stop_req = mode2->hdisplay * pixel_bytes2 / 16; |
3555 | |
3556 | if (stop_req > max_stop_req) |
3557 | stop_req = max_stop_req; |
3558 | |
3559 | /* |
3560 | Find the drain rate of the display buffer. |
3561 | */ |
3562 | temp_ff.full = dfixed_const((16/pixel_bytes2)); |
3563 | disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff); |
3564 | |
3565 | grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); |
3566 | grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); |
3567 | grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); |
3568 | grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK); |
3569 | if ((rdev->family == CHIP_R350) && |
3570 | (stop_req > 0x15)) { |
3571 | stop_req -= 0x10; |
3572 | } |
3573 | grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); |
3574 | grph2_cntl |= RADEON_GRPH_BUFFER_SIZE; |
3575 | grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL | |
3576 | RADEON_GRPH_CRITICAL_AT_SOF | |
3577 | RADEON_GRPH_STOP_CNTL); |
3578 | |
3579 | if ((rdev->family == CHIP_RS100) || |
3580 | (rdev->family == CHIP_RS200)) |
3581 | critical_point2 = 0; |
3582 | else { |
3583 | temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; |
3584 | temp_ff.full = dfixed_const(temp); |
3585 | temp_ff.full = dfixed_mul(mclk_ff, temp_ff); |
3586 | if (sclk_ff.full < temp_ff.full) |
3587 | temp_ff.full = sclk_ff.full; |
3588 | |
3589 | read_return_rate.full = temp_ff.full; |
3590 | |
3591 | if (mode1) { |
3592 | temp_ff.full = read_return_rate.full - disp_drain_rate.full; |
3593 | time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff); |
3594 | } else { |
3595 | time_disp1_drop_priority.full = 0; |
3596 | } |
3597 | crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full; |
3598 | crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2); |
3599 | crit_point_ff.full += dfixed_const_half(0); |
3600 | |
3601 | critical_point2 = dfixed_trunc(crit_point_ff); |
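			/* i.e. (time for disp1 to drop priority + 2 * disp_latency)
			 * times the disp2 drain rate, rounded via the half added above
			 */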
3602 | |
3603 | if (rdev->disp_priority == 2) { |
3604 | critical_point2 = 0; |
3605 | } |
3606 | |
3607 | if (max_stop_req - critical_point2 < 4) |
3608 | critical_point2 = 0; |
3609 | |
3610 | } |
3611 | |
3612 | if (critical_point2 == 0 && rdev->family == CHIP_R300) { |
			/* some R300 cards have problems with this set to 0 */
3614 | critical_point2 = 0x10; |
3615 | } |
3616 | |
3617 | WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) | |
3618 | (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT))); |
3619 | |
3620 | if ((rdev->family == CHIP_RS400) || |
3621 | (rdev->family == CHIP_RS480)) { |
3622 | #if 0 |
3623 | /* attempt to program RS400 disp2 regs correctly ??? */ |
3624 | temp = RREG32(RS400_DISP2_REQ_CNTL1); |
3625 | temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK | |
3626 | RS400_DISP2_STOP_REQ_LEVEL_MASK); |
3627 | WREG32(RS400_DISP2_REQ_CNTL1, (temp | |
3628 | (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) | |
3629 | (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); |
3630 | temp = RREG32(RS400_DISP2_REQ_CNTL2); |
3631 | temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK | |
3632 | RS400_DISP2_CRITICAL_POINT_STOP_MASK); |
3633 | WREG32(RS400_DISP2_REQ_CNTL2, (temp | |
3634 | (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) | |
3635 | (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT))); |
3636 | #endif |
3637 | WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC); |
3638 | WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000); |
3639 | WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC); |
3640 | WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC); |
3641 | } |
3642 | |
3643 | DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n" , |
3644 | (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); |
3645 | } |
3646 | } |
3647 | |
3648 | int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) |
3649 | { |
3650 | uint32_t scratch; |
3651 | uint32_t tmp = 0; |
3652 | unsigned i; |
3653 | int r; |
3654 | |
3655 | r = radeon_scratch_get(rdev, &scratch); |
3656 | if (r) { |
3657 | DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n" , r); |
3658 | return r; |
3659 | } |
3660 | WREG32(scratch, 0xCAFEDEAD); |
3661 | r = radeon_ring_lock(rdev, ring, 2); |
3662 | if (r) { |
3663 | DRM_ERROR("radeon: cp failed to lock ring (%d).\n" , r); |
3664 | radeon_scratch_free(rdev, scratch); |
3665 | return r; |
3666 | } |
3667 | radeon_ring_write(ring, PACKET0(scratch, 0)); |
3668 | radeon_ring_write(ring, 0xDEADBEEF); |
3669 | radeon_ring_unlock_commit(rdev, ring); |
3670 | for (i = 0; i < rdev->usec_timeout; i++) { |
3671 | tmp = RREG32(scratch); |
3672 | if (tmp == 0xDEADBEEF) { |
3673 | break; |
3674 | } |
3675 | DRM_UDELAY(1); |
3676 | } |
3677 | if (i < rdev->usec_timeout) { |
3678 | DRM_INFO("ring test succeeded in %d usecs\n" , i); |
3679 | } else { |
3680 | DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n" , |
3681 | scratch, tmp); |
3682 | r = -EINVAL; |
3683 | } |
3684 | radeon_scratch_free(rdev, scratch); |
3685 | return r; |
3686 | } |
3687 | |
3688 | void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) |
3689 | { |
3690 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
3691 | |
3692 | if (ring->rptr_save_reg) { |
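		/* account for this PACKET0 write (2 dwords) plus the
		 * 3-dword IB dispatch emitted below
		 */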
3693 | u32 next_rptr = ring->wptr + 2 + 3; |
3694 | radeon_ring_write(ring, PACKET0(ring->rptr_save_reg, 0)); |
3695 | radeon_ring_write(ring, next_rptr); |
3696 | } |
3697 | |
3698 | radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1)); |
3699 | radeon_ring_write(ring, ib->gpu_addr); |
3700 | radeon_ring_write(ring, ib->length_dw); |
3701 | } |
3702 | |
3703 | int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) |
3704 | { |
3705 | struct radeon_ib ib; |
3706 | uint32_t scratch; |
3707 | uint32_t tmp = 0; |
3708 | unsigned i; |
3709 | int r; |
3710 | |
3711 | r = radeon_scratch_get(rdev, &scratch); |
3712 | if (r) { |
3713 | DRM_ERROR("radeon: failed to get scratch reg (%d).\n" , r); |
3714 | return r; |
3715 | } |
3716 | WREG32(scratch, 0xCAFEDEAD); |
3717 | r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256); |
3718 | if (r) { |
3719 | DRM_ERROR("radeon: failed to get ib (%d).\n" , r); |
3720 | goto free_scratch; |
3721 | } |
3722 | ib.ptr[0] = PACKET0(scratch, 0); |
3723 | ib.ptr[1] = 0xDEADBEEF; |
3724 | ib.ptr[2] = PACKET2(0); |
3725 | ib.ptr[3] = PACKET2(0); |
3726 | ib.ptr[4] = PACKET2(0); |
3727 | ib.ptr[5] = PACKET2(0); |
3728 | ib.ptr[6] = PACKET2(0); |
3729 | ib.ptr[7] = PACKET2(0); |
3730 | ib.length_dw = 8; |
3731 | r = radeon_ib_schedule(rdev, &ib, NULL); |
3732 | if (r) { |
3733 | DRM_ERROR("radeon: failed to schedule ib (%d).\n" , r); |
3734 | goto free_ib; |
3735 | } |
3736 | r = radeon_fence_wait(ib.fence, false); |
3737 | if (r) { |
3738 | DRM_ERROR("radeon: fence wait failed (%d).\n" , r); |
3739 | goto free_ib; |
3740 | } |
3741 | for (i = 0; i < rdev->usec_timeout; i++) { |
3742 | tmp = RREG32(scratch); |
3743 | if (tmp == 0xDEADBEEF) { |
3744 | break; |
3745 | } |
3746 | DRM_UDELAY(1); |
3747 | } |
3748 | if (i < rdev->usec_timeout) { |
3749 | DRM_INFO("ib test succeeded in %u usecs\n" , i); |
3750 | } else { |
3751 | DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n" , |
3752 | scratch, tmp); |
3753 | r = -EINVAL; |
3754 | } |
3755 | free_ib: |
3756 | radeon_ib_free(rdev, &ib); |
3757 | free_scratch: |
3758 | radeon_scratch_free(rdev, scratch); |
3759 | return r; |
3760 | } |
3761 | |
3762 | void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save) |
3763 | { |
	/* Shut down the CP; we shouldn't need to, but better safe than
	 * sorry
	 */
3767 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; |
3768 | WREG32(R_000740_CP_CSQ_CNTL, 0); |
3769 | |
	/* Save a few CRTC registers */
3771 | save->GENMO_WT = RREG8(R_0003C2_GENMO_WT); |
3772 | save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL); |
3773 | save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL); |
3774 | save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET); |
3775 | if (!(rdev->flags & RADEON_SINGLE_CRTC)) { |
3776 | save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL); |
3777 | save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET); |
3778 | } |
3779 | |
3780 | /* Disable VGA aperture access */ |
3781 | WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT); |
3782 | /* Disable cursor, overlay, crtc */ |
3783 | WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1)); |
3784 | WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL | |
3785 | S_000054_CRTC_DISPLAY_DIS(1)); |
3786 | WREG32(R_000050_CRTC_GEN_CNTL, |
3787 | (C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) | |
3788 | S_000050_CRTC_DISP_REQ_EN_B(1)); |
3789 | WREG32(R_000420_OV0_SCALE_CNTL, |
3790 | C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL)); |
3791 | WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET); |
3792 | if (!(rdev->flags & RADEON_SINGLE_CRTC)) { |
3793 | WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET | |
3794 | S_000360_CUR2_LOCK(1)); |
3795 | WREG32(R_0003F8_CRTC2_GEN_CNTL, |
3796 | (C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) | |
3797 | S_0003F8_CRTC2_DISPLAY_DIS(1) | |
3798 | S_0003F8_CRTC2_DISP_REQ_EN_B(1)); |
3799 | WREG32(R_000360_CUR2_OFFSET, |
3800 | C_000360_CUR2_LOCK & save->CUR2_OFFSET); |
3801 | } |
3802 | } |
3803 | |
3804 | void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save) |
3805 | { |
3806 | /* Update base address for crtc */ |
3807 | WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start); |
3808 | if (!(rdev->flags & RADEON_SINGLE_CRTC)) { |
3809 | WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start); |
3810 | } |
3811 | /* Restore CRTC registers */ |
3812 | WREG8(R_0003C2_GENMO_WT, save->GENMO_WT); |
3813 | WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL); |
3814 | WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL); |
3815 | if (!(rdev->flags & RADEON_SINGLE_CRTC)) { |
3816 | WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL); |
3817 | } |
3818 | } |
3819 | |
3820 | void r100_vga_render_disable(struct radeon_device *rdev) |
3821 | { |
3822 | u32 tmp; |
3823 | |
3824 | tmp = RREG8(R_0003C2_GENMO_WT); |
3825 | WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp); |
3826 | } |
3827 | |
3828 | static void r100_debugfs(struct radeon_device *rdev) |
3829 | { |
3830 | int r; |
3831 | |
3832 | r = r100_debugfs_mc_info_init(rdev); |
3833 | if (r) |
3834 | dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n" ); |
3835 | } |
3836 | |
3837 | static void r100_mc_program(struct radeon_device *rdev) |
3838 | { |
3839 | struct r100_mc_save save; |
3840 | |
3841 | /* Stops all mc clients */ |
3842 | r100_mc_stop(rdev, &save); |
3843 | if (rdev->flags & RADEON_IS_AGP) { |
3844 | WREG32(R_00014C_MC_AGP_LOCATION, |
3845 | S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) | |
3846 | S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16)); |
3847 | WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base)); |
3848 | if (rdev->family > CHIP_RV200) |
3849 | WREG32(R_00015C_AGP_BASE_2, |
3850 | upper_32_bits(rdev->mc.agp_base) & 0xff); |
3851 | } else { |
3852 | WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF); |
3853 | WREG32(R_000170_AGP_BASE, 0); |
3854 | if (rdev->family > CHIP_RV200) |
3855 | WREG32(R_00015C_AGP_BASE_2, 0); |
3856 | } |
3857 | /* Wait for mc idle */ |
3858 | if (r100_mc_wait_for_idle(rdev)) |
3859 | dev_warn(rdev->dev, "Wait for MC idle timeout.\n" ); |
	/* Program MC; the aperture is limited to a 32-bit address space */
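	/* FB_START/FB_TOP fields are in 64KB units, hence the >> 16 */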
3861 | WREG32(R_000148_MC_FB_LOCATION, |
3862 | S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | |
3863 | S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); |
3864 | r100_mc_resume(rdev, &save); |
3865 | } |
3866 | |
3867 | static void r100_clock_startup(struct radeon_device *rdev) |
3868 | { |
3869 | u32 tmp; |
3870 | |
3871 | if (radeon_dynclks != -1 && radeon_dynclks) |
3872 | radeon_legacy_set_clock_gating(rdev, 1); |
	/* We need to force on some of the blocks */
3874 | tmp = RREG32_PLL(R_00000D_SCLK_CNTL); |
3875 | tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); |
3876 | if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280)) |
3877 | tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1); |
3878 | WREG32_PLL(R_00000D_SCLK_CNTL, tmp); |
3879 | } |
3880 | |
3881 | static int r100_startup(struct radeon_device *rdev) |
3882 | { |
3883 | int r; |
3884 | |
3885 | /* set common regs */ |
3886 | r100_set_common_regs(rdev); |
3887 | /* program mc */ |
3888 | r100_mc_program(rdev); |
3889 | /* Resume clock */ |
3890 | r100_clock_startup(rdev); |
3891 | /* Initialize GART (initialize after TTM so we can allocate |
3892 | * memory through TTM but finalize after TTM) */ |
3893 | r100_enable_bm(rdev); |
3894 | if (rdev->flags & RADEON_IS_PCI) { |
3895 | r = r100_pci_gart_enable(rdev); |
3896 | if (r) |
3897 | return r; |
3898 | } |
3899 | |
3900 | /* allocate wb buffer */ |
3901 | r = radeon_wb_init(rdev); |
3902 | if (r) |
3903 | return r; |
3904 | |
3905 | r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX); |
3906 | if (r) { |
3907 | dev_err(rdev->dev, "failed initializing CP fences (%d).\n" , r); |
3908 | return r; |
3909 | } |
3910 | |
3911 | /* Enable IRQ */ |
3912 | if (!rdev->irq.installed) { |
3913 | r = radeon_irq_kms_init(rdev); |
3914 | if (r) |
3915 | return r; |
3916 | } |
3917 | |
3918 | r100_irq_set(rdev); |
3919 | rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); |
3920 | /* 1M ring buffer */ |
3921 | r = r100_cp_init(rdev, 1024 * 1024); |
3922 | if (r) { |
3923 | dev_err(rdev->dev, "failed initializing CP (%d).\n" , r); |
3924 | return r; |
3925 | } |
3926 | |
3927 | r = radeon_ib_pool_init(rdev); |
3928 | if (r) { |
3929 | dev_err(rdev->dev, "IB initialization failed (%d).\n" , r); |
3930 | return r; |
3931 | } |
3932 | |
3933 | return 0; |
3934 | } |
3935 | |
3936 | int r100_resume(struct radeon_device *rdev) |
3937 | { |
3938 | int r; |
3939 | |
	/* Make sure GART is not working */
3941 | if (rdev->flags & RADEON_IS_PCI) |
3942 | r100_pci_gart_disable(rdev); |
3943 | /* Resume clock before doing reset */ |
3944 | r100_clock_startup(rdev); |
3945 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ |
3946 | if (radeon_asic_reset(rdev)) { |
3947 | dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n" , |
3948 | RREG32(R_000E40_RBBM_STATUS), |
3949 | RREG32(R_0007C0_CP_STAT)); |
3950 | } |
3951 | /* post */ |
3952 | radeon_combios_asic_init(rdev->ddev); |
3953 | /* Resume clock after posting */ |
3954 | r100_clock_startup(rdev); |
3955 | /* Initialize surface registers */ |
3956 | radeon_surface_init(rdev); |
3957 | |
3958 | rdev->accel_working = true; |
3959 | r = r100_startup(rdev); |
3960 | if (r) { |
3961 | rdev->accel_working = false; |
3962 | } |
3963 | return r; |
3964 | } |
3965 | |
3966 | int r100_suspend(struct radeon_device *rdev) |
3967 | { |
3968 | radeon_pm_suspend(rdev); |
3969 | r100_cp_disable(rdev); |
3970 | radeon_wb_disable(rdev); |
3971 | r100_irq_disable(rdev); |
3972 | if (rdev->flags & RADEON_IS_PCI) |
3973 | r100_pci_gart_disable(rdev); |
3974 | return 0; |
3975 | } |
3976 | |
3977 | void r100_fini(struct radeon_device *rdev) |
3978 | { |
3979 | radeon_pm_fini(rdev); |
3980 | r100_cp_fini(rdev); |
3981 | radeon_wb_fini(rdev); |
3982 | radeon_ib_pool_fini(rdev); |
3983 | radeon_gem_fini(rdev); |
3984 | if (rdev->flags & RADEON_IS_PCI) |
3985 | r100_pci_gart_fini(rdev); |
3986 | radeon_agp_fini(rdev); |
3987 | radeon_irq_kms_fini(rdev); |
3988 | radeon_fence_driver_fini(rdev); |
3989 | radeon_bo_fini(rdev); |
3990 | radeon_atombios_fini(rdev); |
3991 | kfree(rdev->bios); |
3992 | rdev->bios = NULL; |
3993 | } |
3994 | |
3995 | /* |
3996 | * Due to how kexec works, it can leave the hw fully initialised when it |
3997 | * boots the new kernel. However doing our init sequence with the CP and |
3998 | * WB stuff setup causes GPU hangs on the RN50 at least. So at startup |
3999 | * do some quick sanity checks and restore sane values to avoid this |
4000 | * problem. |
4001 | */ |
4002 | void r100_restore_sanity(struct radeon_device *rdev) |
4003 | { |
4004 | u32 tmp; |
4005 | |
4006 | tmp = RREG32(RADEON_CP_CSQ_CNTL); |
4007 | if (tmp) { |
4008 | WREG32(RADEON_CP_CSQ_CNTL, 0); |
4009 | } |
4010 | tmp = RREG32(RADEON_CP_RB_CNTL); |
4011 | if (tmp) { |
4012 | WREG32(RADEON_CP_RB_CNTL, 0); |
4013 | } |
4014 | tmp = RREG32(RADEON_SCRATCH_UMSK); |
4015 | if (tmp) { |
4016 | WREG32(RADEON_SCRATCH_UMSK, 0); |
4017 | } |
4018 | } |
4019 | |
4020 | int r100_init(struct radeon_device *rdev) |
4021 | { |
4022 | int r; |
4023 | |
4024 | /* Register debugfs file specific to this group of asics */ |
4025 | r100_debugfs(rdev); |
4026 | /* Disable VGA */ |
4027 | r100_vga_render_disable(rdev); |
4028 | /* Initialize scratch registers */ |
4029 | radeon_scratch_init(rdev); |
4030 | /* Initialize surface registers */ |
4031 | radeon_surface_init(rdev); |
4032 | /* sanity check some register to avoid hangs like after kexec */ |
4033 | r100_restore_sanity(rdev); |
	/* TODO: disabling VGA needs to use the VGA request lines */
4035 | /* BIOS*/ |
4036 | if (!radeon_get_bios(rdev)) { |
4037 | if (ASIC_IS_AVIVO(rdev)) |
4038 | return -EINVAL; |
4039 | } |
4040 | if (rdev->is_atom_bios) { |
4041 | dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n" ); |
4042 | return -EINVAL; |
4043 | } else { |
4044 | r = radeon_combios_init(rdev); |
4045 | if (r) |
4046 | return r; |
4047 | } |
4048 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ |
4049 | if (radeon_asic_reset(rdev)) { |
4050 | dev_warn(rdev->dev, |
4051 | "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n" , |
4052 | RREG32(R_000E40_RBBM_STATUS), |
4053 | RREG32(R_0007C0_CP_STAT)); |
4054 | } |
4055 | /* check if cards are posted or not */ |
	if (!radeon_boot_test_post_card(rdev))
4057 | return -EINVAL; |
4058 | /* Set asic errata */ |
4059 | r100_errata(rdev); |
4060 | /* Initialize clocks */ |
4061 | radeon_get_clock_info(rdev->ddev); |
4062 | /* initialize AGP */ |
4063 | if (rdev->flags & RADEON_IS_AGP) { |
4064 | r = radeon_agp_init(rdev); |
4065 | if (r) { |
4066 | radeon_agp_disable(rdev); |
4067 | } |
4068 | } |
4069 | /* initialize VRAM */ |
4070 | r100_mc_init(rdev); |
4071 | /* Fence driver */ |
4072 | r = radeon_fence_driver_init(rdev); |
4073 | if (r) |
4074 | return r; |
4075 | /* Memory manager */ |
4076 | r = radeon_bo_init(rdev); |
4077 | if (r) |
4078 | return r; |
4079 | if (rdev->flags & RADEON_IS_PCI) { |
4080 | r = r100_pci_gart_init(rdev); |
4081 | if (r) |
4082 | return r; |
4083 | } |
4084 | r100_set_safe_registers(rdev); |
4085 | |
4086 | /* Initialize power management */ |
4087 | radeon_pm_init(rdev); |
4088 | |
4089 | rdev->accel_working = true; |
4090 | r = r100_startup(rdev); |
4091 | if (r) { |
		/* Something went wrong with the accel init; stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
4094 | r100_cp_fini(rdev); |
4095 | radeon_wb_fini(rdev); |
4096 | radeon_ib_pool_fini(rdev); |
4097 | radeon_irq_kms_fini(rdev); |
4098 | if (rdev->flags & RADEON_IS_PCI) |
4099 | r100_pci_gart_fini(rdev); |
4100 | rdev->accel_working = false; |
4101 | } |
4102 | return 0; |
4103 | } |
4104 | |
4105 | uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg, |
4106 | bool always_indirect) |
4107 | { |
4108 | #ifdef __NetBSD__ |
4109 | if (reg < rdev->rmmio_size && !always_indirect) { |
4110 | return bus_space_read_4(rdev->rmmio_bst, rdev->rmmio_bsh, reg); |
4111 | } else { |
4112 | unsigned long flags; |
4113 | uint32_t ret; |
4114 | |
4115 | spin_lock_irqsave(&rdev->mmio_idx_lock, flags); |
4116 | bus_space_write_4(rdev->rmmio_bst, rdev->rmmio_bsh, |
4117 | RADEON_MM_INDEX, reg); |
4118 | ret = bus_space_read_4(rdev->rmmio_bst, rdev->rmmio_bsh, |
4119 | RADEON_MM_DATA); |
4120 | spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags); |
4121 | |
4122 | return ret; |
4123 | } |
4124 | #else |
4125 | if (reg < rdev->rmmio_size && !always_indirect) |
4126 | return readl(((void __iomem *)rdev->rmmio) + reg); |
4127 | else { |
4128 | unsigned long flags; |
4129 | uint32_t ret; |
4130 | |
4131 | spin_lock_irqsave(&rdev->mmio_idx_lock, flags); |
4132 | writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); |
4133 | ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); |
4134 | spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags); |
4135 | |
4136 | return ret; |
4137 | } |
4138 | #endif |
4139 | } |
4140 | |
4141 | void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v, |
4142 | bool always_indirect) |
4143 | { |
4144 | #ifdef __NetBSD__ |
4145 | if (reg < rdev->rmmio_size && !always_indirect) { |
4146 | bus_space_write_4(rdev->rmmio_bst, rdev->rmmio_bsh, reg, v); |
4147 | } else { |
4148 | unsigned long flags; |
4149 | |
4150 | spin_lock_irqsave(&rdev->mmio_idx_lock, flags); |
4151 | bus_space_write_4(rdev->rmmio_bst, rdev->rmmio_bsh, |
4152 | RADEON_MM_INDEX, reg); |
4153 | bus_space_write_4(rdev->rmmio_bst, rdev->rmmio_bsh, |
4154 | RADEON_MM_DATA, v); |
4155 | spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags); |
4156 | } |
4157 | #else |
4158 | if (reg < rdev->rmmio_size && !always_indirect) |
4159 | writel(v, ((void __iomem *)rdev->rmmio) + reg); |
4160 | else { |
4161 | unsigned long flags; |
4162 | |
4163 | spin_lock_irqsave(&rdev->mmio_idx_lock, flags); |
4164 | writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); |
4165 | writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); |
4166 | spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags); |
4167 | } |
4168 | #endif |
4169 | } |
4170 | |
4171 | u32 r100_io_rreg(struct radeon_device *rdev, u32 reg) |
4172 | { |
4173 | #ifdef __NetBSD__ |
4174 | if (reg < rdev->rio_mem_size) { |
4175 | return bus_space_read_4(rdev->rio_mem_bst, rdev->rio_mem_bsh, |
4176 | reg); |
4177 | } else { |
4178 | bus_space_write_4(rdev->rio_mem_bst, rdev->rio_mem_bsh, |
4179 | RADEON_MM_INDEX, reg); |
4180 | return bus_space_read_4(rdev->rio_mem_bst, rdev->rio_mem_bsh, |
4181 | RADEON_MM_DATA); |
4182 | } |
4183 | #else |
4184 | if (reg < rdev->rio_mem_size) |
4185 | return ioread32(rdev->rio_mem + reg); |
4186 | else { |
4187 | iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX); |
4188 | return ioread32(rdev->rio_mem + RADEON_MM_DATA); |
4189 | } |
4190 | #endif |
4191 | } |
4192 | |
4193 | void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v) |
4194 | { |
4195 | #ifdef __NetBSD__ |
4196 | if (reg < rdev->rio_mem_size) { |
4197 | bus_space_write_4(rdev->rio_mem_bst, rdev->rio_mem_bsh, reg, |
4198 | v); |
4199 | } else { |
4200 | bus_space_write_4(rdev->rio_mem_bst, rdev->rio_mem_bsh, |
4201 | RADEON_MM_INDEX, reg); |
4202 | bus_space_write_4(rdev->rio_mem_bst, rdev->rio_mem_bsh, |
4203 | RADEON_MM_DATA, v); |
4204 | } |
4205 | #else |
4206 | if (reg < rdev->rio_mem_size) |
4207 | iowrite32(v, rdev->rio_mem + reg); |
4208 | else { |
4209 | iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX); |
4210 | iowrite32(v, rdev->rio_mem + RADEON_MM_DATA); |
4211 | } |
4212 | #endif |
4213 | } |
4214 | |