1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/bitops.h>
29#include <linux/slab.h>
30#include <linux/seq_file.h>
31#include <linux/firmware.h>
32#include <linux/module.h>
33#include <drm/drmP.h>
34#include <drm/radeon_drm.h>
35#include "radeon.h"
36#include "radeon_asic.h"
37#include "radeon_mode.h"
38#include "r600d.h"
39#include "atom.h"
40#include "avivod.h"
41#include "radeon_ucode.h"
42
43/* Firmware Names */
44MODULE_FIRMWARE("radeon/R600_pfp.bin");
45MODULE_FIRMWARE("radeon/R600_me.bin");
46MODULE_FIRMWARE("radeon/RV610_pfp.bin");
47MODULE_FIRMWARE("radeon/RV610_me.bin");
48MODULE_FIRMWARE("radeon/RV630_pfp.bin");
49MODULE_FIRMWARE("radeon/RV630_me.bin");
50MODULE_FIRMWARE("radeon/RV620_pfp.bin");
51MODULE_FIRMWARE("radeon/RV620_me.bin");
52MODULE_FIRMWARE("radeon/RV635_pfp.bin");
53MODULE_FIRMWARE("radeon/RV635_me.bin");
54MODULE_FIRMWARE("radeon/RV670_pfp.bin");
55MODULE_FIRMWARE("radeon/RV670_me.bin");
56MODULE_FIRMWARE("radeon/RS780_pfp.bin");
57MODULE_FIRMWARE("radeon/RS780_me.bin");
58MODULE_FIRMWARE("radeon/RV770_pfp.bin");
59MODULE_FIRMWARE("radeon/RV770_me.bin");
60MODULE_FIRMWARE("radeon/RV770_smc.bin");
61MODULE_FIRMWARE("radeon/RV730_pfp.bin");
62MODULE_FIRMWARE("radeon/RV730_me.bin");
63MODULE_FIRMWARE("radeon/RV730_smc.bin");
64MODULE_FIRMWARE("radeon/RV740_smc.bin");
65MODULE_FIRMWARE("radeon/RV710_pfp.bin");
66MODULE_FIRMWARE("radeon/RV710_me.bin");
67MODULE_FIRMWARE("radeon/RV710_smc.bin");
68MODULE_FIRMWARE("radeon/R600_rlc.bin");
69MODULE_FIRMWARE("radeon/R700_rlc.bin");
70MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
71MODULE_FIRMWARE("radeon/CEDAR_me.bin");
72MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
73MODULE_FIRMWARE("radeon/CEDAR_smc.bin");
74MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
75MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
76MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
77MODULE_FIRMWARE("radeon/REDWOOD_smc.bin");
78MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
79MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
80MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
81MODULE_FIRMWARE("radeon/JUNIPER_smc.bin");
82MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
83MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
84MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
85MODULE_FIRMWARE("radeon/CYPRESS_smc.bin");
86MODULE_FIRMWARE("radeon/PALM_pfp.bin");
87MODULE_FIRMWARE("radeon/PALM_me.bin");
88MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
89MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
90MODULE_FIRMWARE("radeon/SUMO_me.bin");
91MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
92MODULE_FIRMWARE("radeon/SUMO2_me.bin");
93
94static const u32 crtc_offsets[2] =
95{
96 0,
97 AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
98};
99
100int r600_debugfs_mc_info_init(struct radeon_device *rdev);
101
102/* r600,rv610,rv630,rv620,rv635,rv670 */
103int r600_mc_wait_for_idle(struct radeon_device *rdev);
104static void r600_gpu_init(struct radeon_device *rdev);
105void r600_fini(struct radeon_device *rdev);
106void r600_irq_disable(struct radeon_device *rdev);
107static void r600_pcie_gen2_enable(struct radeon_device *rdev);
108extern int evergreen_rlc_resume(struct radeon_device *rdev);
109extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev);
110
111/**
112 * r600_get_xclk - get the xclk
113 *
114 * @rdev: radeon_device pointer
115 *
116 * Returns the reference clock used by the gfx engine
117 * (r6xx, IGPs, APUs).
118 */
119u32 r600_get_xclk(struct radeon_device *rdev)
120{
121 return rdev->clock.spll.reference_freq;
122}
123
124int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
125{
126 return 0;
127}
128
129void dce3_program_fmt(struct drm_encoder *encoder)
130{
131 struct drm_device *dev = encoder->dev;
132 struct radeon_device *rdev = dev->dev_private;
133 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
134 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
135 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
136 int bpc = 0;
137 u32 tmp = 0;
138 enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
139
140 if (connector) {
141 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
142 bpc = radeon_get_monitor_bpc(connector);
143 dither = radeon_connector->dither;
144 }
145
146 /* LVDS FMT is set up by atom */
147 if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
148 return;
149
150 /* not needed for analog */
151 if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
152 (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
153 return;
154
155 if (bpc == 0)
156 return;
157
158 switch (bpc) {
159 case 6:
160 if (dither == RADEON_FMT_DITHER_ENABLE)
161 /* XXX sort out optimal dither settings */
162 tmp |= FMT_SPATIAL_DITHER_EN;
163 else
164 tmp |= FMT_TRUNCATE_EN;
165 break;
166 case 8:
167 if (dither == RADEON_FMT_DITHER_ENABLE)
168 /* XXX sort out optimal dither settings */
169 tmp |= (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
170 else
171 tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
172 break;
173 case 10:
174 default:
175 /* not needed */
176 break;
177 }
178
179 WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
180}
181
182/* get temperature in millidegrees */
183int rv6xx_get_temp(struct radeon_device *rdev)
184{
185 u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
186 ASIC_T_SHIFT;
187 int actual_temp = temp & 0xff;
188
189 if (temp & 0x100)
190 actual_temp -= 256;
191
192 return actual_temp * 1000;
193}
194
195void r600_pm_get_dynpm_state(struct radeon_device *rdev)
196{
197 int i;
198
199 rdev->pm.dynpm_can_upclock = true;
200 rdev->pm.dynpm_can_downclock = true;
201
202 /* power state array is low to high, default is first */
203 if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
204 int min_power_state_index = 0;
205
206 if (rdev->pm.num_power_states > 2)
207 min_power_state_index = 1;
208
209 switch (rdev->pm.dynpm_planned_action) {
210 case DYNPM_ACTION_MINIMUM:
211 rdev->pm.requested_power_state_index = min_power_state_index;
212 rdev->pm.requested_clock_mode_index = 0;
213 rdev->pm.dynpm_can_downclock = false;
214 break;
215 case DYNPM_ACTION_DOWNCLOCK:
216 if (rdev->pm.current_power_state_index == min_power_state_index) {
217 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
218 rdev->pm.dynpm_can_downclock = false;
219 } else {
220 if (rdev->pm.active_crtc_count > 1) {
221 for (i = 0; i < rdev->pm.num_power_states; i++) {
222 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
223 continue;
224 else if (i >= rdev->pm.current_power_state_index) {
225 rdev->pm.requested_power_state_index =
226 rdev->pm.current_power_state_index;
227 break;
228 } else {
229 rdev->pm.requested_power_state_index = i;
230 break;
231 }
232 }
233 } else {
234 if (rdev->pm.current_power_state_index == 0)
235 rdev->pm.requested_power_state_index =
236 rdev->pm.num_power_states - 1;
237 else
238 rdev->pm.requested_power_state_index =
239 rdev->pm.current_power_state_index - 1;
240 }
241 }
242 rdev->pm.requested_clock_mode_index = 0;
243 /* don't use the power state if crtcs are active and no display flag is set */
244 if ((rdev->pm.active_crtc_count > 0) &&
245 (rdev->pm.power_state[rdev->pm.requested_power_state_index].
246 clock_info[rdev->pm.requested_clock_mode_index].flags &
247 RADEON_PM_MODE_NO_DISPLAY)) {
248 rdev->pm.requested_power_state_index++;
249 }
250 break;
251 case DYNPM_ACTION_UPCLOCK:
252 if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
253 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
254 rdev->pm.dynpm_can_upclock = false;
255 } else {
256 if (rdev->pm.active_crtc_count > 1) {
257 for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
258 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
259 continue;
260 else if (i <= rdev->pm.current_power_state_index) {
261 rdev->pm.requested_power_state_index =
262 rdev->pm.current_power_state_index;
263 break;
264 } else {
265 rdev->pm.requested_power_state_index = i;
266 break;
267 }
268 }
269 } else
270 rdev->pm.requested_power_state_index =
271 rdev->pm.current_power_state_index + 1;
272 }
273 rdev->pm.requested_clock_mode_index = 0;
274 break;
275 case DYNPM_ACTION_DEFAULT:
276 rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
277 rdev->pm.requested_clock_mode_index = 0;
278 rdev->pm.dynpm_can_upclock = false;
279 break;
280 case DYNPM_ACTION_NONE:
281 default:
282 DRM_ERROR("Requested mode for not defined action\n");
283 return;
284 }
285 } else {
286 /* XXX select a power state based on AC/DC, single/dualhead, etc. */
287 /* for now just select the first power state and switch between clock modes */
288 /* power state array is low to high, default is first (0) */
289 if (rdev->pm.active_crtc_count > 1) {
290 rdev->pm.requested_power_state_index = -1;
291 /* start at 1 as we don't want the default mode */
292 for (i = 1; i < rdev->pm.num_power_states; i++) {
293 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
294 continue;
295 else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
296 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
297 rdev->pm.requested_power_state_index = i;
298 break;
299 }
300 }
301 /* if nothing selected, grab the default state. */
302 if (rdev->pm.requested_power_state_index == -1)
303 rdev->pm.requested_power_state_index = 0;
304 } else
305 rdev->pm.requested_power_state_index = 1;
306
307 switch (rdev->pm.dynpm_planned_action) {
308 case DYNPM_ACTION_MINIMUM:
309 rdev->pm.requested_clock_mode_index = 0;
310 rdev->pm.dynpm_can_downclock = false;
311 break;
312 case DYNPM_ACTION_DOWNCLOCK:
313 if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
314 if (rdev->pm.current_clock_mode_index == 0) {
315 rdev->pm.requested_clock_mode_index = 0;
316 rdev->pm.dynpm_can_downclock = false;
317 } else
318 rdev->pm.requested_clock_mode_index =
319 rdev->pm.current_clock_mode_index - 1;
320 } else {
321 rdev->pm.requested_clock_mode_index = 0;
322 rdev->pm.dynpm_can_downclock = false;
323 }
324 /* don't use the power state if crtcs are active and no display flag is set */
325 if ((rdev->pm.active_crtc_count > 0) &&
326 (rdev->pm.power_state[rdev->pm.requested_power_state_index].
327 clock_info[rdev->pm.requested_clock_mode_index].flags &
328 RADEON_PM_MODE_NO_DISPLAY)) {
329 rdev->pm.requested_clock_mode_index++;
330 }
331 break;
332 case DYNPM_ACTION_UPCLOCK:
333 if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
334 if (rdev->pm.current_clock_mode_index ==
335 (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
336 rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
337 rdev->pm.dynpm_can_upclock = false;
338 } else
339 rdev->pm.requested_clock_mode_index =
340 rdev->pm.current_clock_mode_index + 1;
341 } else {
342 rdev->pm.requested_clock_mode_index =
343 rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
344 rdev->pm.dynpm_can_upclock = false;
345 }
346 break;
347 case DYNPM_ACTION_DEFAULT:
348 rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
349 rdev->pm.requested_clock_mode_index = 0;
350 rdev->pm.dynpm_can_upclock = false;
351 break;
352 case DYNPM_ACTION_NONE:
353 default:
354 DRM_ERROR("Requested mode for not defined action\n");
355 return;
356 }
357 }
358
359 DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
360 rdev->pm.power_state[rdev->pm.requested_power_state_index].
361 clock_info[rdev->pm.requested_clock_mode_index].sclk,
362 rdev->pm.power_state[rdev->pm.requested_power_state_index].
363 clock_info[rdev->pm.requested_clock_mode_index].mclk,
364 rdev->pm.power_state[rdev->pm.requested_power_state_index].
365 pcie_lanes);
366}
367
368void rs780_pm_init_profile(struct radeon_device *rdev)
369{
370 if (rdev->pm.num_power_states == 2) {
371 /* default */
372 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
373 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
374 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
375 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
376 /* low sh */
377 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
378 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
379 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
380 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
381 /* mid sh */
382 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
383 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
384 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
385 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
386 /* high sh */
387 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
388 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
389 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
390 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
391 /* low mh */
392 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
393 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
394 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
395 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
396 /* mid mh */
397 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
398 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
399 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
400 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
401 /* high mh */
402 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
403 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
404 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
405 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
406 } else if (rdev->pm.num_power_states == 3) {
407 /* default */
408 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
409 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
410 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
411 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
412 /* low sh */
413 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
414 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
415 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
416 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
417 /* mid sh */
418 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
419 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
420 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
421 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
422 /* high sh */
423 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
424 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
425 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
426 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
427 /* low mh */
428 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
429 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
430 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
431 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
432 /* mid mh */
433 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
434 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
435 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
436 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
437 /* high mh */
438 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
439 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
440 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
441 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
442 } else {
443 /* default */
444 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
445 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
446 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
447 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
448 /* low sh */
449 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
450 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
451 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
452 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
453 /* mid sh */
454 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
455 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
456 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
457 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
458 /* high sh */
459 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
460 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
461 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
462 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
463 /* low mh */
464 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
465 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
466 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
467 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
468 /* mid mh */
469 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
470 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
471 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
472 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
473 /* high mh */
474 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
475 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
476 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
477 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
478 }
479}
480
481void r600_pm_init_profile(struct radeon_device *rdev)
482{
483 int idx;
484
485 if (rdev->family == CHIP_R600) {
486 /* XXX */
487 /* default */
488 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
489 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
490 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
491 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
492 /* low sh */
493 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
494 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
495 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
496 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
497 /* mid sh */
498 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
499 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
500 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
501 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
502 /* high sh */
503 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
504 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
505 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
506 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
507 /* low mh */
508 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
509 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
510 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
511 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
512 /* mid mh */
513 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
514 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
515 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
516 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
517 /* high mh */
518 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
519 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
520 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
521 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
522 } else {
523 if (rdev->pm.num_power_states < 4) {
524 /* default */
525 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
526 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
527 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
528 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
529 /* low sh */
530 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
531 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
532 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
533 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
534 /* mid sh */
535 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
536 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
537 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
538 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
539 /* high sh */
540 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
541 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
542 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
543 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
544 /* low mh */
545 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
546 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
547 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
548 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
549 /* low mh */
550 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
551 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
552 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
553 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
554 /* high mh */
555 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
556 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
557 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
558 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
559 } else {
560 /* default */
561 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
562 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
563 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
564 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
565 /* low sh */
566 if (rdev->flags & RADEON_IS_MOBILITY)
567 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
568 else
569 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
570 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
571 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
572 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
573 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
574 /* mid sh */
575 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
576 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
577 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
578 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
579 /* high sh */
580 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
581 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
582 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
583 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
584 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
585 /* low mh */
586 if (rdev->flags & RADEON_IS_MOBILITY)
587 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
588 else
589 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
590 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
591 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
592 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
593 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
594 /* mid mh */
595 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
596 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
597 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
598 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
599 /* high mh */
600 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
601 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
602 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
603 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
604 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
605 }
606 }
607}
608
609void r600_pm_misc(struct radeon_device *rdev)
610{
611 int req_ps_idx = rdev->pm.requested_power_state_index;
612 int req_cm_idx = rdev->pm.requested_clock_mode_index;
613 struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
614 struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
615
616 if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
617 /* 0xff01 is a flag rather then an actual voltage */
618 if (voltage->voltage == 0xff01)
619 return;
620 if (voltage->voltage != rdev->pm.current_vddc) {
621 radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
622 rdev->pm.current_vddc = voltage->voltage;
623 DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
624 }
625 }
626}
627
628bool r600_gui_idle(struct radeon_device *rdev)
629{
630 if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
631 return false;
632 else
633 return true;
634}
635
636/* hpd for digital panel detect/disconnect */
637bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
638{
639 bool connected = false;
640
641 if (ASIC_IS_DCE3(rdev)) {
642 switch (hpd) {
643 case RADEON_HPD_1:
644 if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
645 connected = true;
646 break;
647 case RADEON_HPD_2:
648 if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
649 connected = true;
650 break;
651 case RADEON_HPD_3:
652 if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
653 connected = true;
654 break;
655 case RADEON_HPD_4:
656 if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
657 connected = true;
658 break;
659 /* DCE 3.2 */
660 case RADEON_HPD_5:
661 if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
662 connected = true;
663 break;
664 case RADEON_HPD_6:
665 if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
666 connected = true;
667 break;
668 default:
669 break;
670 }
671 } else {
672 switch (hpd) {
673 case RADEON_HPD_1:
674 if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
675 connected = true;
676 break;
677 case RADEON_HPD_2:
678 if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
679 connected = true;
680 break;
681 case RADEON_HPD_3:
682 if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
683 connected = true;
684 break;
685 default:
686 break;
687 }
688 }
689 return connected;
690}
691
692void r600_hpd_set_polarity(struct radeon_device *rdev,
693 enum radeon_hpd_id hpd)
694{
695 u32 tmp;
696 bool connected = r600_hpd_sense(rdev, hpd);
697
698 if (ASIC_IS_DCE3(rdev)) {
699 switch (hpd) {
700 case RADEON_HPD_1:
701 tmp = RREG32(DC_HPD1_INT_CONTROL);
702 if (connected)
703 tmp &= ~DC_HPDx_INT_POLARITY;
704 else
705 tmp |= DC_HPDx_INT_POLARITY;
706 WREG32(DC_HPD1_INT_CONTROL, tmp);
707 break;
708 case RADEON_HPD_2:
709 tmp = RREG32(DC_HPD2_INT_CONTROL);
710 if (connected)
711 tmp &= ~DC_HPDx_INT_POLARITY;
712 else
713 tmp |= DC_HPDx_INT_POLARITY;
714 WREG32(DC_HPD2_INT_CONTROL, tmp);
715 break;
716 case RADEON_HPD_3:
717 tmp = RREG32(DC_HPD3_INT_CONTROL);
718 if (connected)
719 tmp &= ~DC_HPDx_INT_POLARITY;
720 else
721 tmp |= DC_HPDx_INT_POLARITY;
722 WREG32(DC_HPD3_INT_CONTROL, tmp);
723 break;
724 case RADEON_HPD_4:
725 tmp = RREG32(DC_HPD4_INT_CONTROL);
726 if (connected)
727 tmp &= ~DC_HPDx_INT_POLARITY;
728 else
729 tmp |= DC_HPDx_INT_POLARITY;
730 WREG32(DC_HPD4_INT_CONTROL, tmp);
731 break;
732 case RADEON_HPD_5:
733 tmp = RREG32(DC_HPD5_INT_CONTROL);
734 if (connected)
735 tmp &= ~DC_HPDx_INT_POLARITY;
736 else
737 tmp |= DC_HPDx_INT_POLARITY;
738 WREG32(DC_HPD5_INT_CONTROL, tmp);
739 break;
740 /* DCE 3.2 */
741 case RADEON_HPD_6:
742 tmp = RREG32(DC_HPD6_INT_CONTROL);
743 if (connected)
744 tmp &= ~DC_HPDx_INT_POLARITY;
745 else
746 tmp |= DC_HPDx_INT_POLARITY;
747 WREG32(DC_HPD6_INT_CONTROL, tmp);
748 break;
749 default:
750 break;
751 }
752 } else {
753 switch (hpd) {
754 case RADEON_HPD_1:
755 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
756 if (connected)
757 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
758 else
759 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
760 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
761 break;
762 case RADEON_HPD_2:
763 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
764 if (connected)
765 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
766 else
767 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
768 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
769 break;
770 case RADEON_HPD_3:
771 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
772 if (connected)
773 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
774 else
775 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
776 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
777 break;
778 default:
779 break;
780 }
781 }
782}
783
784void r600_hpd_init(struct radeon_device *rdev)
785{
786 struct drm_device *dev = rdev->ddev;
787 struct drm_connector *connector;
788 unsigned enable = 0;
789
790 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
791 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
792
793 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
794 connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
795 /* don't try to enable hpd on eDP or LVDS avoid breaking the
796 * aux dp channel on imac and help (but not completely fix)
797 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
798 */
799 continue;
800 }
801 if (ASIC_IS_DCE3(rdev)) {
802 u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
803 if (ASIC_IS_DCE32(rdev))
804 tmp |= DC_HPDx_EN;
805
806 switch (radeon_connector->hpd.hpd) {
807 case RADEON_HPD_1:
808 WREG32(DC_HPD1_CONTROL, tmp);
809 break;
810 case RADEON_HPD_2:
811 WREG32(DC_HPD2_CONTROL, tmp);
812 break;
813 case RADEON_HPD_3:
814 WREG32(DC_HPD3_CONTROL, tmp);
815 break;
816 case RADEON_HPD_4:
817 WREG32(DC_HPD4_CONTROL, tmp);
818 break;
819 /* DCE 3.2 */
820 case RADEON_HPD_5:
821 WREG32(DC_HPD5_CONTROL, tmp);
822 break;
823 case RADEON_HPD_6:
824 WREG32(DC_HPD6_CONTROL, tmp);
825 break;
826 default:
827 break;
828 }
829 } else {
830 switch (radeon_connector->hpd.hpd) {
831 case RADEON_HPD_1:
832 WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
833 break;
834 case RADEON_HPD_2:
835 WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
836 break;
837 case RADEON_HPD_3:
838 WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
839 break;
840 default:
841 break;
842 }
843 }
844 enable |= 1 << radeon_connector->hpd.hpd;
845 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
846 }
847 radeon_irq_kms_enable_hpd(rdev, enable);
848}
849
850void r600_hpd_fini(struct radeon_device *rdev)
851{
852 struct drm_device *dev = rdev->ddev;
853 struct drm_connector *connector;
854 unsigned disable = 0;
855
856 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
857 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
858 if (ASIC_IS_DCE3(rdev)) {
859 switch (radeon_connector->hpd.hpd) {
860 case RADEON_HPD_1:
861 WREG32(DC_HPD1_CONTROL, 0);
862 break;
863 case RADEON_HPD_2:
864 WREG32(DC_HPD2_CONTROL, 0);
865 break;
866 case RADEON_HPD_3:
867 WREG32(DC_HPD3_CONTROL, 0);
868 break;
869 case RADEON_HPD_4:
870 WREG32(DC_HPD4_CONTROL, 0);
871 break;
872 /* DCE 3.2 */
873 case RADEON_HPD_5:
874 WREG32(DC_HPD5_CONTROL, 0);
875 break;
876 case RADEON_HPD_6:
877 WREG32(DC_HPD6_CONTROL, 0);
878 break;
879 default:
880 break;
881 }
882 } else {
883 switch (radeon_connector->hpd.hpd) {
884 case RADEON_HPD_1:
885 WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
886 break;
887 case RADEON_HPD_2:
888 WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
889 break;
890 case RADEON_HPD_3:
891 WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
892 break;
893 default:
894 break;
895 }
896 }
897 disable |= 1 << radeon_connector->hpd.hpd;
898 }
899 radeon_irq_kms_disable_hpd(rdev, disable);
900}
901
902#ifdef __NetBSD__
903/*
904 * XXX Can't use bus_space here because this is all mapped through the
905 * radeon_bo abstraction. Can't assume we're x86 because this is
906 * AMD/ATI Radeon, not Intel.
907 */
908
909# define __iomem volatile
910# define readl fake_readl
911
912static inline uint32_t
913fake_readl(const void __iomem *ptr)
914{
915 uint32_t v;
916
917 v = *(const uint32_t __iomem *)ptr;
918 membar_consumer();
919
920 return v;
921}
922#endif
923
924/*
925 * R600 PCIE GART
926 */
927void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
928{
929 unsigned i;
930 u32 tmp;
931
932 /* flush hdp cache so updates hit vram */
933 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
934 !(rdev->flags & RADEON_IS_AGP)) {
935 void __iomem *ptr = rdev->gart.ptr;
936
937 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
938 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
939 * This seems to cause problems on some AGP cards. Just use the old
940 * method for them.
941 */
942 WREG32(HDP_DEBUG1, 0);
943 (void)readl(ptr);
944 } else
945 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
946
947 WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
948 WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
949 WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
950 for (i = 0; i < rdev->usec_timeout; i++) {
951 /* read MC_STATUS */
952 tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
953 tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
954 if (tmp == 2) {
955 printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
956 return;
957 }
958 if (tmp) {
959 return;
960 }
961 udelay(1);
962 }
963}
964
965#ifdef __NetBSD__
966# undef __iomem
967# undef readl
968#endif
969
970int r600_pcie_gart_init(struct radeon_device *rdev)
971{
972 int r;
973
974 if (rdev->gart.robj) {
975 WARN(1, "R600 PCIE GART already initialized\n");
976 return 0;
977 }
978 /* Initialize common gart structure */
979 r = radeon_gart_init(rdev);
980 if (r)
981 return r;
982 rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
983 return radeon_gart_table_vram_alloc(rdev);
984}
985
986static int r600_pcie_gart_enable(struct radeon_device *rdev)
987{
988 u32 tmp;
989 int r, i;
990
991 if (rdev->gart.robj == NULL) {
992 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
993 return -EINVAL;
994 }
995 r = radeon_gart_table_vram_pin(rdev);
996 if (r)
997 return r;
998 radeon_gart_restore(rdev);
999
1000 /* Setup L2 cache */
1001 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
1002 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
1003 EFFECTIVE_L2_QUEUE_SIZE(7));
1004 WREG32(VM_L2_CNTL2, 0);
1005 WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
1006 /* Setup TLB control */
1007 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
1008 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
1009 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
1010 ENABLE_WAIT_L2_QUERY;
1011 WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
1012 WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
1013 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
1014 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
1015 WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
1016 WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
1017 WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
1018 WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
1019 WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
1020 WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
1021 WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
1022 WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
1023 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
1024 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
1025 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
1026 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
1027 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
1028 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
1029 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
1030 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
1031 (u32)(rdev->dummy_page.addr >> 12));
1032 for (i = 1; i < 7; i++)
1033 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
1034
1035 r600_pcie_gart_tlb_flush(rdev);
1036 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
1037 (unsigned)(rdev->mc.gtt_size >> 20),
1038 (unsigned long long)rdev->gart.table_addr);
1039 rdev->gart.ready = true;
1040 return 0;
1041}
1042
1043static void r600_pcie_gart_disable(struct radeon_device *rdev)
1044{
1045 u32 tmp;
1046 int i;
1047
1048 /* Disable all tables */
1049 for (i = 0; i < 7; i++)
1050 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
1051
1052 /* Disable L2 cache */
1053 WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
1054 EFFECTIVE_L2_QUEUE_SIZE(7));
1055 WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
1056 /* Setup L1 TLB control */
1057 tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
1058 ENABLE_WAIT_L2_QUERY;
1059 WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
1060 WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
1061 WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
1062 WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
1063 WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
1064 WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
1065 WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
1066 WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
1067 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
1068 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
1069 WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
1070 WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
1071 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
1072 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
1073 radeon_gart_table_vram_unpin(rdev);
1074}
1075
1076static void r600_pcie_gart_fini(struct radeon_device *rdev)
1077{
1078 radeon_gart_fini(rdev);
1079 r600_pcie_gart_disable(rdev);
1080 radeon_gart_table_vram_free(rdev);
1081}
1082
1083static void r600_agp_enable(struct radeon_device *rdev)
1084{
1085 u32 tmp;
1086 int i;
1087
1088 /* Setup L2 cache */
1089 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
1090 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
1091 EFFECTIVE_L2_QUEUE_SIZE(7));
1092 WREG32(VM_L2_CNTL2, 0);
1093 WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
1094 /* Setup TLB control */
1095 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
1096 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
1097 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
1098 ENABLE_WAIT_L2_QUERY;
1099 WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
1100 WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
1101 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
1102 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
1103 WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
1104 WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
1105 WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
1106 WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
1107 WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
1108 WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
1109 WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
1110 WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
1111 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
1112 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
1113 for (i = 0; i < 7; i++)
1114 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
1115}
1116
1117int r600_mc_wait_for_idle(struct radeon_device *rdev)
1118{
1119 unsigned i;
1120 u32 tmp;
1121
1122 for (i = 0; i < rdev->usec_timeout; i++) {
1123 /* read MC_STATUS */
1124 tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
1125 if (!tmp)
1126 return 0;
1127 udelay(1);
1128 }
1129 return -1;
1130}
1131
1132uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
1133{
1134 unsigned long flags;
1135 uint32_t r;
1136
1137 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
1138 WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
1139 r = RREG32(R_0028FC_MC_DATA);
1140 WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
1141 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
1142 return r;
1143}
1144
1145void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
1146{
1147 unsigned long flags;
1148
1149 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
1150 WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
1151 S_0028F8_MC_IND_WR_EN(1));
1152 WREG32(R_0028FC_MC_DATA, v);
1153 WREG32(R_0028F8_MC_INDEX, 0x7F);
1154 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
1155}
1156
1157static void r600_mc_program(struct radeon_device *rdev)
1158{
1159 struct rv515_mc_save save;
1160 u32 tmp;
1161 int i, j;
1162
1163 /* Initialize HDP */
1164 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1165 WREG32((0x2c14 + j), 0x00000000);
1166 WREG32((0x2c18 + j), 0x00000000);
1167 WREG32((0x2c1c + j), 0x00000000);
1168 WREG32((0x2c20 + j), 0x00000000);
1169 WREG32((0x2c24 + j), 0x00000000);
1170 }
1171 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
1172
1173 rv515_mc_stop(rdev, &save);
1174 if (r600_mc_wait_for_idle(rdev)) {
1175 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
1176 }
1177 /* Lockout access through VGA aperture (doesn't exist before R600) */
1178 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
1179 /* Update configuration */
1180 if (rdev->flags & RADEON_IS_AGP) {
1181 if (rdev->mc.vram_start < rdev->mc.gtt_start) {
1182 /* VRAM before AGP */
1183 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1184 rdev->mc.vram_start >> 12);
1185 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1186 rdev->mc.gtt_end >> 12);
1187 } else {
1188 /* VRAM after AGP */
1189 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1190 rdev->mc.gtt_start >> 12);
1191 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1192 rdev->mc.vram_end >> 12);
1193 }
1194 } else {
1195 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
1196 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
1197 }
1198 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
1199 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
1200 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
1201 WREG32(MC_VM_FB_LOCATION, tmp);
1202 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
1203 WREG32(HDP_NONSURFACE_INFO, (2 << 7));
1204 WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
1205 if (rdev->flags & RADEON_IS_AGP) {
1206 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
1207 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
1208 WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
1209 } else {
1210 WREG32(MC_VM_AGP_BASE, 0);
1211 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
1212 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
1213 }
1214 if (r600_mc_wait_for_idle(rdev)) {
1215 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
1216 }
1217 rv515_mc_resume(rdev, &save);
1218 /* we need to own VRAM, so turn off the VGA renderer here
1219 * to stop it overwriting our objects */
1220 rv515_vga_render_disable(rdev);
1221}
1222
1223/**
1224 * r600_vram_gtt_location - try to find VRAM & GTT location
1225 * @rdev: radeon device structure holding all necessary informations
1226 * @mc: memory controller structure holding memory informations
1227 *
1228 * Function will place try to place VRAM at same place as in CPU (PCI)
1229 * address space as some GPU seems to have issue when we reprogram at
1230 * different address space.
1231 *
1232 * If there is not enough space to fit the unvisible VRAM after the
1233 * aperture then we limit the VRAM size to the aperture.
1234 *
1235 * If we are using AGP then place VRAM adjacent to AGP aperture are we need
1236 * them to be in one from GPU point of view so that we can program GPU to
1237 * catch access outside them (weird GPU policy see ??).
1238 *
1239 * This function will never fails, worst case are limiting VRAM or GTT.
1240 *
1241 * Note: GTT start, end, size should be initialized before calling this
1242 * function on AGP platform.
1243 */
1244static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
1245{
1246 u64 size_bf, size_af;
1247
1248 if (mc->mc_vram_size > 0xE0000000) {
1249 /* leave room for at least 512M GTT */
1250 dev_warn(rdev->dev, "limiting VRAM\n");
1251 mc->real_vram_size = 0xE0000000;
1252 mc->mc_vram_size = 0xE0000000;
1253 }
1254 if (rdev->flags & RADEON_IS_AGP) {
1255 size_bf = mc->gtt_start;
1256 size_af = mc->mc_mask - mc->gtt_end;
1257 if (size_bf > size_af) {
1258 if (mc->mc_vram_size > size_bf) {
1259 dev_warn(rdev->dev, "limiting VRAM\n");
1260 mc->real_vram_size = size_bf;
1261 mc->mc_vram_size = size_bf;
1262 }
1263 mc->vram_start = mc->gtt_start - mc->mc_vram_size;
1264 } else {
1265 if (mc->mc_vram_size > size_af) {
1266 dev_warn(rdev->dev, "limiting VRAM\n");
1267 mc->real_vram_size = size_af;
1268 mc->mc_vram_size = size_af;
1269 }
1270 mc->vram_start = mc->gtt_end + 1;
1271 }
1272 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
1273 dev_info(rdev->dev, "VRAM: %"PRIu64"M 0x%08"PRIX64" - 0x%08"PRIX64" (%"PRIu64"M used)\n",
1274 mc->mc_vram_size >> 20, mc->vram_start,
1275 mc->vram_end, mc->real_vram_size >> 20);
1276 } else {
1277 u64 base = 0;
1278 if (rdev->flags & RADEON_IS_IGP) {
1279 base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
1280 base <<= 24;
1281 }
1282 radeon_vram_location(rdev, &rdev->mc, base);
1283 rdev->mc.gtt_base_align = 0;
1284 radeon_gtt_location(rdev, mc);
1285 }
1286}
1287
1288static int r600_mc_init(struct radeon_device *rdev)
1289{
1290 u32 tmp;
1291 int chansize, numchan;
1292 uint32_t h_addr, l_addr;
1293 unsigned long long k8_addr;
1294
1295 /* Get VRAM informations */
1296 rdev->mc.vram_is_ddr = true;
1297 tmp = RREG32(RAMCFG);
1298 if (tmp & CHANSIZE_OVERRIDE) {
1299 chansize = 16;
1300 } else if (tmp & CHANSIZE_MASK) {
1301 chansize = 64;
1302 } else {
1303 chansize = 32;
1304 }
1305 tmp = RREG32(CHMAP);
1306 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
1307 case 0:
1308 default:
1309 numchan = 1;
1310 break;
1311 case 1:
1312 numchan = 2;
1313 break;
1314 case 2:
1315 numchan = 4;
1316 break;
1317 case 3:
1318 numchan = 8;
1319 break;
1320 }
1321 rdev->mc.vram_width = numchan * chansize;
1322 /* Could aper size report 0 ? */
1323 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
1324 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
1325 /* Setup GPU memory space */
1326 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
1327 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
1328 rdev->mc.visible_vram_size = rdev->mc.aper_size;
1329 r600_vram_gtt_location(rdev, &rdev->mc);
1330
1331 if (rdev->flags & RADEON_IS_IGP) {
1332 rs690_pm_info(rdev);
1333 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
1334
1335 if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
1336 /* Use K8 direct mapping for fast fb access. */
1337 rdev->fastfb_working = false;
1338 h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL));
1339 l_addr = RREG32_MC(R_000011_K8_FB_LOCATION);
1340 k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
1341#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
1342 if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
1343#endif
1344 {
1345 /* FastFB shall be used with UMA memory. Here it is simply disabled when sideport
1346 * memory is present.
1347 */
1348 if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
1349 DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
1350 (unsigned long long)rdev->mc.aper_base, k8_addr);
1351 rdev->mc.aper_base = (resource_size_t)k8_addr;
1352 rdev->fastfb_working = true;
1353 }
1354 }
1355 }
1356 }
1357
1358 radeon_update_bandwidth_info(rdev);
1359 return 0;
1360}
1361
1362int r600_vram_scratch_init(struct radeon_device *rdev)
1363{
1364 int r;
1365
1366 if (rdev->vram_scratch.robj == NULL) {
1367 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
1368 PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
1369 NULL, &rdev->vram_scratch.robj);
1370 if (r) {
1371 return r;
1372 }
1373 }
1374
1375 r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
1376 if (unlikely(r != 0))
1377 return r;
1378 r = radeon_bo_pin(rdev->vram_scratch.robj,
1379 RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
1380 if (r) {
1381 radeon_bo_unreserve(rdev->vram_scratch.robj);
1382 return r;
1383 }
1384 r = radeon_bo_kmap(rdev->vram_scratch.robj,
1385 (void **)__UNVOLATILE(&rdev->vram_scratch.ptr));
1386 if (r)
1387 radeon_bo_unpin(rdev->vram_scratch.robj);
1388 radeon_bo_unreserve(rdev->vram_scratch.robj);
1389
1390 return r;
1391}
1392
1393void r600_vram_scratch_fini(struct radeon_device *rdev)
1394{
1395 int r;
1396
1397 if (rdev->vram_scratch.robj == NULL) {
1398 return;
1399 }
1400 r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
1401 if (likely(r == 0)) {
1402 radeon_bo_kunmap(rdev->vram_scratch.robj);
1403 radeon_bo_unpin(rdev->vram_scratch.robj);
1404 radeon_bo_unreserve(rdev->vram_scratch.robj);
1405 }
1406 radeon_bo_unref(&rdev->vram_scratch.robj);
1407}
1408
1409void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung)
1410{
1411 u32 tmp = RREG32(R600_BIOS_3_SCRATCH);
1412
1413 if (hung)
1414 tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
1415 else
1416 tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
1417
1418 WREG32(R600_BIOS_3_SCRATCH, tmp);
1419}
1420
1421static void r600_print_gpu_status_regs(struct radeon_device *rdev)
1422{
1423 dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
1424 RREG32(R_008010_GRBM_STATUS));
1425 dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
1426 RREG32(R_008014_GRBM_STATUS2));
1427 dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
1428 RREG32(R_000E50_SRBM_STATUS));
1429 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
1430 RREG32(CP_STALLED_STAT1));
1431 dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
1432 RREG32(CP_STALLED_STAT2));
1433 dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
1434 RREG32(CP_BUSY_STAT));
1435 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
1436 RREG32(CP_STAT));
1437 dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
1438 RREG32(DMA_STATUS_REG));
1439}
1440
1441static bool r600_is_display_hung(struct radeon_device *rdev)
1442{
1443 u32 crtc_hung = 0;
1444 u32 crtc_status[2];
1445 u32 i, j, tmp;
1446
1447 for (i = 0; i < rdev->num_crtc; i++) {
1448 if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN) {
1449 crtc_status[i] = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
1450 crtc_hung |= (1 << i);
1451 }
1452 }
1453
1454 for (j = 0; j < 10; j++) {
1455 for (i = 0; i < rdev->num_crtc; i++) {
1456 if (crtc_hung & (1 << i)) {
1457 tmp = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
1458 if (tmp != crtc_status[i])
1459 crtc_hung &= ~(1 << i);
1460 }
1461 }
1462 if (crtc_hung == 0)
1463 return false;
1464 udelay(100);
1465 }
1466
1467 return true;
1468}
1469
1470u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
1471{
1472 u32 reset_mask = 0;
1473 u32 tmp;
1474
1475 /* GRBM_STATUS */
1476 tmp = RREG32(R_008010_GRBM_STATUS);
1477 if (rdev->family >= CHIP_RV770) {
1478 if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
1479 G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
1480 G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
1481 G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
1482 G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
1483 reset_mask |= RADEON_RESET_GFX;
1484 } else {
1485 if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
1486 G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
1487 G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
1488 G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
1489 G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
1490 reset_mask |= RADEON_RESET_GFX;
1491 }
1492
1493 if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) |
1494 G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp))
1495 reset_mask |= RADEON_RESET_CP;
1496
1497 if (G_008010_GRBM_EE_BUSY(tmp))
1498 reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
1499
1500 /* DMA_STATUS_REG */
1501 tmp = RREG32(DMA_STATUS_REG);
1502 if (!(tmp & DMA_IDLE))
1503 reset_mask |= RADEON_RESET_DMA;
1504
1505 /* SRBM_STATUS */
1506 tmp = RREG32(R_000E50_SRBM_STATUS);
1507 if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp))
1508 reset_mask |= RADEON_RESET_RLC;
1509
1510 if (G_000E50_IH_BUSY(tmp))
1511 reset_mask |= RADEON_RESET_IH;
1512
1513 if (G_000E50_SEM_BUSY(tmp))
1514 reset_mask |= RADEON_RESET_SEM;
1515
1516 if (G_000E50_GRBM_RQ_PENDING(tmp))
1517 reset_mask |= RADEON_RESET_GRBM;
1518
1519 if (G_000E50_VMC_BUSY(tmp))
1520 reset_mask |= RADEON_RESET_VMC;
1521
1522 if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) |
1523 G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) |
1524 G_000E50_MCDW_BUSY(tmp))
1525 reset_mask |= RADEON_RESET_MC;
1526
1527 if (r600_is_display_hung(rdev))
1528 reset_mask |= RADEON_RESET_DISPLAY;
1529
1530 /* Skip MC reset as it's mostly likely not hung, just busy */
1531 if (reset_mask & RADEON_RESET_MC) {
1532 DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
1533 reset_mask &= ~RADEON_RESET_MC;
1534 }
1535
1536 return reset_mask;
1537}
1538
1539static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
1540{
1541 struct rv515_mc_save save;
1542 u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
1543 u32 tmp;
1544
1545 if (reset_mask == 0)
1546 return;
1547
1548 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
1549
1550 r600_print_gpu_status_regs(rdev);
1551
1552 /* Disable CP parsing/prefetching */
1553 if (rdev->family >= CHIP_RV770)
1554 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
1555 else
1556 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1557
1558 /* disable the RLC */
1559 WREG32(RLC_CNTL, 0);
1560
1561 if (reset_mask & RADEON_RESET_DMA) {
1562 /* Disable DMA */
1563 tmp = RREG32(DMA_RB_CNTL);
1564 tmp &= ~DMA_RB_ENABLE;
1565 WREG32(DMA_RB_CNTL, tmp);
1566 }
1567
1568 mdelay(50);
1569
1570 rv515_mc_stop(rdev, &save);
1571 if (r600_mc_wait_for_idle(rdev)) {
1572 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
1573 }
1574
1575 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
1576 if (rdev->family >= CHIP_RV770)
1577 grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) |
1578 S_008020_SOFT_RESET_CB(1) |
1579 S_008020_SOFT_RESET_PA(1) |
1580 S_008020_SOFT_RESET_SC(1) |
1581 S_008020_SOFT_RESET_SPI(1) |
1582 S_008020_SOFT_RESET_SX(1) |
1583 S_008020_SOFT_RESET_SH(1) |
1584 S_008020_SOFT_RESET_TC(1) |
1585 S_008020_SOFT_RESET_TA(1) |
1586 S_008020_SOFT_RESET_VC(1) |
1587 S_008020_SOFT_RESET_VGT(1);
1588 else
1589 grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) |
1590 S_008020_SOFT_RESET_DB(1) |
1591 S_008020_SOFT_RESET_CB(1) |
1592 S_008020_SOFT_RESET_PA(1) |
1593 S_008020_SOFT_RESET_SC(1) |
1594 S_008020_SOFT_RESET_SMX(1) |
1595 S_008020_SOFT_RESET_SPI(1) |
1596 S_008020_SOFT_RESET_SX(1) |
1597 S_008020_SOFT_RESET_SH(1) |
1598 S_008020_SOFT_RESET_TC(1) |
1599 S_008020_SOFT_RESET_TA(1) |
1600 S_008020_SOFT_RESET_VC(1) |
1601 S_008020_SOFT_RESET_VGT(1);
1602 }
1603
1604 if (reset_mask & RADEON_RESET_CP) {
1605 grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) |
1606 S_008020_SOFT_RESET_VGT(1);
1607
1608 srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
1609 }
1610
1611 if (reset_mask & RADEON_RESET_DMA) {
1612 if (rdev->family >= CHIP_RV770)
1613 srbm_soft_reset |= RV770_SOFT_RESET_DMA;
1614 else
1615 srbm_soft_reset |= SOFT_RESET_DMA;
1616 }
1617
1618 if (reset_mask & RADEON_RESET_RLC)
1619 srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1);
1620
1621 if (reset_mask & RADEON_RESET_SEM)
1622 srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1);
1623
1624 if (reset_mask & RADEON_RESET_IH)
1625 srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1);
1626
1627 if (reset_mask & RADEON_RESET_GRBM)
1628 srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
1629
1630 if (!(rdev->flags & RADEON_IS_IGP)) {
1631 if (reset_mask & RADEON_RESET_MC)
1632 srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1);
1633 }
1634
1635 if (reset_mask & RADEON_RESET_VMC)
1636 srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1);
1637
1638 if (grbm_soft_reset) {
1639 tmp = RREG32(R_008020_GRBM_SOFT_RESET);
1640 tmp |= grbm_soft_reset;
1641 dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
1642 WREG32(R_008020_GRBM_SOFT_RESET, tmp);
1643 tmp = RREG32(R_008020_GRBM_SOFT_RESET);
1644
1645 udelay(50);
1646
1647 tmp &= ~grbm_soft_reset;
1648 WREG32(R_008020_GRBM_SOFT_RESET, tmp);
1649 tmp = RREG32(R_008020_GRBM_SOFT_RESET);
1650 }
1651
1652 if (srbm_soft_reset) {
1653 tmp = RREG32(SRBM_SOFT_RESET);
1654 tmp |= srbm_soft_reset;
1655 dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1656 WREG32(SRBM_SOFT_RESET, tmp);
1657 tmp = RREG32(SRBM_SOFT_RESET);
1658
1659 udelay(50);
1660
1661 tmp &= ~srbm_soft_reset;
1662 WREG32(SRBM_SOFT_RESET, tmp);
1663 tmp = RREG32(SRBM_SOFT_RESET);
1664 }
1665
1666 /* Wait a little for things to settle down */
1667 mdelay(1);
1668
1669 rv515_mc_resume(rdev, &save);
1670 udelay(50);
1671
1672 r600_print_gpu_status_regs(rdev);
1673}
1674
1675static void r600_gpu_pci_config_reset(struct radeon_device *rdev)
1676{
1677 struct rv515_mc_save save;
1678 u32 tmp, i;
1679
1680 dev_info(rdev->dev, "GPU pci config reset\n");
1681
1682 /* disable dpm? */
1683
1684 /* Disable CP parsing/prefetching */
1685 if (rdev->family >= CHIP_RV770)
1686 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
1687 else
1688 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1689
1690 /* disable the RLC */
1691 WREG32(RLC_CNTL, 0);
1692
1693 /* Disable DMA */
1694 tmp = RREG32(DMA_RB_CNTL);
1695 tmp &= ~DMA_RB_ENABLE;
1696 WREG32(DMA_RB_CNTL, tmp);
1697
1698 mdelay(50);
1699
1700 /* set mclk/sclk to bypass */
1701 if (rdev->family >= CHIP_RV770)
1702 rv770_set_clk_bypass_mode(rdev);
1703 /* disable BM */
1704 pci_clear_master(rdev->pdev);
1705 /* disable mem access */
1706 rv515_mc_stop(rdev, &save);
1707 if (r600_mc_wait_for_idle(rdev)) {
1708 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
1709 }
1710
1711 /* BIF reset workaround. Not sure if this is needed on 6xx */
1712 tmp = RREG32(BUS_CNTL);
1713 tmp |= VGA_COHE_SPEC_TIMER_DIS;
1714 WREG32(BUS_CNTL, tmp);
1715
1716 tmp = RREG32(BIF_SCRATCH0);
1717
1718 /* reset */
1719 radeon_pci_config_reset(rdev);
1720 mdelay(1);
1721
1722 /* BIF reset workaround. Not sure if this is needed on 6xx */
1723 tmp = SOFT_RESET_BIF;
1724 WREG32(SRBM_SOFT_RESET, tmp);
1725 mdelay(1);
1726 WREG32(SRBM_SOFT_RESET, 0);
1727
1728 /* wait for asic to come out of reset */
1729 for (i = 0; i < rdev->usec_timeout; i++) {
1730 if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
1731 break;
1732 udelay(1);
1733 }
1734}
1735
1736int r600_asic_reset(struct radeon_device *rdev)
1737{
1738 u32 reset_mask;
1739
1740 reset_mask = r600_gpu_check_soft_reset(rdev);
1741
1742 if (reset_mask)
1743 r600_set_bios_scratch_engine_hung(rdev, true);
1744
1745 /* try soft reset */
1746 r600_gpu_soft_reset(rdev, reset_mask);
1747
1748 reset_mask = r600_gpu_check_soft_reset(rdev);
1749
1750 /* try pci config reset */
1751 if (reset_mask && radeon_hard_reset)
1752 r600_gpu_pci_config_reset(rdev);
1753
1754 reset_mask = r600_gpu_check_soft_reset(rdev);
1755
1756 if (!reset_mask)
1757 r600_set_bios_scratch_engine_hung(rdev, false);
1758
1759 return 0;
1760}
1761
1762/**
1763 * r600_gfx_is_lockup - Check if the GFX engine is locked up
1764 *
1765 * @rdev: radeon_device pointer
1766 * @ring: radeon_ring structure holding ring information
1767 *
1768 * Check if the GFX engine is locked up.
1769 * Returns true if the engine appears to be locked up, false if not.
1770 */
1771bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1772{
1773 u32 reset_mask = r600_gpu_check_soft_reset(rdev);
1774
1775 if (!(reset_mask & (RADEON_RESET_GFX |
1776 RADEON_RESET_COMPUTE |
1777 RADEON_RESET_CP))) {
1778 radeon_ring_lockup_update(rdev, ring);
1779 return false;
1780 }
1781 return radeon_ring_test_lockup(rdev, ring);
1782}
1783
1784u32 r6xx_remap_render_backend(struct radeon_device *rdev,
1785 u32 tiling_pipe_num,
1786 u32 max_rb_num,
1787 u32 total_max_rb_num,
1788 u32 disabled_rb_mask)
1789{
1790 u32 rendering_pipe_num, rb_num_width, req_rb_num;
1791 u32 pipe_rb_ratio, pipe_rb_remain, tmp;
1792 u32 data = 0, mask = 1 << (max_rb_num - 1);
1793 unsigned i, j;
1794
1795 /* mask out the RBs that don't exist on that asic */
1796 tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
1797 /* make sure at least one RB is available */
1798 if ((tmp & 0xff) != 0xff)
1799 disabled_rb_mask = tmp;
1800
1801 rendering_pipe_num = 1 << tiling_pipe_num;
1802 req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
1803 BUG_ON(rendering_pipe_num < req_rb_num);
1804
1805 pipe_rb_ratio = rendering_pipe_num / req_rb_num;
1806 pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;
1807
1808 if (rdev->family <= CHIP_RV740) {
1809 /* r6xx/r7xx */
1810 rb_num_width = 2;
1811 } else {
1812 /* eg+ */
1813 rb_num_width = 4;
1814 }
1815
1816 for (i = 0; i < max_rb_num; i++) {
1817 if (!(mask & disabled_rb_mask)) {
1818 for (j = 0; j < pipe_rb_ratio; j++) {
1819 data <<= rb_num_width;
1820 data |= max_rb_num - i - 1;
1821 }
1822 if (pipe_rb_remain) {
1823 data <<= rb_num_width;
1824 data |= max_rb_num - i - 1;
1825 pipe_rb_remain--;
1826 }
1827 }
1828 mask >>= 1;
1829 }
1830
1831 return data;
1832}
1833
1834int r600_count_pipe_bits(uint32_t val)
1835{
1836 return hweight32(val);
1837}
1838
1839static void r600_gpu_init(struct radeon_device *rdev)
1840{
1841 u32 tiling_config;
1842 u32 ramcfg;
1843 u32 cc_rb_backend_disable;
1844 u32 cc_gc_shader_pipe_config;
1845 u32 tmp;
1846 int i, j;
1847 u32 sq_config;
1848 u32 sq_gpr_resource_mgmt_1 = 0;
1849 u32 sq_gpr_resource_mgmt_2 = 0;
1850 u32 sq_thread_resource_mgmt = 0;
1851 u32 sq_stack_resource_mgmt_1 = 0;
1852 u32 sq_stack_resource_mgmt_2 = 0;
1853 u32 disabled_rb_mask;
1854
1855 rdev->config.r600.tiling_group_size = 256;
1856 switch (rdev->family) {
1857 case CHIP_R600:
1858 rdev->config.r600.max_pipes = 4;
1859 rdev->config.r600.max_tile_pipes = 8;
1860 rdev->config.r600.max_simds = 4;
1861 rdev->config.r600.max_backends = 4;
1862 rdev->config.r600.max_gprs = 256;
1863 rdev->config.r600.max_threads = 192;
1864 rdev->config.r600.max_stack_entries = 256;
1865 rdev->config.r600.max_hw_contexts = 8;
1866 rdev->config.r600.max_gs_threads = 16;
1867 rdev->config.r600.sx_max_export_size = 128;
1868 rdev->config.r600.sx_max_export_pos_size = 16;
1869 rdev->config.r600.sx_max_export_smx_size = 128;
1870 rdev->config.r600.sq_num_cf_insts = 2;
1871 break;
1872 case CHIP_RV630:
1873 case CHIP_RV635:
1874 rdev->config.r600.max_pipes = 2;
1875 rdev->config.r600.max_tile_pipes = 2;
1876 rdev->config.r600.max_simds = 3;
1877 rdev->config.r600.max_backends = 1;
1878 rdev->config.r600.max_gprs = 128;
1879 rdev->config.r600.max_threads = 192;
1880 rdev->config.r600.max_stack_entries = 128;
1881 rdev->config.r600.max_hw_contexts = 8;
1882 rdev->config.r600.max_gs_threads = 4;
1883 rdev->config.r600.sx_max_export_size = 128;
1884 rdev->config.r600.sx_max_export_pos_size = 16;
1885 rdev->config.r600.sx_max_export_smx_size = 128;
1886 rdev->config.r600.sq_num_cf_insts = 2;
1887 break;
1888 case CHIP_RV610:
1889 case CHIP_RV620:
1890 case CHIP_RS780:
1891 case CHIP_RS880:
1892 rdev->config.r600.max_pipes = 1;
1893 rdev->config.r600.max_tile_pipes = 1;
1894 rdev->config.r600.max_simds = 2;
1895 rdev->config.r600.max_backends = 1;
1896 rdev->config.r600.max_gprs = 128;
1897 rdev->config.r600.max_threads = 192;
1898 rdev->config.r600.max_stack_entries = 128;
1899 rdev->config.r600.max_hw_contexts = 4;
1900 rdev->config.r600.max_gs_threads = 4;
1901 rdev->config.r600.sx_max_export_size = 128;
1902 rdev->config.r600.sx_max_export_pos_size = 16;
1903 rdev->config.r600.sx_max_export_smx_size = 128;
1904 rdev->config.r600.sq_num_cf_insts = 1;
1905 break;
1906 case CHIP_RV670:
1907 rdev->config.r600.max_pipes = 4;
1908 rdev->config.r600.max_tile_pipes = 4;
1909 rdev->config.r600.max_simds = 4;
1910 rdev->config.r600.max_backends = 4;
1911 rdev->config.r600.max_gprs = 192;
1912 rdev->config.r600.max_threads = 192;
1913 rdev->config.r600.max_stack_entries = 256;
1914 rdev->config.r600.max_hw_contexts = 8;
1915 rdev->config.r600.max_gs_threads = 16;
1916 rdev->config.r600.sx_max_export_size = 128;
1917 rdev->config.r600.sx_max_export_pos_size = 16;
1918 rdev->config.r600.sx_max_export_smx_size = 128;
1919 rdev->config.r600.sq_num_cf_insts = 2;
1920 break;
1921 default:
1922 break;
1923 }
1924
1925 /* Initialize HDP */
1926 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1927 WREG32((0x2c14 + j), 0x00000000);
1928 WREG32((0x2c18 + j), 0x00000000);
1929 WREG32((0x2c1c + j), 0x00000000);
1930 WREG32((0x2c20 + j), 0x00000000);
1931 WREG32((0x2c24 + j), 0x00000000);
1932 }
1933
1934 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
1935
1936 /* Setup tiling */
1937 tiling_config = 0;
1938 ramcfg = RREG32(RAMCFG);
1939 switch (rdev->config.r600.max_tile_pipes) {
1940 case 1:
1941 tiling_config |= PIPE_TILING(0);
1942 break;
1943 case 2:
1944 tiling_config |= PIPE_TILING(1);
1945 break;
1946 case 4:
1947 tiling_config |= PIPE_TILING(2);
1948 break;
1949 case 8:
1950 tiling_config |= PIPE_TILING(3);
1951 break;
1952 default:
1953 break;
1954 }
1955 rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
1956 rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1957 tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1958 tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
1959
1960 tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
1961 if (tmp > 3) {
1962 tiling_config |= ROW_TILING(3);
1963 tiling_config |= SAMPLE_SPLIT(3);
1964 } else {
1965 tiling_config |= ROW_TILING(tmp);
1966 tiling_config |= SAMPLE_SPLIT(tmp);
1967 }
1968 tiling_config |= BANK_SWAPS(1);
1969
1970 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
1971 tmp = R6XX_MAX_BACKENDS -
1972 r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
1973 if (tmp < rdev->config.r600.max_backends) {
1974 rdev->config.r600.max_backends = tmp;
1975 }
1976
1977 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
1978 tmp = R6XX_MAX_PIPES -
1979 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
1980 if (tmp < rdev->config.r600.max_pipes) {
1981 rdev->config.r600.max_pipes = tmp;
1982 }
1983 tmp = R6XX_MAX_SIMDS -
1984 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
1985 if (tmp < rdev->config.r600.max_simds) {
1986 rdev->config.r600.max_simds = tmp;
1987 }
1988
1989 disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
1990 tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
1991 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
1992 R6XX_MAX_BACKENDS, disabled_rb_mask);
1993 tiling_config |= tmp << 16;
1994 rdev->config.r600.backend_map = tmp;
1995
1996 rdev->config.r600.tile_config = tiling_config;
1997 WREG32(GB_TILING_CONFIG, tiling_config);
1998 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
1999 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
2000 WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);
2001
2002 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
2003 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
2004 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
2005
2006 /* Setup some CP states */
2007 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
2008 WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));
2009
2010 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
2011 SYNC_WALKER | SYNC_ALIGNER));
2012 /* Setup various GPU states */
2013 if (rdev->family == CHIP_RV670)
2014 WREG32(ARB_GDEC_RD_CNTL, 0x00000021);
2015
2016 tmp = RREG32(SX_DEBUG_1);
2017 tmp |= SMX_EVENT_RELEASE;
2018 if ((rdev->family > CHIP_R600))
2019 tmp |= ENABLE_NEW_SMX_ADDRESS;
2020 WREG32(SX_DEBUG_1, tmp);
2021
2022 if (((rdev->family) == CHIP_R600) ||
2023 ((rdev->family) == CHIP_RV630) ||
2024 ((rdev->family) == CHIP_RV610) ||
2025 ((rdev->family) == CHIP_RV620) ||
2026 ((rdev->family) == CHIP_RS780) ||
2027 ((rdev->family) == CHIP_RS880)) {
2028 WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
2029 } else {
2030 WREG32(DB_DEBUG, 0);
2031 }
2032 WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
2033 DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
2034
2035 WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
2036 WREG32(VGT_NUM_INSTANCES, 0);
2037
2038 WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
2039 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));
2040
2041 tmp = RREG32(SQ_MS_FIFO_SIZES);
2042 if (((rdev->family) == CHIP_RV610) ||
2043 ((rdev->family) == CHIP_RV620) ||
2044 ((rdev->family) == CHIP_RS780) ||
2045 ((rdev->family) == CHIP_RS880)) {
2046 tmp = (CACHE_FIFO_SIZE(0xa) |
2047 FETCH_FIFO_HIWATER(0xa) |
2048 DONE_FIFO_HIWATER(0xe0) |
2049 ALU_UPDATE_FIFO_HIWATER(0x8));
2050 } else if (((rdev->family) == CHIP_R600) ||
2051 ((rdev->family) == CHIP_RV630)) {
2052 tmp &= ~DONE_FIFO_HIWATER(0xff);
2053 tmp |= DONE_FIFO_HIWATER(0x4);
2054 }
2055 WREG32(SQ_MS_FIFO_SIZES, tmp);
2056
2057 /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
2058 * should be adjusted as needed by the 2D/3D drivers. This just sets default values
2059 */
2060 sq_config = RREG32(SQ_CONFIG);
2061 sq_config &= ~(PS_PRIO(3) |
2062 VS_PRIO(3) |
2063 GS_PRIO(3) |
2064 ES_PRIO(3));
2065 sq_config |= (DX9_CONSTS |
2066 VC_ENABLE |
2067 PS_PRIO(0) |
2068 VS_PRIO(1) |
2069 GS_PRIO(2) |
2070 ES_PRIO(3));
2071
2072 if ((rdev->family) == CHIP_R600) {
2073 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
2074 NUM_VS_GPRS(124) |
2075 NUM_CLAUSE_TEMP_GPRS(4));
2076 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
2077 NUM_ES_GPRS(0));
2078 sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
2079 NUM_VS_THREADS(48) |
2080 NUM_GS_THREADS(4) |
2081 NUM_ES_THREADS(4));
2082 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
2083 NUM_VS_STACK_ENTRIES(128));
2084 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
2085 NUM_ES_STACK_ENTRIES(0));
2086 } else if (((rdev->family) == CHIP_RV610) ||
2087 ((rdev->family) == CHIP_RV620) ||
2088 ((rdev->family) == CHIP_RS780) ||
2089 ((rdev->family) == CHIP_RS880)) {
2090 /* no vertex cache */
2091 sq_config &= ~VC_ENABLE;
2092
2093 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
2094 NUM_VS_GPRS(44) |
2095 NUM_CLAUSE_TEMP_GPRS(2));
2096 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
2097 NUM_ES_GPRS(17));
2098 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
2099 NUM_VS_THREADS(78) |
2100 NUM_GS_THREADS(4) |
2101 NUM_ES_THREADS(31));
2102 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
2103 NUM_VS_STACK_ENTRIES(40));
2104 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
2105 NUM_ES_STACK_ENTRIES(16));
2106 } else if (((rdev->family) == CHIP_RV630) ||
2107 ((rdev->family) == CHIP_RV635)) {
2108 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
2109 NUM_VS_GPRS(44) |
2110 NUM_CLAUSE_TEMP_GPRS(2));
2111 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
2112 NUM_ES_GPRS(18));
2113 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
2114 NUM_VS_THREADS(78) |
2115 NUM_GS_THREADS(4) |
2116 NUM_ES_THREADS(31));
2117 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
2118 NUM_VS_STACK_ENTRIES(40));
2119 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
2120 NUM_ES_STACK_ENTRIES(16));
2121 } else if ((rdev->family) == CHIP_RV670) {
2122 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
2123 NUM_VS_GPRS(44) |
2124 NUM_CLAUSE_TEMP_GPRS(2));
2125 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
2126 NUM_ES_GPRS(17));
2127 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
2128 NUM_VS_THREADS(78) |
2129 NUM_GS_THREADS(4) |
2130 NUM_ES_THREADS(31));
2131 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
2132 NUM_VS_STACK_ENTRIES(64));
2133 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
2134 NUM_ES_STACK_ENTRIES(64));
2135 }
2136
2137 WREG32(SQ_CONFIG, sq_config);
2138 WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
2139 WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
2140 WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
2141 WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
2142 WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
2143
2144 if (((rdev->family) == CHIP_RV610) ||
2145 ((rdev->family) == CHIP_RV620) ||
2146 ((rdev->family) == CHIP_RS780) ||
2147 ((rdev->family) == CHIP_RS880)) {
2148 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
2149 } else {
2150 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
2151 }
2152
2153 /* More default values. 2D/3D driver should adjust as needed */
2154 WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
2155 S1_X(0x4) | S1_Y(0xc)));
2156 WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
2157 S1_X(0x2) | S1_Y(0x2) |
2158 S2_X(0xa) | S2_Y(0x6) |
2159 S3_X(0x6) | S3_Y(0xa)));
2160 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
2161 S1_X(0x4) | S1_Y(0xc) |
2162 S2_X(0x1) | S2_Y(0x6) |
2163 S3_X(0xa) | S3_Y(0xe)));
2164 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
2165 S5_X(0x0) | S5_Y(0x0) |
2166 S6_X(0xb) | S6_Y(0x4) |
2167 S7_X(0x7) | S7_Y(0x8)));
2168
2169 WREG32(VGT_STRMOUT_EN, 0);
2170 tmp = rdev->config.r600.max_pipes * 16;
2171 switch (rdev->family) {
2172 case CHIP_RV610:
2173 case CHIP_RV620:
2174 case CHIP_RS780:
2175 case CHIP_RS880:
2176 tmp += 32;
2177 break;
2178 case CHIP_RV670:
2179 tmp += 128;
2180 break;
2181 default:
2182 break;
2183 }
2184 if (tmp > 256) {
2185 tmp = 256;
2186 }
2187 WREG32(VGT_ES_PER_GS, 128);
2188 WREG32(VGT_GS_PER_ES, tmp);
2189 WREG32(VGT_GS_PER_VS, 2);
2190 WREG32(VGT_GS_VERTEX_REUSE, 16);
2191
2192 /* more default values. 2D/3D driver should adjust as needed */
2193 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
2194 WREG32(VGT_STRMOUT_EN, 0);
2195 WREG32(SX_MISC, 0);
2196 WREG32(PA_SC_MODE_CNTL, 0);
2197 WREG32(PA_SC_AA_CONFIG, 0);
2198 WREG32(PA_SC_LINE_STIPPLE, 0);
2199 WREG32(SPI_INPUT_Z, 0);
2200 WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
2201 WREG32(CB_COLOR7_FRAG, 0);
2202
2203 /* Clear render buffer base addresses */
2204 WREG32(CB_COLOR0_BASE, 0);
2205 WREG32(CB_COLOR1_BASE, 0);
2206 WREG32(CB_COLOR2_BASE, 0);
2207 WREG32(CB_COLOR3_BASE, 0);
2208 WREG32(CB_COLOR4_BASE, 0);
2209 WREG32(CB_COLOR5_BASE, 0);
2210 WREG32(CB_COLOR6_BASE, 0);
2211 WREG32(CB_COLOR7_BASE, 0);
2212 WREG32(CB_COLOR7_FRAG, 0);
2213
2214 switch (rdev->family) {
2215 case CHIP_RV610:
2216 case CHIP_RV620:
2217 case CHIP_RS780:
2218 case CHIP_RS880:
2219 tmp = TC_L2_SIZE(8);
2220 break;
2221 case CHIP_RV630:
2222 case CHIP_RV635:
2223 tmp = TC_L2_SIZE(4);
2224 break;
2225 case CHIP_R600:
2226 tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
2227 break;
2228 default:
2229 tmp = TC_L2_SIZE(0);
2230 break;
2231 }
2232 WREG32(TC_CNTL, tmp);
2233
2234 tmp = RREG32(HDP_HOST_PATH_CNTL);
2235 WREG32(HDP_HOST_PATH_CNTL, tmp);
2236
2237 tmp = RREG32(ARB_POP);
2238 tmp |= ENABLE_TC128;
2239 WREG32(ARB_POP, tmp);
2240
2241 WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
2242 WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
2243 NUM_CLIP_SEQ(3)));
2244 WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
2245 WREG32(VC_ENHANCE, 0);
2246}
2247
2248
2249/*
2250 * Indirect registers accessor
2251 */
2252u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
2253{
2254 unsigned long flags;
2255 u32 r;
2256
2257 spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
2258 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
2259 (void)RREG32(PCIE_PORT_INDEX);
2260 r = RREG32(PCIE_PORT_DATA);
2261 spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
2262 return r;
2263}
2264
2265void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2266{
2267 unsigned long flags;
2268
2269 spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
2270 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
2271 (void)RREG32(PCIE_PORT_INDEX);
2272 WREG32(PCIE_PORT_DATA, (v));
2273 (void)RREG32(PCIE_PORT_DATA);
2274 spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
2275}
2276
2277/*
2278 * CP & Ring
2279 */
2280void r600_cp_stop(struct radeon_device *rdev)
2281{
2282 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
2283 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
2284 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
2285 WREG32(SCRATCH_UMSK, 0);
2286 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
2287}
2288
2289int r600_init_microcode(struct radeon_device *rdev)
2290{
2291 const char *chip_name;
2292 const char *rlc_chip_name;
2293 const char *smc_chip_name = "RV770";
2294 size_t pfp_req_size, me_req_size, rlc_req_size, smc_req_size = 0;
2295 char fw_name[30];
2296 int err;
2297
2298 DRM_DEBUG("\n");
2299
2300 switch (rdev->family) {
2301 case CHIP_R600:
2302 chip_name = "R600";
2303 rlc_chip_name = "R600";
2304 break;
2305 case CHIP_RV610:
2306 chip_name = "RV610";
2307 rlc_chip_name = "R600";
2308 break;
2309 case CHIP_RV630:
2310 chip_name = "RV630";
2311 rlc_chip_name = "R600";
2312 break;
2313 case CHIP_RV620:
2314 chip_name = "RV620";
2315 rlc_chip_name = "R600";
2316 break;
2317 case CHIP_RV635:
2318 chip_name = "RV635";
2319 rlc_chip_name = "R600";
2320 break;
2321 case CHIP_RV670:
2322 chip_name = "RV670";
2323 rlc_chip_name = "R600";
2324 break;
2325 case CHIP_RS780:
2326 case CHIP_RS880:
2327 chip_name = "RS780";
2328 rlc_chip_name = "R600";
2329 break;
2330 case CHIP_RV770:
2331 chip_name = "RV770";
2332 rlc_chip_name = "R700";
2333 smc_chip_name = "RV770";
2334#ifdef __NetBSD__ /* XXX ALIGN means something else. */
2335 smc_req_size = round_up(RV770_SMC_UCODE_SIZE, 4);
2336#else
2337 smc_req_size = ALIGN(RV770_SMC_UCODE_SIZE, 4);
2338#endif
2339 break;
2340 case CHIP_RV730:
2341 chip_name = "RV730";
2342 rlc_chip_name = "R700";
2343 smc_chip_name = "RV730";
2344#ifdef __NetBSD__ /* XXX ALIGN means something else. */
2345 smc_req_size = round_up(RV730_SMC_UCODE_SIZE, 4);
2346#else
2347 smc_req_size = ALIGN(RV730_SMC_UCODE_SIZE, 4);
2348#endif
2349 break;
2350 case CHIP_RV710:
2351 chip_name = "RV710";
2352 rlc_chip_name = "R700";
2353 smc_chip_name = "RV710";
2354#ifdef __NetBSD__ /* XXX ALIGN means something else. */
2355 smc_req_size = round_up(RV710_SMC_UCODE_SIZE, 4);
2356#else
2357 smc_req_size = ALIGN(RV710_SMC_UCODE_SIZE, 4);
2358#endif
2359 break;
2360 case CHIP_RV740:
2361 chip_name = "RV730";
2362 rlc_chip_name = "R700";
2363 smc_chip_name = "RV740";
2364#ifdef __NetBSD__ /* XXX ALIGN means something else. */
2365 smc_req_size = round_up(RV740_SMC_UCODE_SIZE, 4);
2366#else
2367 smc_req_size = ALIGN(RV740_SMC_UCODE_SIZE, 4);
2368#endif
2369 break;
2370 case CHIP_CEDAR:
2371 chip_name = "CEDAR";
2372 rlc_chip_name = "CEDAR";
2373 smc_chip_name = "CEDAR";
2374#ifdef __NetBSD__ /* XXX ALIGN means something else. */
2375 smc_req_size = round_up(CEDAR_SMC_UCODE_SIZE, 4);
2376#else
2377 smc_req_size = ALIGN(CEDAR_SMC_UCODE_SIZE, 4);
2378#endif
2379 break;
2380 case CHIP_REDWOOD:
2381 chip_name = "REDWOOD";
2382 rlc_chip_name = "REDWOOD";
2383 smc_chip_name = "REDWOOD";
2384#ifdef __NetBSD__ /* XXX ALIGN means something else. */
2385 smc_req_size = round_up(REDWOOD_SMC_UCODE_SIZE, 4);
2386#else
2387 smc_req_size = ALIGN(REDWOOD_SMC_UCODE_SIZE, 4);
2388#endif
2389 break;
2390 case CHIP_JUNIPER:
2391 chip_name = "JUNIPER";
2392 rlc_chip_name = "JUNIPER";
2393 smc_chip_name = "JUNIPER";
2394#ifdef __NetBSD__ /* XXX ALIGN means something else. */
2395 smc_req_size = round_up(JUNIPER_SMC_UCODE_SIZE, 4);
2396#else
2397 smc_req_size = ALIGN(JUNIPER_SMC_UCODE_SIZE, 4);
2398#endif
2399 break;
2400 case CHIP_CYPRESS:
2401 case CHIP_HEMLOCK:
2402 chip_name = "CYPRESS";
2403 rlc_chip_name = "CYPRESS";
2404 smc_chip_name = "CYPRESS";
2405#ifdef __NetBSD__ /* XXX ALIGN means something else. */
2406 smc_req_size = round_up(CYPRESS_SMC_UCODE_SIZE, 4);
2407#else
2408 smc_req_size = ALIGN(CYPRESS_SMC_UCODE_SIZE, 4);
2409#endif
2410 break;
2411 case CHIP_PALM:
2412 chip_name = "PALM";
2413 rlc_chip_name = "SUMO";
2414 break;
2415 case CHIP_SUMO:
2416 chip_name = "SUMO";
2417 rlc_chip_name = "SUMO";
2418 break;
2419 case CHIP_SUMO2:
2420 chip_name = "SUMO2";
2421 rlc_chip_name = "SUMO";
2422 break;
2423 default: BUG();
2424 }
2425
2426 if (rdev->family >= CHIP_CEDAR) {
2427 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
2428 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
2429 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
2430 } else if (rdev->family >= CHIP_RV770) {
2431 pfp_req_size = R700_PFP_UCODE_SIZE * 4;
2432 me_req_size = R700_PM4_UCODE_SIZE * 4;
2433 rlc_req_size = R700_RLC_UCODE_SIZE * 4;
2434 } else {
2435 pfp_req_size = R600_PFP_UCODE_SIZE * 4;
2436 me_req_size = R600_PM4_UCODE_SIZE * 12;
2437 rlc_req_size = R600_RLC_UCODE_SIZE * 4;
2438 }
2439
2440 DRM_INFO("Loading %s Microcode\n", chip_name);
2441
2442 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
2443 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
2444 if (err)
2445 goto out;
2446 if (rdev->pfp_fw->size != pfp_req_size) {
2447 printk(KERN_ERR
2448 "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2449 rdev->pfp_fw->size, fw_name);
2450 err = -EINVAL;
2451 goto out;
2452 }
2453
2454 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
2455 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
2456 if (err)
2457 goto out;
2458 if (rdev->me_fw->size != me_req_size) {
2459 printk(KERN_ERR
2460 "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2461 rdev->me_fw->size, fw_name);
2462 err = -EINVAL;
2463 }
2464
2465 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
2466 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
2467 if (err)
2468 goto out;
2469 if (rdev->rlc_fw->size != rlc_req_size) {
2470 printk(KERN_ERR
2471 "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
2472 rdev->rlc_fw->size, fw_name);
2473 err = -EINVAL;
2474 }
2475
2476 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
2477 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name);
2478 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
2479 if (err) {
2480 printk(KERN_ERR
2481 "smc: error loading firmware \"%s\"\n",
2482 fw_name);
2483 release_firmware(rdev->smc_fw);
2484 rdev->smc_fw = NULL;
2485 err = 0;
2486 } else if (rdev->smc_fw->size != smc_req_size) {
2487 printk(KERN_ERR
2488 "smc: Bogus length %zu in firmware \"%s\"\n",
2489 rdev->smc_fw->size, fw_name);
2490 err = -EINVAL;
2491 }
2492 }
2493
2494out:
2495 if (err) {
2496 if (err != -EINVAL)
2497 printk(KERN_ERR
2498 "r600_cp: Failed to load firmware \"%s\"\n",
2499 fw_name);
2500 release_firmware(rdev->pfp_fw);
2501 rdev->pfp_fw = NULL;
2502 release_firmware(rdev->me_fw);
2503 rdev->me_fw = NULL;
2504 release_firmware(rdev->rlc_fw);
2505 rdev->rlc_fw = NULL;
2506 release_firmware(rdev->smc_fw);
2507 rdev->smc_fw = NULL;
2508 }
2509 return err;
2510}
2511
2512u32 r600_gfx_get_rptr(struct radeon_device *rdev,
2513 struct radeon_ring *ring)
2514{
2515 u32 rptr;
2516
2517 if (rdev->wb.enabled)
2518 rptr = rdev->wb.wb[ring->rptr_offs/4];
2519 else
2520 rptr = RREG32(R600_CP_RB_RPTR);
2521
2522 return rptr;
2523}
2524
2525u32 r600_gfx_get_wptr(struct radeon_device *rdev,
2526 struct radeon_ring *ring)
2527{
2528 u32 wptr;
2529
2530 wptr = RREG32(R600_CP_RB_WPTR);
2531
2532 return wptr;
2533}
2534
2535void r600_gfx_set_wptr(struct radeon_device *rdev,
2536 struct radeon_ring *ring)
2537{
2538 WREG32(R600_CP_RB_WPTR, ring->wptr);
2539 (void)RREG32(R600_CP_RB_WPTR);
2540}
2541
2542static int r600_cp_load_microcode(struct radeon_device *rdev)
2543{
2544 const __be32 *fw_data;
2545 int i;
2546
2547 if (!rdev->me_fw || !rdev->pfp_fw)
2548 return -EINVAL;
2549
2550 r600_cp_stop(rdev);
2551
2552 WREG32(CP_RB_CNTL,
2553#ifdef __BIG_ENDIAN
2554 BUF_SWAP_32BIT |
2555#endif
2556 RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
2557
2558 /* Reset cp */
2559 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2560 RREG32(GRBM_SOFT_RESET);
2561 mdelay(15);
2562 WREG32(GRBM_SOFT_RESET, 0);
2563
2564 WREG32(CP_ME_RAM_WADDR, 0);
2565
2566 fw_data = (const __be32 *)rdev->me_fw->data;
2567 WREG32(CP_ME_RAM_WADDR, 0);
2568 for (i = 0; i < R600_PM4_UCODE_SIZE * 3; i++)
2569 WREG32(CP_ME_RAM_DATA,
2570 be32_to_cpup(fw_data++));
2571
2572 fw_data = (const __be32 *)rdev->pfp_fw->data;
2573 WREG32(CP_PFP_UCODE_ADDR, 0);
2574 for (i = 0; i < R600_PFP_UCODE_SIZE; i++)
2575 WREG32(CP_PFP_UCODE_DATA,
2576 be32_to_cpup(fw_data++));
2577
2578 WREG32(CP_PFP_UCODE_ADDR, 0);
2579 WREG32(CP_ME_RAM_WADDR, 0);
2580 WREG32(CP_ME_RAM_RADDR, 0);
2581 return 0;
2582}
2583
2584int r600_cp_start(struct radeon_device *rdev)
2585{
2586 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2587 int r;
2588 uint32_t cp_me;
2589
2590 r = radeon_ring_lock(rdev, ring, 7);
2591 if (r) {
2592 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2593 return r;
2594 }
2595 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
2596 radeon_ring_write(ring, 0x1);
2597 if (rdev->family >= CHIP_RV770) {
2598 radeon_ring_write(ring, 0x0);
2599 radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
2600 } else {
2601 radeon_ring_write(ring, 0x3);
2602 radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
2603 }
2604 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
2605 radeon_ring_write(ring, 0);
2606 radeon_ring_write(ring, 0);
2607 radeon_ring_unlock_commit(rdev, ring);
2608
2609 cp_me = 0xff;
2610 WREG32(R_0086D8_CP_ME_CNTL, cp_me);
2611 return 0;
2612}
2613
2614int r600_cp_resume(struct radeon_device *rdev)
2615{
2616 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2617 u32 tmp;
2618 u32 rb_bufsz;
2619 int r;
2620
2621 /* Reset cp */
2622 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2623 RREG32(GRBM_SOFT_RESET);
2624 mdelay(15);
2625 WREG32(GRBM_SOFT_RESET, 0);
2626
2627 /* Set ring buffer size */
2628 rb_bufsz = order_base_2(ring->ring_size / 8);
2629 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2630#ifdef __BIG_ENDIAN
2631 tmp |= BUF_SWAP_32BIT;
2632#endif
2633 WREG32(CP_RB_CNTL, tmp);
2634 WREG32(CP_SEM_WAIT_TIMER, 0x0);
2635
2636 /* Set the write pointer delay */
2637 WREG32(CP_RB_WPTR_DELAY, 0);
2638
2639 /* Initialize the ring buffer's read and write pointers */
2640 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
2641 WREG32(CP_RB_RPTR_WR, 0);
2642 ring->wptr = 0;
2643 WREG32(CP_RB_WPTR, ring->wptr);
2644
2645 /* set the wb address whether it's enabled or not */
2646 WREG32(CP_RB_RPTR_ADDR,
2647 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
2648 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
2649 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
2650
2651 if (rdev->wb.enabled)
2652 WREG32(SCRATCH_UMSK, 0xff);
2653 else {
2654 tmp |= RB_NO_UPDATE;
2655 WREG32(SCRATCH_UMSK, 0);
2656 }
2657
2658 mdelay(1);
2659 WREG32(CP_RB_CNTL, tmp);
2660
2661 WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
2662 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2663
2664 r600_cp_start(rdev);
2665 ring->ready = true;
2666 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
2667 if (r) {
2668 ring->ready = false;
2669 return r;
2670 }
2671
2672 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
2673 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
2674
2675 return 0;
2676}
2677
2678void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
2679{
2680 u32 rb_bufsz;
2681 int r;
2682
2683 /* Align ring size */
2684 rb_bufsz = order_base_2(ring_size / 8);
2685 ring_size = (1 << (rb_bufsz + 1)) * 4;
2686 ring->ring_size = ring_size;
2687 ring->align_mask = 16 - 1;
2688
2689 if (radeon_ring_supports_scratch_reg(rdev, ring)) {
2690 r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
2691 if (r) {
2692 DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
2693 ring->rptr_save_reg = 0;
2694 }
2695 }
2696}
2697
2698void r600_cp_fini(struct radeon_device *rdev)
2699{
2700 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2701 r600_cp_stop(rdev);
2702 radeon_ring_fini(rdev, ring);
2703 radeon_scratch_free(rdev, ring->rptr_save_reg);
2704}
2705
2706/*
2707 * GPU scratch registers helpers function.
2708 */
2709void r600_scratch_init(struct radeon_device *rdev)
2710{
2711 int i;
2712
2713 rdev->scratch.num_reg = 7;
2714 rdev->scratch.reg_base = SCRATCH_REG0;
2715 for (i = 0; i < rdev->scratch.num_reg; i++) {
2716 rdev->scratch.free[i] = true;
2717 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
2718 }
2719}
2720
2721int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
2722{
2723 uint32_t scratch;
2724 uint32_t tmp = 0;
2725 unsigned i;
2726 int r;
2727
2728 r = radeon_scratch_get(rdev, &scratch);
2729 if (r) {
2730 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
2731 return r;
2732 }
2733 WREG32(scratch, 0xCAFEDEAD);
2734 r = radeon_ring_lock(rdev, ring, 3);
2735 if (r) {
2736 DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
2737 radeon_scratch_free(rdev, scratch);
2738 return r;
2739 }
2740 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2741 radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2742 radeon_ring_write(ring, 0xDEADBEEF);
2743 radeon_ring_unlock_commit(rdev, ring);
2744 for (i = 0; i < rdev->usec_timeout; i++) {
2745 tmp = RREG32(scratch);
2746 if (tmp == 0xDEADBEEF)
2747 break;
2748 DRM_UDELAY(1);
2749 }
2750 if (i < rdev->usec_timeout) {
2751 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
2752 } else {
2753 DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
2754 ring->idx, scratch, tmp);
2755 r = -EINVAL;
2756 }
2757 radeon_scratch_free(rdev, scratch);
2758 return r;
2759}
2760
2761/*
2762 * CP fences/semaphores
2763 */
2764
2765void r600_fence_ring_emit(struct radeon_device *rdev,
2766 struct radeon_fence *fence)
2767{
2768 struct radeon_ring *ring = &rdev->ring[fence->ring];
2769 u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA |
2770 PACKET3_SH_ACTION_ENA;
2771
2772 if (rdev->family >= CHIP_RV770)
2773 cp_coher_cntl |= PACKET3_FULL_CACHE_ENA;
2774
2775 if (rdev->wb.use_event) {
2776 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
2777 /* flush read cache over gart */
2778 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
2779 radeon_ring_write(ring, cp_coher_cntl);
2780 radeon_ring_write(ring, 0xFFFFFFFF);
2781 radeon_ring_write(ring, 0);
2782 radeon_ring_write(ring, 10); /* poll interval */
2783 /* EVENT_WRITE_EOP - flush caches, send int */
2784 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2785 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
2786 radeon_ring_write(ring, addr & 0xffffffff);
2787 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
2788 radeon_ring_write(ring, fence->seq);
2789 radeon_ring_write(ring, 0);
2790 } else {
2791 /* flush read cache over gart */
2792 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
2793 radeon_ring_write(ring, cp_coher_cntl);
2794 radeon_ring_write(ring, 0xFFFFFFFF);
2795 radeon_ring_write(ring, 0);
2796 radeon_ring_write(ring, 10); /* poll interval */
2797 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
2798 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
2799 /* wait for 3D idle clean */
2800 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2801 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2802 radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
2803 /* Emit fence sequence & fire IRQ */
2804 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2805 radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2806 radeon_ring_write(ring, fence->seq);
2807 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
2808 radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
2809 radeon_ring_write(ring, RB_INT_STAT);
2810 }
2811}
2812
2813bool r600_semaphore_ring_emit(struct radeon_device *rdev,
2814 struct radeon_ring *ring,
2815 struct radeon_semaphore *semaphore,
2816 bool emit_wait)
2817{
2818 uint64_t addr = semaphore->gpu_addr;
2819 unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
2820
2821 if (rdev->family < CHIP_CAYMAN)
2822 sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
2823
2824 radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
2825 radeon_ring_write(ring, addr & 0xffffffff);
2826 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
2827
2828 return true;
2829}
2830
2831/**
2832 * r600_copy_cpdma - copy pages using the CP DMA engine
2833 *
2834 * @rdev: radeon_device pointer
2835 * @src_offset: src GPU address
2836 * @dst_offset: dst GPU address
2837 * @num_gpu_pages: number of GPU pages to xfer
2838 * @fence: radeon fence object
2839 *
2840 * Copy GPU paging using the CP DMA engine (r6xx+).
2841 * Used by the radeon ttm implementation to move pages if
2842 * registered as the asic copy callback.
2843 */
2844int r600_copy_cpdma(struct radeon_device *rdev,
2845 uint64_t src_offset, uint64_t dst_offset,
2846 unsigned num_gpu_pages,
2847 struct radeon_fence **fence)
2848{
2849 struct radeon_semaphore *sem = NULL;
2850 int ring_index = rdev->asic->copy.blit_ring_index;
2851 struct radeon_ring *ring = &rdev->ring[ring_index];
2852 u32 size_in_bytes, cur_size_in_bytes, tmp;
2853 int i, num_loops;
2854 int r = 0;
2855
2856 r = radeon_semaphore_create(rdev, &sem);
2857 if (r) {
2858 DRM_ERROR("radeon: moving bo (%d).\n", r);
2859 return r;
2860 }
2861
2862 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
2863 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
2864 r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
2865 if (r) {
2866 DRM_ERROR("radeon: moving bo (%d).\n", r);
2867 radeon_semaphore_free(rdev, &sem, NULL);
2868 return r;
2869 }
2870
2871 radeon_semaphore_sync_to(sem, *fence);
2872 radeon_semaphore_sync_rings(rdev, sem, ring->idx);
2873
2874 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2875 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2876 radeon_ring_write(ring, WAIT_3D_IDLE_bit);
2877 for (i = 0; i < num_loops; i++) {
2878 cur_size_in_bytes = size_in_bytes;
2879 if (cur_size_in_bytes > 0x1fffff)
2880 cur_size_in_bytes = 0x1fffff;
2881 size_in_bytes -= cur_size_in_bytes;
2882 tmp = upper_32_bits(src_offset) & 0xff;
2883 if (size_in_bytes == 0)
2884 tmp |= PACKET3_CP_DMA_CP_SYNC;
2885 radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
2886 radeon_ring_write(ring, src_offset & 0xffffffff);
2887 radeon_ring_write(ring, tmp);
2888 radeon_ring_write(ring, dst_offset & 0xffffffff);
2889 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
2890 radeon_ring_write(ring, cur_size_in_bytes);
2891 src_offset += cur_size_in_bytes;
2892 dst_offset += cur_size_in_bytes;
2893 }
2894 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2895 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2896 radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);
2897
2898 r = radeon_fence_emit(rdev, fence, ring->idx);
2899 if (r) {
2900 radeon_ring_unlock_undo(rdev, ring);
2901 radeon_semaphore_free(rdev, &sem, NULL);
2902 return r;
2903 }
2904
2905 radeon_ring_unlock_commit(rdev, ring);
2906 radeon_semaphore_free(rdev, &sem, *fence);
2907
2908 return r;
2909}
2910
2911int r600_set_surface_reg(struct radeon_device *rdev, int reg,
2912 uint32_t tiling_flags, uint32_t pitch,
2913 uint32_t offset, uint32_t obj_size)
2914{
2915 /* FIXME: implement */
2916 return 0;
2917}
2918
2919void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
2920{
2921 /* FIXME: implement */
2922}
2923
2924static int r600_startup(struct radeon_device *rdev)
2925{
2926 struct radeon_ring *ring;
2927 int r;
2928
2929 /* enable pcie gen2 link */
2930 r600_pcie_gen2_enable(rdev);
2931
2932 /* scratch needs to be initialized before MC */
2933 r = r600_vram_scratch_init(rdev);
2934 if (r)
2935 return r;
2936
2937 r600_mc_program(rdev);
2938
2939 if (rdev->flags & RADEON_IS_AGP) {
2940 r600_agp_enable(rdev);
2941 } else {
2942 r = r600_pcie_gart_enable(rdev);
2943 if (r)
2944 return r;
2945 }
2946 r600_gpu_init(rdev);
2947
2948 /* allocate wb buffer */
2949 r = radeon_wb_init(rdev);
2950 if (r)
2951 return r;
2952
2953 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
2954 if (r) {
2955 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
2956 return r;
2957 }
2958
2959 /* Enable IRQ */
2960 if (!rdev->irq.installed) {
2961 r = radeon_irq_kms_init(rdev);
2962 if (r)
2963 return r;
2964 }
2965
2966 r = r600_irq_init(rdev);
2967 if (r) {
2968 DRM_ERROR("radeon: IH init failed (%d).\n", r);
2969 radeon_irq_kms_fini(rdev);
2970 return r;
2971 }
2972 r600_irq_set(rdev);
2973
2974 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2975 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
2976 RADEON_CP_PACKET2);
2977 if (r)
2978 return r;
2979
2980 r = r600_cp_load_microcode(rdev);
2981 if (r)
2982 return r;
2983 r = r600_cp_resume(rdev);
2984 if (r)
2985 return r;
2986
2987 r = radeon_ib_pool_init(rdev);
2988 if (r) {
2989 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
2990 return r;
2991 }
2992
2993 r = r600_audio_init(rdev);
2994 if (r) {
2995 DRM_ERROR("radeon: audio init failed\n");
2996 return r;
2997 }
2998
2999 return 0;
3000}
3001
3002void r600_vga_set_state(struct radeon_device *rdev, bool state)
3003{
3004 uint32_t temp;
3005
3006 temp = RREG32(CONFIG_CNTL);
3007 if (state == false) {
3008 temp &= ~(1<<0);
3009 temp |= (1<<1);
3010 } else {
3011 temp &= ~(1<<1);
3012 }
3013 WREG32(CONFIG_CNTL, temp);
3014}
3015
3016int r600_resume(struct radeon_device *rdev)
3017{
3018 int r;
3019
3020 /* Do not reset GPU before posting, on r600 hw unlike on r500 hw,
3021 * posting will perform necessary task to bring back GPU into good
3022 * shape.
3023 */
3024 /* post card */
3025 atom_asic_init(rdev->mode_info.atom_context);
3026
3027 if (rdev->pm.pm_method == PM_METHOD_DPM)
3028 radeon_pm_resume(rdev);
3029
3030 rdev->accel_working = true;
3031 r = r600_startup(rdev);
3032 if (r) {
3033 DRM_ERROR("r600 startup failed on resume\n");
3034 rdev->accel_working = false;
3035 return r;
3036 }
3037
3038 return r;
3039}
3040
3041int r600_suspend(struct radeon_device *rdev)
3042{
3043 radeon_pm_suspend(rdev);
3044 r600_audio_fini(rdev);
3045 r600_cp_stop(rdev);
3046 r600_irq_suspend(rdev);
3047 radeon_wb_disable(rdev);
3048 r600_pcie_gart_disable(rdev);
3049
3050 return 0;
3051}
3052
3053/* Plan is to move initialization in that function and use
3054 * helper function so that radeon_device_init pretty much
3055 * do nothing more than calling asic specific function. This
3056 * should also allow to remove a bunch of callback function
3057 * like vram_info.
3058 */
3059int r600_init(struct radeon_device *rdev)
3060{
3061 int r;
3062
3063 if (r600_debugfs_mc_info_init(rdev)) {
3064 DRM_ERROR("Failed to register debugfs file for mc !\n");
3065 }
3066 /* Read BIOS */
3067 if (!radeon_get_bios(rdev)) {
3068 if (ASIC_IS_AVIVO(rdev))
3069 return -EINVAL;
3070 }
3071 /* Must be an ATOMBIOS */
3072 if (!rdev->is_atom_bios) {
3073 dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
3074 return -EINVAL;
3075 }
3076 r = radeon_atombios_init(rdev);
3077 if (r)
3078 return r;
3079 /* Post card if necessary */
3080 if (!radeon_card_posted(rdev)) {
3081 if (!rdev->bios) {
3082 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
3083 return -EINVAL;
3084 }
3085 DRM_INFO("GPU not posted. posting now...\n");
3086 atom_asic_init(rdev->mode_info.atom_context);
3087 }
3088 /* Initialize scratch registers */
3089 r600_scratch_init(rdev);
3090 /* Initialize surface registers */
3091 radeon_surface_init(rdev);
3092 /* Initialize clocks */
3093 radeon_get_clock_info(rdev->ddev);
3094 /* Fence driver */
3095 r = radeon_fence_driver_init(rdev);
3096 if (r)
3097 return r;
3098 if (rdev->flags & RADEON_IS_AGP) {
3099 r = radeon_agp_init(rdev);
3100 if (r)
3101 radeon_agp_disable(rdev);
3102 }
3103 r = r600_mc_init(rdev);
3104 if (r)
3105 return r;
3106 /* Memory manager */
3107 r = radeon_bo_init(rdev);
3108 if (r)
3109 return r;
3110
3111 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
3112 r = r600_init_microcode(rdev);
3113 if (r) {
3114 DRM_ERROR("Failed to load firmware!\n");
3115 return r;
3116 }
3117 }
3118
3119 /* Initialize power management */
3120 radeon_pm_init(rdev);
3121
3122 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
3123 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
3124
3125 rdev->ih.ring_obj = NULL;
3126 r600_ih_ring_init(rdev, 64 * 1024);
3127
3128 r = r600_pcie_gart_init(rdev);
3129 if (r)
3130 return r;
3131
3132 rdev->accel_working = true;
3133 r = r600_startup(rdev);
3134 if (r) {
3135 dev_err(rdev->dev, "disabling GPU acceleration\n");
3136 r600_cp_fini(rdev);
3137 r600_irq_fini(rdev);
3138 radeon_wb_fini(rdev);
3139 radeon_ib_pool_fini(rdev);
3140 radeon_irq_kms_fini(rdev);
3141 r600_pcie_gart_fini(rdev);
3142 rdev->accel_working = false;
3143 }
3144
3145 return 0;
3146}
3147
3148void r600_fini(struct radeon_device *rdev)
3149{
3150 radeon_pm_fini(rdev);
3151 r600_audio_fini(rdev);
3152 r600_cp_fini(rdev);
3153 r600_irq_fini(rdev);
3154 radeon_wb_fini(rdev);
3155 radeon_ib_pool_fini(rdev);
3156 radeon_irq_kms_fini(rdev);
3157 r600_pcie_gart_fini(rdev);
3158 r600_vram_scratch_fini(rdev);
3159 radeon_agp_fini(rdev);
3160 radeon_gem_fini(rdev);
3161 radeon_fence_driver_fini(rdev);
3162 radeon_bo_fini(rdev);
3163 radeon_atombios_fini(rdev);
3164 kfree(rdev->bios);
3165 rdev->bios = NULL;
3166}
3167
3168
3169/*
3170 * CS stuff
3171 */
3172void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3173{
3174 struct radeon_ring *ring = &rdev->ring[ib->ring];
3175 u32 next_rptr;
3176
3177 if (ring->rptr_save_reg) {
3178 next_rptr = ring->wptr + 3 + 4;
3179 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
3180 radeon_ring_write(ring, ((ring->rptr_save_reg -
3181 PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
3182 radeon_ring_write(ring, next_rptr);
3183 } else if (rdev->wb.enabled) {
3184 next_rptr = ring->wptr + 5 + 4;
3185 radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
3186 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3187 radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
3188 radeon_ring_write(ring, next_rptr);
3189 radeon_ring_write(ring, 0);
3190 }
3191
3192 radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
3193 radeon_ring_write(ring,
3194#ifdef __BIG_ENDIAN
3195 (2 << 0) |
3196#endif
3197 (ib->gpu_addr & 0xFFFFFFFC));
3198 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
3199 radeon_ring_write(ring, ib->length_dw);
3200}
3201
3202int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3203{
3204 struct radeon_ib ib;
3205 uint32_t scratch;
3206 uint32_t tmp = 0;
3207 unsigned i;
3208 int r;
3209
3210 r = radeon_scratch_get(rdev, &scratch);
3211 if (r) {
3212 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
3213 return r;
3214 }
3215 WREG32(scratch, 0xCAFEDEAD);
3216 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
3217 if (r) {
3218 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3219 goto free_scratch;
3220 }
3221 ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
3222 ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
3223 ib.ptr[2] = 0xDEADBEEF;
3224 ib.length_dw = 3;
3225 r = radeon_ib_schedule(rdev, &ib, NULL);
3226 if (r) {
3227 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3228 goto free_ib;
3229 }
3230 r = radeon_fence_wait(ib.fence, false);
3231 if (r) {
3232 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3233 goto free_ib;
3234 }
3235 for (i = 0; i < rdev->usec_timeout; i++) {
3236 tmp = RREG32(scratch);
3237 if (tmp == 0xDEADBEEF)
3238 break;
3239 DRM_UDELAY(1);
3240 }
3241 if (i < rdev->usec_timeout) {
3242 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
3243 } else {
3244 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
3245 scratch, tmp);
3246 r = -EINVAL;
3247 }
3248free_ib:
3249 radeon_ib_free(rdev, &ib);
3250free_scratch:
3251 radeon_scratch_free(rdev, scratch);
3252 return r;
3253}
3254
3255/*
3256 * Interrupts
3257 *
3258 * Interrupts use a ring buffer on r6xx/r7xx hardware. It works pretty
3259 * the same as the CP ring buffer, but in reverse. Rather than the CPU
3260 * writing to the ring and the GPU consuming, the GPU writes to the ring
3261 * and host consumes. As the host irq handler processes interrupts, it
3262 * increments the rptr. When the rptr catches up with the wptr, all the
3263 * current interrupts have been processed.
3264 */
3265
3266void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
3267{
3268 u32 rb_bufsz;
3269
3270 /* Align ring size */
3271 rb_bufsz = order_base_2(ring_size / 4);
3272 ring_size = (1 << rb_bufsz) * 4;
3273 rdev->ih.ring_size = ring_size;
3274 rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
3275 rdev->ih.rptr = 0;
3276}
3277
3278int r600_ih_ring_alloc(struct radeon_device *rdev)
3279{
3280 int r;
3281
3282 /* Allocate ring buffer */
3283 if (rdev->ih.ring_obj == NULL) {
3284 r = radeon_bo_create(rdev, rdev->ih.ring_size,
3285 PAGE_SIZE, true,
3286 RADEON_GEM_DOMAIN_GTT,
3287 NULL, &rdev->ih.ring_obj);
3288 if (r) {
3289 DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
3290 return r;
3291 }
3292 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
3293 if (unlikely(r != 0))
3294 return r;
3295 r = radeon_bo_pin(rdev->ih.ring_obj,
3296 RADEON_GEM_DOMAIN_GTT,
3297 &rdev->ih.gpu_addr);
3298 if (r) {
3299 radeon_bo_unreserve(rdev->ih.ring_obj);
3300 DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
3301 return r;
3302 }
3303 r = radeon_bo_kmap(rdev->ih.ring_obj,
3304 (void **)__UNVOLATILE(&rdev->ih.ring));
3305 radeon_bo_unreserve(rdev->ih.ring_obj);
3306 if (r) {
3307 DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
3308 return r;
3309 }
3310 }
3311 return 0;
3312}
3313
3314void r600_ih_ring_fini(struct radeon_device *rdev)
3315{
3316 int r;
3317 if (rdev->ih.ring_obj) {
3318 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
3319 if (likely(r == 0)) {
3320 radeon_bo_kunmap(rdev->ih.ring_obj);
3321 radeon_bo_unpin(rdev->ih.ring_obj);
3322 radeon_bo_unreserve(rdev->ih.ring_obj);
3323 }
3324 radeon_bo_unref(&rdev->ih.ring_obj);
3325 rdev->ih.ring = NULL;
3326 rdev->ih.ring_obj = NULL;
3327 }
3328}
3329
3330void r600_rlc_stop(struct radeon_device *rdev)
3331{
3332
3333 if ((rdev->family >= CHIP_RV770) &&
3334 (rdev->family <= CHIP_RV740)) {
3335 /* r7xx asics need to soft reset RLC before halting */
3336 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
3337 RREG32(SRBM_SOFT_RESET);
3338 mdelay(15);
3339 WREG32(SRBM_SOFT_RESET, 0);
3340 RREG32(SRBM_SOFT_RESET);
3341 }
3342
3343 WREG32(RLC_CNTL, 0);
3344}
3345
3346static void r600_rlc_start(struct radeon_device *rdev)
3347{
3348 WREG32(RLC_CNTL, RLC_ENABLE);
3349}
3350
3351static int r600_rlc_resume(struct radeon_device *rdev)
3352{
3353 u32 i;
3354 const __be32 *fw_data;
3355
3356 if (!rdev->rlc_fw)
3357 return -EINVAL;
3358
3359 r600_rlc_stop(rdev);
3360
3361 WREG32(RLC_HB_CNTL, 0);
3362
3363 WREG32(RLC_HB_BASE, 0);
3364 WREG32(RLC_HB_RPTR, 0);
3365 WREG32(RLC_HB_WPTR, 0);
3366 WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
3367 WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
3368 WREG32(RLC_MC_CNTL, 0);
3369 WREG32(RLC_UCODE_CNTL, 0);
3370
3371 fw_data = (const __be32 *)rdev->rlc_fw->data;
3372 if (rdev->family >= CHIP_RV770) {
3373 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
3374 WREG32(RLC_UCODE_ADDR, i);
3375 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3376 }
3377 } else {
3378 for (i = 0; i < R600_RLC_UCODE_SIZE; i++) {
3379 WREG32(RLC_UCODE_ADDR, i);
3380 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3381 }
3382 }
3383 WREG32(RLC_UCODE_ADDR, 0);
3384
3385 r600_rlc_start(rdev);
3386
3387 return 0;
3388}
3389
3390static void r600_enable_interrupts(struct radeon_device *rdev)
3391{
3392 u32 ih_cntl = RREG32(IH_CNTL);
3393 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
3394
3395 ih_cntl |= ENABLE_INTR;
3396 ih_rb_cntl |= IH_RB_ENABLE;
3397 WREG32(IH_CNTL, ih_cntl);
3398 WREG32(IH_RB_CNTL, ih_rb_cntl);
3399 rdev->ih.enabled = true;
3400}
3401
3402void r600_disable_interrupts(struct radeon_device *rdev)
3403{
3404 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
3405 u32 ih_cntl = RREG32(IH_CNTL);
3406
3407 ih_rb_cntl &= ~IH_RB_ENABLE;
3408 ih_cntl &= ~ENABLE_INTR;
3409 WREG32(IH_RB_CNTL, ih_rb_cntl);
3410 WREG32(IH_CNTL, ih_cntl);
3411 /* set rptr, wptr to 0 */
3412 WREG32(IH_RB_RPTR, 0);
3413 WREG32(IH_RB_WPTR, 0);
3414 rdev->ih.enabled = false;
3415 rdev->ih.rptr = 0;
3416}
3417
3418static void r600_disable_interrupt_state(struct radeon_device *rdev)
3419{
3420 u32 tmp;
3421
3422 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
3423 tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
3424 WREG32(DMA_CNTL, tmp);
3425 WREG32(GRBM_INT_CNTL, 0);
3426 WREG32(DxMODE_INT_MASK, 0);
3427 WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
3428 WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
3429 if (ASIC_IS_DCE3(rdev)) {
3430 WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
3431 WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
3432 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3433 WREG32(DC_HPD1_INT_CONTROL, tmp);
3434 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3435 WREG32(DC_HPD2_INT_CONTROL, tmp);
3436 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3437 WREG32(DC_HPD3_INT_CONTROL, tmp);
3438 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3439 WREG32(DC_HPD4_INT_CONTROL, tmp);
3440 if (ASIC_IS_DCE32(rdev)) {
3441 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3442 WREG32(DC_HPD5_INT_CONTROL, tmp);
3443 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3444 WREG32(DC_HPD6_INT_CONTROL, tmp);
3445 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3446 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
3447 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3448 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
3449 } else {
3450 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3451 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3452 tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3453 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
3454 }
3455 } else {
3456 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
3457 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
3458 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
3459 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
3460 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
3461 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
3462 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
3463 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
3464 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3465 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3466 tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3467 WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
3468 }
3469}
3470
3471int r600_irq_init(struct radeon_device *rdev)
3472{
3473 int ret = 0;
3474 int rb_bufsz;
3475 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
3476
3477 /* allocate ring */
3478 ret = r600_ih_ring_alloc(rdev);
3479 if (ret)
3480 return ret;
3481
3482 /* disable irqs */
3483 r600_disable_interrupts(rdev);
3484
3485 /* init rlc */
3486 if (rdev->family >= CHIP_CEDAR)
3487 ret = evergreen_rlc_resume(rdev);
3488 else
3489 ret = r600_rlc_resume(rdev);
3490 if (ret) {
3491 r600_ih_ring_fini(rdev);
3492 return ret;
3493 }
3494
3495 /* setup interrupt control */
3496 /* set dummy read address to ring address */
3497 WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
3498 interrupt_cntl = RREG32(INTERRUPT_CNTL);
3499 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
3500 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
3501 */
3502 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
3503 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
3504 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
3505 WREG32(INTERRUPT_CNTL, interrupt_cntl);
3506
3507 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
3508 rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
3509
3510 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
3511 IH_WPTR_OVERFLOW_CLEAR |
3512 (rb_bufsz << 1));
3513
3514 if (rdev->wb.enabled)
3515 ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
3516
3517 /* set the writeback address whether it's enabled or not */
3518 WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
3519 WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
3520
3521 WREG32(IH_RB_CNTL, ih_rb_cntl);
3522
3523 /* set rptr, wptr to 0 */
3524 WREG32(IH_RB_RPTR, 0);
3525 WREG32(IH_RB_WPTR, 0);
3526
3527 /* Default settings for IH_CNTL (disabled at first) */
3528 ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
3529 /* RPTR_REARM only works if msi's are enabled */
3530 if (rdev->msi_enabled)
3531 ih_cntl |= RPTR_REARM;
3532 WREG32(IH_CNTL, ih_cntl);
3533
3534 /* force the active interrupt state to all disabled */
3535 if (rdev->family >= CHIP_CEDAR)
3536 evergreen_disable_interrupt_state(rdev);
3537 else
3538 r600_disable_interrupt_state(rdev);
3539
3540 /* at this point everything should be setup correctly to enable master */
3541 pci_set_master(rdev->pdev);
3542
3543 /* enable irqs */
3544 r600_enable_interrupts(rdev);
3545
3546 return ret;
3547}
3548
3549void r600_irq_suspend(struct radeon_device *rdev)
3550{
3551 r600_irq_disable(rdev);
3552 r600_rlc_stop(rdev);
3553}
3554
3555void r600_irq_fini(struct radeon_device *rdev)
3556{
3557 r600_irq_suspend(rdev);
3558 r600_ih_ring_fini(rdev);
3559}
3560
3561int r600_irq_set(struct radeon_device *rdev)
3562{
3563 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
3564 u32 mode_int = 0;
3565 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
3566 u32 grbm_int_cntl = 0;
3567 u32 hdmi0, hdmi1;
3568 u32 dma_cntl;
3569 u32 thermal_int = 0;
3570
3571 if (!rdev->irq.installed) {
3572 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
3573 return -EINVAL;
3574 }
3575 /* don't enable anything if the ih is disabled */
3576 if (!rdev->ih.enabled) {
3577 r600_disable_interrupts(rdev);
3578 /* force the active interrupt state to all disabled */
3579 r600_disable_interrupt_state(rdev);
3580 return 0;
3581 }
3582
3583 if (ASIC_IS_DCE3(rdev)) {
3584 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3585 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3586 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3587 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
3588 if (ASIC_IS_DCE32(rdev)) {
3589 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
3590 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
3591 hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3592 hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3593 } else {
3594 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3595 hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3596 }
3597 } else {
3598 hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3599 hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3600 hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3601 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3602 hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3603 }
3604
3605 dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
3606
3607 if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
3608 thermal_int = RREG32(CG_THERMAL_INT) &
3609 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
3610 } else if (rdev->family >= CHIP_RV770) {
3611 thermal_int = RREG32(RV770_CG_THERMAL_INT) &
3612 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
3613 }
3614 if (rdev->irq.dpm_thermal) {
3615 DRM_DEBUG("dpm thermal\n");
3616 thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
3617 }
3618
3619 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
3620 DRM_DEBUG("r600_irq_set: sw int\n");
3621 cp_int_cntl |= RB_INT_ENABLE;
3622 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
3623 }
3624
3625 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
3626 DRM_DEBUG("r600_irq_set: sw int dma\n");
3627 dma_cntl |= TRAP_ENABLE;
3628 }
3629
3630 if (rdev->irq.crtc_vblank_int[0] ||
3631 atomic_read(&rdev->irq.pflip[0])) {
3632 DRM_DEBUG("r600_irq_set: vblank 0\n");
3633 mode_int |= D1MODE_VBLANK_INT_MASK;
3634 }
3635 if (rdev->irq.crtc_vblank_int[1] ||
3636 atomic_read(&rdev->irq.pflip[1])) {
3637 DRM_DEBUG("r600_irq_set: vblank 1\n");
3638 mode_int |= D2MODE_VBLANK_INT_MASK;
3639 }
3640 if (rdev->irq.hpd[0]) {
3641 DRM_DEBUG("r600_irq_set: hpd 1\n");
3642 hpd1 |= DC_HPDx_INT_EN;
3643 }
3644 if (rdev->irq.hpd[1]) {
3645 DRM_DEBUG("r600_irq_set: hpd 2\n");
3646 hpd2 |= DC_HPDx_INT_EN;
3647 }
3648 if (rdev->irq.hpd[2]) {
3649 DRM_DEBUG("r600_irq_set: hpd 3\n");
3650 hpd3 |= DC_HPDx_INT_EN;
3651 }
3652 if (rdev->irq.hpd[3]) {
3653 DRM_DEBUG("r600_irq_set: hpd 4\n");
3654 hpd4 |= DC_HPDx_INT_EN;
3655 }
3656 if (rdev->irq.hpd[4]) {
3657 DRM_DEBUG("r600_irq_set: hpd 5\n");
3658 hpd5 |= DC_HPDx_INT_EN;
3659 }
3660 if (rdev->irq.hpd[5]) {
3661 DRM_DEBUG("r600_irq_set: hpd 6\n");
3662 hpd6 |= DC_HPDx_INT_EN;
3663 }
3664 if (rdev->irq.afmt[0]) {
3665 DRM_DEBUG("r600_irq_set: hdmi 0\n");
3666 hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
3667 }
3668 if (rdev->irq.afmt[1]) {
3669 DRM_DEBUG("r600_irq_set: hdmi 0\n");
3670 hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
3671 }
3672
3673 WREG32(CP_INT_CNTL, cp_int_cntl);
3674 WREG32(DMA_CNTL, dma_cntl);
3675 WREG32(DxMODE_INT_MASK, mode_int);
3676 WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
3677 WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
3678 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
3679 if (ASIC_IS_DCE3(rdev)) {
3680 WREG32(DC_HPD1_INT_CONTROL, hpd1);
3681 WREG32(DC_HPD2_INT_CONTROL, hpd2);
3682 WREG32(DC_HPD3_INT_CONTROL, hpd3);
3683 WREG32(DC_HPD4_INT_CONTROL, hpd4);
3684 if (ASIC_IS_DCE32(rdev)) {
3685 WREG32(DC_HPD5_INT_CONTROL, hpd5);
3686 WREG32(DC_HPD6_INT_CONTROL, hpd6);
3687 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
3688 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
3689 } else {
3690 WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
3691 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
3692 }
3693 } else {
3694 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
3695 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
3696 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
3697 WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
3698 WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
3699 }
3700 if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
3701 WREG32(CG_THERMAL_INT, thermal_int);
3702 } else if (rdev->family >= CHIP_RV770) {
3703 WREG32(RV770_CG_THERMAL_INT, thermal_int);
3704 }
3705
3706 return 0;
3707}
3708
3709static void r600_irq_ack(struct radeon_device *rdev)
3710{
3711 u32 tmp;
3712
3713 if (ASIC_IS_DCE3(rdev)) {
3714 rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
3715 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
3716 rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
3717 if (ASIC_IS_DCE32(rdev)) {
3718 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
3719 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
3720 } else {
3721 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
3722 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
3723 }
3724 } else {
3725 rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
3726 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
3727 rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
3728 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
3729 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
3730 }
3731 rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
3732 rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
3733
3734 if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3735 WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3736 if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3737 WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3738 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
3739 WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3740 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
3741 WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3742 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
3743 WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3744 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
3745 WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3746 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
3747 if (ASIC_IS_DCE3(rdev)) {
3748 tmp = RREG32(DC_HPD1_INT_CONTROL);
3749 tmp |= DC_HPDx_INT_ACK;
3750 WREG32(DC_HPD1_INT_CONTROL, tmp);
3751 } else {
3752 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
3753 tmp |= DC_HPDx_INT_ACK;
3754 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
3755 }
3756 }
3757 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
3758 if (ASIC_IS_DCE3(rdev)) {
3759 tmp = RREG32(DC_HPD2_INT_CONTROL);
3760 tmp |= DC_HPDx_INT_ACK;
3761 WREG32(DC_HPD2_INT_CONTROL, tmp);
3762 } else {
3763 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
3764 tmp |= DC_HPDx_INT_ACK;
3765 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
3766 }
3767 }
3768 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
3769 if (ASIC_IS_DCE3(rdev)) {
3770 tmp = RREG32(DC_HPD3_INT_CONTROL);
3771 tmp |= DC_HPDx_INT_ACK;
3772 WREG32(DC_HPD3_INT_CONTROL, tmp);
3773 } else {
3774 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
3775 tmp |= DC_HPDx_INT_ACK;
3776 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
3777 }
3778 }
3779 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
3780 tmp = RREG32(DC_HPD4_INT_CONTROL);
3781 tmp |= DC_HPDx_INT_ACK;
3782 WREG32(DC_HPD4_INT_CONTROL, tmp);
3783 }
3784 if (ASIC_IS_DCE32(rdev)) {
3785 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
3786 tmp = RREG32(DC_HPD5_INT_CONTROL);
3787 tmp |= DC_HPDx_INT_ACK;
3788 WREG32(DC_HPD5_INT_CONTROL, tmp);
3789 }
3790 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
3791 tmp = RREG32(DC_HPD5_INT_CONTROL);
3792 tmp |= DC_HPDx_INT_ACK;
3793 WREG32(DC_HPD6_INT_CONTROL, tmp);
3794 }
3795 if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
3796 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
3797 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3798 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
3799 }
3800 if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
3801 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
3802 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3803 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
3804 }
3805 } else {
3806 if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
3807 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
3808 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3809 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3810 }
3811 if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
3812 if (ASIC_IS_DCE3(rdev)) {
3813 tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
3814 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3815 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
3816 } else {
3817 tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
3818 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3819 WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
3820 }
3821 }
3822 }
3823}
3824
3825void r600_irq_disable(struct radeon_device *rdev)
3826{
3827 r600_disable_interrupts(rdev);
3828 /* Wait and acknowledge irq */
3829 mdelay(1);
3830 r600_irq_ack(rdev);
3831 r600_disable_interrupt_state(rdev);
3832}
3833
3834static u32 r600_get_ih_wptr(struct radeon_device *rdev)
3835{
3836 u32 wptr, tmp;
3837
3838 if (rdev->wb.enabled)
3839 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
3840 else
3841 wptr = RREG32(IH_RB_WPTR);
3842
3843 if (wptr & RB_OVERFLOW) {
3844 /* When a ring buffer overflow happen start parsing interrupt
3845 * from the last not overwritten vector (wptr + 16). Hopefully
3846 * this should allow us to catchup.
3847 */
3848 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
3849 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
3850 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
3851 tmp = RREG32(IH_RB_CNTL);
3852 tmp |= IH_WPTR_OVERFLOW_CLEAR;
3853 WREG32(IH_RB_CNTL, tmp);
3854 }
3855 return (wptr & rdev->ih.ptr_mask);
3856}
3857
3858/* r600 IV Ring
3859 * Each IV ring entry is 128 bits:
3860 * [7:0] - interrupt source id
3861 * [31:8] - reserved
3862 * [59:32] - interrupt source data
3863 * [127:60] - reserved
3864 *
3865 * The basic interrupt vector entries
3866 * are decoded as follows:
3867 * src_id src_data description
3868 * 1 0 D1 Vblank
3869 * 1 1 D1 Vline
3870 * 5 0 D2 Vblank
3871 * 5 1 D2 Vline
3872 * 19 0 FP Hot plug detection A
3873 * 19 1 FP Hot plug detection B
3874 * 19 2 DAC A auto-detection
3875 * 19 3 DAC B auto-detection
3876 * 21 4 HDMI block A
3877 * 21 5 HDMI block B
3878 * 176 - CP_INT RB
3879 * 177 - CP_INT IB1
3880 * 178 - CP_INT IB2
3881 * 181 - EOP Interrupt
3882 * 233 - GUI Idle
3883 *
3884 * Note, these are based on r600 and may need to be
3885 * adjusted or added to on newer asics
3886 */
3887
3888int r600_irq_process(struct radeon_device *rdev)
3889{
3890 u32 wptr;
3891 u32 rptr;
3892 u32 src_id, src_data;
3893 u32 ring_index;
3894 bool queue_hotplug = false;
3895 bool queue_hdmi = false;
3896 bool queue_thermal = false;
3897
3898 if (!rdev->ih.enabled || rdev->shutdown)
3899 return IRQ_NONE;
3900
3901 /* No MSIs, need a dummy read to flush PCI DMAs */
3902 if (!rdev->msi_enabled)
3903 RREG32(IH_RB_WPTR);
3904
3905 wptr = r600_get_ih_wptr(rdev);
3906
3907restart_ih:
3908 /* is somebody else already processing irqs? */
3909 if (atomic_xchg(&rdev->ih.lock, 1))
3910 return IRQ_NONE;
3911
3912 rptr = rdev->ih.rptr;
3913 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
3914
3915 /* Order reading of wptr vs. reading of IH ring data */
3916 rmb();
3917
3918 /* display interrupts */
3919 r600_irq_ack(rdev);
3920
3921 while (rptr != wptr) {
3922 /* wptr/rptr are in bytes! */
3923 ring_index = rptr / 4;
3924 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
3925 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
3926
3927 switch (src_id) {
3928 case 1: /* D1 vblank/vline */
3929 switch (src_data) {
3930 case 0: /* D1 vblank */
3931 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
3932 if (rdev->irq.crtc_vblank_int[0]) {
3933 drm_handle_vblank(rdev->ddev, 0);
3934#ifdef __NetBSD__
3935 spin_lock(&rdev->irq.vblank_lock);
3936 rdev->pm.vblank_sync = true;
3937 DRM_SPIN_WAKEUP_ONE(&rdev->irq.vblank_queue, &rdev->irq.vblank_lock);
3938 spin_unlock(&rdev->irq.vblank_lock);
3939#else
3940 rdev->pm.vblank_sync = true;
3941 wake_up(&rdev->irq.vblank_queue);
3942#endif
3943 }
3944 if (atomic_read(&rdev->irq.pflip[0]))
3945 radeon_crtc_handle_flip(rdev, 0);
3946 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
3947 DRM_DEBUG("IH: D1 vblank\n");
3948 }
3949 break;
3950 case 1: /* D1 vline */
3951 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
3952 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
3953 DRM_DEBUG("IH: D1 vline\n");
3954 }
3955 break;
3956 default:
3957 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3958 break;
3959 }
3960 break;
3961 case 5: /* D2 vblank/vline */
3962 switch (src_data) {
3963 case 0: /* D2 vblank */
3964 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
3965 if (rdev->irq.crtc_vblank_int[1]) {
3966 drm_handle_vblank(rdev->ddev, 1);
3967#ifdef __NetBSD__
3968 spin_lock(&rdev->irq.vblank_lock);
3969 rdev->pm.vblank_sync = true;
3970 DRM_SPIN_WAKEUP_ONE(&rdev->irq.vblank_queue, &rdev->irq.vblank_lock);
3971 spin_unlock(&rdev->irq.vblank_lock);
3972#else
3973 rdev->pm.vblank_sync = true;
3974 wake_up(&rdev->irq.vblank_queue);
3975#endif
3976 }
3977 if (atomic_read(&rdev->irq.pflip[1]))
3978 radeon_crtc_handle_flip(rdev, 1);
3979 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
3980 DRM_DEBUG("IH: D2 vblank\n");
3981 }
3982 break;
3983 case 1: /* D1 vline */
3984 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
3985 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
3986 DRM_DEBUG("IH: D2 vline\n");
3987 }
3988 break;
3989 default:
3990 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3991 break;
3992 }
3993 break;
3994 case 9: /* D1 pflip */
3995 DRM_DEBUG("IH: D1 flip\n");
3996 radeon_crtc_handle_flip(rdev, 0);
3997 break;
3998 case 11: /* D2 pflip */
3999 DRM_DEBUG("IH: D2 flip\n");
4000 radeon_crtc_handle_flip(rdev, 1);
4001 break;
4002 case 19: /* HPD/DAC hotplug */
4003 switch (src_data) {
4004 case 0:
4005 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
4006 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
4007 queue_hotplug = true;
4008 DRM_DEBUG("IH: HPD1\n");
4009 }
4010 break;
4011 case 1:
4012 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
4013 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
4014 queue_hotplug = true;
4015 DRM_DEBUG("IH: HPD2\n");
4016 }
4017 break;
4018 case 4:
4019 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
4020 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
4021 queue_hotplug = true;
4022 DRM_DEBUG("IH: HPD3\n");
4023 }
4024 break;
4025 case 5:
4026 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
4027 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
4028 queue_hotplug = true;
4029 DRM_DEBUG("IH: HPD4\n");
4030 }
4031 break;
4032 case 10:
4033 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
4034 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
4035 queue_hotplug = true;
4036 DRM_DEBUG("IH: HPD5\n");
4037 }
4038 break;
4039 case 12:
4040 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
4041 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
4042 queue_hotplug = true;
4043 DRM_DEBUG("IH: HPD6\n");
4044 }
4045 break;
4046 default:
4047 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
4048 break;
4049 }
4050 break;
4051 case 21: /* hdmi */
4052 switch (src_data) {
4053 case 4:
4054 if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
4055 rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
4056 queue_hdmi = true;
4057 DRM_DEBUG("IH: HDMI0\n");
4058 }
4059 break;
4060 case 5:
4061 if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
4062 rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
4063 queue_hdmi = true;
4064 DRM_DEBUG("IH: HDMI1\n");
4065 }
4066 break;
4067 default:
4068 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
4069 break;
4070 }
4071 break;
4072 case 124: /* UVD */
4073 DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
4074 radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
4075 break;
4076 case 176: /* CP_INT in ring buffer */
4077 case 177: /* CP_INT in IB1 */
4078 case 178: /* CP_INT in IB2 */
4079 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
4080 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
4081 break;
4082 case 181: /* CP EOP event */
4083 DRM_DEBUG("IH: CP EOP\n");
4084 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
4085 break;
4086 case 224: /* DMA trap event */
4087 DRM_DEBUG("IH: DMA trap\n");
4088 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
4089 break;
4090 case 230: /* thermal low to high */
4091 DRM_DEBUG("IH: thermal low to high\n");
4092 rdev->pm.dpm.thermal.high_to_low = false;
4093 queue_thermal = true;
4094 break;
4095 case 231: /* thermal high to low */
4096 DRM_DEBUG("IH: thermal high to low\n");
4097 rdev->pm.dpm.thermal.high_to_low = true;
4098 queue_thermal = true;
4099 break;
4100 case 233: /* GUI IDLE */
4101 DRM_DEBUG("IH: GUI idle\n");
4102 break;
4103 default:
4104 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
4105 break;
4106 }
4107
4108 /* wptr/rptr are in bytes! */
4109 rptr += 16;
4110 rptr &= rdev->ih.ptr_mask;
4111 }
4112 if (queue_hotplug)
4113 schedule_work(&rdev->hotplug_work);
4114 if (queue_hdmi)
4115 schedule_work(&rdev->audio_work);
4116 if (queue_thermal && rdev->pm.dpm_enabled)
4117 schedule_work(&rdev->pm.dpm.thermal.work);
4118 rdev->ih.rptr = rptr;
4119 WREG32(IH_RB_RPTR, rdev->ih.rptr);
4120 atomic_set(&rdev->ih.lock, 0);
4121
4122 /* make sure wptr hasn't changed while processing */
4123 wptr = r600_get_ih_wptr(rdev);
4124 if (wptr != rptr)
4125 goto restart_ih;
4126
4127 return IRQ_HANDLED;
4128}
4129
4130/*
4131 * Debugfs info
4132 */
4133#if defined(CONFIG_DEBUG_FS)
4134
4135static int r600_debugfs_mc_info(struct seq_file *m, void *data)
4136{
4137 struct drm_info_node *node = (struct drm_info_node *) m->private;
4138 struct drm_device *dev = node->minor->dev;
4139 struct radeon_device *rdev = dev->dev_private;
4140
4141 DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
4142 DREG32_SYS(m, rdev, VM_L2_STATUS);
4143 return 0;
4144}
4145
4146static struct drm_info_list r600_mc_info_list[] = {
4147 {"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
4148};
4149#endif
4150
4151int r600_debugfs_mc_info_init(struct radeon_device *rdev)
4152{
4153#if defined(CONFIG_DEBUG_FS)
4154 return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
4155#else
4156 return 0;
4157#endif
4158}
4159
4160#ifdef __NetBSD__
4161# define __iomem volatile
4162# define readl fake_readl
4163#endif
4164
4165/**
4166 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
4167 * rdev: radeon device structure
4168 * bo: buffer object struct which userspace is waiting for idle
4169 *
4170 * Some R6XX/R7XX doesn't seems to take into account HDP flush performed
4171 * through ring buffer, this leads to corruption in rendering, see
4172 * http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we
4173 * directly perform HDP flush by writing register through MMIO.
4174 */
4175void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
4176{
4177 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
4178 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
4179 * This seems to cause problems on some AGP cards. Just use the old
4180 * method for them.
4181 */
4182 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
4183 rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
4184 void __iomem *ptr = rdev->vram_scratch.ptr;
4185
4186 WREG32(HDP_DEBUG1, 0);
4187 (void)readl(ptr);
4188 } else
4189 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
4190}
4191
4192#ifdef __NetBSD__
4193# undef __iomem
4194# undef readl
4195#endif
4196
4197void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
4198{
4199 u32 link_width_cntl, mask;
4200
4201 if (rdev->flags & RADEON_IS_IGP)
4202 return;
4203
4204 if (!(rdev->flags & RADEON_IS_PCIE))
4205 return;
4206
4207 /* x2 cards have a special sequence */
4208 if (ASIC_IS_X2(rdev))
4209 return;
4210
4211 radeon_gui_idle(rdev);
4212
4213 switch (lanes) {
4214 case 0:
4215 mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
4216 break;
4217 case 1:
4218 mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
4219 break;
4220 case 2:
4221 mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
4222 break;
4223 case 4:
4224 mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
4225 break;
4226 case 8:
4227 mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
4228 break;
4229 case 12:
4230 /* not actually supported */
4231 mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
4232 break;
4233 case 16:
4234 mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
4235 break;
4236 default:
4237 DRM_ERROR("invalid pcie lane request: %d\n", lanes);
4238 return;
4239 }
4240
4241 link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
4242 link_width_cntl &= ~RADEON_PCIE_LC_LINK_WIDTH_MASK;
4243 link_width_cntl |= mask << RADEON_PCIE_LC_LINK_WIDTH_SHIFT;
4244 link_width_cntl |= (RADEON_PCIE_LC_RECONFIG_NOW |
4245 R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
4246
4247 WREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4248}
4249
4250int r600_get_pcie_lanes(struct radeon_device *rdev)
4251{
4252 u32 link_width_cntl;
4253
4254 if (rdev->flags & RADEON_IS_IGP)
4255 return 0;
4256
4257 if (!(rdev->flags & RADEON_IS_PCIE))
4258 return 0;
4259
4260 /* x2 cards have a special sequence */
4261 if (ASIC_IS_X2(rdev))
4262 return 0;
4263
4264 radeon_gui_idle(rdev);
4265
4266 link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
4267
4268 switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
4269 case RADEON_PCIE_LC_LINK_WIDTH_X1:
4270 return 1;
4271 case RADEON_PCIE_LC_LINK_WIDTH_X2:
4272 return 2;
4273 case RADEON_PCIE_LC_LINK_WIDTH_X4:
4274 return 4;
4275 case RADEON_PCIE_LC_LINK_WIDTH_X8:
4276 return 8;
4277 case RADEON_PCIE_LC_LINK_WIDTH_X12:
4278 /* not actually supported */
4279 return 12;
4280 case RADEON_PCIE_LC_LINK_WIDTH_X0:
4281 case RADEON_PCIE_LC_LINK_WIDTH_X16:
4282 default:
4283 return 16;
4284 }
4285}
4286
4287static void r600_pcie_gen2_enable(struct radeon_device *rdev)
4288{
4289#ifndef __NetBSD__ /* XXX radeon pcie */
4290 u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
4291 u16 link_cntl2;
4292
4293 if (radeon_pcie_gen2 == 0)
4294 return;
4295
4296 if (rdev->flags & RADEON_IS_IGP)
4297 return;
4298
4299 if (!(rdev->flags & RADEON_IS_PCIE))
4300 return;
4301
4302 /* x2 cards have a special sequence */
4303 if (ASIC_IS_X2(rdev))
4304 return;
4305
4306 /* only RV6xx+ chips are supported */
4307 if (rdev->family <= CHIP_R600)
4308 return;
4309
4310 if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
4311 (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
4312 return;
4313
4314 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
4315 if (speed_cntl & LC_CURRENT_DATA_RATE) {
4316 DRM_INFO("PCIE gen 2 link speeds already enabled\n");
4317 return;
4318 }
4319
4320 DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
4321
4322 /* 55 nm r6xx asics */
4323 if ((rdev->family == CHIP_RV670) ||
4324 (rdev->family == CHIP_RV620) ||
4325 (rdev->family == CHIP_RV635)) {
4326 /* advertise upconfig capability */
4327 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
4328 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
4329 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4330 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
4331 if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
4332 lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
4333 link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
4334 LC_RECONFIG_ARC_MISSING_ESCAPE);
4335 link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
4336 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4337 } else {
4338 link_width_cntl |= LC_UPCONFIGURE_DIS;
4339 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4340 }
4341 }
4342
4343 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
4344 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
4345 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
4346
4347 /* 55 nm r6xx asics */
4348 if ((rdev->family == CHIP_RV670) ||
4349 (rdev->family == CHIP_RV620) ||
4350 (rdev->family == CHIP_RV635)) {
4351 WREG32(MM_CFGREGS_CNTL, 0x8);
4352 link_cntl2 = RREG32(0x4088);
4353 WREG32(MM_CFGREGS_CNTL, 0);
4354 /* not supported yet */
4355 if (link_cntl2 & SELECTABLE_DEEMPHASIS)
4356 return;
4357 }
4358
4359 speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
4360 speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
4361 speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
4362 speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
4363 speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
4364 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
4365
4366 tmp = RREG32(0x541c);
4367 WREG32(0x541c, tmp | 0x8);
4368 WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
4369 link_cntl2 = RREG16(0x4088);
4370 link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
4371 link_cntl2 |= 0x2;
4372 WREG16(0x4088, link_cntl2);
4373 WREG32(MM_CFGREGS_CNTL, 0);
4374
4375 if ((rdev->family == CHIP_RV670) ||
4376 (rdev->family == CHIP_RV620) ||
4377 (rdev->family == CHIP_RV635)) {
4378 training_cntl = RREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL);
4379 training_cntl &= ~LC_POINT_7_PLUS_EN;
4380 WREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL, training_cntl);
4381 } else {
4382 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
4383 speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
4384 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
4385 }
4386
4387 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
4388 speed_cntl |= LC_GEN2_EN_STRAP;
4389 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
4390
4391 } else {
4392 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
4393 /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
4394 if (1)
4395 link_width_cntl |= LC_UPCONFIGURE_DIS;
4396 else
4397 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
4398 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4399 }
4400#endif
4401}
4402
4403/**
4404 * r600_get_gpu_clock_counter - return GPU clock counter snapshot
4405 *
4406 * @rdev: radeon_device pointer
4407 *
4408 * Fetches a GPU clock counter snapshot (R6xx-cayman).
4409 * Returns the 64 bit clock counter snapshot.
4410 */
4411uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev)
4412{
4413 uint64_t clock;
4414
4415 mutex_lock(&rdev->gpu_clock_mutex);
4416 WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
4417 clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
4418 ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
4419 mutex_unlock(&rdev->gpu_clock_mutex);
4420 return clock;
4421}
4422