1 | /* |
2 | * Copyright 2011 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | * Authors: Alex Deucher |
23 | */ |
24 | |
25 | #include "drmP.h" |
26 | #include "radeon.h" |
27 | #include "rv770d.h" |
28 | #include "r600_dpm.h" |
29 | #include "rv770_dpm.h" |
30 | #include "cypress_dpm.h" |
31 | #include "atom.h" |
32 | #include <linux/seq_file.h> |
33 | |
34 | #define MC_CG_ARB_FREQ_F0 0x0a |
35 | #define MC_CG_ARB_FREQ_F1 0x0b |
36 | #define MC_CG_ARB_FREQ_F2 0x0c |
37 | #define MC_CG_ARB_FREQ_F3 0x0d |
38 | |
39 | #define MC_CG_SEQ_DRAMCONF_S0 0x05 |
40 | #define MC_CG_SEQ_DRAMCONF_S1 0x06 |
41 | |
42 | #define PCIE_BUS_CLK 10000 |
43 | #define TCLK (PCIE_BUS_CLK / 10) |
44 | |
45 | #define SMC_RAM_END 0xC000 |
46 | |
47 | struct rv7xx_ps *rv770_get_ps(struct radeon_ps *rps) |
48 | { |
49 | struct rv7xx_ps *ps = rps->ps_priv; |
50 | |
51 | return ps; |
52 | } |
53 | |
54 | struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev) |
55 | { |
56 | struct rv7xx_power_info *pi = rdev->pm.dpm.priv; |
57 | |
58 | return pi; |
59 | } |
60 | |
61 | struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev) |
62 | { |
63 | struct evergreen_power_info *pi = rdev->pm.dpm.priv; |
64 | |
65 | return pi; |
66 | } |
67 | |
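/*
 * Enable/disable hardware-controlled dynamic PCIe gen2 speed switching
 * in the BIF.  When disabling, the gen2 strap is left alone if the card
 * booted in gen2; in either case the register is only written back when
 * the link partner supports (or has ever used) gen2.
 */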
68 | static void rv770_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev, |
69 | bool enable) |
70 | { |
71 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
72 | u32 tmp; |
73 | |
74 | tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); |
75 | if (enable) { |
76 | tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK; |
77 | tmp |= LC_HW_VOLTAGE_IF_CONTROL(1); |
78 | tmp |= LC_GEN2_EN_STRAP; |
79 | } else { |
80 | if (!pi->boot_in_gen2) { |
81 | tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK; |
82 | tmp &= ~LC_GEN2_EN_STRAP; |
83 | } |
84 | } |
85 | if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) || |
86 | (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) |
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
}
90 | |
91 | static void rv770_enable_l0s(struct radeon_device *rdev) |
92 | { |
93 | u32 tmp; |
94 | |
95 | tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL) & ~LC_L0S_INACTIVITY_MASK; |
96 | tmp |= LC_L0S_INACTIVITY(3); |
97 | WREG32_PCIE_PORT(PCIE_LC_CNTL, tmp); |
98 | } |
99 | |
100 | static void rv770_enable_l1(struct radeon_device *rdev) |
101 | { |
102 | u32 tmp; |
103 | |
104 | tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL); |
105 | tmp &= ~LC_L1_INACTIVITY_MASK; |
106 | tmp |= LC_L1_INACTIVITY(4); |
107 | tmp &= ~LC_PMI_TO_L1_DIS; |
108 | tmp &= ~LC_ASPM_TO_L1_DIS; |
109 | WREG32_PCIE_PORT(PCIE_LC_CNTL, tmp); |
110 | } |
111 | |
112 | static void rv770_enable_pll_sleep_in_l1(struct radeon_device *rdev) |
113 | { |
114 | u32 tmp; |
115 | |
116 | tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL) & ~LC_L1_INACTIVITY_MASK; |
117 | tmp |= LC_L1_INACTIVITY(8); |
118 | WREG32_PCIE_PORT(PCIE_LC_CNTL, tmp); |
119 | |
120 | /* NOTE, this is a PCIE indirect reg, not PCIE PORT */ |
121 | tmp = RREG32_PCIE(PCIE_P_CNTL); |
122 | tmp |= P_PLL_PWRDN_IN_L1L23; |
123 | tmp &= ~P_PLL_BUF_PDNB; |
124 | tmp &= ~P_PLL_PDNB; |
125 | tmp |= P_ALLOW_PRX_FRONTEND_SHUTOFF; |
126 | WREG32_PCIE(PCIE_P_CNTL, tmp); |
127 | } |
128 | |
129 | static void rv770_gfx_clock_gating_enable(struct radeon_device *rdev, |
130 | bool enable) |
131 | { |
132 | if (enable) |
133 | WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN); |
134 | else { |
135 | WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN); |
136 | WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON); |
137 | WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON); |
138 | RREG32(GB_TILING_CONFIG); |
139 | } |
140 | } |
141 | |
142 | static void rv770_mg_clock_gating_enable(struct radeon_device *rdev, |
143 | bool enable) |
144 | { |
145 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
146 | |
147 | if (enable) { |
148 | u32 mgcg_cgtt_local0; |
149 | |
150 | if (rdev->family == CHIP_RV770) |
151 | mgcg_cgtt_local0 = RV770_MGCGTTLOCAL0_DFLT; |
152 | else |
153 | mgcg_cgtt_local0 = RV7XX_MGCGTTLOCAL0_DFLT; |
154 | |
155 | WREG32(CG_CGTT_LOCAL_0, mgcg_cgtt_local0); |
156 | WREG32(CG_CGTT_LOCAL_1, (RV770_MGCGTTLOCAL1_DFLT & 0xFFFFCFFF)); |
157 | |
158 | if (pi->mgcgtssm) |
159 | WREG32(CGTS_SM_CTRL_REG, RV770_MGCGCGTSSMCTRL_DFLT); |
160 | } else { |
161 | WREG32(CG_CGTT_LOCAL_0, 0xFFFFFFFF); |
162 | WREG32(CG_CGTT_LOCAL_1, 0xFFFFCFFF); |
163 | } |
164 | } |
165 | |
166 | void rv770_restore_cgcg(struct radeon_device *rdev) |
167 | { |
168 | bool dpm_en = false, cg_en = false; |
169 | |
170 | if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN) |
171 | dpm_en = true; |
172 | if (RREG32(SCLK_PWRMGT_CNTL) & DYN_GFX_CLK_OFF_EN) |
173 | cg_en = true; |
174 | |
175 | if (dpm_en && !cg_en) |
176 | WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN); |
177 | } |
178 | |
179 | static void rv770_start_dpm(struct radeon_device *rdev) |
180 | { |
181 | WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF); |
182 | |
183 | WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF); |
184 | |
185 | WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN); |
186 | } |
187 | |
188 | void rv770_stop_dpm(struct radeon_device *rdev) |
189 | { |
190 | PPSMC_Result result; |
191 | |
192 | result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled); |
193 | |
194 | if (result != PPSMC_Result_OK) |
		DRM_ERROR("Could not force DPM to low.\n");
196 | |
197 | WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); |
198 | |
199 | WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF); |
200 | |
201 | WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF); |
202 | } |
203 | |
204 | bool rv770_dpm_enabled(struct radeon_device *rdev) |
205 | { |
206 | if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN) |
207 | return true; |
208 | else |
209 | return false; |
210 | } |
211 | |
212 | void rv770_enable_thermal_protection(struct radeon_device *rdev, |
213 | bool enable) |
214 | { |
215 | if (enable) |
216 | WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS); |
217 | else |
218 | WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS); |
219 | } |
220 | |
221 | void rv770_enable_acpi_pm(struct radeon_device *rdev) |
222 | { |
223 | WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN); |
224 | } |
225 | |
226 | u8 rv770_get_seq_value(struct radeon_device *rdev, |
227 | struct rv7xx_pl *pl) |
228 | { |
229 | return (pl->flags & ATOM_PPLIB_R600_FLAGS_LOWPOWER) ? |
230 | MC_CG_SEQ_DRAMCONF_S0 : MC_CG_SEQ_DRAMCONF_S1; |
231 | } |
232 | |
233 | int rv770_read_smc_soft_register(struct radeon_device *rdev, |
234 | u16 reg_offset, u32 *value) |
235 | { |
236 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
237 | |
238 | return rv770_read_smc_sram_dword(rdev, |
239 | pi->soft_regs_start + reg_offset, |
240 | value, pi->sram_end); |
241 | } |
242 | |
243 | int rv770_write_smc_soft_register(struct radeon_device *rdev, |
244 | u16 reg_offset, u32 value) |
245 | { |
246 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
247 | |
248 | return rv770_write_smc_sram_dword(rdev, |
249 | pi->soft_regs_start + reg_offset, |
250 | value, pi->sram_end); |
251 | } |
252 | |
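/*
 * Fill in the per-level aT entries of an SMC SW state.  l[] and r[] hold
 * threshold percentages interpolated from the sclk ratios of neighbouring
 * performance levels (l[0] = 0, r[2] = 100); each aT packs them into the
 * CG_L and CG_R fields, scaled by bsp (pbsp for the highest level).
 */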
253 | int rv770_populate_smc_t(struct radeon_device *rdev, |
254 | struct radeon_ps *radeon_state, |
255 | RV770_SMC_SWSTATE *smc_state) |
256 | { |
257 | struct rv7xx_ps *state = rv770_get_ps(radeon_state); |
258 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
259 | int i; |
260 | int a_n; |
261 | int a_d; |
262 | u8 l[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE]; |
263 | u8 r[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE]; |
264 | u32 a_t; |
265 | |
266 | l[0] = 0; |
267 | r[2] = 100; |
268 | |
269 | a_n = (int)state->medium.sclk * pi->lmp + |
270 | (int)state->low.sclk * (R600_AH_DFLT - pi->rlp); |
271 | a_d = (int)state->low.sclk * (100 - (int)pi->rlp) + |
272 | (int)state->medium.sclk * pi->lmp; |
273 | |
274 | l[1] = (u8)(pi->lmp - (int)pi->lmp * a_n / a_d); |
275 | r[0] = (u8)(pi->rlp + (100 - (int)pi->rlp) * a_n / a_d); |
276 | |
277 | a_n = (int)state->high.sclk * pi->lhp + (int)state->medium.sclk * |
278 | (R600_AH_DFLT - pi->rmp); |
279 | a_d = (int)state->medium.sclk * (100 - (int)pi->rmp) + |
280 | (int)state->high.sclk * pi->lhp; |
281 | |
282 | l[2] = (u8)(pi->lhp - (int)pi->lhp * a_n / a_d); |
283 | r[1] = (u8)(pi->rmp + (100 - (int)pi->rmp) * a_n / a_d); |
284 | |
285 | for (i = 0; i < (RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1); i++) { |
286 | a_t = CG_R(r[i] * pi->bsp / 200) | CG_L(l[i] * pi->bsp / 200); |
287 | smc_state->levels[i].aT = cpu_to_be32(a_t); |
288 | } |
289 | |
290 | a_t = CG_R(r[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1] * pi->pbsp / 200) | |
291 | CG_L(l[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1] * pi->pbsp / 200); |
292 | |
293 | smc_state->levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1].aT = |
294 | cpu_to_be32(a_t); |
295 | |
296 | return 0; |
297 | } |
298 | |
299 | int rv770_populate_smc_sp(struct radeon_device *rdev, |
300 | struct radeon_ps *radeon_state, |
301 | RV770_SMC_SWSTATE *smc_state) |
302 | { |
303 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
304 | int i; |
305 | |
306 | for (i = 0; i < (RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1); i++) |
307 | smc_state->levels[i].bSP = cpu_to_be32(pi->dsp); |
308 | |
309 | smc_state->levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1].bSP = |
310 | cpu_to_be32(pi->psp); |
311 | |
312 | return 0; |
313 | } |
314 | |
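/*
 * Work out the MPLL feedback divider for a target memory clock.  fyclk is
 * 4x the memory clock for GDDR5 and 2x otherwise; the divider is computed
 * in 1/8th steps and returned as an integer part (clkf) plus a fractional
 * part in eighths (clkfrac).
 */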
315 | static void rv770_calculate_fractional_mpll_feedback_divider(u32 memory_clock, |
316 | u32 reference_clock, |
317 | bool gddr5, |
318 | struct atom_clock_dividers *dividers, |
319 | u32 *clkf, |
320 | u32 *clkfrac) |
321 | { |
322 | u32 post_divider, reference_divider, feedback_divider8; |
323 | u32 fyclk; |
324 | |
325 | if (gddr5) |
326 | fyclk = (memory_clock * 8) / 2; |
327 | else |
328 | fyclk = (memory_clock * 4) / 2; |
329 | |
330 | post_divider = dividers->post_div; |
331 | reference_divider = dividers->ref_div; |
332 | |
333 | feedback_divider8 = |
334 | (8 * fyclk * reference_divider * post_divider) / reference_clock; |
335 | |
336 | *clkf = feedback_divider8 / 8; |
337 | *clkfrac = feedback_divider8 % 8; |
338 | } |
339 | |
340 | static int rv770_encode_yclk_post_div(u32 postdiv, u32 *encoded_postdiv) |
341 | { |
342 | int ret = 0; |
343 | |
344 | switch (postdiv) { |
345 | case 1: |
346 | *encoded_postdiv = 0; |
347 | break; |
348 | case 2: |
349 | *encoded_postdiv = 1; |
350 | break; |
351 | case 4: |
352 | *encoded_postdiv = 2; |
353 | break; |
354 | case 8: |
355 | *encoded_postdiv = 3; |
356 | break; |
357 | case 16: |
358 | *encoded_postdiv = 4; |
359 | break; |
360 | default: |
361 | ret = -EINVAL; |
362 | break; |
363 | } |
364 | |
365 | return ret; |
366 | } |
367 | |
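/* Look up the MPLL IBIAS field value for a given integer feedback divider. */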
368 | u32 rv770_map_clkf_to_ibias(struct radeon_device *rdev, u32 clkf) |
369 | { |
370 | if (clkf <= 0x10) |
371 | return 0x4B; |
372 | if (clkf <= 0x19) |
373 | return 0x5B; |
374 | if (clkf <= 0x21) |
375 | return 0x2B; |
376 | if (clkf <= 0x27) |
377 | return 0x6C; |
378 | if (clkf <= 0x31) |
379 | return 0x9D; |
380 | return 0xC6; |
381 | } |
382 | |
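/*
 * Build the MPLL AD/DQ register values for a target memory clock: fetch
 * the dividers from ATOM, derive the fractional feedback divider and the
 * matching IBIAS value, and patch them into the boot-time register values
 * cached in pi->clk_regs.  The DQ side is only reprogrammed for GDDR5.
 */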
383 | static int rv770_populate_mclk_value(struct radeon_device *rdev, |
384 | u32 engine_clock, u32 memory_clock, |
385 | RV7XX_SMC_MCLK_VALUE *mclk) |
386 | { |
387 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
388 | u8 encoded_reference_dividers[] = { 0, 16, 17, 20, 21 }; |
389 | u32 mpll_ad_func_cntl = |
390 | pi->clk_regs.rv770.mpll_ad_func_cntl; |
391 | u32 mpll_ad_func_cntl_2 = |
392 | pi->clk_regs.rv770.mpll_ad_func_cntl_2; |
393 | u32 mpll_dq_func_cntl = |
394 | pi->clk_regs.rv770.mpll_dq_func_cntl; |
395 | u32 mpll_dq_func_cntl_2 = |
396 | pi->clk_regs.rv770.mpll_dq_func_cntl_2; |
397 | u32 mclk_pwrmgt_cntl = |
398 | pi->clk_regs.rv770.mclk_pwrmgt_cntl; |
399 | u32 dll_cntl = pi->clk_regs.rv770.dll_cntl; |
400 | struct atom_clock_dividers dividers; |
401 | u32 reference_clock = rdev->clock.mpll.reference_freq; |
402 | u32 clkf, clkfrac; |
403 | u32 postdiv_yclk; |
404 | u32 ibias; |
405 | int ret; |
406 | |
407 | ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM, |
					     memory_clock, false, &dividers);
409 | if (ret) |
410 | return ret; |
411 | |
412 | if ((dividers.ref_div < 1) || (dividers.ref_div > 5)) |
413 | return -EINVAL; |
414 | |
415 | rv770_calculate_fractional_mpll_feedback_divider(memory_clock, reference_clock, |
416 | pi->mem_gddr5, |
							 &dividers, &clkf, &clkfrac);
418 | |
419 | ret = rv770_encode_yclk_post_div(dividers.post_div, &postdiv_yclk); |
420 | if (ret) |
421 | return ret; |
422 | |
423 | ibias = rv770_map_clkf_to_ibias(rdev, clkf); |
424 | |
425 | mpll_ad_func_cntl &= ~(CLKR_MASK | |
426 | YCLK_POST_DIV_MASK | |
427 | CLKF_MASK | |
428 | CLKFRAC_MASK | |
429 | IBIAS_MASK); |
430 | mpll_ad_func_cntl |= CLKR(encoded_reference_dividers[dividers.ref_div - 1]); |
431 | mpll_ad_func_cntl |= YCLK_POST_DIV(postdiv_yclk); |
432 | mpll_ad_func_cntl |= CLKF(clkf); |
433 | mpll_ad_func_cntl |= CLKFRAC(clkfrac); |
434 | mpll_ad_func_cntl |= IBIAS(ibias); |
435 | |
436 | if (dividers.vco_mode) |
437 | mpll_ad_func_cntl_2 |= VCO_MODE; |
438 | else |
439 | mpll_ad_func_cntl_2 &= ~VCO_MODE; |
440 | |
441 | if (pi->mem_gddr5) { |
442 | rv770_calculate_fractional_mpll_feedback_divider(memory_clock, |
443 | reference_clock, |
444 | pi->mem_gddr5, |
								  &dividers, &clkf, &clkfrac);
446 | |
447 | ibias = rv770_map_clkf_to_ibias(rdev, clkf); |
448 | |
449 | ret = rv770_encode_yclk_post_div(dividers.post_div, &postdiv_yclk); |
450 | if (ret) |
451 | return ret; |
452 | |
453 | mpll_dq_func_cntl &= ~(CLKR_MASK | |
454 | YCLK_POST_DIV_MASK | |
455 | CLKF_MASK | |
456 | CLKFRAC_MASK | |
457 | IBIAS_MASK); |
458 | mpll_dq_func_cntl |= CLKR(encoded_reference_dividers[dividers.ref_div - 1]); |
459 | mpll_dq_func_cntl |= YCLK_POST_DIV(postdiv_yclk); |
460 | mpll_dq_func_cntl |= CLKF(clkf); |
461 | mpll_dq_func_cntl |= CLKFRAC(clkfrac); |
462 | mpll_dq_func_cntl |= IBIAS(ibias); |
463 | |
464 | if (dividers.vco_mode) |
465 | mpll_dq_func_cntl_2 |= VCO_MODE; |
466 | else |
467 | mpll_dq_func_cntl_2 &= ~VCO_MODE; |
468 | } |
469 | |
470 | mclk->mclk770.mclk_value = cpu_to_be32(memory_clock); |
471 | mclk->mclk770.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl); |
472 | mclk->mclk770.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2); |
473 | mclk->mclk770.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl); |
474 | mclk->mclk770.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2); |
475 | mclk->mclk770.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl); |
476 | mclk->mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl); |
477 | |
478 | return 0; |
479 | } |
480 | |
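/*
 * Build the SPLL register values for a target engine clock.  The feedback
 * divider is computed in 1/16384 steps (with SPLL_DITHEN set), and engine
 * spread spectrum is programmed on top when the power tables provide SS
 * parameters for the resulting VCO frequency.
 */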
481 | static int rv770_populate_sclk_value(struct radeon_device *rdev, |
482 | u32 engine_clock, |
483 | RV770_SMC_SCLK_VALUE *sclk) |
484 | { |
485 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
486 | struct atom_clock_dividers dividers; |
487 | u32 spll_func_cntl = |
488 | pi->clk_regs.rv770.cg_spll_func_cntl; |
489 | u32 spll_func_cntl_2 = |
490 | pi->clk_regs.rv770.cg_spll_func_cntl_2; |
491 | u32 spll_func_cntl_3 = |
492 | pi->clk_regs.rv770.cg_spll_func_cntl_3; |
493 | u32 cg_spll_spread_spectrum = |
494 | pi->clk_regs.rv770.cg_spll_spread_spectrum; |
495 | u32 cg_spll_spread_spectrum_2 = |
496 | pi->clk_regs.rv770.cg_spll_spread_spectrum_2; |
497 | u64 tmp; |
498 | u32 reference_clock = rdev->clock.spll.reference_freq; |
499 | u32 reference_divider, post_divider; |
500 | u32 fbdiv; |
501 | int ret; |
502 | |
503 | ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, |
					     engine_clock, false, &dividers);
505 | if (ret) |
506 | return ret; |
507 | |
508 | reference_divider = 1 + dividers.ref_div; |
509 | |
510 | if (dividers.enable_post_div) |
511 | post_divider = (0x0f & (dividers.post_div >> 4)) + (0x0f & dividers.post_div) + 2; |
512 | else |
513 | post_divider = 1; |
514 | |
515 | tmp = (u64) engine_clock * reference_divider * post_divider * 16384; |
516 | do_div(tmp, reference_clock); |
517 | fbdiv = (u32) tmp; |
518 | |
519 | if (dividers.enable_post_div) |
520 | spll_func_cntl |= SPLL_DIVEN; |
521 | else |
522 | spll_func_cntl &= ~SPLL_DIVEN; |
523 | spll_func_cntl &= ~(SPLL_HILEN_MASK | SPLL_LOLEN_MASK | SPLL_REF_DIV_MASK); |
524 | spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div); |
525 | spll_func_cntl |= SPLL_HILEN((dividers.post_div >> 4) & 0xf); |
526 | spll_func_cntl |= SPLL_LOLEN(dividers.post_div & 0xf); |
527 | |
528 | spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK; |
529 | spll_func_cntl_2 |= SCLK_MUX_SEL(2); |
530 | |
531 | spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK; |
532 | spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv); |
533 | spll_func_cntl_3 |= SPLL_DITHEN; |
534 | |
535 | if (pi->sclk_ss) { |
536 | struct radeon_atom_ss ss; |
537 | u32 vco_freq = engine_clock * post_divider; |
538 | |
539 | if (radeon_atombios_get_asic_ss_info(rdev, &ss, |
540 | ASIC_INTERNAL_ENGINE_SS, vco_freq)) { |
541 | u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate); |
542 | u32 clk_v = ss.percentage * fbdiv / (clk_s * 10000); |
543 | |
544 | cg_spll_spread_spectrum &= ~CLKS_MASK; |
545 | cg_spll_spread_spectrum |= CLKS(clk_s); |
546 | cg_spll_spread_spectrum |= SSEN; |
547 | |
548 | cg_spll_spread_spectrum_2 &= ~CLKV_MASK; |
549 | cg_spll_spread_spectrum_2 |= CLKV(clk_v); |
550 | } |
551 | } |
552 | |
553 | sclk->sclk_value = cpu_to_be32(engine_clock); |
554 | sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl); |
555 | sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2); |
556 | sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3); |
557 | sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(cg_spll_spread_spectrum); |
558 | sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(cg_spll_spread_spectrum_2); |
559 | |
560 | return 0; |
561 | } |
562 | |
563 | int rv770_populate_vddc_value(struct radeon_device *rdev, u16 vddc, |
564 | RV770_SMC_VOLTAGE_VALUE *voltage) |
565 | { |
566 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
567 | int i; |
568 | |
569 | if (!pi->voltage_control) { |
570 | voltage->index = 0; |
571 | voltage->value = 0; |
572 | return 0; |
573 | } |
574 | |
575 | for (i = 0; i < pi->valid_vddc_entries; i++) { |
576 | if (vddc <= pi->vddc_table[i].vddc) { |
577 | voltage->index = pi->vddc_table[i].vddc_index; |
578 | voltage->value = cpu_to_be16(vddc); |
579 | break; |
580 | } |
581 | } |
582 | |
583 | if (i == pi->valid_vddc_entries) |
584 | return -EINVAL; |
585 | |
586 | return 0; |
587 | } |
588 | |
589 | int rv770_populate_mvdd_value(struct radeon_device *rdev, u32 mclk, |
590 | RV770_SMC_VOLTAGE_VALUE *voltage) |
591 | { |
592 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
593 | |
594 | if (!pi->mvdd_control) { |
595 | voltage->index = MVDD_HIGH_INDEX; |
596 | voltage->value = cpu_to_be16(MVDD_HIGH_VALUE); |
597 | return 0; |
598 | } |
599 | |
600 | if (mclk <= pi->mvdd_split_frequency) { |
601 | voltage->index = MVDD_LOW_INDEX; |
602 | voltage->value = cpu_to_be16(MVDD_LOW_VALUE); |
603 | } else { |
604 | voltage->index = MVDD_HIGH_INDEX; |
605 | voltage->value = cpu_to_be16(MVDD_HIGH_VALUE); |
606 | } |
607 | |
608 | return 0; |
609 | } |
610 | |
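/*
 * Translate one driver performance level into an SMC hardware level:
 * PCIe gen2/XSP and backbias flags, sclk and mclk PLL programming (using
 * the chip-specific helpers for RV740/RV730/RV710), and the VDDC/MVDD
 * voltage entries.  On RV740 with GDDR5 the strobe mode and EDC flags are
 * also derived from the mclk thresholds.
 */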
611 | static int rv770_convert_power_level_to_smc(struct radeon_device *rdev, |
612 | struct rv7xx_pl *pl, |
613 | RV770_SMC_HW_PERFORMANCE_LEVEL *level, |
614 | u8 watermark_level) |
615 | { |
616 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
617 | int ret; |
618 | |
619 | level->gen2PCIE = pi->pcie_gen2 ? |
620 | ((pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0) : 0; |
621 | level->gen2XSP = (pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0; |
622 | level->backbias = (pl->flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ? 1 : 0; |
623 | level->displayWatermark = watermark_level; |
624 | |
625 | if (rdev->family == CHIP_RV740) |
626 | ret = rv740_populate_sclk_value(rdev, pl->sclk, |
627 | &level->sclk); |
628 | else if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710)) |
629 | ret = rv730_populate_sclk_value(rdev, pl->sclk, |
630 | &level->sclk); |
631 | else |
632 | ret = rv770_populate_sclk_value(rdev, pl->sclk, |
633 | &level->sclk); |
634 | if (ret) |
635 | return ret; |
636 | |
637 | if (rdev->family == CHIP_RV740) { |
638 | if (pi->mem_gddr5) { |
639 | if (pl->mclk <= pi->mclk_strobe_mode_threshold) |
640 | level->strobeMode = |
641 | rv740_get_mclk_frequency_ratio(pl->mclk) | 0x10; |
642 | else |
643 | level->strobeMode = 0; |
644 | |
645 | if (pl->mclk > pi->mclk_edc_enable_threshold) |
646 | level->mcFlags = SMC_MC_EDC_RD_FLAG | SMC_MC_EDC_WR_FLAG; |
647 | else |
648 | level->mcFlags = 0; |
649 | } |
650 | ret = rv740_populate_mclk_value(rdev, pl->sclk, |
651 | pl->mclk, &level->mclk); |
652 | } else if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710)) |
653 | ret = rv730_populate_mclk_value(rdev, pl->sclk, |
654 | pl->mclk, &level->mclk); |
655 | else |
656 | ret = rv770_populate_mclk_value(rdev, pl->sclk, |
657 | pl->mclk, &level->mclk); |
658 | if (ret) |
659 | return ret; |
660 | |
661 | ret = rv770_populate_vddc_value(rdev, pl->vddc, |
662 | &level->vddc); |
663 | if (ret) |
664 | return ret; |
665 | |
666 | ret = rv770_populate_mvdd_value(rdev, pl->mclk, &level->mvdd); |
667 | |
668 | return ret; |
669 | } |
670 | |
671 | static int rv770_convert_power_state_to_smc(struct radeon_device *rdev, |
672 | struct radeon_ps *radeon_state, |
673 | RV770_SMC_SWSTATE *smc_state) |
674 | { |
675 | struct rv7xx_ps *state = rv770_get_ps(radeon_state); |
676 | int ret; |
677 | |
678 | if (!(radeon_state->caps & ATOM_PPLIB_DISALLOW_ON_DC)) |
679 | smc_state->flags |= PPSMC_SWSTATE_FLAG_DC; |
680 | |
681 | ret = rv770_convert_power_level_to_smc(rdev, |
682 | &state->low, |
683 | &smc_state->levels[0], |
684 | PPSMC_DISPLAY_WATERMARK_LOW); |
685 | if (ret) |
686 | return ret; |
687 | |
688 | ret = rv770_convert_power_level_to_smc(rdev, |
689 | &state->medium, |
690 | &smc_state->levels[1], |
691 | PPSMC_DISPLAY_WATERMARK_LOW); |
692 | if (ret) |
693 | return ret; |
694 | |
695 | ret = rv770_convert_power_level_to_smc(rdev, |
696 | &state->high, |
697 | &smc_state->levels[2], |
698 | PPSMC_DISPLAY_WATERMARK_HIGH); |
699 | if (ret) |
700 | return ret; |
701 | |
702 | smc_state->levels[0].arbValue = MC_CG_ARB_FREQ_F1; |
703 | smc_state->levels[1].arbValue = MC_CG_ARB_FREQ_F2; |
704 | smc_state->levels[2].arbValue = MC_CG_ARB_FREQ_F3; |
705 | |
706 | smc_state->levels[0].seqValue = rv770_get_seq_value(rdev, |
707 | &state->low); |
708 | smc_state->levels[1].seqValue = rv770_get_seq_value(rdev, |
709 | &state->medium); |
710 | smc_state->levels[2].seqValue = rv770_get_seq_value(rdev, |
711 | &state->high); |
712 | |
713 | rv770_populate_smc_sp(rdev, radeon_state, smc_state); |
714 | |
	return rv770_populate_smc_t(rdev, radeon_state, smc_state);
}
718 | |
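/*
 * Derive the MC arbiter refresh rate for a given engine clock from the
 * DRAM row count (MC_ARB_RAMCFG) and the refresh interval encoded in
 * MC_SEQ_MISC0.
 */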
719 | u32 rv770_calculate_memory_refresh_rate(struct radeon_device *rdev, |
720 | u32 engine_clock) |
721 | { |
722 | u32 dram_rows; |
723 | u32 dram_refresh_rate; |
724 | u32 mc_arb_rfsh_rate; |
725 | u32 tmp; |
726 | |
727 | tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT; |
728 | dram_rows = 1 << (tmp + 10); |
729 | tmp = RREG32(MC_SEQ_MISC0) & 3; |
730 | dram_refresh_rate = 1 << (tmp + 3); |
731 | mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64; |
732 | |
733 | return mc_arb_rfsh_rate; |
734 | } |
735 | |
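/*
 * Program the MC arbiter ratios and refresh rates for all four power
 * modes.  The high clock used for the DRAM timing setup is capped at
 * 0xFF/0x40 (just under 4x) of the low state's sclk.
 */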
736 | static void rv770_program_memory_timing_parameters(struct radeon_device *rdev, |
737 | struct radeon_ps *radeon_state) |
738 | { |
739 | struct rv7xx_ps *state = rv770_get_ps(radeon_state); |
740 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
741 | u32 sqm_ratio; |
742 | u32 arb_refresh_rate; |
743 | u32 high_clock; |
744 | |
745 | if (state->high.sclk < (state->low.sclk * 0xFF / 0x40)) |
746 | high_clock = state->high.sclk; |
747 | else |
748 | high_clock = (state->low.sclk * 0xFF / 0x40); |
749 | |
750 | radeon_atom_set_engine_dram_timings(rdev, high_clock, |
751 | state->high.mclk); |
752 | |
753 | sqm_ratio = |
754 | STATE0(64 * high_clock / pi->boot_sclk) | |
755 | STATE1(64 * high_clock / state->low.sclk) | |
756 | STATE2(64 * high_clock / state->medium.sclk) | |
757 | STATE3(64 * high_clock / state->high.sclk); |
758 | WREG32(MC_ARB_SQM_RATIO, sqm_ratio); |
759 | |
760 | arb_refresh_rate = |
761 | POWERMODE0(rv770_calculate_memory_refresh_rate(rdev, pi->boot_sclk)) | |
762 | POWERMODE1(rv770_calculate_memory_refresh_rate(rdev, state->low.sclk)) | |
763 | POWERMODE2(rv770_calculate_memory_refresh_rate(rdev, state->medium.sclk)) | |
764 | POWERMODE3(rv770_calculate_memory_refresh_rate(rdev, state->high.sclk)); |
765 | WREG32(MC_ARB_RFSH_RATE, arb_refresh_rate); |
766 | } |
767 | |
768 | void rv770_enable_backbias(struct radeon_device *rdev, |
769 | bool enable) |
770 | { |
771 | if (enable) |
772 | WREG32_P(GENERAL_PWRMGT, BACKBIAS_PAD_EN, ~BACKBIAS_PAD_EN); |
773 | else |
774 | WREG32_P(GENERAL_PWRMGT, 0, ~(BACKBIAS_VALUE | BACKBIAS_PAD_EN)); |
775 | } |
776 | |
777 | static void rv770_enable_spread_spectrum(struct radeon_device *rdev, |
778 | bool enable) |
779 | { |
780 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
781 | |
782 | if (enable) { |
783 | if (pi->sclk_ss) |
784 | WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN); |
785 | |
786 | if (pi->mclk_ss) { |
787 | if (rdev->family == CHIP_RV740) |
788 | rv740_enable_mclk_spread_spectrum(rdev, true); |
789 | } |
790 | } else { |
791 | WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN); |
792 | |
793 | WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN); |
794 | |
795 | WREG32_P(CG_MPLL_SPREAD_SPECTRUM, 0, ~SSEN); |
796 | |
797 | if (rdev->family == CHIP_RV740) |
798 | rv740_enable_mclk_spread_spectrum(rdev, false); |
799 | } |
800 | } |
801 | |
802 | static void rv770_program_mpll_timing_parameters(struct radeon_device *rdev) |
803 | { |
804 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
805 | |
806 | if ((rdev->family == CHIP_RV770) && !pi->mem_gddr5) { |
807 | WREG32(MPLL_TIME, |
808 | (MPLL_LOCK_TIME(R600_MPLLLOCKTIME_DFLT * pi->ref_div) | |
809 | MPLL_RESET_TIME(R600_MPLLRESETTIME_DFLT))); |
810 | } |
811 | } |
812 | |
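/*
 * Convert the ASI/PASI intervals into the fixed-point bsp/bsu (and
 * pbsp/pbsu) values, cache the packed dsp/psp register words, and write
 * the default one (dsp) to CG_BSP.
 */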
813 | void rv770_setup_bsp(struct radeon_device *rdev) |
814 | { |
815 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
816 | u32 xclk = radeon_get_xclk(rdev); |
817 | |
818 | r600_calculate_u_and_p(pi->asi, |
819 | xclk, |
820 | 16, |
821 | &pi->bsp, |
822 | &pi->bsu); |
823 | |
824 | r600_calculate_u_and_p(pi->pasi, |
825 | xclk, |
826 | 16, |
827 | &pi->pbsp, |
828 | &pi->pbsu); |
829 | |
830 | pi->dsp = BSP(pi->bsp) | BSU(pi->bsu); |
831 | pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu); |
832 | |
	WREG32(CG_BSP, pi->dsp);
}
836 | |
837 | void rv770_program_git(struct radeon_device *rdev) |
838 | { |
839 | WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK); |
840 | } |
841 | |
842 | void rv770_program_tp(struct radeon_device *rdev) |
843 | { |
844 | int i; |
845 | enum r600_td td = R600_TD_DFLT; |
846 | |
847 | for (i = 0; i < R600_PM_NUMBER_OF_TC; i++) |
848 | WREG32(CG_FFCT_0 + (i * 4), (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i]))); |
849 | |
850 | if (td == R600_TD_AUTO) |
851 | WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL); |
852 | else |
853 | WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL); |
854 | if (td == R600_TD_UP) |
855 | WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE); |
856 | if (td == R600_TD_DOWN) |
857 | WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE); |
858 | } |
859 | |
860 | void rv770_program_tpp(struct radeon_device *rdev) |
861 | { |
862 | WREG32(CG_TPC, R600_TPC_DFLT); |
863 | } |
864 | |
865 | void rv770_program_sstp(struct radeon_device *rdev) |
866 | { |
867 | WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT))); |
868 | } |
869 | |
870 | void rv770_program_engine_speed_parameters(struct radeon_device *rdev) |
871 | { |
872 | WREG32_P(SPLL_CNTL_MODE, SPLL_DIV_SYNC, ~SPLL_DIV_SYNC); |
873 | } |
874 | |
875 | static void rv770_enable_display_gap(struct radeon_device *rdev) |
876 | { |
877 | u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL); |
878 | |
879 | tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK); |
880 | tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE) | |
881 | DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE)); |
882 | WREG32(CG_DISPLAY_GAP_CNTL, tmp); |
883 | } |
884 | |
885 | void rv770_program_vc(struct radeon_device *rdev) |
886 | { |
887 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
888 | |
889 | WREG32(CG_FTV, pi->vrc); |
890 | } |
891 | |
892 | void rv770_clear_vc(struct radeon_device *rdev) |
893 | { |
894 | WREG32(CG_FTV, 0); |
895 | } |
896 | |
897 | int rv770_upload_firmware(struct radeon_device *rdev) |
898 | { |
899 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
900 | int ret; |
901 | |
902 | rv770_reset_smc(rdev); |
903 | rv770_stop_smc_clock(rdev); |
904 | |
905 | ret = rv770_load_smc_ucode(rdev, pi->sram_end); |
906 | if (ret) |
907 | return ret; |
908 | |
909 | return 0; |
910 | } |
911 | |
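/*
 * Build the ACPI (lowest power) state from the initial state: ACPI or
 * minimum VDDC, both memory PLLs held in reset, the memory DLLs reset,
 * and the SPLL bypassed and put to sleep.  All three levels of the state
 * are identical.
 */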
912 | static int rv770_populate_smc_acpi_state(struct radeon_device *rdev, |
913 | RV770_SMC_STATETABLE *table) |
914 | { |
915 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
916 | |
917 | u32 mpll_ad_func_cntl = |
918 | pi->clk_regs.rv770.mpll_ad_func_cntl; |
919 | u32 mpll_ad_func_cntl_2 = |
920 | pi->clk_regs.rv770.mpll_ad_func_cntl_2; |
921 | u32 mpll_dq_func_cntl = |
922 | pi->clk_regs.rv770.mpll_dq_func_cntl; |
923 | u32 mpll_dq_func_cntl_2 = |
924 | pi->clk_regs.rv770.mpll_dq_func_cntl_2; |
925 | u32 spll_func_cntl = |
926 | pi->clk_regs.rv770.cg_spll_func_cntl; |
927 | u32 spll_func_cntl_2 = |
928 | pi->clk_regs.rv770.cg_spll_func_cntl_2; |
929 | u32 spll_func_cntl_3 = |
930 | pi->clk_regs.rv770.cg_spll_func_cntl_3; |
931 | u32 mclk_pwrmgt_cntl; |
932 | u32 dll_cntl; |
933 | |
934 | table->ACPIState = table->initialState; |
935 | |
936 | table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC; |
937 | |
938 | if (pi->acpi_vddc) { |
939 | rv770_populate_vddc_value(rdev, pi->acpi_vddc, |
940 | &table->ACPIState.levels[0].vddc); |
941 | if (pi->pcie_gen2) { |
942 | if (pi->acpi_pcie_gen2) |
943 | table->ACPIState.levels[0].gen2PCIE = 1; |
944 | else |
945 | table->ACPIState.levels[0].gen2PCIE = 0; |
946 | } else |
947 | table->ACPIState.levels[0].gen2PCIE = 0; |
948 | if (pi->acpi_pcie_gen2) |
949 | table->ACPIState.levels[0].gen2XSP = 1; |
950 | else |
951 | table->ACPIState.levels[0].gen2XSP = 0; |
952 | } else { |
953 | rv770_populate_vddc_value(rdev, pi->min_vddc_in_table, |
954 | &table->ACPIState.levels[0].vddc); |
955 | table->ACPIState.levels[0].gen2PCIE = 0; |
	}

	mpll_ad_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN;
960 | |
961 | mpll_dq_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN; |
962 | |
963 | mclk_pwrmgt_cntl = (MRDCKA0_RESET | |
964 | MRDCKA1_RESET | |
965 | MRDCKB0_RESET | |
966 | MRDCKB1_RESET | |
967 | MRDCKC0_RESET | |
968 | MRDCKC1_RESET | |
969 | MRDCKD0_RESET | |
970 | MRDCKD1_RESET); |
971 | |
972 | dll_cntl = 0xff000000; |
973 | |
974 | spll_func_cntl |= SPLL_RESET | SPLL_SLEEP | SPLL_BYPASS_EN; |
975 | |
976 | spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK; |
977 | spll_func_cntl_2 |= SCLK_MUX_SEL(4); |
978 | |
979 | table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl); |
980 | table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2); |
981 | table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl); |
982 | table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2); |
983 | |
984 | table->ACPIState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl); |
985 | table->ACPIState.levels[0].mclk.mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl); |
986 | |
987 | table->ACPIState.levels[0].mclk.mclk770.mclk_value = 0; |
988 | |
989 | table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl); |
990 | table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2); |
991 | table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3); |
992 | |
993 | table->ACPIState.levels[0].sclk.sclk_value = 0; |
994 | |
995 | rv770_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd); |
996 | |
997 | table->ACPIState.levels[1] = table->ACPIState.levels[0]; |
998 | table->ACPIState.levels[2] = table->ACPIState.levels[0]; |
999 | |
1000 | return 0; |
1001 | } |
1002 | |
1003 | int rv770_populate_initial_mvdd_value(struct radeon_device *rdev, |
1004 | RV770_SMC_VOLTAGE_VALUE *voltage) |
1005 | { |
1006 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
1007 | |
1008 | if ((pi->s0_vid_lower_smio_cntl & pi->mvdd_mask_low) == |
	    (pi->mvdd_low_smio[MVDD_LOW_INDEX] & pi->mvdd_mask_low)) {
1010 | voltage->index = MVDD_LOW_INDEX; |
1011 | voltage->value = cpu_to_be16(MVDD_LOW_VALUE); |
1012 | } else { |
1013 | voltage->index = MVDD_HIGH_INDEX; |
1014 | voltage->value = cpu_to_be16(MVDD_HIGH_VALUE); |
1015 | } |
1016 | |
1017 | return 0; |
1018 | } |
1019 | |
1020 | static int rv770_populate_smc_initial_state(struct radeon_device *rdev, |
1021 | struct radeon_ps *radeon_state, |
1022 | RV770_SMC_STATETABLE *table) |
1023 | { |
1024 | struct rv7xx_ps *initial_state = rv770_get_ps(radeon_state); |
1025 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
1026 | u32 a_t; |
1027 | |
1028 | table->initialState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL = |
1029 | cpu_to_be32(pi->clk_regs.rv770.mpll_ad_func_cntl); |
1030 | table->initialState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 = |
1031 | cpu_to_be32(pi->clk_regs.rv770.mpll_ad_func_cntl_2); |
1032 | table->initialState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL = |
1033 | cpu_to_be32(pi->clk_regs.rv770.mpll_dq_func_cntl); |
1034 | table->initialState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 = |
1035 | cpu_to_be32(pi->clk_regs.rv770.mpll_dq_func_cntl_2); |
1036 | table->initialState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL = |
1037 | cpu_to_be32(pi->clk_regs.rv770.mclk_pwrmgt_cntl); |
1038 | table->initialState.levels[0].mclk.mclk770.vDLL_CNTL = |
1039 | cpu_to_be32(pi->clk_regs.rv770.dll_cntl); |
1040 | |
1041 | table->initialState.levels[0].mclk.mclk770.vMPLL_SS = |
1042 | cpu_to_be32(pi->clk_regs.rv770.mpll_ss1); |
1043 | table->initialState.levels[0].mclk.mclk770.vMPLL_SS2 = |
1044 | cpu_to_be32(pi->clk_regs.rv770.mpll_ss2); |
1045 | |
1046 | table->initialState.levels[0].mclk.mclk770.mclk_value = |
1047 | cpu_to_be32(initial_state->low.mclk); |
1048 | |
1049 | table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = |
1050 | cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl); |
1051 | table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = |
1052 | cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl_2); |
1053 | table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = |
1054 | cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl_3); |
1055 | table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM = |
1056 | cpu_to_be32(pi->clk_regs.rv770.cg_spll_spread_spectrum); |
1057 | table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 = |
1058 | cpu_to_be32(pi->clk_regs.rv770.cg_spll_spread_spectrum_2); |
1059 | |
1060 | table->initialState.levels[0].sclk.sclk_value = |
1061 | cpu_to_be32(initial_state->low.sclk); |
1062 | |
1063 | table->initialState.levels[0].arbValue = MC_CG_ARB_FREQ_F0; |
1064 | |
1065 | table->initialState.levels[0].seqValue = |
1066 | rv770_get_seq_value(rdev, &initial_state->low); |
1067 | |
1068 | rv770_populate_vddc_value(rdev, |
1069 | initial_state->low.vddc, |
1070 | &table->initialState.levels[0].vddc); |
1071 | rv770_populate_initial_mvdd_value(rdev, |
1072 | &table->initialState.levels[0].mvdd); |
1073 | |
1074 | a_t = CG_R(0xffff) | CG_L(0); |
1075 | table->initialState.levels[0].aT = cpu_to_be32(a_t); |
1076 | |
1077 | table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp); |
1078 | |
1079 | if (pi->boot_in_gen2) |
1080 | table->initialState.levels[0].gen2PCIE = 1; |
1081 | else |
1082 | table->initialState.levels[0].gen2PCIE = 0; |
1083 | if (initial_state->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) |
1084 | table->initialState.levels[0].gen2XSP = 1; |
1085 | else |
1086 | table->initialState.levels[0].gen2XSP = 0; |
1087 | |
1088 | if (rdev->family == CHIP_RV740) { |
1089 | if (pi->mem_gddr5) { |
1090 | if (initial_state->low.mclk <= pi->mclk_strobe_mode_threshold) |
1091 | table->initialState.levels[0].strobeMode = |
1092 | rv740_get_mclk_frequency_ratio(initial_state->low.mclk) | 0x10; |
1093 | else |
1094 | table->initialState.levels[0].strobeMode = 0; |
1095 | |
1096 | if (initial_state->low.mclk >= pi->mclk_edc_enable_threshold) |
1097 | table->initialState.levels[0].mcFlags = SMC_MC_EDC_RD_FLAG | SMC_MC_EDC_WR_FLAG; |
1098 | else |
1099 | table->initialState.levels[0].mcFlags = 0; |
1100 | } |
1101 | } |
1102 | |
1103 | table->initialState.levels[1] = table->initialState.levels[0]; |
1104 | table->initialState.levels[2] = table->initialState.levels[0]; |
1105 | |
1106 | table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC; |
1107 | |
1108 | return 0; |
1109 | } |
1110 | |
1111 | static int rv770_populate_smc_vddc_table(struct radeon_device *rdev, |
1112 | RV770_SMC_STATETABLE *table) |
1113 | { |
1114 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
1115 | int i; |
1116 | |
1117 | for (i = 0; i < pi->valid_vddc_entries; i++) { |
1118 | table->highSMIO[pi->vddc_table[i].vddc_index] = |
1119 | pi->vddc_table[i].high_smio; |
1120 | table->lowSMIO[pi->vddc_table[i].vddc_index] = |
1121 | cpu_to_be32(pi->vddc_table[i].low_smio); |
1122 | } |
1123 | |
1124 | table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDC] = 0; |
1125 | table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDC] = |
1126 | cpu_to_be32(pi->vddc_mask_low); |
1127 | |
1128 | for (i = 0; |
1129 | ((i < pi->valid_vddc_entries) && |
1130 | (pi->max_vddc_in_table > |
1131 | pi->vddc_table[i].vddc)); |
1132 | i++); |
1133 | |
1134 | table->maxVDDCIndexInPPTable = |
1135 | pi->vddc_table[i].vddc_index; |
1136 | |
1137 | return 0; |
1138 | } |
1139 | |
1140 | static int rv770_populate_smc_mvdd_table(struct radeon_device *rdev, |
1141 | RV770_SMC_STATETABLE *table) |
1142 | { |
1143 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
1144 | |
1145 | if (pi->mvdd_control) { |
1146 | table->lowSMIO[MVDD_HIGH_INDEX] |= |
1147 | cpu_to_be32(pi->mvdd_low_smio[MVDD_HIGH_INDEX]); |
1148 | table->lowSMIO[MVDD_LOW_INDEX] |= |
1149 | cpu_to_be32(pi->mvdd_low_smio[MVDD_LOW_INDEX]); |
1150 | |
1151 | table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_MVDD] = 0; |
1152 | table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_MVDD] = |
1153 | cpu_to_be32(pi->mvdd_mask_low); |
1154 | } |
1155 | |
1156 | return 0; |
1157 | } |
1158 | |
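/*
 * Build the static SMC state table: VDDC/MVDD SMIO tables, thermal
 * protection type, platform flags, and the initial and ACPI states (via
 * the chip-specific helpers), then upload it to SMC SRAM.
 */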
1159 | static int rv770_init_smc_table(struct radeon_device *rdev, |
1160 | struct radeon_ps *radeon_boot_state) |
1161 | { |
1162 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
1163 | struct rv7xx_ps *boot_state = rv770_get_ps(radeon_boot_state); |
1164 | RV770_SMC_STATETABLE *table = &pi->smc_statetable; |
1165 | int ret; |
1166 | |
1167 | memset(table, 0, sizeof(RV770_SMC_STATETABLE)); |
1168 | |
1169 | pi->boot_sclk = boot_state->low.sclk; |
1170 | |
1171 | rv770_populate_smc_vddc_table(rdev, table); |
1172 | rv770_populate_smc_mvdd_table(rdev, table); |
1173 | |
1174 | switch (rdev->pm.int_thermal_type) { |
1175 | case THERMAL_TYPE_RV770: |
1176 | case THERMAL_TYPE_ADT7473_WITH_INTERNAL: |
1177 | table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL; |
1178 | break; |
1179 | case THERMAL_TYPE_NONE: |
1180 | table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE; |
1181 | break; |
1182 | case THERMAL_TYPE_EXTERNAL_GPIO: |
1183 | default: |
1184 | table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL; |
1185 | break; |
1186 | } |
1187 | |
1188 | if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC) { |
1189 | table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; |
1190 | |
1191 | if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT) |
1192 | table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK; |
1193 | |
1194 | if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT) |
1195 | table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE; |
1196 | } |
1197 | |
1198 | if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC) |
1199 | table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; |
1200 | |
1201 | if (pi->mem_gddr5) |
1202 | table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5; |
1203 | |
1204 | if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710)) |
1205 | ret = rv730_populate_smc_initial_state(rdev, radeon_boot_state, table); |
1206 | else |
1207 | ret = rv770_populate_smc_initial_state(rdev, radeon_boot_state, table); |
1208 | if (ret) |
1209 | return ret; |
1210 | |
1211 | if (rdev->family == CHIP_RV740) |
1212 | ret = rv740_populate_smc_acpi_state(rdev, table); |
1213 | else if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710)) |
1214 | ret = rv730_populate_smc_acpi_state(rdev, table); |
1215 | else |
1216 | ret = rv770_populate_smc_acpi_state(rdev, table); |
1217 | if (ret) |
1218 | return ret; |
1219 | |
1220 | table->driverState = table->initialState; |
1221 | |
1222 | return rv770_copy_bytes_to_smc(rdev, |
1223 | pi->state_table_start, |
1224 | (const u8 *)table, |
1225 | sizeof(RV770_SMC_STATETABLE), |
1226 | pi->sram_end); |
1227 | } |
1228 | |
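/*
 * Build the VDDC table from the ATOM voltage object: one entry per step
 * between the minimum and maximum voltage, with the GPIO (SMIO) pattern
 * for each step.  Steps that share the same SMIO pattern collapse onto
 * the same vddc_index.
 */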
1229 | static int rv770_construct_vddc_table(struct radeon_device *rdev) |
1230 | { |
1231 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
1232 | u16 vmin, vmax, step; |
1233 | u32 steps = 0; |
1234 | u8 vddc_index = 0; |
1235 | u32 i; |
1236 | |
1237 | radeon_atom_get_min_voltage(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, &vmin); |
1238 | radeon_atom_get_max_voltage(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, &vmax); |
1239 | radeon_atom_get_voltage_step(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, &step); |
1240 | |
1241 | steps = (vmax - vmin) / step + 1; |
1242 | |
1243 | if (steps > MAX_NO_VREG_STEPS) |
1244 | return -EINVAL; |
1245 | |
1246 | for (i = 0; i < steps; i++) { |
1247 | u32 gpio_pins, gpio_mask; |
1248 | |
1249 | pi->vddc_table[i].vddc = (u16)(vmin + i * step); |
1250 | radeon_atom_get_voltage_gpio_settings(rdev, |
1251 | pi->vddc_table[i].vddc, |
1252 | SET_VOLTAGE_TYPE_ASIC_VDDC, |
1253 | &gpio_pins, &gpio_mask); |
1254 | pi->vddc_table[i].low_smio = gpio_pins & gpio_mask; |
1255 | pi->vddc_table[i].high_smio = 0; |
1256 | pi->vddc_mask_low = gpio_mask; |
1257 | if (i > 0) { |
1258 | if ((pi->vddc_table[i].low_smio != |
			     pi->vddc_table[i - 1].low_smio) ||
1260 | (pi->vddc_table[i].high_smio != |
1261 | pi->vddc_table[i - 1].high_smio)) |
1262 | vddc_index++; |
1263 | } |
1264 | pi->vddc_table[i].vddc_index = vddc_index; |
1265 | } |
1266 | |
1267 | pi->valid_vddc_entries = (u8)steps; |
1268 | |
1269 | return 0; |
1270 | } |
1271 | |
1272 | static u32 rv770_get_mclk_split_point(struct atom_memory_info *memory_info) |
1273 | { |
1274 | if (memory_info->mem_type == MEM_TYPE_GDDR3) |
1275 | return 30000; |
1276 | |
1277 | return 0; |
1278 | } |
1279 | |
1280 | static int rv770_get_mvdd_pin_configuration(struct radeon_device *rdev) |
1281 | { |
1282 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
1283 | u32 gpio_pins, gpio_mask; |
1284 | |
1285 | radeon_atom_get_voltage_gpio_settings(rdev, |
1286 | MVDD_HIGH_VALUE, SET_VOLTAGE_TYPE_ASIC_MVDDC, |
1287 | &gpio_pins, &gpio_mask); |
1288 | pi->mvdd_mask_low = gpio_mask; |
1289 | pi->mvdd_low_smio[MVDD_HIGH_INDEX] = |
1290 | gpio_pins & gpio_mask; |
1291 | |
1292 | radeon_atom_get_voltage_gpio_settings(rdev, |
1293 | MVDD_LOW_VALUE, SET_VOLTAGE_TYPE_ASIC_MVDDC, |
1294 | &gpio_pins, &gpio_mask); |
1295 | pi->mvdd_low_smio[MVDD_LOW_INDEX] = |
1296 | gpio_pins & gpio_mask; |
1297 | |
1298 | return 0; |
1299 | } |
1300 | |
1301 | u8 rv770_get_memory_module_index(struct radeon_device *rdev) |
1302 | { |
1303 | return (u8) ((RREG32(BIOS_SCRATCH_4) >> 16) & 0xff); |
1304 | } |
1305 | |
1306 | static int rv770_get_mvdd_configuration(struct radeon_device *rdev) |
1307 | { |
1308 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
1309 | u8 memory_module_index; |
1310 | struct atom_memory_info memory_info; |
1311 | |
1312 | memory_module_index = rv770_get_memory_module_index(rdev); |
1313 | |
1314 | if (radeon_atom_get_memory_info(rdev, memory_module_index, &memory_info)) { |
1315 | pi->mvdd_control = false; |
1316 | return 0; |
1317 | } |
1318 | |
1319 | pi->mvdd_split_frequency = |
1320 | rv770_get_mclk_split_point(&memory_info); |
1321 | |
1322 | if (pi->mvdd_split_frequency == 0) { |
1323 | pi->mvdd_control = false; |
1324 | return 0; |
1325 | } |
1326 | |
1327 | return rv770_get_mvdd_pin_configuration(rdev); |
1328 | } |
1329 | |
1330 | void rv770_enable_voltage_control(struct radeon_device *rdev, |
1331 | bool enable) |
1332 | { |
1333 | if (enable) |
1334 | WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN); |
1335 | else |
1336 | WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN); |
1337 | } |
1338 | |
1339 | static void rv770_program_display_gap(struct radeon_device *rdev) |
1340 | { |
1341 | u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL); |
1342 | |
1343 | tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK); |
1344 | if (rdev->pm.dpm.new_active_crtcs & 1) { |
1345 | tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK); |
1346 | tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE); |
1347 | } else if (rdev->pm.dpm.new_active_crtcs & 2) { |
1348 | tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE); |
1349 | tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK); |
1350 | } else { |
1351 | tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE); |
1352 | tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE); |
1353 | } |
1354 | WREG32(CG_DISPLAY_GAP_CNTL, tmp); |
1355 | } |
1356 | |
1357 | static void rv770_enable_dynamic_pcie_gen2(struct radeon_device *rdev, |
1358 | bool enable) |
1359 | { |
1360 | rv770_enable_bif_dynamic_pcie_gen2(rdev, enable); |
1361 | |
1362 | if (enable) |
1363 | WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE); |
1364 | else |
1365 | WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE); |
1366 | } |
1367 | |
1368 | static void r7xx_program_memory_timing_parameters(struct radeon_device *rdev, |
1369 | struct radeon_ps *radeon_new_state) |
1370 | { |
1371 | if ((rdev->family == CHIP_RV730) || |
1372 | (rdev->family == CHIP_RV710) || |
1373 | (rdev->family == CHIP_RV740)) |
1374 | rv730_program_memory_timing_parameters(rdev, radeon_new_state); |
1375 | else |
1376 | rv770_program_memory_timing_parameters(rdev, radeon_new_state); |
1377 | } |
1378 | |
1379 | static int rv770_upload_sw_state(struct radeon_device *rdev, |
1380 | struct radeon_ps *radeon_new_state) |
1381 | { |
1382 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
1383 | u16 address = pi->state_table_start + |
1384 | offsetof(RV770_SMC_STATETABLE, driverState); |
1385 | RV770_SMC_SWSTATE state = { 0 }; |
1386 | int ret; |
1387 | |
1388 | ret = rv770_convert_power_state_to_smc(rdev, radeon_new_state, &state); |
1389 | if (ret) |
1390 | return ret; |
1391 | |
1392 | return rv770_copy_bytes_to_smc(rdev, address, (const u8 *)&state, |
1393 | sizeof(RV770_SMC_SWSTATE), |
1394 | pi->sram_end); |
1395 | } |
1396 | |
1397 | int rv770_halt_smc(struct radeon_device *rdev) |
1398 | { |
1399 | if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_Halt) != PPSMC_Result_OK) |
1400 | return -EINVAL; |
1401 | |
1402 | if (rv770_wait_for_smc_inactive(rdev) != PPSMC_Result_OK) |
1403 | return -EINVAL; |
1404 | |
1405 | return 0; |
1406 | } |
1407 | |
1408 | int rv770_resume_smc(struct radeon_device *rdev) |
1409 | { |
1410 | if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_Resume) != PPSMC_Result_OK) |
1411 | return -EINVAL; |
1412 | return 0; |
1413 | } |
1414 | |
1415 | int rv770_set_sw_state(struct radeon_device *rdev) |
1416 | { |
1417 | if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) != PPSMC_Result_OK) |
1418 | return -EINVAL; |
1419 | return 0; |
1420 | } |
1421 | |
1422 | int rv770_set_boot_state(struct radeon_device *rdev) |
1423 | { |
1424 | if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToInitialState) != PPSMC_Result_OK) |
1425 | return -EINVAL; |
1426 | return 0; |
1427 | } |
1428 | |
1429 | void rv770_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev, |
1430 | struct radeon_ps *new_ps, |
1431 | struct radeon_ps *old_ps) |
1432 | { |
1433 | struct rv7xx_ps *new_state = rv770_get_ps(new_ps); |
1434 | struct rv7xx_ps *current_state = rv770_get_ps(old_ps); |
1435 | |
1436 | if ((new_ps->vclk == old_ps->vclk) && |
1437 | (new_ps->dclk == old_ps->dclk)) |
1438 | return; |
1439 | |
1440 | if (new_state->high.sclk >= current_state->high.sclk) |
1441 | return; |
1442 | |
1443 | radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk); |
1444 | } |
1445 | |
1446 | void rv770_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev, |
1447 | struct radeon_ps *new_ps, |
1448 | struct radeon_ps *old_ps) |
1449 | { |
1450 | struct rv7xx_ps *new_state = rv770_get_ps(new_ps); |
1451 | struct rv7xx_ps *current_state = rv770_get_ps(old_ps); |
1452 | |
1453 | if ((new_ps->vclk == old_ps->vclk) && |
1454 | (new_ps->dclk == old_ps->dclk)) |
1455 | return; |
1456 | |
1457 | if (new_state->high.sclk < current_state->high.sclk) |
1458 | return; |
1459 | |
1460 | radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk); |
1461 | } |
1462 | |
1463 | int rv770_restrict_performance_levels_before_switch(struct radeon_device *rdev) |
1464 | { |
1465 | if (rv770_send_msg_to_smc(rdev, (PPSMC_Msg)(PPSMC_MSG_NoForcedLevel)) != PPSMC_Result_OK) |
1466 | return -EINVAL; |
1467 | |
1468 | if (rv770_send_msg_to_smc(rdev, (PPSMC_Msg)(PPSMC_MSG_TwoLevelsDisabled)) != PPSMC_Result_OK) |
1469 | return -EINVAL; |
1470 | |
1471 | return 0; |
1472 | } |
1473 | |
1474 | int rv770_dpm_force_performance_level(struct radeon_device *rdev, |
1475 | enum radeon_dpm_forced_level level) |
1476 | { |
1477 | PPSMC_Msg msg; |
1478 | |
1479 | if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { |
1480 | if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_ZeroLevelsDisabled) != PPSMC_Result_OK) |
1481 | return -EINVAL; |
1482 | msg = PPSMC_MSG_ForceHigh; |
1483 | } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) { |
1484 | if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK) |
1485 | return -EINVAL; |
1486 | msg = (PPSMC_Msg)(PPSMC_MSG_TwoLevelsDisabled); |
1487 | } else { |
1488 | if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK) |
1489 | return -EINVAL; |
1490 | msg = (PPSMC_Msg)(PPSMC_MSG_ZeroLevelsDisabled); |
1491 | } |
1492 | |
1493 | if (rv770_send_msg_to_smc(rdev, msg) != PPSMC_Result_OK) |
1494 | return -EINVAL; |
1495 | |
1496 | rdev->pm.dpm.forced_level = level; |
1497 | |
1498 | return 0; |
1499 | } |
1500 | |
1501 | void r7xx_start_smc(struct radeon_device *rdev) |
1502 | { |
1503 | rv770_start_smc(rdev); |
1504 | rv770_start_smc_clock(rdev); |
1505 | } |
1506 | |
1507 | |
1508 | void r7xx_stop_smc(struct radeon_device *rdev) |
1509 | { |
1510 | rv770_reset_smc(rdev); |
1511 | rv770_stop_smc_clock(rdev); |
1512 | } |
1513 | |
1514 | static void rv770_read_clock_registers(struct radeon_device *rdev) |
1515 | { |
1516 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
1517 | |
1518 | pi->clk_regs.rv770.cg_spll_func_cntl = |
1519 | RREG32(CG_SPLL_FUNC_CNTL); |
1520 | pi->clk_regs.rv770.cg_spll_func_cntl_2 = |
1521 | RREG32(CG_SPLL_FUNC_CNTL_2); |
1522 | pi->clk_regs.rv770.cg_spll_func_cntl_3 = |
1523 | RREG32(CG_SPLL_FUNC_CNTL_3); |
1524 | pi->clk_regs.rv770.cg_spll_spread_spectrum = |
1525 | RREG32(CG_SPLL_SPREAD_SPECTRUM); |
1526 | pi->clk_regs.rv770.cg_spll_spread_spectrum_2 = |
1527 | RREG32(CG_SPLL_SPREAD_SPECTRUM_2); |
1528 | pi->clk_regs.rv770.mpll_ad_func_cntl = |
1529 | RREG32(MPLL_AD_FUNC_CNTL); |
1530 | pi->clk_regs.rv770.mpll_ad_func_cntl_2 = |
1531 | RREG32(MPLL_AD_FUNC_CNTL_2); |
1532 | pi->clk_regs.rv770.mpll_dq_func_cntl = |
1533 | RREG32(MPLL_DQ_FUNC_CNTL); |
1534 | pi->clk_regs.rv770.mpll_dq_func_cntl_2 = |
1535 | RREG32(MPLL_DQ_FUNC_CNTL_2); |
1536 | pi->clk_regs.rv770.mclk_pwrmgt_cntl = |
1537 | RREG32(MCLK_PWRMGT_CNTL); |
1538 | pi->clk_regs.rv770.dll_cntl = RREG32(DLL_CNTL); |
1539 | } |
1540 | |
1541 | static void r7xx_read_clock_registers(struct radeon_device *rdev) |
1542 | { |
1543 | if (rdev->family == CHIP_RV740) |
1544 | rv740_read_clock_registers(rdev); |
1545 | else if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710)) |
1546 | rv730_read_clock_registers(rdev); |
1547 | else |
1548 | rv770_read_clock_registers(rdev); |
1549 | } |
1550 | |
1551 | void rv770_read_voltage_smio_registers(struct radeon_device *rdev) |
1552 | { |
1553 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
1554 | |
1555 | pi->s0_vid_lower_smio_cntl = |
1556 | RREG32(S0_VID_LOWER_SMIO_CNTL); |
1557 | } |
1558 | |
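/*
 * Copy the VID of whichever SMIO slot is currently selected back into
 * slot 0 and point the SW SMIO index at slot 0 again.
 */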
1559 | void rv770_reset_smio_status(struct radeon_device *rdev) |
1560 | { |
1561 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
1562 | u32 sw_smio_index, vid_smio_cntl; |
1563 | |
1564 | sw_smio_index = |
1565 | (RREG32(GENERAL_PWRMGT) & SW_SMIO_INDEX_MASK) >> SW_SMIO_INDEX_SHIFT; |
1566 | switch (sw_smio_index) { |
1567 | case 3: |
1568 | vid_smio_cntl = RREG32(S3_VID_LOWER_SMIO_CNTL); |
1569 | break; |
1570 | case 2: |
1571 | vid_smio_cntl = RREG32(S2_VID_LOWER_SMIO_CNTL); |
1572 | break; |
1573 | case 1: |
1574 | vid_smio_cntl = RREG32(S1_VID_LOWER_SMIO_CNTL); |
1575 | break; |
1576 | case 0: |
1577 | return; |
1578 | default: |
1579 | vid_smio_cntl = pi->s0_vid_lower_smio_cntl; |
1580 | break; |
1581 | } |
1582 | |
1583 | WREG32(S0_VID_LOWER_SMIO_CNTL, vid_smio_cntl); |
1584 | WREG32_P(GENERAL_PWRMGT, SW_SMIO_INDEX(0), ~SW_SMIO_INDEX_MASK); |
1585 | } |
1586 | |
1587 | void rv770_get_memory_type(struct radeon_device *rdev) |
1588 | { |
1589 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
1590 | u32 tmp; |
1591 | |
1592 | tmp = RREG32(MC_SEQ_MISC0); |
1593 | |
1594 | if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) == |
1595 | MC_SEQ_MISC0_GDDR5_VALUE) |
1596 | pi->mem_gddr5 = true; |
1597 | else |
		pi->mem_gddr5 = false;
}
1601 | |
1602 | void rv770_get_pcie_gen2_status(struct radeon_device *rdev) |
1603 | { |
1604 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
1605 | u32 tmp; |
1606 | |
1607 | tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); |
1608 | |
1609 | if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) && |
1610 | (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) |
1611 | pi->pcie_gen2 = true; |
1612 | else |
1613 | pi->pcie_gen2 = false; |
1614 | |
1615 | if (pi->pcie_gen2) { |
1616 | if (tmp & LC_CURRENT_DATA_RATE) |
1617 | pi->boot_in_gen2 = true; |
1618 | else |
1619 | pi->boot_in_gen2 = false; |
1620 | } else |
1621 | pi->boot_in_gen2 = false; |
1622 | } |
1623 | |
1624 | #if 0 |
1625 | static int rv770_enter_ulp_state(struct radeon_device *rdev) |
1626 | { |
1627 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
1628 | |
1629 | if (pi->gfx_clock_gating) { |
1630 | WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN); |
1631 | WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON); |
1632 | WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON); |
1633 | RREG32(GB_TILING_CONFIG); |
1634 | } |
1635 | |
1636 | WREG32_P(SMC_MSG, HOST_SMC_MSG(PPSMC_MSG_SwitchToMinimumPower), |
1637 | ~HOST_SMC_MSG_MASK); |
1638 | |
1639 | udelay(7000); |
1640 | |
1641 | return 0; |
1642 | } |
1643 | |
1644 | static int rv770_exit_ulp_state(struct radeon_device *rdev) |
1645 | { |
1646 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
1647 | int i; |
1648 | |
1649 | WREG32_P(SMC_MSG, HOST_SMC_MSG(PPSMC_MSG_ResumeFromMinimumPower), |
1650 | ~HOST_SMC_MSG_MASK); |
1651 | |
1652 | udelay(7000); |
1653 | |
1654 | for (i = 0; i < rdev->usec_timeout; i++) { |
1655 | if (((RREG32(SMC_MSG) & HOST_SMC_RESP_MASK) >> HOST_SMC_RESP_SHIFT) == 1) |
1656 | break; |
1657 | udelay(1000); |
1658 | } |
1659 | |
1660 | if (pi->gfx_clock_gating) |
1661 | WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN); |
1662 | |
1663 | return 0; |
1664 | } |
1665 | #endif |
1666 | |
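/* On RV730/RV710 boards with DDR2/DDR3, pick the mclk threshold used
 * for dcodt programming; it stays 0 (disabled) otherwise.
 */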
1667 | static void rv770_get_mclk_odt_threshold(struct radeon_device *rdev) |
1668 | { |
1669 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
1670 | u8 memory_module_index; |
1671 | struct atom_memory_info memory_info; |
1672 | |
1673 | pi->mclk_odt_threshold = 0; |
1674 | |
1675 | if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710)) { |
1676 | memory_module_index = rv770_get_memory_module_index(rdev); |
1677 | |
1678 | if (radeon_atom_get_memory_info(rdev, memory_module_index, &memory_info)) |
1679 | return; |
1680 | |
1681 | if (memory_info.mem_type == MEM_TYPE_DDR2 || |
1682 | memory_info.mem_type == MEM_TYPE_DDR3) |
1683 | pi->mclk_odt_threshold = 30000; |
1684 | } |
1685 | } |
1686 | |
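/* Query the maximum VDDC from the vbios; fall back to 0 on failure. */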
1687 | void rv770_get_max_vddc(struct radeon_device *rdev) |
1688 | { |
1689 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
1690 | u16 vddc; |
1691 | |
1692 | if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc)) |
1693 | pi->max_vddc = 0; |
1694 | else |
1695 | pi->max_vddc = vddc; |
1696 | } |
1697 | |
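/* Convert the voltage/backbias response times and the fixed ACPI delay
 * and VBI timeout into reference-clock based delay counts and store
 * them in the SMC soft registers.
 */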
1698 | void rv770_program_response_times(struct radeon_device *rdev) |
1699 | { |
1700 | u32 voltage_response_time, backbias_response_time; |
1701 | u32 acpi_delay_time, vbi_time_out; |
1702 | u32 vddc_dly, bb_dly, acpi_dly, vbi_dly; |
1703 | u32 reference_clock; |
1704 | |
1705 | voltage_response_time = (u32)rdev->pm.dpm.voltage_response_time; |
1706 | backbias_response_time = (u32)rdev->pm.dpm.backbias_response_time; |
1707 | |
1708 | if (voltage_response_time == 0) |
1709 | voltage_response_time = 1000; |
1710 | |
1711 | if (backbias_response_time == 0) |
1712 | backbias_response_time = 1000; |
1713 | |
1714 | acpi_delay_time = 15000; |
1715 | vbi_time_out = 100000; |
1716 | |
1717 | reference_clock = radeon_get_xclk(rdev); |
1718 | |
1719 | vddc_dly = (voltage_response_time * reference_clock) / 1600; |
1720 | bb_dly = (backbias_response_time * reference_clock) / 1600; |
1721 | acpi_dly = (acpi_delay_time * reference_clock) / 1600; |
1722 | vbi_dly = (vbi_time_out * reference_clock) / 1600; |
1723 | |
1724 | rv770_write_smc_soft_register(rdev, |
1725 | RV770_SMC_SOFT_REGISTER_delay_vreg, vddc_dly); |
1726 | rv770_write_smc_soft_register(rdev, |
1727 | RV770_SMC_SOFT_REGISTER_delay_bbias, bb_dly); |
1728 | rv770_write_smc_soft_register(rdev, |
1729 | RV770_SMC_SOFT_REGISTER_delay_acpi, acpi_dly); |
1730 | rv770_write_smc_soft_register(rdev, |
1731 | RV770_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly); |
1732 | #if 0 |
1733 | /* XXX look up hw revision */ |
1734 | if (WEKIVA_A21) |
1735 | rv770_write_smc_soft_register(rdev, |
1736 | RV770_SMC_SOFT_REGISTER_baby_step_timer, |
1737 | 0x10); |
1738 | #endif |
1739 | } |
1740 | |
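/* If the new state's mclk rises above the ODT threshold, reprogram
 * dcodt for the new state before the switch (RV730/RV710 only).
 */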
1741 | static void rv770_program_dcodt_before_state_switch(struct radeon_device *rdev, |
1742 | struct radeon_ps *radeon_new_state, |
1743 | struct radeon_ps *radeon_current_state) |
1744 | { |
1745 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
1746 | struct rv7xx_ps *new_state = rv770_get_ps(radeon_new_state); |
1747 | struct rv7xx_ps *current_state = rv770_get_ps(radeon_current_state); |
1748 | bool current_use_dc = false; |
1749 | bool new_use_dc = false; |
1750 | |
1751 | if (pi->mclk_odt_threshold == 0) |
1752 | return; |
1753 | |
1754 | if (current_state->high.mclk <= pi->mclk_odt_threshold) |
1755 | current_use_dc = true; |
1756 | |
1757 | if (new_state->high.mclk <= pi->mclk_odt_threshold) |
1758 | new_use_dc = true; |
1759 | |
1760 | if (current_use_dc == new_use_dc) |
1761 | return; |
1762 | |
1763 | if (!current_use_dc && new_use_dc) |
1764 | return; |
1765 | |
1766 | if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710)) |
1767 | rv730_program_dcodt(rdev, new_use_dc); |
1768 | } |
1769 | |
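/* If the new state's mclk falls below the ODT threshold, reprogram
 * dcodt for the new state after the switch (RV730/RV710 only).
 */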
1770 | static void rv770_program_dcodt_after_state_switch(struct radeon_device *rdev, |
1771 | struct radeon_ps *radeon_new_state, |
1772 | struct radeon_ps *radeon_current_state) |
1773 | { |
1774 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
1775 | struct rv7xx_ps *new_state = rv770_get_ps(radeon_new_state); |
1776 | struct rv7xx_ps *current_state = rv770_get_ps(radeon_current_state); |
1777 | bool current_use_dc = false; |
1778 | bool new_use_dc = false; |
1779 | |
1780 | if (pi->mclk_odt_threshold == 0) |
1781 | return; |
1782 | |
1783 | if (current_state->high.mclk <= pi->mclk_odt_threshold) |
1784 | current_use_dc = true; |
1785 | |
1786 | if (new_state->high.mclk <= pi->mclk_odt_threshold) |
1787 | new_use_dc = true; |
1788 | |
1789 | if (current_use_dc == new_use_dc) |
1790 | return; |
1791 | |
1792 | if (current_use_dc && !new_use_dc) |
1793 | return; |
1794 | |
1795 | if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710)) |
1796 | rv730_program_dcodt(rdev, new_use_dc); |
1797 | } |
1798 | |
1799 | static void rv770_retrieve_odt_values(struct radeon_device *rdev) |
1800 | { |
1801 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
1802 | |
1803 | if (pi->mclk_odt_threshold == 0) |
1804 | return; |
1805 | |
1806 | if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710)) |
1807 | rv730_get_odt_values(rdev); |
1808 | } |
1809 | |
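/* Select the thermal protection event source (internal digital,
 * external, or either) from the active throttle sources and gate
 * THERMAL_PROTECTION_DIS accordingly.
 */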
1810 | static void rv770_set_dpm_event_sources(struct radeon_device *rdev, u32 sources) |
1811 | { |
1812 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
1813 | bool want_thermal_protection; |
1814 | enum radeon_dpm_event_src dpm_event_src; |
1815 | |
1816 | switch (sources) { |
1817 | case 0: |
1818 | default: |
1819 | want_thermal_protection = false; |
1820 | break; |
1821 | case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL): |
1822 | want_thermal_protection = true; |
1823 | dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL; |
1824 | break; |
1825 | |
1826 | case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL): |
1827 | want_thermal_protection = true; |
1828 | dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL; |
1829 | break; |
1830 | |
1831 | case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) | |
1832 | (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)): |
1833 | want_thermal_protection = true; |
1834 | dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL; |
1835 | break; |
1836 | } |
1837 | |
1838 | if (want_thermal_protection) { |
1839 | WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK); |
1840 | if (pi->thermal_protection) |
1841 | WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS); |
1842 | } else { |
1843 | WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS); |
1844 | } |
1845 | } |
1846 | |
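/* Track the set of active auto-throttle sources and reprogram the
 * DPM event sources whenever it changes.
 */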
1847 | void rv770_enable_auto_throttle_source(struct radeon_device *rdev, |
1848 | enum radeon_dpm_auto_throttle_src source, |
1849 | bool enable) |
1850 | { |
1851 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
1852 | |
1853 | if (enable) { |
1854 | if (!(pi->active_auto_throttle_sources & (1 << source))) { |
1855 | pi->active_auto_throttle_sources |= 1 << source; |
1856 | rv770_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources); |
1857 | } |
1858 | } else { |
1859 | if (pi->active_auto_throttle_sources & (1 << source)) { |
1860 | pi->active_auto_throttle_sources &= ~(1 << source); |
1861 | rv770_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources); |
1862 | } |
1863 | } |
1864 | } |
1865 | |
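/* Clamp the requested range to 0-255 C, program the thermal interrupt
 * and DPM thresholds (in degrees C), and record the range in
 * millidegrees.
 */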
1866 | static int rv770_set_thermal_temperature_range(struct radeon_device *rdev, |
1867 | int min_temp, int max_temp) |
1868 | { |
1869 | int low_temp = 0 * 1000; |
1870 | int high_temp = 255 * 1000; |
1871 | |
1872 | if (low_temp < min_temp) |
1873 | low_temp = min_temp; |
1874 | if (high_temp > max_temp) |
1875 | high_temp = max_temp; |
1876 | if (high_temp < low_temp) { |
DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
1878 | return -EINVAL; |
1879 | } |
1880 | |
1881 | WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK); |
1882 | WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK); |
1883 | WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK); |
1884 | |
1885 | rdev->pm.dpm.thermal.min_temp = low_temp; |
1886 | rdev->pm.dpm.thermal.max_temp = high_temp; |
1887 | |
1888 | return 0; |
1889 | } |
1890 | |
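/* Bring up dpm: set up the voltage/mvdd tables, program the clock and
 * power management parameters, upload the SMC firmware and state
 * table, then start the SMC and dpm.
 */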
1891 | int rv770_dpm_enable(struct radeon_device *rdev) |
1892 | { |
1893 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
1894 | struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; |
1895 | int ret; |
1896 | |
1897 | if (pi->gfx_clock_gating) |
1898 | rv770_restore_cgcg(rdev); |
1899 | |
1900 | if (rv770_dpm_enabled(rdev)) |
1901 | return -EINVAL; |
1902 | |
1903 | if (pi->voltage_control) { |
1904 | rv770_enable_voltage_control(rdev, true); |
1905 | ret = rv770_construct_vddc_table(rdev); |
1906 | if (ret) { |
DRM_ERROR("rv770_construct_vddc_table failed\n");
1908 | return ret; |
1909 | } |
1910 | } |
1911 | |
1912 | if (pi->dcodt) |
1913 | rv770_retrieve_odt_values(rdev); |
1914 | |
1915 | if (pi->mvdd_control) { |
1916 | ret = rv770_get_mvdd_configuration(rdev); |
1917 | if (ret) { |
DRM_ERROR("rv770_get_mvdd_configuration failed\n");
1919 | return ret; |
1920 | } |
1921 | } |
1922 | |
1923 | if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS) |
1924 | rv770_enable_backbias(rdev, true); |
1925 | |
1926 | rv770_enable_spread_spectrum(rdev, true); |
1927 | |
1928 | if (pi->thermal_protection) |
1929 | rv770_enable_thermal_protection(rdev, true); |
1930 | |
1931 | rv770_program_mpll_timing_parameters(rdev); |
1932 | rv770_setup_bsp(rdev); |
1933 | rv770_program_git(rdev); |
1934 | rv770_program_tp(rdev); |
1935 | rv770_program_tpp(rdev); |
1936 | rv770_program_sstp(rdev); |
1937 | rv770_program_engine_speed_parameters(rdev); |
1938 | rv770_enable_display_gap(rdev); |
1939 | rv770_program_vc(rdev); |
1940 | |
1941 | if (pi->dynamic_pcie_gen2) |
1942 | rv770_enable_dynamic_pcie_gen2(rdev, true); |
1943 | |
1944 | ret = rv770_upload_firmware(rdev); |
1945 | if (ret) { |
DRM_ERROR("rv770_upload_firmware failed\n");
1947 | return ret; |
1948 | } |
1949 | ret = rv770_init_smc_table(rdev, boot_ps); |
1950 | if (ret) { |
DRM_ERROR("rv770_init_smc_table failed\n");
1952 | return ret; |
1953 | } |
1954 | |
1955 | rv770_program_response_times(rdev); |
1956 | r7xx_start_smc(rdev); |
1957 | |
1958 | if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710)) |
1959 | rv730_start_dpm(rdev); |
1960 | else |
1961 | rv770_start_dpm(rdev); |
1962 | |
1963 | if (pi->gfx_clock_gating) |
1964 | rv770_gfx_clock_gating_enable(rdev, true); |
1965 | |
1966 | if (pi->mg_clock_gating) |
1967 | rv770_mg_clock_gating_enable(rdev, true); |
1968 | |
1969 | rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); |
1970 | |
1971 | return 0; |
1972 | } |
1973 | |
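/* With interrupts available, program the thermal trip range and ask
 * the SMC to enable the thermal interrupt.
 */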
1974 | int rv770_dpm_late_enable(struct radeon_device *rdev) |
1975 | { |
1976 | int ret; |
1977 | |
1978 | if (rdev->irq.installed && |
1979 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { |
1980 | PPSMC_Result result; |
1981 | |
1982 | ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); |
1983 | if (ret) |
1984 | return ret; |
1985 | rdev->irq.dpm_thermal = true; |
1986 | radeon_irq_set(rdev); |
1987 | result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt); |
1988 | |
1989 | if (result != PPSMC_Result_OK) |
DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
1991 | } |
1992 | |
1993 | return 0; |
1994 | } |
1995 | |
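/* Tear down dpm, roughly reversing the order used in rv770_dpm_enable(). */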
1996 | void rv770_dpm_disable(struct radeon_device *rdev) |
1997 | { |
1998 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
1999 | |
2000 | if (!rv770_dpm_enabled(rdev)) |
2001 | return; |
2002 | |
2003 | rv770_clear_vc(rdev); |
2004 | |
2005 | if (pi->thermal_protection) |
2006 | rv770_enable_thermal_protection(rdev, false); |
2007 | |
2008 | rv770_enable_spread_spectrum(rdev, false); |
2009 | |
2010 | if (pi->dynamic_pcie_gen2) |
2011 | rv770_enable_dynamic_pcie_gen2(rdev, false); |
2012 | |
2013 | if (rdev->irq.installed && |
2014 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { |
2015 | rdev->irq.dpm_thermal = false; |
2016 | radeon_irq_set(rdev); |
2017 | } |
2018 | |
2019 | if (pi->gfx_clock_gating) |
2020 | rv770_gfx_clock_gating_enable(rdev, false); |
2021 | |
2022 | if (pi->mg_clock_gating) |
2023 | rv770_mg_clock_gating_enable(rdev, false); |
2024 | |
2025 | if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710)) |
2026 | rv730_stop_dpm(rdev); |
2027 | else |
2028 | rv770_stop_dpm(rdev); |
2029 | |
2030 | r7xx_stop_smc(rdev); |
2031 | rv770_reset_smio_status(rdev); |
2032 | } |
2033 | |
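/* Switch to the requested power state: halt the SMC, upload the new
 * software state and memory timings, resume the SMC, then activate
 * the new state.
 */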
2034 | int rv770_dpm_set_power_state(struct radeon_device *rdev) |
2035 | { |
2036 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
2037 | struct radeon_ps *new_ps = rdev->pm.dpm.requested_ps; |
2038 | struct radeon_ps *old_ps = rdev->pm.dpm.current_ps; |
2039 | int ret; |
2040 | |
2041 | ret = rv770_restrict_performance_levels_before_switch(rdev); |
2042 | if (ret) { |
DRM_ERROR("rv770_restrict_performance_levels_before_switch failed\n");
2044 | return ret; |
2045 | } |
2046 | rv770_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); |
2047 | ret = rv770_halt_smc(rdev); |
2048 | if (ret) { |
DRM_ERROR("rv770_halt_smc failed\n");
2050 | return ret; |
2051 | } |
2052 | ret = rv770_upload_sw_state(rdev, new_ps); |
2053 | if (ret) { |
DRM_ERROR("rv770_upload_sw_state failed\n");
2055 | return ret; |
2056 | } |
2057 | r7xx_program_memory_timing_parameters(rdev, new_ps); |
2058 | if (pi->dcodt) |
2059 | rv770_program_dcodt_before_state_switch(rdev, new_ps, old_ps); |
2060 | ret = rv770_resume_smc(rdev); |
2061 | if (ret) { |
DRM_ERROR("rv770_resume_smc failed\n");
2063 | return ret; |
2064 | } |
2065 | ret = rv770_set_sw_state(rdev); |
2066 | if (ret) { |
DRM_ERROR("rv770_set_sw_state failed\n");
2068 | return ret; |
2069 | } |
2070 | if (pi->dcodt) |
2071 | rv770_program_dcodt_after_state_switch(rdev, new_ps, old_ps); |
2072 | rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); |
2073 | |
2074 | return 0; |
2075 | } |
2076 | |
2077 | void rv770_dpm_reset_asic(struct radeon_device *rdev) |
2078 | { |
2079 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
2080 | struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; |
2081 | |
2082 | rv770_restrict_performance_levels_before_switch(rdev); |
2083 | if (pi->dcodt) |
2084 | rv770_program_dcodt_before_state_switch(rdev, boot_ps, boot_ps); |
2085 | rv770_set_boot_state(rdev); |
2086 | if (pi->dcodt) |
2087 | rv770_program_dcodt_after_state_switch(rdev, boot_ps, boot_ps); |
2088 | } |
2089 | |
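/* One-time setup: sample clock and voltage registers, detect memory
 * type and PCIE gen2 status, and enable ACPI PM and any requested
 * ASPM states.
 */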
2090 | void rv770_dpm_setup_asic(struct radeon_device *rdev) |
2091 | { |
2092 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
2093 | |
2094 | r7xx_read_clock_registers(rdev); |
2095 | rv770_read_voltage_smio_registers(rdev); |
2096 | rv770_get_memory_type(rdev); |
2097 | if (pi->dcodt) |
2098 | rv770_get_mclk_odt_threshold(rdev); |
2099 | rv770_get_pcie_gen2_status(rdev); |
2100 | |
2101 | rv770_enable_acpi_pm(rdev); |
2102 | |
2103 | if (radeon_aspm != 0) { |
2104 | if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s) |
2105 | rv770_enable_l0s(rdev); |
2106 | if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1) |
2107 | rv770_enable_l1(rdev); |
2108 | if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1) |
2109 | rv770_enable_pll_sleep_in_l1(rdev); |
2110 | } |
2111 | } |
2112 | |
2113 | void rv770_dpm_display_configuration_changed(struct radeon_device *rdev) |
2114 | { |
2115 | rv770_program_display_gap(rdev); |
2116 | } |
2117 | |
2118 | union power_info { |
2119 | struct _ATOM_POWERPLAY_INFO info; |
2120 | struct _ATOM_POWERPLAY_INFO_V2 info_2; |
2121 | struct _ATOM_POWERPLAY_INFO_V3 info_3; |
2122 | struct _ATOM_PPLIB_POWERPLAYTABLE pplib; |
2123 | struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; |
2124 | struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; |
2125 | }; |
2126 | |
2127 | union pplib_clock_info { |
2128 | struct _ATOM_PPLIB_R600_CLOCK_INFO r600; |
2129 | struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; |
2130 | struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; |
2131 | struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; |
2132 | }; |
2133 | |
2134 | union pplib_power_state { |
2135 | struct _ATOM_PPLIB_STATE v1; |
2136 | struct _ATOM_PPLIB_STATE_V2 v2; |
2137 | }; |
2138 | |
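/* Fill in the generic (non-clock) fields of a radeon_ps from a PPLib
 * non-clock info entry.
 */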
2139 | static void rv7xx_parse_pplib_non_clock_info(struct radeon_device *rdev, |
2140 | struct radeon_ps *rps, |
2141 | struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, |
2142 | u8 table_rev) |
2143 | { |
2144 | rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); |
2145 | rps->class = le16_to_cpu(non_clock_info->usClassification); |
2146 | rps->class2 = le16_to_cpu(non_clock_info->usClassification2); |
2147 | |
2148 | if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { |
2149 | rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); |
2150 | rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); |
2151 | } else { |
2152 | rps->vclk = 0; |
2153 | rps->dclk = 0; |
2154 | } |
2155 | |
2156 | if (r600_is_uvd_state(rps->class, rps->class2)) { |
2157 | if ((rps->vclk == 0) || (rps->dclk == 0)) { |
2158 | rps->vclk = RV770_DEFAULT_VCLK_FREQ; |
2159 | rps->dclk = RV770_DEFAULT_DCLK_FREQ; |
2160 | } |
2161 | } |
2162 | |
2163 | if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) |
2164 | rdev->pm.dpm.boot_ps = rps; |
2165 | if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) |
2166 | rdev->pm.dpm.uvd_ps = rps; |
2167 | } |
2168 | |
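/* Fill in one performance level (low/medium/high) from a PPLib clock
 * info entry, patching up vddc, ACPI, ULV and boot state details as
 * needed.
 */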
2169 | static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev, |
2170 | struct radeon_ps *rps, int index, |
2171 | union pplib_clock_info *clock_info) |
2172 | { |
2173 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
2174 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
2175 | struct rv7xx_ps *ps = rv770_get_ps(rps); |
2176 | u32 sclk, mclk; |
2177 | struct rv7xx_pl *pl; |
2178 | |
2179 | switch (index) { |
2180 | case 0: |
2181 | pl = &ps->low; |
2182 | break; |
2183 | case 1: |
2184 | pl = &ps->medium; |
2185 | break; |
2186 | case 2: |
2187 | default: |
2188 | pl = &ps->high; |
2189 | break; |
2190 | } |
2191 | |
2192 | if (rdev->family >= CHIP_CEDAR) { |
2193 | sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow); |
2194 | sclk |= clock_info->evergreen.ucEngineClockHigh << 16; |
2195 | mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow); |
2196 | mclk |= clock_info->evergreen.ucMemoryClockHigh << 16; |
2197 | |
2198 | pl->vddc = le16_to_cpu(clock_info->evergreen.usVDDC); |
2199 | pl->vddci = le16_to_cpu(clock_info->evergreen.usVDDCI); |
2200 | pl->flags = le32_to_cpu(clock_info->evergreen.ulFlags); |
2201 | } else { |
2202 | sclk = le16_to_cpu(clock_info->r600.usEngineClockLow); |
2203 | sclk |= clock_info->r600.ucEngineClockHigh << 16; |
2204 | mclk = le16_to_cpu(clock_info->r600.usMemoryClockLow); |
2205 | mclk |= clock_info->r600.ucMemoryClockHigh << 16; |
2206 | |
2207 | pl->vddc = le16_to_cpu(clock_info->r600.usVDDC); |
2208 | pl->flags = le32_to_cpu(clock_info->r600.ulFlags); |
2209 | } |
2210 | |
2211 | pl->mclk = mclk; |
2212 | pl->sclk = sclk; |
2213 | |
2214 | /* patch up vddc if necessary */ |
2215 | if (pl->vddc == 0xff01) { |
2216 | if (pi->max_vddc) |
2217 | pl->vddc = pi->max_vddc; |
2218 | } |
2219 | |
2220 | if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { |
2221 | pi->acpi_vddc = pl->vddc; |
2222 | if (rdev->family >= CHIP_CEDAR) |
2223 | eg_pi->acpi_vddci = pl->vddci; |
2224 | if (ps->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) |
2225 | pi->acpi_pcie_gen2 = true; |
2226 | else |
2227 | pi->acpi_pcie_gen2 = false; |
2228 | } |
2229 | |
2230 | if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) { |
2231 | if (rdev->family >= CHIP_BARTS) { |
2232 | eg_pi->ulv.supported = true; |
2233 | eg_pi->ulv.pl = pl; |
2234 | } |
2235 | } |
2236 | |
2237 | if (pi->min_vddc_in_table > pl->vddc) |
2238 | pi->min_vddc_in_table = pl->vddc; |
2239 | |
2240 | if (pi->max_vddc_in_table < pl->vddc) |
2241 | pi->max_vddc_in_table = pl->vddc; |
2242 | |
2243 | /* patch up boot state */ |
2244 | if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { |
2245 | u16 vddc, vddci, mvdd; |
2246 | radeon_atombios_get_default_voltages(rdev, &vddc, &vddci, &mvdd); |
2247 | pl->mclk = rdev->clock.default_mclk; |
2248 | pl->sclk = rdev->clock.default_sclk; |
2249 | pl->vddc = vddc; |
2250 | pl->vddci = vddci; |
2251 | } |
2252 | |
2253 | if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == |
2254 | ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) { |
2255 | rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk; |
2256 | rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk; |
2257 | rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc; |
2258 | rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci; |
2259 | } |
2260 | } |
2261 | |
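/* Walk the ATOM PowerPlay table and build rdev->pm.dpm.ps from its
 * state and clock info arrays.
 */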
2262 | int rv7xx_parse_power_table(struct radeon_device *rdev) |
2263 | { |
2264 | struct radeon_mode_info *mode_info = &rdev->mode_info; |
2265 | struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; |
2266 | union pplib_power_state *power_state; |
2267 | int i, j; |
2268 | union pplib_clock_info *clock_info; |
2269 | union power_info *power_info; |
2270 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); |
2271 | u16 data_offset; |
2272 | u8 frev, crev; |
2273 | struct rv7xx_ps *ps; |
2274 | |
2275 | if (!atom_parse_data_header(mode_info->atom_context, index, NULL, |
2276 | &frev, &crev, &data_offset)) |
2277 | return -EINVAL; |
2278 | power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); |
2279 | |
2280 | rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) * |
2281 | power_info->pplib.ucNumStates, GFP_KERNEL); |
2282 | if (!rdev->pm.dpm.ps) |
2283 | return -ENOMEM; |
2284 | |
2285 | for (i = 0; i < power_info->pplib.ucNumStates; i++) { |
2286 | power_state = (union pplib_power_state *) |
2287 | (mode_info->atom_context->bios + data_offset + |
2288 | le16_to_cpu(power_info->pplib.usStateArrayOffset) + |
2289 | i * power_info->pplib.ucStateEntrySize); |
2290 | non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) |
2291 | (mode_info->atom_context->bios + data_offset + |
2292 | le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) + |
2293 | (power_state->v1.ucNonClockStateIndex * |
2294 | power_info->pplib.ucNonClockSize)); |
2295 | if (power_info->pplib.ucStateEntrySize - 1) { |
2296 | u8 *idx; |
2297 | ps = kzalloc(sizeof(struct rv7xx_ps), GFP_KERNEL); |
2298 | if (ps == NULL) { |
2299 | kfree(rdev->pm.dpm.ps); |
2300 | return -ENOMEM; |
2301 | } |
2302 | rdev->pm.dpm.ps[i].ps_priv = ps; |
2303 | rv7xx_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i], |
2304 | non_clock_info, |
2305 | power_info->pplib.ucNonClockSize); |
2306 | idx = (u8 *)&power_state->v1.ucClockStateIndices[0]; |
2307 | for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) { |
2308 | clock_info = (union pplib_clock_info *) |
2309 | (mode_info->atom_context->bios + data_offset + |
2310 | le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) + |
2311 | (idx[j] * power_info->pplib.ucClockInfoSize)); |
2312 | rv7xx_parse_pplib_clock_info(rdev, |
2313 | &rdev->pm.dpm.ps[i], j, |
2314 | clock_info); |
2315 | } |
2316 | } |
2317 | } |
2318 | rdev->pm.dpm.num_ps = power_info->pplib.ucNumStates; |
2319 | return 0; |
2320 | } |
2321 | |
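/* Check the vbios for engine and memory spread spectrum support
 * (explicitly disabled on Cayman, where it causes hangs).
 */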
2322 | void rv770_get_engine_memory_ss(struct radeon_device *rdev) |
2323 | { |
2324 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
2325 | struct radeon_atom_ss ss; |
2326 | |
2327 | pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, |
2328 | ASIC_INTERNAL_ENGINE_SS, 0); |
2329 | pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, |
2330 | ASIC_INTERNAL_MEMORY_SS, 0); |
2331 | |
2332 | /* disable ss, causes hangs on some cayman boards */ |
2333 | if (rdev->family == CHIP_CAYMAN) { |
2334 | pi->sclk_ss = false; |
2335 | pi->mclk_ss = false; |
2336 | } |
2337 | |
2338 | if (pi->sclk_ss || pi->mclk_ss) |
2339 | pi->dynamic_ss = true; |
2340 | else |
2341 | pi->dynamic_ss = false; |
2342 | } |
2343 | |
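/* Allocate the rv7xx_power_info structure and initialize it with
 * defaults and values parsed from the vbios.
 */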
2344 | int rv770_dpm_init(struct radeon_device *rdev) |
2345 | { |
2346 | struct rv7xx_power_info *pi; |
2347 | struct atom_clock_dividers dividers; |
2348 | int ret; |
2349 | |
2350 | pi = kzalloc(sizeof(struct rv7xx_power_info), GFP_KERNEL); |
2351 | if (pi == NULL) |
2352 | return -ENOMEM; |
2353 | rdev->pm.dpm.priv = pi; |
2354 | |
2355 | rv770_get_max_vddc(rdev); |
2356 | |
2357 | pi->acpi_vddc = 0; |
2358 | pi->min_vddc_in_table = 0; |
2359 | pi->max_vddc_in_table = 0; |
2360 | |
2361 | ret = r600_get_platform_caps(rdev); |
2362 | if (ret) |
2363 | return ret; |
2364 | |
2365 | ret = rv7xx_parse_power_table(rdev); |
2366 | if (ret) |
2367 | return ret; |
2368 | |
2369 | if (rdev->pm.dpm.voltage_response_time == 0) |
2370 | rdev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT; |
2371 | if (rdev->pm.dpm.backbias_response_time == 0) |
2372 | rdev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT; |
2373 | |
2374 | ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, |
0, false, &dividers);
2376 | if (ret) |
2377 | pi->ref_div = dividers.ref_div + 1; |
2378 | else |
2379 | pi->ref_div = R600_REFERENCEDIVIDER_DFLT; |
2380 | |
2381 | pi->mclk_strobe_mode_threshold = 30000; |
2382 | pi->mclk_edc_enable_threshold = 30000; |
2383 | |
2384 | pi->rlp = RV770_RLP_DFLT; |
2385 | pi->rmp = RV770_RMP_DFLT; |
2386 | pi->lhp = RV770_LHP_DFLT; |
2387 | pi->lmp = RV770_LMP_DFLT; |
2388 | |
2389 | pi->voltage_control = |
2390 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, 0); |
2391 | |
2392 | pi->mvdd_control = |
2393 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0); |
2394 | |
2395 | rv770_get_engine_memory_ss(rdev); |
2396 | |
2397 | pi->asi = RV770_ASI_DFLT; |
2398 | pi->pasi = RV770_HASI_DFLT; |
2399 | pi->vrc = RV770_VRC_DFLT; |
2400 | |
2401 | pi->power_gating = false; |
2402 | |
2403 | pi->gfx_clock_gating = true; |
2404 | |
2405 | pi->mg_clock_gating = true; |
2406 | pi->mgcgtssm = true; |
2407 | |
2408 | pi->dynamic_pcie_gen2 = true; |
2409 | |
2410 | if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) |
2411 | pi->thermal_protection = true; |
2412 | else |
2413 | pi->thermal_protection = false; |
2414 | |
2415 | pi->display_gap = true; |
2416 | |
2417 | if (rdev->flags & RADEON_IS_MOBILITY) |
2418 | pi->dcodt = true; |
2419 | else |
2420 | pi->dcodt = false; |
2421 | |
2422 | pi->ulps = true; |
2423 | |
2424 | pi->mclk_stutter_mode_threshold = 0; |
2425 | |
2426 | pi->sram_end = SMC_RAM_END; |
2427 | pi->state_table_start = RV770_SMC_TABLE_ADDRESS; |
2428 | pi->soft_regs_start = RV770_SMC_SOFT_REGISTERS_START; |
2429 | |
2430 | return 0; |
2431 | } |
2432 | |
2433 | void rv770_dpm_print_power_state(struct radeon_device *rdev, |
2434 | struct radeon_ps *rps) |
2435 | { |
2436 | struct rv7xx_ps *ps = rv770_get_ps(rps); |
2437 | struct rv7xx_pl *pl; |
2438 | |
2439 | r600_dpm_print_class_info(rps->class, rps->class2); |
2440 | r600_dpm_print_cap_info(rps->caps); |
printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
2442 | if (rdev->family >= CHIP_CEDAR) { |
2443 | pl = &ps->low; |
printk("\t\tpower level 0 sclk: %u mclk: %u vddc: %u vddci: %u\n",
2445 | pl->sclk, pl->mclk, pl->vddc, pl->vddci); |
2446 | pl = &ps->medium; |
printk("\t\tpower level 1 sclk: %u mclk: %u vddc: %u vddci: %u\n",
2448 | pl->sclk, pl->mclk, pl->vddc, pl->vddci); |
2449 | pl = &ps->high; |
printk("\t\tpower level 2 sclk: %u mclk: %u vddc: %u vddci: %u\n",
2451 | pl->sclk, pl->mclk, pl->vddc, pl->vddci); |
2452 | } else { |
2453 | pl = &ps->low; |
printk("\t\tpower level 0 sclk: %u mclk: %u vddc: %u\n",
2455 | pl->sclk, pl->mclk, pl->vddc); |
2456 | pl = &ps->medium; |
printk("\t\tpower level 1 sclk: %u mclk: %u vddc: %u\n",
2458 | pl->sclk, pl->mclk, pl->vddc); |
2459 | pl = &ps->high; |
printk("\t\tpower level 2 sclk: %u mclk: %u vddc: %u\n",
2461 | pl->sclk, pl->mclk, pl->vddc); |
2462 | } |
2463 | r600_dpm_print_ps_status(rdev, rps); |
2464 | } |
2465 | |
2466 | #ifdef CONFIG_DEBUG_FS |
2467 | void rv770_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, |
2468 | struct seq_file *m) |
2469 | { |
2470 | struct radeon_ps *rps = rdev->pm.dpm.current_ps; |
2471 | struct rv7xx_ps *ps = rv770_get_ps(rps); |
2472 | struct rv7xx_pl *pl; |
2473 | u32 current_index = |
2474 | (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >> |
2475 | CURRENT_PROFILE_INDEX_SHIFT; |
2476 | |
2477 | if (current_index > 2) { |
seq_printf(m, "invalid dpm profile %d\n", current_index);
2479 | } else { |
2480 | if (current_index == 0) |
2481 | pl = &ps->low; |
2482 | else if (current_index == 1) |
2483 | pl = &ps->medium; |
2484 | else /* current_index == 2 */ |
2485 | pl = &ps->high; |
seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
2487 | if (rdev->family >= CHIP_CEDAR) { |
seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
2489 | current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci); |
2490 | } else { |
seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u\n",
2492 | current_index, pl->sclk, pl->mclk, pl->vddc); |
2493 | } |
2494 | } |
2495 | } |
2496 | #endif |
2497 | |
2498 | void rv770_dpm_fini(struct radeon_device *rdev) |
2499 | { |
2500 | int i; |
2501 | |
2502 | for (i = 0; i < rdev->pm.dpm.num_ps; i++) { |
2503 | kfree(rdev->pm.dpm.ps[i].ps_priv); |
2504 | } |
2505 | kfree(rdev->pm.dpm.ps); |
2506 | kfree(rdev->pm.dpm.priv); |
2507 | } |
2508 | |
2509 | u32 rv770_dpm_get_sclk(struct radeon_device *rdev, bool low) |
2510 | { |
2511 | struct rv7xx_ps *requested_state = rv770_get_ps(rdev->pm.dpm.requested_ps); |
2512 | |
2513 | if (low) |
2514 | return requested_state->low.sclk; |
2515 | else |
2516 | return requested_state->high.sclk; |
2517 | } |
2518 | |
2519 | u32 rv770_dpm_get_mclk(struct radeon_device *rdev, bool low) |
2520 | { |
2521 | struct rv7xx_ps *requested_state = rv770_get_ps(rdev->pm.dpm.requested_ps); |
2522 | |
2523 | if (low) |
2524 | return requested_state->low.mclk; |
2525 | else |
2526 | return requested_state->high.mclk; |
2527 | } |
2528 | |
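/* Report whether the vblank period is too short for a safe mclk
 * switch; mclk switching is effectively disabled on desktop RV770.
 */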
2529 | bool rv770_dpm_vblank_too_short(struct radeon_device *rdev) |
2530 | { |
2531 | u32 vblank_time = r600_dpm_get_vblank_time(rdev); |
2532 | u32 switch_limit = 200; /* 300 */ |
2533 | |
2534 | /* RV770 */ |
2535 | /* mclk switching doesn't seem to work reliably on desktop RV770s */ |
2536 | if ((rdev->family == CHIP_RV770) && |
2537 | !(rdev->flags & RADEON_IS_MOBILITY)) |
2538 | switch_limit = 0xffffffff; /* disable mclk switching */ |
2539 | |
2540 | if (vblank_time < switch_limit) |
2541 | return true; |
2542 | else |
2543 | return false; |
2544 | |
2545 | } |
2546 | |