/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "avivod.h"
#include "atom.h"
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#define RADEON_IDLE_LOOP_MS	100
#define RADEON_RECLOCK_DELAY_MS	200
#define RADEON_WAIT_VBLANK_TIMEOUT	200

static const char *radeon_pm_state_type_name[5] = {
	"",
	"Powersave",
	"Battery",
	"Balanced",
	"Performance",
};

static void radeon_dynpm_idle_work_handler(struct work_struct *work);
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
static bool radeon_pm_in_vbl(struct radeon_device *rdev);
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);

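/*
 * radeon_pm_get_type_index - map (power state type, instance) to an index
 *
 * Walks the power state array and returns the index of the instance'th
 * state of the requested type, falling back to the default power state
 * index when no match is found.
 */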
int radeon_pm_get_type_index(struct radeon_device *rdev,
			     enum radeon_pm_state_type ps_type,
			     int instance)
{
	int i;
	int found_instance = -1;

	for (i = 0; i < rdev->pm.num_power_states; i++) {
		if (rdev->pm.power_state[i].type == ps_type) {
			found_instance++;
			if (found_instance == instance)
				return i;
		}
	}
	/* return default if no match */
	return rdev->pm.default_power_state_index;
}

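/*
 * radeon_pm_acpi_event_handler - react to an ACPI AC/DC power event
 *
 * Re-reads the AC adapter status; for DPM this also updates BAPM, and
 * for the AUTO profile it re-evaluates the profile and clocks.
 */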
void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
{
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		mutex_lock(&rdev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			rdev->pm.dpm.ac_power = true;
		else
			rdev->pm.dpm.ac_power = false;
		if (rdev->asic->dpm.enable_bapm)
			radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
		mutex_unlock(&rdev->pm.mutex);
	} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		if (rdev->pm.profile == PM_PROFILE_AUTO) {
			mutex_lock(&rdev->pm.mutex);
			radeon_pm_update_profile(rdev);
			radeon_pm_set_clocks(rdev);
			mutex_unlock(&rdev->pm.mutex);
		}
	}
}

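/*
 * radeon_pm_update_profile - select the active profile index
 *
 * For the AUTO profile the choice depends on AC power and on whether
 * more than one CRTC is active (multi-head vs. single-head entries);
 * the requested power state and clock mode are then taken from the
 * profile's dpms-on or dpms-off table entries.
 */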
static void radeon_pm_update_profile(struct radeon_device *rdev)
{
	switch (rdev->pm.profile) {
	case PM_PROFILE_DEFAULT:
		rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
		break;
	case PM_PROFILE_AUTO:
		if (power_supply_is_system_supplied() > 0) {
			if (rdev->pm.active_crtc_count > 1)
				rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
			else
				rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
		} else {
			if (rdev->pm.active_crtc_count > 1)
				rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
			else
				rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
		}
		break;
	case PM_PROFILE_LOW:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
		break;
	case PM_PROFILE_MID:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
		break;
	case PM_PROFILE_HIGH:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
		break;
	}

	if (rdev->pm.active_crtc_count == 0) {
		rdev->pm.requested_power_state_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
		rdev->pm.requested_clock_mode_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
	} else {
		rdev->pm.requested_power_state_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
		rdev->pm.requested_clock_mode_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
	}
}

static void radeon_unmap_vram_bos(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects))
		return;

	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			ttm_bo_unmap_virtual(&bo->tbo);
	}
}

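/*
 * radeon_sync_with_vblank - wait (with timeout) for the next vblank IRQ
 *
 * Only waits when at least one CRTC is active; the vblank interrupt
 * handler sets pm.vblank_sync to wake us up.
 */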
static void radeon_sync_with_vblank(struct radeon_device *rdev)
{
	if (rdev->pm.active_crtcs) {
#ifdef __NetBSD__
		int ret __unused;

		spin_lock(&rdev->irq.vblank_lock);
		rdev->pm.vblank_sync = false;
		DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(ret, &rdev->irq.vblank_queue,
		    &rdev->irq.vblank_lock,
		    msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT),
		    rdev->pm.vblank_sync);
		spin_unlock(&rdev->irq.vblank_lock);
#else
		rdev->pm.vblank_sync = false;
		wait_event_timeout(
		    rdev->irq.vblank_queue, rdev->pm.vblank_sync,
		    msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
#endif
	}
}

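/*
 * radeon_set_power_state - program the requested power state
 *
 * Clamps sclk/mclk to the defaults, applies voltage and other misc
 * settings before raising clocks or after lowering them, and tries to
 * reprogram the engine and memory clocks within a vblank interval.
 */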
static void radeon_set_power_state(struct radeon_device *rdev)
{
	u32 sclk, mclk;
	bool misc_after = false;

	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	if (radeon_gui_idle(rdev)) {
		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
			clock_info[rdev->pm.requested_clock_mode_index].sclk;
		if (sclk > rdev->pm.default_sclk)
			sclk = rdev->pm.default_sclk;

		/* starting with BTC, there is one state that is used for both
		 * MH and SH.  Difference is that we always use the high clock index for
		 * mclk and vddci.
		 */
		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
		    (rdev->family >= CHIP_BARTS) &&
		    rdev->pm.active_crtc_count &&
		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
		else
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
				clock_info[rdev->pm.requested_clock_mode_index].mclk;

		if (mclk > rdev->pm.default_mclk)
			mclk = rdev->pm.default_mclk;

		/* upvolt before raising clocks, downvolt after lowering clocks */
		if (sclk < rdev->pm.current_sclk)
			misc_after = true;

		radeon_sync_with_vblank(rdev);

		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			if (!radeon_pm_in_vbl(rdev))
				return;
		}

		radeon_pm_prepare(rdev);

		if (!misc_after)
			/* voltage, pcie lanes, etc. */
			radeon_pm_misc(rdev);

		/* set engine clock */
		if (sclk != rdev->pm.current_sclk) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_engine_clock(rdev, sclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_sclk = sclk;
			DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
		}

		/* set memory clock */
		if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_memory_clock(rdev, mclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_mclk = mclk;
			DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
		}

		if (misc_after)
			/* voltage, pcie lanes, etc. */
			radeon_pm_misc(rdev);

		radeon_pm_finish(rdev);

		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
	} else
		DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
}

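/*
 * radeon_pm_set_clocks - safely switch to the requested power state
 *
 * Takes the heavy locks, drains all rings, unmaps VRAM BOs and holds
 * vblank references around the actual state change, then refreshes the
 * display watermarks for the new state.
 */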
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
	int i, r;

	/* no need to take locks, etc. if nothing's going to change */
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	mutex_lock(&rdev->ddev->struct_mutex);
	down_write(&rdev->pm.mclk_lock);
	mutex_lock(&rdev->ring_lock);

	/* wait for the rings to drain */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		struct radeon_ring *ring = &rdev->ring[i];
		if (!ring->ready) {
			continue;
		}
		r = radeon_fence_wait_empty(rdev, i);
		if (r) {
			/* needs a GPU reset, don't reset here */
			mutex_unlock(&rdev->ring_lock);
			up_write(&rdev->pm.mclk_lock);
			mutex_unlock(&rdev->ddev->struct_mutex);
			return;
		}
	}

	radeon_unmap_vram_bos(rdev);

	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.active_crtcs & (1 << i)) {
				rdev->pm.req_vblank |= (1 << i);
				drm_vblank_get(rdev->ddev, i);
			}
		}
	}

	radeon_set_power_state(rdev);

	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.req_vblank & (1 << i)) {
				rdev->pm.req_vblank &= ~(1 << i);
				drm_vblank_put(rdev->ddev, i);
			}
		}
	}

	/* update display watermarks based on new power state */
	radeon_update_bandwidth_info(rdev);
	if (rdev->pm.active_crtc_count)
		radeon_bandwidth_update(rdev);

	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;

	mutex_unlock(&rdev->ring_lock);
	up_write(&rdev->pm.mclk_lock);
	mutex_unlock(&rdev->ddev->struct_mutex);
}

static void radeon_pm_print_states(struct radeon_device *rdev)
{
	int i, j;
	struct radeon_power_state *power_state;
	struct radeon_pm_clock_info *clock_info;

	DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
	for (i = 0; i < rdev->pm.num_power_states; i++) {
		power_state = &rdev->pm.power_state[i];
		DRM_DEBUG_DRIVER("State %d: %s\n", i,
				 radeon_pm_state_type_name[power_state->type]);
		if (i == rdev->pm.default_power_state_index)
			DRM_DEBUG_DRIVER("\tDefault");
		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
			DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
		if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
			DRM_DEBUG_DRIVER("\tSingle display only\n");
		DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
		for (j = 0; j < power_state->num_clock_modes; j++) {
			clock_info = &(power_state->clock_info[j]);
			if (rdev->flags & RADEON_IS_IGP)
				DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
						 j,
						 clock_info->sclk * 10);
			else
				DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
						 j,
						 clock_info->sclk * 10,
						 clock_info->mclk * 10,
						 clock_info->voltage.voltage);
		}
	}
}

#ifndef __NetBSD__ /* XXX radeon power */
static ssize_t radeon_get_pm_profile(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;
	int cp = rdev->pm.profile;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(cp == PM_PROFILE_AUTO) ? "auto" :
			(cp == PM_PROFILE_LOW) ? "low" :
			(cp == PM_PROFILE_MID) ? "mid" :
			(cp == PM_PROFILE_HIGH) ? "high" : "default");
}

static ssize_t radeon_set_pm_profile(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;

	/* Can't set profile when the card is off */
	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		if (strncmp("default", buf, strlen("default")) == 0)
			rdev->pm.profile = PM_PROFILE_DEFAULT;
		else if (strncmp("auto", buf, strlen("auto")) == 0)
			rdev->pm.profile = PM_PROFILE_AUTO;
		else if (strncmp("low", buf, strlen("low")) == 0)
			rdev->pm.profile = PM_PROFILE_LOW;
		else if (strncmp("mid", buf, strlen("mid")) == 0)
			rdev->pm.profile = PM_PROFILE_MID;
		else if (strncmp("high", buf, strlen("high")) == 0)
			rdev->pm.profile = PM_PROFILE_HIGH;
		else {
			count = -EINVAL;
			goto fail;
		}
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else
		count = -EINVAL;

fail:
	mutex_unlock(&rdev->pm.mutex);

	return count;
}

static ssize_t radeon_get_pm_method(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;
	int pm = rdev->pm.pm_method;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == PM_METHOD_DYNPM) ? "dynpm" :
			(pm == PM_METHOD_PROFILE) ? "profile" : "dpm");
}

static ssize_t radeon_set_pm_method(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;

	/* Can't set method when the card is off */
	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		count = -EINVAL;
		goto fail;
	}

	/* we don't support the legacy modes with dpm */
	if (rdev->pm.pm_method == PM_METHOD_DPM) {
		count = -EINVAL;
		goto fail;
	}

	if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
		mutex_lock(&rdev->pm.mutex);
		rdev->pm.pm_method = PM_METHOD_DYNPM;
		rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
		mutex_unlock(&rdev->pm.mutex);
	} else if (strncmp("profile", buf, strlen("profile")) == 0) {
		mutex_lock(&rdev->pm.mutex);
		/* disable dynpm */
		rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
		rdev->pm.pm_method = PM_METHOD_PROFILE;
		mutex_unlock(&rdev->pm.mutex);
		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
	} else {
		count = -EINVAL;
		goto fail;
	}
	radeon_pm_compute_clocks(rdev);
fail:
	return count;
}

static ssize_t radeon_get_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;
	enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;

	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return snprintf(buf, PAGE_SIZE, "off\n");

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

static ssize_t radeon_set_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;

	/* Can't set dpm state when the card is off */
	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	mutex_lock(&rdev->pm.mutex);
	if (strncmp("battery", buf, strlen("battery")) == 0)
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
	else {
		mutex_unlock(&rdev->pm.mutex);
		count = -EINVAL;
		goto fail;
	}
	mutex_unlock(&rdev->pm.mutex);
	radeon_pm_compute_clocks(rdev);
fail:
	return count;
}

static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;
	enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;

	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return snprintf(buf, PAGE_SIZE, "off\n");

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
			(level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
}

static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       const char *buf,
						       size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;
	enum radeon_dpm_forced_level level;
	int ret = 0;

	/* Can't force performance level when the card is off */
	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	mutex_lock(&rdev->pm.mutex);
	if (strncmp("low", buf, strlen("low")) == 0) {
		level = RADEON_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = RADEON_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = RADEON_DPM_FORCED_LEVEL_AUTO;
	} else {
		count = -EINVAL;
		goto fail;
	}
	if (rdev->asic->dpm.force_performance_level) {
		if (rdev->pm.dpm.thermal_active) {
			count = -EINVAL;
			goto fail;
		}
		ret = radeon_dpm_force_performance_level(rdev, level);
		if (ret)
			count = -EINVAL;
	}
fail:
	mutex_unlock(&rdev->pm.mutex);

	return count;
}

static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, radeon_get_dpm_state, radeon_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		   radeon_get_dpm_forced_performance_level,
		   radeon_set_dpm_forced_performance_level);
#endif

#ifndef __NetBSD__ /* XXX radeon hwmon */
static ssize_t radeon_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct radeon_device *rdev = dev_get_drvdata(dev);
	struct drm_device *ddev = rdev->ddev;
	int temp;

	/* Can't get temperature when the card is off */
	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	if (rdev->asic->pm.get_temperature)
		temp = radeon_get_temperature(rdev);
	else
		temp = 0;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct radeon_device *rdev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = rdev->pm.dpm.thermal.min_temp;
	else
		temp = rdev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);

static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	NULL
};

static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct radeon_device *rdev = dev_get_drvdata(dev);

	/* Skip limit attributes if DPM is not enabled */
	if (rdev->pm.pm_method != PM_METHOD_DPM &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};
#endif

static int radeon_hwmon_init(struct radeon_device *rdev)
{
	int err = 0;

#ifndef __NetBSD__ /* XXX radeon hwmon */
	switch (rdev->pm.int_thermal_type) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		if (rdev->asic->pm.get_temperature == NULL)
			return err;
		rdev->pm.int_hwmon_dev = hwmon_device_register_with_groups(rdev->dev,
									   "radeon", rdev,
									   hwmon_groups);
		if (IS_ERR(rdev->pm.int_hwmon_dev)) {
			err = PTR_ERR(rdev->pm.int_hwmon_dev);
			dev_err(rdev->dev,
				"Unable to register hwmon device: %d\n", err);
		}
		break;
	default:
		break;
	}
#endif

	return err;
}

static void radeon_hwmon_fini(struct radeon_device *rdev)
{
#ifndef __NetBSD__ /* XXX radeon hwmon */
	if (rdev->pm.int_hwmon_dev)
		hwmon_device_unregister(rdev->pm.int_hwmon_dev);
#endif
}

static void radeon_dpm_thermal_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev =
		container_of(work, struct radeon_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum radeon_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;

	if (!rdev->pm.dpm_enabled)
		return;

	if (rdev->asic->pm.get_temperature) {
		int temp = radeon_get_temperature(rdev);

		if (temp < rdev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = rdev->pm.dpm.user_state;
	} else {
		if (rdev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = rdev->pm.dpm.user_state;
	}
	mutex_lock(&rdev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		rdev->pm.dpm.thermal_active = true;
	else
		rdev->pm.dpm.thermal_active = false;
	rdev->pm.dpm.state = dpm_state;
	mutex_unlock(&rdev->pm.mutex);

	radeon_pm_compute_clocks(rdev);
}

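/*
 * radeon_dpm_pick_power_state - select a dpm power state
 *
 * Matches the requested state type against the power state table,
 * honoring single-display-only states, and falls back through a chain
 * of related state types when no direct match exists.
 */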
static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
						     enum radeon_pm_state_type dpm_state)
{
	int i;
	struct radeon_ps *ps;
	u32 ui_class;
	bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && rdev->asic->dpm.vblank_too_short) {
		if (radeon_dpm_vblank_too_short(rdev))
			single_display = false;
	}

	/* certain older asics have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		ps = &rdev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (rdev->pm.dpm.uvd_ps)
				return rdev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return rdev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (rdev->pm.dpm.uvd_ps) {
			return rdev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}

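/*
 * radeon_dpm_change_power_state_locked - switch dpm power states
 *
 * Picks the best state for the current conditions and programs it,
 * skipping the full reprogram when only the display configuration
 * changed.  Caller must hold pm.mutex.
 */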
static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
{
	int i;
	struct radeon_ps *ps;
	enum radeon_pm_state_type dpm_state;
	int ret;

	/* if dpm init failed */
	if (!rdev->pm.dpm_enabled)
		return;

	if (rdev->pm.dpm.user_state != rdev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!rdev->pm.dpm.thermal_active) &&
		    (!rdev->pm.dpm.uvd_active))
			rdev->pm.dpm.state = rdev->pm.dpm.user_state;
	}
	dpm_state = rdev->pm.dpm.state;

	ps = radeon_dpm_pick_power_state(rdev, dpm_state);
	if (ps)
		rdev->pm.dpm.requested_ps = ps;
	else
		return;

	/* no need to reprogram if nothing changed unless we are on BTC+ */
	if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) {
		/* vce just modifies an existing state so force a change */
		if (ps->vce_active != rdev->pm.dpm.vce_active)
			goto force;
		if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
			/* for pre-BTC and APUs if the num crtcs changed but state is the same,
			 * all we need to do is update the display configuration.
			 */
			if (rdev->pm.dpm.new_active_crtcs != rdev->pm.dpm.current_active_crtcs) {
				/* update display watermarks based on new power state */
				radeon_bandwidth_update(rdev);
				/* update displays */
				radeon_dpm_display_configuration_changed(rdev);
				rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
				rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
			}
			return;
		} else {
			/* for BTC+ if the num crtcs hasn't changed and state is the same,
			 * nothing to do, if the num crtcs is > 1 and state is the same,
			 * update display configuration.
			 */
			if (rdev->pm.dpm.new_active_crtcs ==
			    rdev->pm.dpm.current_active_crtcs) {
				return;
			} else {
				if ((rdev->pm.dpm.current_active_crtc_count > 1) &&
				    (rdev->pm.dpm.new_active_crtc_count > 1)) {
					/* update display watermarks based on new power state */
					radeon_bandwidth_update(rdev);
					/* update displays */
					radeon_dpm_display_configuration_changed(rdev);
					rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
					rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
					return;
				}
			}
		}
	}

force:
	if (radeon_dpm == 1) {
		printk("switching from power state:\n");
		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
	}

	mutex_lock(&rdev->ddev->struct_mutex);
	down_write(&rdev->pm.mclk_lock);
	mutex_lock(&rdev->ring_lock);

	/* update whether vce is active */
	ps->vce_active = rdev->pm.dpm.vce_active;

	ret = radeon_dpm_pre_set_power_state(rdev);
	if (ret)
		goto done;

	/* update display watermarks based on new power state */
	radeon_bandwidth_update(rdev);
	/* update displays */
	radeon_dpm_display_configuration_changed(rdev);

	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;

	/* wait for the rings to drain */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		struct radeon_ring *ring = &rdev->ring[i];
		if (ring->ready)
			radeon_fence_wait_empty(rdev, i);
	}

	/* program the new power state */
	radeon_dpm_set_power_state(rdev);

	/* update current power state */
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps;

	radeon_dpm_post_set_power_state(rdev);

	if (rdev->asic->dpm.force_performance_level) {
		if (rdev->pm.dpm.thermal_active) {
			enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			rdev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level);
		}
	}

done:
	mutex_unlock(&rdev->ring_lock);
	up_write(&rdev->pm.mclk_lock);
	mutex_unlock(&rdev->ddev->struct_mutex);
}

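/*
 * radeon_dpm_enable_uvd - notify dpm of UVD (video decode) activity
 *
 * On asics with UVD powergating this gates/ungates the block directly;
 * otherwise it requests a UVD power state sized to the number of
 * active SD/HD streams.
 */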
void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
{
	enum radeon_pm_state_type dpm_state;

	if (rdev->asic->dpm.powergate_uvd) {
		mutex_lock(&rdev->pm.mutex);
		/* don't powergate anything if we
		   have active but paused streams */
		enable |= rdev->pm.dpm.sd > 0;
		enable |= rdev->pm.dpm.hd > 0;
		/* enable/disable UVD */
		radeon_dpm_powergate_uvd(rdev, !enable);
		mutex_unlock(&rdev->pm.mutex);
	} else {
		if (enable) {
			mutex_lock(&rdev->pm.mutex);
			rdev->pm.dpm.uvd_active = true;
			if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
			else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
			else
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
			rdev->pm.dpm.state = dpm_state;
			mutex_unlock(&rdev->pm.mutex);
		} else {
			mutex_lock(&rdev->pm.mutex);
			rdev->pm.dpm.uvd_active = false;
			mutex_unlock(&rdev->pm.mutex);
		}

		radeon_pm_compute_clocks(rdev);
	}
}

void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable)
{
	if (enable) {
		mutex_lock(&rdev->pm.mutex);
		rdev->pm.dpm.vce_active = true;
		/* XXX select vce level based on ring/task */
		rdev->pm.dpm.vce_level = RADEON_VCE_LEVEL_AC_ALL;
		mutex_unlock(&rdev->pm.mutex);
	} else {
		mutex_lock(&rdev->pm.mutex);
		rdev->pm.dpm.vce_active = false;
		mutex_unlock(&rdev->pm.mutex);
	}

	radeon_pm_compute_clocks(rdev);
}

static void radeon_pm_suspend_old(struct radeon_device *rdev)
{
	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
			rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
	}
	mutex_unlock(&rdev->pm.mutex);

	cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
}

static void radeon_pm_suspend_dpm(struct radeon_device *rdev)
{
	mutex_lock(&rdev->pm.mutex);
	/* disable dpm */
	radeon_dpm_disable(rdev);
	/* reset the power state */
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	rdev->pm.dpm_enabled = false;
	mutex_unlock(&rdev->pm.mutex);
}

void radeon_pm_suspend(struct radeon_device *rdev)
{
	if (rdev->pm.pm_method == PM_METHOD_DPM)
		radeon_pm_suspend_dpm(rdev);
	else
		radeon_pm_suspend_old(rdev);
}

static void radeon_pm_resume_old(struct radeon_device *rdev)
{
	/* set up the default clocks if the MC ucode is loaded */
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
	/* asic init will reset the default power state */
	mutex_lock(&rdev->pm.mutex);
	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
	rdev->pm.current_clock_mode_index = 0;
	rdev->pm.current_sclk = rdev->pm.default_sclk;
	rdev->pm.current_mclk = rdev->pm.default_mclk;
	if (rdev->pm.power_state) {
		rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
		rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
	}
	if (rdev->pm.pm_method == PM_METHOD_DYNPM
	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
	}
	mutex_unlock(&rdev->pm.mutex);
	radeon_pm_compute_clocks(rdev);
}

static void radeon_pm_resume_dpm(struct radeon_device *rdev)
{
	int ret;

	/* asic init will reset to the boot state */
	mutex_lock(&rdev->pm.mutex);
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	radeon_dpm_setup_asic(rdev);
	ret = radeon_dpm_enable(rdev);
	mutex_unlock(&rdev->pm.mutex);
	if (ret)
		goto dpm_resume_fail;
	rdev->pm.dpm_enabled = true;
	return;

dpm_resume_fail:
	DRM_ERROR("radeon: dpm resume failed\n");
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
}

void radeon_pm_resume(struct radeon_device *rdev)
{
	if (rdev->pm.pm_method == PM_METHOD_DPM)
		radeon_pm_resume_dpm(rdev);
	else
		radeon_pm_resume_old(rdev);
}

static int radeon_pm_init_old(struct radeon_device *rdev)
{
	int ret;

	rdev->pm.profile = PM_PROFILE_DEFAULT;
	rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;
	rdev->pm.default_sclk = rdev->clock.default_sclk;
	rdev->pm.default_mclk = rdev->clock.default_mclk;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (rdev->bios) {
		if (rdev->is_atom_bios)
			radeon_atombios_get_power_modes(rdev);
		else
			radeon_combios_get_power_modes(rdev);
		radeon_pm_print_states(rdev);
		radeon_pm_init_profile(rdev);
		/* set up the default clocks if the MC ucode is loaded */
		if ((rdev->family >= CHIP_BARTS) &&
		    (rdev->family <= CHIP_CAYMAN) &&
		    rdev->mc_fw) {
			if (rdev->pm.default_vddc)
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
							SET_VOLTAGE_TYPE_ASIC_VDDC);
			if (rdev->pm.default_vddci)
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
							SET_VOLTAGE_TYPE_ASIC_VDDCI);
			if (rdev->pm.default_sclk)
				radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
			if (rdev->pm.default_mclk)
				radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
		}
	}

	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;

	INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);

#ifndef __NetBSD__ /* XXX radeon power */
	if (rdev->pm.num_power_states > 1) {
		/* where's the best place to put these? */
		ret = device_create_file(rdev->dev, &dev_attr_power_profile);
		if (ret)
			DRM_ERROR("failed to create device file for power profile\n");
		ret = device_create_file(rdev->dev, &dev_attr_power_method);
		if (ret)
			DRM_ERROR("failed to create device file for power method\n");

		if (radeon_debugfs_pm_init(rdev)) {
			DRM_ERROR("Failed to register debugfs file for PM!\n");
		}

		DRM_INFO("radeon: power management initialized\n");
	}
#endif

	return 0;
}

static void radeon_dpm_print_power_states(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		printk("== power state %d ==\n", i);
		radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]);
	}
}

static int radeon_pm_init_dpm(struct radeon_device *rdev)
{
	int ret;

	/* default to balanced state */
	rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
	rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
	rdev->pm.default_sclk = rdev->clock.default_sclk;
	rdev->pm.default_mclk = rdev->clock.default_mclk;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (rdev->bios && rdev->is_atom_bios)
		radeon_atombios_get_power_modes(rdev);
	else
		return -EINVAL;

	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;

	INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler);
	mutex_lock(&rdev->pm.mutex);
	radeon_dpm_init(rdev);
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	if (radeon_dpm == 1)
		radeon_dpm_print_power_states(rdev);
	radeon_dpm_setup_asic(rdev);
	ret = radeon_dpm_enable(rdev);
	mutex_unlock(&rdev->pm.mutex);
	if (ret)
		goto dpm_failed;
	rdev->pm.dpm_enabled = true;

#ifndef __NetBSD__ /* XXX radeon power */
	ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
	if (ret)
		DRM_ERROR("failed to create device file for dpm state\n");
	ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
	if (ret)
		DRM_ERROR("failed to create device file for dpm force performance level\n");
	/* XXX: these are noops for dpm but are here for backwards compat */
	ret = device_create_file(rdev->dev, &dev_attr_power_profile);
	if (ret)
		DRM_ERROR("failed to create device file for power profile\n");
	ret = device_create_file(rdev->dev, &dev_attr_power_method);
	if (ret)
		DRM_ERROR("failed to create device file for power method\n");
#endif

	if (radeon_debugfs_pm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
	}

	DRM_INFO("radeon: dpm initialized\n");

	return 0;

dpm_failed:
	rdev->pm.dpm_enabled = false;
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
	DRM_ERROR("radeon: dpm initialization failed\n");
	return ret;
}

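/*
 * radeon_pm_init - set up power management
 *
 * Chooses between dpm and the legacy profile method per asic family:
 * dpm needs the RLC firmware, and RV770+ dGPUs also need the SMC
 * firmware; the radeon_dpm module parameter can force either choice.
 */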
int radeon_pm_init(struct radeon_device *rdev)
{
	/* enable dpm on rv6xx+ */
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV630:
	case CHIP_RV620:
	case CHIP_RV635:
	case CHIP_RV670:
	case CHIP_RS780:
	case CHIP_RS880:
	case CHIP_RV770:
	case CHIP_BARTS:
	case CHIP_TURKS:
	case CHIP_CAICOS:
	case CHIP_CAYMAN:
		/* DPM requires the RLC, RV770+ dGPU requires SMC */
		if (!rdev->rlc_fw)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if ((rdev->family >= CHIP_RV770) &&
			 (!(rdev->flags & RADEON_IS_IGP)) &&
			 (!rdev->smc_fw))
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (radeon_dpm == 1)
			rdev->pm.pm_method = PM_METHOD_DPM;
		else
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		break;
	case CHIP_RV730:
	case CHIP_RV710:
	case CHIP_RV740:
	case CHIP_CEDAR:
	case CHIP_REDWOOD:
	case CHIP_JUNIPER:
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_ARUBA:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
	case CHIP_HAINAN:
	case CHIP_BONAIRE:
	case CHIP_KABINI:
	case CHIP_KAVERI:
	case CHIP_HAWAII:
	case CHIP_MULLINS:
		/* DPM requires the RLC, RV770+ dGPU requires SMC */
		if (!rdev->rlc_fw)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if ((rdev->family >= CHIP_RV770) &&
			 (!(rdev->flags & RADEON_IS_IGP)) &&
			 (!rdev->smc_fw))
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (radeon_dpm == 0)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else
			rdev->pm.pm_method = PM_METHOD_DPM;
		break;
	default:
		/* default to profile method */
		rdev->pm.pm_method = PM_METHOD_PROFILE;
		break;
	}

	if (rdev->pm.pm_method == PM_METHOD_DPM)
		return radeon_pm_init_dpm(rdev);
	else
		return radeon_pm_init_old(rdev);
}

int radeon_pm_late_init(struct radeon_device *rdev)
{
	int ret = 0;

	if (rdev->pm.pm_method == PM_METHOD_DPM) {
		mutex_lock(&rdev->pm.mutex);
		ret = radeon_dpm_late_enable(rdev);
		mutex_unlock(&rdev->pm.mutex);
	}
	return ret;
}

static void radeon_pm_fini_old(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states > 1) {
		mutex_lock(&rdev->pm.mutex);
		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
			rdev->pm.profile = PM_PROFILE_DEFAULT;
			radeon_pm_update_profile(rdev);
			radeon_pm_set_clocks(rdev);
		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			/* reset default clocks */
			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
			radeon_pm_set_clocks(rdev);
		}
		mutex_unlock(&rdev->pm.mutex);

		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);

#ifndef __NetBSD__ /* XXX radeon power */
		device_remove_file(rdev->dev, &dev_attr_power_profile);
		device_remove_file(rdev->dev, &dev_attr_power_method);
#endif
	}

	radeon_hwmon_fini(rdev);

	if (rdev->pm.power_state)
		kfree(rdev->pm.power_state);
}

static void radeon_pm_fini_dpm(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states > 1) {
		mutex_lock(&rdev->pm.mutex);
		radeon_dpm_disable(rdev);
		mutex_unlock(&rdev->pm.mutex);

#ifndef __NetBSD__ /* XXX radeon power */
		device_remove_file(rdev->dev, &dev_attr_power_dpm_state);
		device_remove_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
		/* XXX backwards compat */
		device_remove_file(rdev->dev, &dev_attr_power_profile);
		device_remove_file(rdev->dev, &dev_attr_power_method);
#endif
	}
	radeon_dpm_fini(rdev);

	radeon_hwmon_fini(rdev);

	if (rdev->pm.power_state)
		kfree(rdev->pm.power_state);
}

void radeon_pm_fini(struct radeon_device *rdev)
{
	if (rdev->pm.pm_method == PM_METHOD_DPM)
		radeon_pm_fini_dpm(rdev);
	else
		radeon_pm_fini_old(rdev);
}

static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	if (rdev->pm.num_power_states < 2)
		return;

	mutex_lock(&rdev->pm.mutex);

	rdev->pm.active_crtcs = 0;
	rdev->pm.active_crtc_count = 0;
	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (radeon_crtc->enabled) {
				rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
				rdev->pm.active_crtc_count++;
			}
		}
	}

	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
			if (rdev->pm.active_crtc_count > 1) {
				if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
					cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
				}
			} else if (rdev->pm.active_crtc_count == 1) {
				/* TODO: Increase clocks if needed for current mode */

				if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
					DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
				}
			} else { /* count == 0 */
				if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
					cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);
				}
			}
		}
	}

	mutex_unlock(&rdev->pm.mutex);
}

static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	if (!rdev->pm.dpm_enabled)
		return;

	mutex_lock(&rdev->pm.mutex);

	/* update active crtc counts */
	rdev->pm.dpm.new_active_crtcs = 0;
	rdev->pm.dpm.new_active_crtc_count = 0;
	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (crtc->enabled) {
				rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
				rdev->pm.dpm.new_active_crtc_count++;
			}
		}
	}

	/* update battery/ac status */
	if (power_supply_is_system_supplied() > 0)
		rdev->pm.dpm.ac_power = true;
	else
		rdev->pm.dpm.ac_power = false;

	radeon_dpm_change_power_state_locked(rdev);

	mutex_unlock(&rdev->pm.mutex);
}

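/*
 * radeon_pm_compute_clocks - re-evaluate clocks after a display change
 *
 * Common entry point called when the active CRTC set may have changed;
 * dispatches to the dpm or legacy implementation.
 */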
void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
	if (rdev->pm.pm_method == PM_METHOD_DPM)
		radeon_pm_compute_clocks_dpm(rdev);
	else
		radeon_pm_compute_clocks_old(rdev);
}

static bool radeon_pm_in_vbl(struct radeon_device *rdev)
{
	int crtc, vpos, hpos, vbl_status;
	bool in_vbl = true;

	/* Iterate over all active crtc's. All crtc's must be in vblank,
	 * otherwise return in_vbl == false.
	 */
	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
		if (rdev->pm.active_crtcs & (1 << crtc)) {
			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0, &vpos, &hpos, NULL, NULL);
			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
			    !(vbl_status & DRM_SCANOUTPOS_INVBL))
				in_vbl = false;
		}
	}

	return in_vbl;
}

static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
	u32 stat_crtc = 0;
	bool in_vbl = radeon_pm_in_vbl(rdev);

	if (in_vbl == false)
		DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
				 finish ? "exit" : "entry");
	return in_vbl;
}

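/*
 * radeon_dynpm_idle_work_handler - periodic dynpm reclocking
 *
 * Samples the number of fences emitted but not yet processed on the
 * rings, plans an up- or downclock to take effect after
 * RADEON_RECLOCK_DELAY_MS, and reschedules itself every
 * RADEON_IDLE_LOOP_MS while dynpm is active.
 */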
static void radeon_dynpm_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev;
	int resched;
	rdev = container_of(work, struct radeon_device,
			    pm.dynpm_idle_work.work);

	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
		int not_processed = 0;
		int i;

		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			struct radeon_ring *ring = &rdev->ring[i];

			if (ring->ready) {
				not_processed += radeon_fence_count_emitted(rdev, i);
				if (not_processed >= 3)
					break;
			}
		}

		if (not_processed >= 3) { /* should upclock */
			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
				   rdev->pm.dynpm_can_upclock) {
				rdev->pm.dynpm_planned_action =
					DYNPM_ACTION_UPCLOCK;
				rdev->pm.dynpm_action_timeout = jiffies +
					msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		} else if (not_processed == 0) { /* should downclock */
			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
				   rdev->pm.dynpm_can_downclock) {
				rdev->pm.dynpm_planned_action =
					DYNPM_ACTION_DOWNCLOCK;
				rdev->pm.dynpm_action_timeout = jiffies +
					msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		}

		/* Note, radeon_pm_set_clocks is called with static_switch set
		 * to false since we want to wait for vbl to avoid flicker.
		 */
		if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
		    jiffies > rdev->pm.dynpm_action_timeout) {
			radeon_pm_get_dynpm_state(rdev);
			radeon_pm_set_clocks(rdev);
		}

		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
	}
	mutex_unlock(&rdev->pm.mutex);
	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_device *ddev = rdev->ddev;

	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		seq_printf(m, "PX asic powered off\n");
	} else if (rdev->pm.dpm_enabled) {
		mutex_lock(&rdev->pm.mutex);
		if (rdev->asic->dpm.debugfs_print_current_performance_level)
			radeon_dpm_debugfs_print_current_performance_level(rdev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		mutex_unlock(&rdev->pm.mutex);
	} else {
		seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
		/* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
		if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
			seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
		else
			seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
		seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
		if (rdev->asic->pm.get_memory_clock)
			seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
		if (rdev->pm.current_vddc)
			seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
		if (rdev->asic->pm.get_pcie_lanes)
			seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
	}

	return 0;
}

static struct drm_info_list radeon_pm_info_list[] = {
	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
#endif

static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
	return 0;
#endif
}