1 | /* |
2 | * Copyright 2008 Advanced Micro Devices, Inc. |
3 | * Copyright 2008 Red Hat Inc. |
4 | * Copyright 2009 Jerome Glisse. |
5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), |
8 | * to deal in the Software without restriction, including without limitation |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
10 | * and/or sell copies of the Software, and to permit persons to whom the |
11 | * Software is furnished to do so, subject to the following conditions: |
12 | * |
13 | * The above copyright notice and this permission notice shall be included in |
14 | * all copies or substantial portions of the Software. |
15 | * |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
22 | * OTHER DEALINGS IN THE SOFTWARE. |
23 | * |
24 | * Authors: Dave Airlie |
25 | * Alex Deucher |
26 | * Jerome Glisse |
27 | */ |
28 | #include <drm/drmP.h> |
29 | #include "radeon.h" |
30 | #include <drm/radeon_drm.h> |
31 | #include "radeon_asic.h" |
32 | |
33 | #include <linux/vga_switcheroo.h> |
34 | #include <linux/slab.h> |
35 | #include <linux/pm_runtime.h> |
36 | |
37 | #if defined(CONFIG_VGA_SWITCHEROO) |
38 | bool radeon_has_atpx(void); |
39 | #else |
40 | static inline bool radeon_has_atpx(void) { return false; } |
41 | #endif |
42 | |
43 | /** |
44 | * radeon_driver_unload_kms - Main unload function for KMS. |
45 | * |
46 | * @dev: drm dev pointer |
47 | * |
48 | * This is the main unload function for KMS (all asics). |
49 | * It calls radeon_modeset_fini() to tear down the |
50 | * displays, and radeon_device_fini() to tear down |
51 | * the rest of the device (CP, writeback, etc.). |
52 | * Returns 0 on success. |
53 | */ |
54 | int radeon_driver_unload_kms(struct drm_device *dev) |
55 | { |
56 | struct radeon_device *rdev = dev->dev_private; |
57 | |
58 | if (rdev == NULL) |
59 | return 0; |
60 | |
61 | #ifdef __NetBSD__ |
62 | /* XXX ugh */ |
	if (rdev->rmmio_size == 0)
64 | goto done_free; |
65 | #else |
66 | if (rdev->rmmio == NULL) |
67 | goto done_free; |
68 | #endif |
69 | |
70 | pm_runtime_get_sync(dev->dev); |
71 | |
72 | radeon_acpi_fini(rdev); |
73 | |
74 | radeon_modeset_fini(rdev); |
75 | radeon_device_fini(rdev); |
76 | |
77 | done_free: |
78 | kfree(rdev); |
79 | dev->dev_private = NULL; |
80 | return 0; |
81 | } |
82 | |
83 | /** |
84 | * radeon_driver_load_kms - Main load function for KMS. |
85 | * |
86 | * @dev: drm dev pointer |
87 | * @flags: device flags |
88 | * |
89 | * This is the main load function for KMS (all asics). |
90 | * It calls radeon_device_init() to set up the non-display |
91 | * parts of the chip (asic init, CP, writeback, etc.), and |
92 | * radeon_modeset_init() to set up the display parts |
93 | * (crtcs, encoders, hotplug detect, etc.). |
94 | * Returns 0 on success, error on failure. |
95 | */ |
96 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) |
97 | { |
98 | struct radeon_device *rdev; |
99 | int r, acpi_status; |
100 | |
101 | rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL); |
102 | if (rdev == NULL) { |
103 | return -ENOMEM; |
104 | } |
105 | dev->dev_private = (void *)rdev; |
106 | |
107 | /* update BUS flag */ |
108 | if (drm_pci_device_is_agp(dev)) { |
109 | flags |= RADEON_IS_AGP; |
110 | } else if (pci_is_pcie(dev->pdev)) { |
111 | flags |= RADEON_IS_PCIE; |
112 | } else { |
113 | flags |= RADEON_IS_PCI; |
114 | } |
115 | |
116 | if ((radeon_runtime_pm != 0) && |
117 | radeon_has_atpx() && |
118 | ((flags & RADEON_IS_IGP) == 0)) |
119 | flags |= RADEON_IS_PX; |
120 | |
	/* radeon_device_init() should report only fatal errors
	 * (memory allocation, iomapping, or memory manager
	 * initialization failure); it must properly initialize the
	 * GPU MC controller and permit VRAM allocation.
	 */
127 | r = radeon_device_init(rdev, dev, dev->pdev, flags); |
128 | if (r) { |
129 | dev_err(dev->dev, "Fatal error during GPU init\n" ); |
130 | goto out; |
131 | } |
132 | |
	/* Again, radeon_modeset_init() should fail only on fatal errors;
	 * otherwise it should provide enough functionality for
	 * shadowfb to run.
	 */
137 | r = radeon_modeset_init(rdev); |
138 | if (r) |
139 | dev_err(dev->dev, "Fatal error during modeset init\n" ); |
140 | |
	/* Call ACPI methods: requires modeset init,
	 * but failure is not fatal.
	 */
144 | if (!r) { |
145 | acpi_status = radeon_acpi_init(rdev); |
146 | if (acpi_status) |
147 | dev_dbg(dev->dev, |
148 | "Error during ACPI methods call\n" ); |
149 | } |
150 | |
151 | if (radeon_is_px(dev)) { |
152 | pm_runtime_use_autosuspend(dev->dev); |
153 | pm_runtime_set_autosuspend_delay(dev->dev, 5000); |
154 | pm_runtime_set_active(dev->dev); |
155 | pm_runtime_allow(dev->dev); |
156 | pm_runtime_mark_last_busy(dev->dev); |
157 | pm_runtime_put_autosuspend(dev->dev); |
158 | } |
159 | |
160 | out: |
161 | if (r) |
162 | radeon_driver_unload_kms(dev); |
163 | |
164 | |
165 | return r; |
166 | } |
167 | |
168 | /** |
 * radeon_set_filp_rights - Set filp rights.
 *
 * @dev: drm dev pointer
 * @owner: drm file that currently owns the rights
 * @applier: drm file requesting or revoking the rights
 * @value: 1 to request the rights, 0 to revoke them; on return,
 *         1 if @applier owns the rights, 0 otherwise
 *
 * Sets the filp rights for the device (all asics).
177 | */ |
178 | static void radeon_set_filp_rights(struct drm_device *dev, |
179 | struct drm_file **owner, |
180 | struct drm_file *applier, |
181 | uint32_t *value) |
182 | { |
183 | mutex_lock(&dev->struct_mutex); |
184 | if (*value == 1) { |
185 | /* wants rights */ |
186 | if (!*owner) |
187 | *owner = applier; |
188 | } else if (*value == 0) { |
189 | /* revokes rights */ |
190 | if (*owner == applier) |
191 | *owner = NULL; |
192 | } |
193 | *value = *owner == applier ? 1 : 0; |
194 | mutex_unlock(&dev->struct_mutex); |
195 | } |
196 | |
197 | /* |
198 | * Userspace get information ioctl |
199 | */ |
200 | /** |
201 | * radeon_info_ioctl - answer a device specific request. |
202 | * |
 * @dev: drm device pointer
204 | * @data: request object |
205 | * @filp: drm filp |
206 | * |
207 | * This function is used to pass device specific parameters to the userspace |
208 | * drivers. Examples include: pci device id, pipeline parms, tiling params, |
209 | * etc. (all asics). |
 * Returns 0 on success, negative error code on failure.
211 | */ |
212 | static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) |
213 | { |
214 | struct radeon_device *rdev = dev->dev_private; |
215 | struct drm_radeon_info *info = data; |
216 | struct radeon_mode_info *minfo = &rdev->mode_info; |
217 | uint32_t *value, value_tmp, *value_ptr, value_size; |
218 | uint64_t value64; |
219 | struct drm_crtc *crtc; |
220 | int i, found; |
221 | |
222 | value_ptr = (uint32_t *)((unsigned long)info->value); |
223 | value = &value_tmp; |
224 | value_size = sizeof(uint32_t); |
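	/*
	 * By default the result is returned through the 32-bit scratch
	 * variable value_tmp.  Requests that return 64-bit data or arrays
	 * redirect 'value' at their source and adjust value_size; the
	 * copy_to_user() at the end writes back whatever 'value' points to.
	 */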
225 | |
226 | switch (info->request) { |
227 | case RADEON_INFO_DEVICE_ID: |
228 | *value = dev->pdev->device; |
229 | break; |
230 | case RADEON_INFO_NUM_GB_PIPES: |
231 | *value = rdev->num_gb_pipes; |
232 | break; |
233 | case RADEON_INFO_NUM_Z_PIPES: |
234 | *value = rdev->num_z_pipes; |
235 | break; |
236 | case RADEON_INFO_ACCEL_WORKING: |
237 | /* xf86-video-ati 6.13.0 relies on this being false for evergreen */ |
238 | if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) |
239 | *value = false; |
240 | else |
241 | *value = rdev->accel_working; |
242 | break; |
243 | case RADEON_INFO_CRTC_FROM_ID: |
244 | if (copy_from_user(value, value_ptr, sizeof(uint32_t))) { |
245 | DRM_ERROR("copy_from_user %s:%u\n" , __func__, __LINE__); |
246 | return -EFAULT; |
247 | } |
248 | for (i = 0, found = 0; i < rdev->num_crtc; i++) { |
249 | crtc = (struct drm_crtc *)minfo->crtcs[i]; |
250 | if (crtc && crtc->base.id == *value) { |
251 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
252 | *value = radeon_crtc->crtc_id; |
253 | found = 1; |
254 | break; |
255 | } |
256 | } |
257 | if (!found) { |
258 | DRM_DEBUG_KMS("unknown crtc id %d\n" , *value); |
259 | return -EINVAL; |
260 | } |
261 | break; |
262 | case RADEON_INFO_ACCEL_WORKING2: |
263 | *value = rdev->accel_working; |
264 | break; |
265 | case RADEON_INFO_TILING_CONFIG: |
266 | if (rdev->family >= CHIP_BONAIRE) |
267 | *value = rdev->config.cik.tile_config; |
268 | else if (rdev->family >= CHIP_TAHITI) |
269 | *value = rdev->config.si.tile_config; |
270 | else if (rdev->family >= CHIP_CAYMAN) |
271 | *value = rdev->config.cayman.tile_config; |
272 | else if (rdev->family >= CHIP_CEDAR) |
273 | *value = rdev->config.evergreen.tile_config; |
274 | else if (rdev->family >= CHIP_RV770) |
275 | *value = rdev->config.rv770.tile_config; |
276 | else if (rdev->family >= CHIP_R600) |
277 | *value = rdev->config.r600.tile_config; |
278 | else { |
279 | DRM_DEBUG_KMS("tiling config is r6xx+ only!\n" ); |
280 | return -EINVAL; |
281 | } |
282 | break; |
283 | case RADEON_INFO_WANT_HYPERZ: |
284 | /* The "value" here is both an input and output parameter. |
285 | * If the input value is 1, filp requests hyper-z access. |
286 | * If the input value is 0, filp revokes its hyper-z access. |
287 | * |
288 | * When returning, the value is 1 if filp owns hyper-z access, |
289 | * 0 otherwise. */ |
290 | if (copy_from_user(value, value_ptr, sizeof(uint32_t))) { |
291 | DRM_ERROR("copy_from_user %s:%u\n" , __func__, __LINE__); |
292 | return -EFAULT; |
293 | } |
294 | if (*value >= 2) { |
295 | DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n" , *value); |
296 | return -EINVAL; |
297 | } |
298 | radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, value); |
299 | break; |
300 | case RADEON_INFO_WANT_CMASK: |
301 | /* The same logic as Hyper-Z. */ |
302 | if (copy_from_user(value, value_ptr, sizeof(uint32_t))) { |
303 | DRM_ERROR("copy_from_user %s:%u\n" , __func__, __LINE__); |
304 | return -EFAULT; |
305 | } |
306 | if (*value >= 2) { |
307 | DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n" , *value); |
308 | return -EINVAL; |
309 | } |
310 | radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, value); |
311 | break; |
312 | case RADEON_INFO_CLOCK_CRYSTAL_FREQ: |
		/* return clock value in KHz (sources store it in 10 kHz units) */
314 | if (rdev->asic->get_xclk) |
315 | *value = radeon_get_xclk(rdev) * 10; |
316 | else |
317 | *value = rdev->clock.spll.reference_freq * 10; |
318 | break; |
319 | case RADEON_INFO_NUM_BACKENDS: |
320 | if (rdev->family >= CHIP_BONAIRE) |
321 | *value = rdev->config.cik.max_backends_per_se * |
322 | rdev->config.cik.max_shader_engines; |
323 | else if (rdev->family >= CHIP_TAHITI) |
324 | *value = rdev->config.si.max_backends_per_se * |
325 | rdev->config.si.max_shader_engines; |
326 | else if (rdev->family >= CHIP_CAYMAN) |
327 | *value = rdev->config.cayman.max_backends_per_se * |
328 | rdev->config.cayman.max_shader_engines; |
329 | else if (rdev->family >= CHIP_CEDAR) |
330 | *value = rdev->config.evergreen.max_backends; |
331 | else if (rdev->family >= CHIP_RV770) |
332 | *value = rdev->config.rv770.max_backends; |
333 | else if (rdev->family >= CHIP_R600) |
334 | *value = rdev->config.r600.max_backends; |
335 | else { |
336 | return -EINVAL; |
337 | } |
338 | break; |
339 | case RADEON_INFO_NUM_TILE_PIPES: |
340 | if (rdev->family >= CHIP_BONAIRE) |
341 | *value = rdev->config.cik.max_tile_pipes; |
342 | else if (rdev->family >= CHIP_TAHITI) |
343 | *value = rdev->config.si.max_tile_pipes; |
344 | else if (rdev->family >= CHIP_CAYMAN) |
345 | *value = rdev->config.cayman.max_tile_pipes; |
346 | else if (rdev->family >= CHIP_CEDAR) |
347 | *value = rdev->config.evergreen.max_tile_pipes; |
348 | else if (rdev->family >= CHIP_RV770) |
349 | *value = rdev->config.rv770.max_tile_pipes; |
350 | else if (rdev->family >= CHIP_R600) |
351 | *value = rdev->config.r600.max_tile_pipes; |
352 | else { |
353 | return -EINVAL; |
354 | } |
355 | break; |
356 | case RADEON_INFO_FUSION_GART_WORKING: |
357 | *value = 1; |
358 | break; |
359 | case RADEON_INFO_BACKEND_MAP: |
360 | if (rdev->family >= CHIP_BONAIRE) |
361 | *value = rdev->config.cik.backend_map; |
362 | else if (rdev->family >= CHIP_TAHITI) |
363 | *value = rdev->config.si.backend_map; |
364 | else if (rdev->family >= CHIP_CAYMAN) |
365 | *value = rdev->config.cayman.backend_map; |
366 | else if (rdev->family >= CHIP_CEDAR) |
367 | *value = rdev->config.evergreen.backend_map; |
368 | else if (rdev->family >= CHIP_RV770) |
369 | *value = rdev->config.rv770.backend_map; |
370 | else if (rdev->family >= CHIP_R600) |
371 | *value = rdev->config.r600.backend_map; |
372 | else { |
373 | return -EINVAL; |
374 | } |
375 | break; |
376 | case RADEON_INFO_VA_START: |
377 | /* this is where we report if vm is supported or not */ |
378 | if (rdev->family < CHIP_CAYMAN) |
379 | return -EINVAL; |
380 | *value = RADEON_VA_RESERVED_SIZE; |
381 | break; |
382 | case RADEON_INFO_IB_VM_MAX_SIZE: |
383 | /* this is where we report if vm is supported or not */ |
384 | if (rdev->family < CHIP_CAYMAN) |
385 | return -EINVAL; |
386 | *value = RADEON_IB_VM_MAX_SIZE; |
387 | break; |
388 | case RADEON_INFO_MAX_PIPES: |
389 | if (rdev->family >= CHIP_BONAIRE) |
390 | *value = rdev->config.cik.max_cu_per_sh; |
391 | else if (rdev->family >= CHIP_TAHITI) |
392 | *value = rdev->config.si.max_cu_per_sh; |
393 | else if (rdev->family >= CHIP_CAYMAN) |
394 | *value = rdev->config.cayman.max_pipes_per_simd; |
395 | else if (rdev->family >= CHIP_CEDAR) |
396 | *value = rdev->config.evergreen.max_pipes; |
397 | else if (rdev->family >= CHIP_RV770) |
398 | *value = rdev->config.rv770.max_pipes; |
399 | else if (rdev->family >= CHIP_R600) |
400 | *value = rdev->config.r600.max_pipes; |
401 | else { |
402 | return -EINVAL; |
403 | } |
404 | break; |
405 | case RADEON_INFO_TIMESTAMP: |
406 | if (rdev->family < CHIP_R600) { |
407 | DRM_DEBUG_KMS("timestamp is r6xx+ only!\n" ); |
408 | return -EINVAL; |
409 | } |
410 | value = (uint32_t*)&value64; |
411 | value_size = sizeof(uint64_t); |
412 | value64 = radeon_get_gpu_clock_counter(rdev); |
413 | break; |
414 | case RADEON_INFO_MAX_SE: |
415 | if (rdev->family >= CHIP_BONAIRE) |
416 | *value = rdev->config.cik.max_shader_engines; |
417 | else if (rdev->family >= CHIP_TAHITI) |
418 | *value = rdev->config.si.max_shader_engines; |
419 | else if (rdev->family >= CHIP_CAYMAN) |
420 | *value = rdev->config.cayman.max_shader_engines; |
421 | else if (rdev->family >= CHIP_CEDAR) |
422 | *value = rdev->config.evergreen.num_ses; |
423 | else |
424 | *value = 1; |
425 | break; |
426 | case RADEON_INFO_MAX_SH_PER_SE: |
427 | if (rdev->family >= CHIP_BONAIRE) |
428 | *value = rdev->config.cik.max_sh_per_se; |
429 | else if (rdev->family >= CHIP_TAHITI) |
430 | *value = rdev->config.si.max_sh_per_se; |
431 | else |
432 | return -EINVAL; |
433 | break; |
434 | case RADEON_INFO_FASTFB_WORKING: |
435 | *value = rdev->fastfb_working; |
436 | break; |
437 | case RADEON_INFO_RING_WORKING: |
438 | if (copy_from_user(value, value_ptr, sizeof(uint32_t))) { |
439 | DRM_ERROR("copy_from_user %s:%u\n" , __func__, __LINE__); |
440 | return -EFAULT; |
441 | } |
442 | switch (*value) { |
443 | case RADEON_CS_RING_GFX: |
444 | case RADEON_CS_RING_COMPUTE: |
445 | *value = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready; |
446 | break; |
447 | case RADEON_CS_RING_DMA: |
448 | *value = rdev->ring[R600_RING_TYPE_DMA_INDEX].ready; |
449 | *value |= rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready; |
450 | break; |
451 | case RADEON_CS_RING_UVD: |
452 | *value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready; |
453 | break; |
454 | case RADEON_CS_RING_VCE: |
455 | *value = rdev->ring[TN_RING_TYPE_VCE1_INDEX].ready; |
456 | break; |
457 | default: |
458 | return -EINVAL; |
459 | } |
460 | break; |
461 | case RADEON_INFO_SI_TILE_MODE_ARRAY: |
462 | if (rdev->family >= CHIP_BONAIRE) { |
463 | value = rdev->config.cik.tile_mode_array; |
464 | value_size = sizeof(uint32_t)*32; |
465 | } else if (rdev->family >= CHIP_TAHITI) { |
466 | value = rdev->config.si.tile_mode_array; |
467 | value_size = sizeof(uint32_t)*32; |
468 | } else { |
469 | DRM_DEBUG_KMS("tile mode array is si+ only!\n" ); |
470 | return -EINVAL; |
471 | } |
472 | break; |
473 | case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY: |
474 | if (rdev->family >= CHIP_BONAIRE) { |
475 | value = rdev->config.cik.macrotile_mode_array; |
476 | value_size = sizeof(uint32_t)*16; |
477 | } else { |
478 | DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n" ); |
479 | return -EINVAL; |
480 | } |
481 | break; |
482 | case RADEON_INFO_SI_CP_DMA_COMPUTE: |
483 | *value = 1; |
484 | break; |
485 | case RADEON_INFO_SI_BACKEND_ENABLED_MASK: |
486 | if (rdev->family >= CHIP_BONAIRE) { |
487 | *value = rdev->config.cik.backend_enable_mask; |
488 | } else if (rdev->family >= CHIP_TAHITI) { |
489 | *value = rdev->config.si.backend_enable_mask; |
490 | } else { |
491 | DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n" ); |
492 | } |
493 | break; |
494 | case RADEON_INFO_MAX_SCLK: |
495 | if ((rdev->pm.pm_method == PM_METHOD_DPM) && |
496 | rdev->pm.dpm_enabled) |
497 | *value = rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10; |
498 | else |
499 | *value = rdev->pm.default_sclk * 10; |
500 | break; |
501 | case RADEON_INFO_VCE_FW_VERSION: |
502 | *value = rdev->vce.fw_version; |
503 | break; |
504 | case RADEON_INFO_VCE_FB_VERSION: |
505 | *value = rdev->vce.fb_version; |
506 | break; |
507 | case RADEON_INFO_NUM_BYTES_MOVED: |
508 | value = (uint32_t*)&value64; |
509 | value_size = sizeof(uint64_t); |
510 | value64 = atomic64_read(&rdev->num_bytes_moved); |
511 | break; |
512 | case RADEON_INFO_VRAM_USAGE: |
513 | value = (uint32_t*)&value64; |
514 | value_size = sizeof(uint64_t); |
515 | value64 = atomic64_read(&rdev->vram_usage); |
516 | break; |
517 | case RADEON_INFO_GTT_USAGE: |
518 | value = (uint32_t*)&value64; |
519 | value_size = sizeof(uint64_t); |
520 | value64 = atomic64_read(&rdev->gtt_usage); |
521 | break; |
522 | default: |
523 | DRM_DEBUG_KMS("Invalid request %d\n" , info->request); |
524 | return -EINVAL; |
525 | } |
526 | if (copy_to_user(value_ptr, (char*)value, value_size)) { |
527 | DRM_ERROR("copy_to_user %s:%u\n" , __func__, __LINE__); |
528 | return -EFAULT; |
529 | } |
530 | return 0; |
531 | } |
532 | |
533 | |
534 | /* |
535 | * Outdated mess for old drm with Xorg being in charge (void function now). |
536 | */ |
537 | /** |
 * radeon_driver_lastclose_kms - drm callback for last close
539 | * |
540 | * @dev: drm dev pointer |
541 | * |
542 | * Switch vga switcheroo state after last close (all asics). |
543 | */ |
544 | void radeon_driver_lastclose_kms(struct drm_device *dev) |
545 | { |
546 | #ifndef __NetBSD__ /* XXX radeon vga */ |
547 | vga_switcheroo_process_delayed_switch(); |
548 | #endif |
549 | } |
550 | |
551 | /** |
552 | * radeon_driver_open_kms - drm callback for open |
553 | * |
554 | * @dev: drm dev pointer |
555 | * @file_priv: drm file |
556 | * |
557 | * On device open, init vm on cayman+ (all asics). |
558 | * Returns 0 on success, error on failure. |
559 | */ |
560 | int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) |
561 | { |
562 | struct radeon_device *rdev = dev->dev_private; |
563 | int r; |
564 | |
565 | file_priv->driver_priv = NULL; |
566 | |
567 | r = pm_runtime_get_sync(dev->dev); |
568 | if (r < 0) |
569 | return r; |
570 | |
	/* newer GPUs have virtual address space support */
572 | if (rdev->family >= CHIP_CAYMAN) { |
573 | struct radeon_fpriv *fpriv; |
574 | struct radeon_bo_va *bo_va; |
575 | |
576 | fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); |
577 | if (unlikely(!fpriv)) { |
578 | return -ENOMEM; |
579 | } |
580 | |
581 | r = radeon_vm_init(rdev, &fpriv->vm); |
582 | if (r) { |
583 | kfree(fpriv); |
584 | return r; |
585 | } |
586 | |
587 | if (rdev->accel_working) { |
588 | r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); |
589 | if (r) { |
590 | radeon_vm_fini(rdev, &fpriv->vm); |
591 | kfree(fpriv); |
592 | return r; |
593 | } |
594 | |
595 | /* map the ib pool buffer read only into |
596 | * virtual address space */ |
597 | bo_va = radeon_vm_bo_add(rdev, &fpriv->vm, |
598 | rdev->ring_tmp_bo.bo); |
599 | r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET, |
600 | RADEON_VM_PAGE_READABLE | |
601 | RADEON_VM_PAGE_SNOOPED); |
602 | |
603 | radeon_bo_unreserve(rdev->ring_tmp_bo.bo); |
604 | if (r) { |
605 | radeon_vm_fini(rdev, &fpriv->vm); |
606 | kfree(fpriv); |
607 | return r; |
608 | } |
609 | } |
610 | file_priv->driver_priv = fpriv; |
611 | } |
612 | |
613 | pm_runtime_mark_last_busy(dev->dev); |
614 | pm_runtime_put_autosuspend(dev->dev); |
615 | return 0; |
616 | } |
617 | |
618 | /** |
619 | * radeon_driver_postclose_kms - drm callback for post close |
620 | * |
621 | * @dev: drm dev pointer |
622 | * @file_priv: drm file |
623 | * |
624 | * On device post close, tear down vm on cayman+ (all asics). |
625 | */ |
626 | void radeon_driver_postclose_kms(struct drm_device *dev, |
627 | struct drm_file *file_priv) |
628 | { |
629 | struct radeon_device *rdev = dev->dev_private; |
630 | |
	/* newer GPUs have virtual address space support */
632 | if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) { |
633 | struct radeon_fpriv *fpriv = file_priv->driver_priv; |
634 | struct radeon_bo_va *bo_va; |
635 | int r; |
636 | |
637 | if (rdev->accel_working) { |
638 | r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); |
639 | if (!r) { |
640 | bo_va = radeon_vm_bo_find(&fpriv->vm, |
641 | rdev->ring_tmp_bo.bo); |
642 | if (bo_va) |
643 | radeon_vm_bo_rmv(rdev, bo_va); |
644 | radeon_bo_unreserve(rdev->ring_tmp_bo.bo); |
645 | } |
646 | } |
647 | |
648 | radeon_vm_fini(rdev, &fpriv->vm); |
649 | kfree(fpriv); |
650 | file_priv->driver_priv = NULL; |
651 | } |
652 | } |
653 | |
654 | /** |
655 | * radeon_driver_preclose_kms - drm callback for pre close |
656 | * |
657 | * @dev: drm dev pointer |
658 | * @file_priv: drm file |
659 | * |
660 | * On device pre close, tear down hyperz and cmask filps on r1xx-r5xx |
661 | * (all asics). |
662 | */ |
663 | void radeon_driver_preclose_kms(struct drm_device *dev, |
664 | struct drm_file *file_priv) |
665 | { |
666 | struct radeon_device *rdev = dev->dev_private; |
667 | if (rdev->hyperz_filp == file_priv) |
668 | rdev->hyperz_filp = NULL; |
669 | if (rdev->cmask_filp == file_priv) |
670 | rdev->cmask_filp = NULL; |
671 | radeon_uvd_free_handles(rdev, file_priv); |
672 | radeon_vce_free_handles(rdev, file_priv); |
673 | } |
674 | |
675 | /* |
676 | * VBlank related functions. |
677 | */ |
678 | /** |
679 | * radeon_get_vblank_counter_kms - get frame count |
680 | * |
681 | * @dev: drm dev pointer |
682 | * @crtc: crtc to get the frame count from |
683 | * |
684 | * Gets the frame count on the requested crtc (all asics). |
685 | * Returns frame count on success, -EINVAL on failure. |
686 | */ |
687 | u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc) |
688 | { |
689 | struct radeon_device *rdev = dev->dev_private; |
690 | |
691 | if (crtc < 0 || crtc >= rdev->num_crtc) { |
692 | DRM_ERROR("Invalid crtc %d\n" , crtc); |
693 | return -EINVAL; |
694 | } |
695 | |
696 | return radeon_get_vblank_counter(rdev, crtc); |
697 | } |
698 | |
699 | /** |
700 | * radeon_enable_vblank_kms - enable vblank interrupt |
701 | * |
702 | * @dev: drm dev pointer |
703 | * @crtc: crtc to enable vblank interrupt for |
704 | * |
705 | * Enable the interrupt on the requested crtc (all asics). |
706 | * Returns 0 on success, -EINVAL on failure. |
707 | */ |
708 | int radeon_enable_vblank_kms(struct drm_device *dev, int crtc) |
709 | { |
710 | struct radeon_device *rdev = dev->dev_private; |
711 | unsigned long irqflags; |
712 | int r; |
713 | |
714 | if (crtc < 0 || crtc >= rdev->num_crtc) { |
715 | DRM_ERROR("Invalid crtc %d\n" , crtc); |
716 | return -EINVAL; |
717 | } |
718 | |
719 | spin_lock_irqsave(&rdev->irq.lock, irqflags); |
720 | rdev->irq.crtc_vblank_int[crtc] = true; |
721 | r = radeon_irq_set(rdev); |
722 | spin_unlock_irqrestore(&rdev->irq.lock, irqflags); |
723 | return r; |
724 | } |
725 | |
726 | /** |
727 | * radeon_disable_vblank_kms - disable vblank interrupt |
728 | * |
729 | * @dev: drm dev pointer |
730 | * @crtc: crtc to disable vblank interrupt for |
731 | * |
732 | * Disable the interrupt on the requested crtc (all asics). |
733 | */ |
734 | void radeon_disable_vblank_kms(struct drm_device *dev, int crtc) |
735 | { |
736 | struct radeon_device *rdev = dev->dev_private; |
737 | unsigned long irqflags; |
738 | |
739 | if (crtc < 0 || crtc >= rdev->num_crtc) { |
740 | DRM_ERROR("Invalid crtc %d\n" , crtc); |
741 | return; |
742 | } |
743 | |
744 | spin_lock_irqsave(&rdev->irq.lock, irqflags); |
745 | rdev->irq.crtc_vblank_int[crtc] = false; |
746 | radeon_irq_set(rdev); |
747 | spin_unlock_irqrestore(&rdev->irq.lock, irqflags); |
748 | } |
749 | |
750 | /** |
751 | * radeon_get_vblank_timestamp_kms - get vblank timestamp |
752 | * |
753 | * @dev: drm dev pointer |
754 | * @crtc: crtc to get the timestamp for |
755 | * @max_error: max error |
756 | * @vblank_time: time value |
757 | * @flags: flags passed to the driver |
758 | * |
759 | * Gets the timestamp on the requested crtc based on the |
760 | * scanout position. (all asics). |
 * Returns positive status flags on success, negative error on failure.
762 | */ |
763 | int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, |
764 | int *max_error, |
765 | struct timeval *vblank_time, |
766 | unsigned flags) |
767 | { |
768 | struct drm_crtc *drmcrtc; |
769 | struct radeon_device *rdev = dev->dev_private; |
770 | |
771 | if (crtc < 0 || crtc >= dev->num_crtcs) { |
772 | DRM_ERROR("Invalid crtc %d\n" , crtc); |
773 | return -EINVAL; |
774 | } |
775 | |
776 | /* Get associated drm_crtc: */ |
777 | drmcrtc = &rdev->mode_info.crtcs[crtc]->base; |
778 | |
779 | /* Helper routine in DRM core does all the work: */ |
780 | return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error, |
781 | vblank_time, flags, |
782 | drmcrtc, &drmcrtc->hwmode); |
783 | } |
784 | |
785 | #define KMS_INVALID_IOCTL(name) \ |
786 | static int name(struct drm_device *dev, void *data, struct drm_file \ |
787 | *file_priv) \ |
788 | { \ |
789 | DRM_ERROR("invalid ioctl with kms %s\n", __func__); \ |
790 | return -EINVAL; \ |
791 | } |
792 | |
793 | /* |
794 | * All these ioctls are invalid in kms world. |
795 | */ |
796 | KMS_INVALID_IOCTL(radeon_cp_init_kms) |
797 | KMS_INVALID_IOCTL(radeon_cp_start_kms) |
798 | KMS_INVALID_IOCTL(radeon_cp_stop_kms) |
799 | KMS_INVALID_IOCTL(radeon_cp_reset_kms) |
800 | KMS_INVALID_IOCTL(radeon_cp_idle_kms) |
801 | KMS_INVALID_IOCTL(radeon_cp_resume_kms) |
802 | KMS_INVALID_IOCTL(radeon_engine_reset_kms) |
803 | KMS_INVALID_IOCTL(radeon_fullscreen_kms) |
804 | KMS_INVALID_IOCTL(radeon_cp_swap_kms) |
805 | KMS_INVALID_IOCTL(radeon_cp_clear_kms) |
806 | KMS_INVALID_IOCTL(radeon_cp_vertex_kms) |
807 | KMS_INVALID_IOCTL(radeon_cp_indices_kms) |
808 | KMS_INVALID_IOCTL(radeon_cp_texture_kms) |
809 | KMS_INVALID_IOCTL(radeon_cp_stipple_kms) |
810 | KMS_INVALID_IOCTL(radeon_cp_indirect_kms) |
811 | KMS_INVALID_IOCTL(radeon_cp_vertex2_kms) |
812 | KMS_INVALID_IOCTL(radeon_cp_cmdbuf_kms) |
813 | KMS_INVALID_IOCTL(radeon_cp_getparam_kms) |
814 | KMS_INVALID_IOCTL(radeon_cp_flip_kms) |
815 | KMS_INVALID_IOCTL(radeon_mem_alloc_kms) |
816 | KMS_INVALID_IOCTL(radeon_mem_free_kms) |
817 | KMS_INVALID_IOCTL(radeon_mem_init_heap_kms) |
818 | KMS_INVALID_IOCTL(radeon_irq_emit_kms) |
819 | KMS_INVALID_IOCTL(radeon_irq_wait_kms) |
820 | KMS_INVALID_IOCTL(radeon_cp_setparam_kms) |
821 | KMS_INVALID_IOCTL(radeon_surface_alloc_kms) |
822 | KMS_INVALID_IOCTL(radeon_surface_free_kms) |
823 | |
824 | |
825 | const struct drm_ioctl_desc radeon_ioctls_kms[] = { |
826 | DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
827 | DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
828 | DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
829 | DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
830 | DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH), |
831 | DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH), |
832 | DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH), |
833 | DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH), |
834 | DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH), |
835 | DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH), |
836 | DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH), |
837 | DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH), |
838 | DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH), |
839 | DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH), |
840 | DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
841 | DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH), |
842 | DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH), |
843 | DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH), |
844 | DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH), |
845 | DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH), |
846 | DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH), |
847 | DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
848 | DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH), |
849 | DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH), |
850 | DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH), |
851 | DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH), |
852 | DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH), |
853 | /* KMS */ |
854 | DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), |
855 | DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), |
856 | DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), |
857 | DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), |
858 | DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED), |
859 | DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED), |
860 | DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), |
861 | DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), |
862 | DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), |
863 | DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), |
864 | DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), |
865 | DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), |
866 | DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), |
867 | DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), |
868 | }; |
869 | int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); |
870 | |