/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/video.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])

#define BEGIN_LP_RING(n) \
	intel_ring_begin(LP_RING(dev_priv), (n))

#define OUT_RING(x) \
	intel_ring_emit(LP_RING(dev_priv), x)

#define ADVANCE_LP_RING() \
	__intel_ring_advance(LP_RING(dev_priv))
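
/*
 * These macros wrap the legacy "low priority" render ring (RCS) and
 * follow the usual reserve/emit/advance pattern: BEGIN_LP_RING()
 * reserves space for n dwords, OUT_RING() writes one dword at the
 * current tail, and ADVANCE_LP_RING() publishes the new tail to the
 * hardware.  A minimal, hypothetical emission looks like:
 *
 *	ret = BEGIN_LP_RING(2);
 *	if (ret == 0) {
 *		OUT_RING(MI_FLUSH);
 *		OUT_RING(MI_NOOP);
 *		ADVANCE_LP_RING();
 *	}
 *
 * Note that the macros assume a local variable named dev_priv.
 */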

/**
 * Lock test for when it's just for synchronization of ring access.
 *
 * In that case, we don't need to do it when GEM is initialized as nobody else
 * has access to the ring.
 */
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {		\
	if (LP_RING(dev->dev_private)->obj == NULL)		\
		LOCK_TEST_WITH_RETURN(dev, file);		\
} while (0)

static inline u32
intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
{
	if (I915_NEED_GFX_HWS(dev_priv->dev))
#ifdef __NetBSD__
		return bus_space_read_4(dev_priv->dev->pdev->pd_pa.pa_memt,
		    dev_priv->dri1.gfx_hws_cpu_bsh, reg);
#else
		return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
#endif
	else
		return intel_read_status_page(LP_RING(dev_priv), reg);
}

#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_BREADCRUMB_INDEX	0x21
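
/*
 * The breadcrumb is a monotonically increasing sequence number that the
 * dispatch paths below store into dword 0x21 of the hardware status page
 * (via MI_STORE_DWORD_INDEX); READ_BREADCRUMB() reads it back so the
 * driver can tell how far the GPU has progressed.
 */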

void i915_update_dri1_breadcrumb(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;

	/*
	 * The dri breadcrumb update races against the drm master disappearing.
	 * Instead of trying to fix this (this is by far not the only ums issue)
	 * just don't do the update in kms mode.
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
	}
}

static void i915_write_hws_pga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 addr;

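	/*
	 * On gen4 and later the status page may sit above 4GiB: the
	 * expression below folds bus-address bits 35:32 into bits 7:4
	 * of the HWS_PGA value, alongside the page-aligned low bits.
	 */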
	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_INFO(dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}

/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	if (ring->status_page.gfx_addr) {
		ring->status_page.gfx_addr = 0;
#ifdef __NetBSD__
		bus_space_unmap(dev->pdev->pd_pa.pa_memt,
		    dev_priv->dri1.gfx_hws_cpu_bsh, 4096);
#else
		iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
#endif
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}

void i915_kernel_lost_context(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

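	/*
	 * Resynchronize the software copy of the ring pointers with the
	 * hardware: free space is the gap from tail back around to head,
	 * minus the slack (I915_RING_FREE_SPACE) the ring code keeps in
	 * reserve so that head == tail can always be read as "empty".
	 */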
	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
	if (ring->space < 0)
		ring->space += ring->size;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

static int i915_dma_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	mutex_lock(&dev->struct_mutex);
	for (i = 0; i < I915_NUM_RINGS; i++)
		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
	mutex_unlock(&dev->struct_mutex);

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}

static int i915_initialize(struct drm_device *dev, drm_i915_init_t *init)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		master_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		if (LP_RING(dev_priv)->obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		ret = intel_render_ring_init_dri(dev,
						 init->ring_start,
						 init->ring_size);
		if (ret) {
			i915_dma_cleanup(dev);
			return ret;
		}
	}

	dev_priv->dri1.cpp = init->cpp;
	dev_priv->dri1.back_offset = init->back_offset;
	dev_priv->dri1.front_offset = init->front_offset;
	dev_priv->dri1.current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->dri1.allow_batchbuffer = 1;

	return 0;
}

static int i915_dma_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("%s\n", __func__);

#ifndef __NetBSD__ /* XXX crufty legacy dri crap */
	if (ring->virtual_start == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}
#endif

	/* Program Hardware Status Page */
	if (!ring->status_page.page_addr) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG_DRIVER("hw status page @ %p\n",
			 ring->status_page.page_addr);
	if (ring->status_page.gfx_addr != 0)
		intel_ring_setup_status_page(ring);
	else
		i915_write_hws_pga(dev);

	DRM_DEBUG_DRIVER("Enabled hardware status page\n");

	return 0;
}

static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	switch (init->func) {
	case I915_INIT_DMA:
		retcode = i915_initialize(dev, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE. These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction. It's important to get the size right as
 * that tells us where the next instruction to check is. Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
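/*
 * For example, going by the decode below: a dword with opcode bits
 * 31:29 == 0x2 (a 2D command) and a low byte of 0x04 is taken to be
 * (0x04 & 0xff) + 2 = 6 dwords long, so validation resumes 6 dwords
 * further on.
 */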
static int validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}

static int i915_emit_cmds(struct drm_device *dev, int *buffer, int dwords)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i, ret;

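	/*
	 * Reject buffers that cannot fit: the +1 leaves room for the
	 * padding dword emitted below for odd-length buffers, and the
	 * 8 bytes of slack matches the reserve the ring code keeps so
	 * that head == tail always reads as "empty" rather than "full".
	 */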
	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
		return -EINVAL;

	for (i = 0; i < dwords;) {
		int sz = validate_cmd(buffer[i]);
		if (sz == 0 || i + sz > dwords)
			return -EINVAL;
		i += sz;
	}

	ret = BEGIN_LP_RING((dwords+1)&~1);
	if (ret)
		return ret;

	for (i = 0; i < dwords; i++)
		OUT_RING(buffer[i]);
	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}

int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect *box,
	      int DR1, int DR4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
	    box->y2 <= 0 || box->x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);
		return -EINVAL;
	}

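	/* DRAWRECT_INFO takes inclusive maxima, hence the x2 - 1 and
	 * y2 - 1 packed into the dwords below. */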
	if (INTEL_INFO(dev)->gen >= 4) {
		ret = BEGIN_LP_RING(4);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
	} else {
		ret = BEGIN_LP_RING(6);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
	}
	ADVANCE_LP_RING();

	return 0;
}

/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	dev_priv->dri1.counter++;
	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
		dev_priv->dri1.counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}

static int i915_dispatch_cmdbuffer(struct drm_device *dev,
				   drm_i915_cmdbuffer_t *cmd,
				   struct drm_clip_rect *cliprects,
				   void *cmdbuf)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

static int i915_dispatch_batchbuffer(struct drm_device *dev,
				     drm_i915_batchbuffer_t *batch,
				     struct drm_clip_rect *cliprects)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i, count, ret;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;
	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

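		/*
		 * i830/845 take the older MI_BATCH_BUFFER path, which
		 * supplies both the start and the (inclusive) end
		 * address of the batch, presumably because
		 * MI_BATCH_BUFFER_START is not usable there.
		 */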
		if (!IS_I830(dev) && !IS_845G(dev)) {
			ret = BEGIN_LP_RING(2);
			if (ret)
				return ret;

			if (INTEL_INFO(dev)->gen >= 4) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
		} else {
			ret = BEGIN_LP_RING(4);
			if (ret)
				return ret;

			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
		}
		ADVANCE_LP_RING();
	}

	if (IS_G4X(dev) || IS_GEN5(dev)) {
		if (BEGIN_LP_RING(2) == 0) {
			OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
			OUT_RING(MI_NOOP);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

static int i915_dispatch_flip(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
	int ret;

	if (!master_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
			 __func__,
			 dev_priv->dri1.current_page,
			 master_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

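	/*
	 * Emit a flush, point the display base at the other buffer via
	 * an asynchronous flip, then have the ring wait for plane A's
	 * flip to complete before the breadcrumb below is written.
	 */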
	ret = BEGIN_LP_RING(10);
	if (ret)
		return ret;

	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);

	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->dri1.current_page == 0) {
		OUT_RING(dev_priv->dri1.back_offset);
		dev_priv->dri1.current_page = 1;
	} else {
		OUT_RING(dev_priv->dri1.front_offset);
		dev_priv->dri1.current_page = 0;
	}
	OUT_RING(0);

	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);

	ADVANCE_LP_RING();

	master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
	return 0;
}

static int i915_quiescent(struct drm_device *dev)
{
	i915_kernel_lost_context(dev);
	return intel_ring_idle(LP_RING(dev->dev_private));
}

static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_quiescent(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	drm_i915_sarea_t *sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;
	struct drm_clip_rect *cliprects = NULL;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	master_priv = dev->primary->master->driver_priv;
	sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;

	if (!dev_priv->dri1.allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
			 batch->start, batch->used, batch->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (batch->num_cliprects < 0)
		return -EINVAL;

	if (batch->num_cliprects) {
		cliprects = kcalloc(batch->num_cliprects,
				    sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL)
			return -ENOMEM;

		ret = copy_from_user(cliprects, batch->cliprects,
				     batch->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
	mutex_unlock(&dev->struct_mutex);

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
	kfree(cliprects);

	return ret;
}

static int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	drm_i915_sarea_t *sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	struct drm_clip_rect *cliprects = NULL;
	void *batch_data;
	int ret;

	DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
			 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	master_priv = dev->primary->master->driver_priv;
	sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (cmdbuf->num_cliprects < 0)
		return -EINVAL;

	batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
	if (batch_data == NULL)
		return -ENOMEM;

	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
	if (ret != 0) {
		ret = -EFAULT;
		goto fail_batch_free;
	}

	if (cmdbuf->num_cliprects) {
		cliprects = kcalloc(cmdbuf->num_cliprects,
				    sizeof(*cliprects), GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto fail_batch_free;
		}

		ret = copy_from_user(cliprects, cmdbuf->cliprects,
				     cmdbuf->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_clip_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		goto fail_clip_free;
	}

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
	kfree(cliprects);
fail_batch_free:
	kfree(batch_data);

	return ret;
}

static int i915_emit_irq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	i915_kernel_lost_context(dev);

	DRM_DEBUG_DRIVER("\n");

	dev_priv->dri1.counter++;
	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
		dev_priv->dri1.counter = 1;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}

	return dev_priv->dri1.counter;
}

static int i915_wait_irq(struct drm_device *dev, int irq_nr)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret = 0;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
			 READ_BREADCRUMB(dev_priv));

	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	if (ring->irq_get(ring)) {
#ifdef __NetBSD__
		unsigned long flags;
		spin_lock_irqsave(&dev_priv->irq_lock, flags);
		DRM_SPIN_WAIT_ON(ret, &ring->irq_queue, &dev_priv->irq_lock,
		    3 * DRM_HZ,
		    READ_BREADCRUMB(dev_priv) >= irq_nr);
		spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
#else
		DRM_WAIT_ON(ret, ring->irq_queue, 3 * HZ,
			    READ_BREADCRUMB(dev_priv) >= irq_nr);
#endif
		ring->irq_put(ring);
	} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
		ret = -EBUSY;

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
	}

	return ret;
}

/* Needs the lock as it touches the ring.
 */
static int i915_irq_emit(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
#ifndef __NetBSD__
	struct drm_i915_private *dev_priv = dev->dev_private;
#endif
	drm_i915_irq_emit_t *emit = data;
	int result;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

#ifndef __NetBSD__ /* XXX crufty legacy dri crap */
	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}
#endif

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	result = i915_emit_irq(dev);
	mutex_unlock(&dev->struct_mutex);

	if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}

/* Doesn't need the hardware lock.
 */
static int i915_irq_wait(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}

static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
static int i915_vblank_swap(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	/* The delayed swap mechanism was fundamentally racy, and has been
	 * removed. The model was that the client requested a delayed flip/swap
	 * from the kernel, then waited for vblank before continuing to perform
	 * rendering. The problem was that the kernel might wake the client
	 * up before it dispatched the vblank swap (since the lock has to be
	 * held while touching the ringbuffer), in which case the client would
	 * clear and start the next frame before the swap occurred, and
	 * flicker would occur in addition to likely missing the vblank.
	 *
	 * In the absence of this ioctl, userland falls back to a correct path
	 * of waiting for a vblank, then dispatching the swap on its own.
	 * Context switching to userland and back is plenty fast enough for
	 * meeting the requirements of vblank swapping.
	 */
	return -EINVAL;
}

static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	DRM_DEBUG_DRIVER("%s\n", __func__);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_flip(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
#ifdef __NetBSD__
		/* XXX This is the old code; why was it changed upstream? */
		value = dev->irq_enabled ? 1 : 0;
#else
		value = dev->pdev->irq ? 1 : 0;
#endif
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pdev->device;
		break;
	case I915_PARAM_HAS_GEM:
		value = 1;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = 1;
		break;
	case I915_PARAM_HAS_BSD:
		value = intel_ring_initialized(&dev_priv->ring[VCS]);
		break;
	case I915_PARAM_HAS_BLT:
		value = intel_ring_initialized(&dev_priv->ring[BCS]);
		break;
	case I915_PARAM_HAS_VEBOX:
		value = intel_ring_initialized(&dev_priv->ring[VECS]);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	case I915_PARAM_HAS_GEN7_SOL_RESET:
		value = 1;
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = dev_priv->mm.aliasing_ppgtt || USES_FULL_PPGTT(dev);
		break;
	case I915_PARAM_HAS_WAIT_TIMEOUT:
		value = 1;
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915_semaphore_is_enabled(dev);
		break;
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
		value = 1;
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
#ifdef __NetBSD__
		value = DRM_SUSER();
#else
		value = capable(CAP_SYS_ADMIN);
#endif
		break;
	case I915_PARAM_HAS_PINNED_BATCHES:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_NO_RELOC:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
		value = 1;
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (copy_to_user(param->value, &value, sizeof(int))) {
		DRM_ERROR("copy_to_user failed\n");
		return -EFAULT;
	}

	return 0;
}

static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
		break;
	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	return 0;
}

static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;
	struct intel_ring_buffer *ring;
#ifdef __NetBSD__
	int ret;
#endif

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		WARN(1, "tried to set status page when mode setting active\n");
		return 0;
	}

	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);

	ring = LP_RING(dev_priv);
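	/* The status page lives at a 4KiB-aligned offset inside the GTT
	 * aperture; keep only address bits 28:12. */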
	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);

#ifdef __NetBSD__
	/* XXX errno NetBSD->Linux */
	ret = bus_space_map(dev->pdev->pd_pa.pa_memt,
	    (dev_priv->gtt.mappable_base + hws->addr), 4096,
	    BUS_SPACE_MAP_PREFETCHABLE,
	    &dev_priv->dri1.gfx_hws_cpu_bsh);
	if (ret) {
		ring->status_page.gfx_addr = 0;
		i915_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " G33 hw status page, error %d\n", ret);
		return -ret;
	}

	__CTASSERT(PAGE_SIZE == 4096);
	bus_space_set_region_1(dev->pdev->pd_pa.pa_memt,
	    dev_priv->dri1.gfx_hws_cpu_bsh, 0, 0, PAGE_SIZE);
#else
	dev_priv->dri1.gfx_hws_cpu_addr =
		ioremap_wc(dev_priv->gtt.mappable_base + hws->addr, 4096);
	if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
		ring->status_page.gfx_addr = 0;
		i915_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " G33 hw status page\n");
		return -ENOMEM;
	}

	memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
#endif

	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);

	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
			 ring->status_page.gfx_addr);
	DRM_DEBUG_DRIVER("load hws at %p\n",
			 ring->status_page.page_addr);
	return 0;
}

static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
#ifdef CONFIG_PNP
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
#endif
	int ret;

#ifdef CONFIG_PNP
	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/* Set up MCHBAR if possible; dev_priv->mchbar_need_disable records whether
 * we should disable it again on teardown */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev))
		return;

	dev_priv->mchbar_need_disable = false;

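	/* 915G/GM gate MCHBAR through the DEVEN register; later chipsets
	 * use the low bit of the MCHBAR register itself. */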
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

#ifndef __NetBSD__ /* XXX vga */
/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_err("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};
#endif

static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

#ifndef __NetBSD__ /* XXX vga */
	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;
#endif

#ifdef __NetBSD__
	intel_register_dsm_handler(dev);
#else
	intel_register_dsm_handler();
#endif

#ifndef __NetBSD__ /* XXX vga */
	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;
#endif

	/* Initialise stolen first so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	intel_power_domains_init_hw(dev_priv);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_gem_stolen;

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	intel_modeset_init(dev);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_power;

	INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);

	intel_modeset_gem_init(dev);

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = true;
	if (INTEL_INFO(dev)->num_pipes == 0) {
		intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
		return 0;
	}

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45. Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. Now we should scan for the initial config
	 * only once hotplug handling is enabled, but due to screwed-up locking
	 * around kms/fbdev init we can't protect the fbdev initial config
	 * scanning against hotplug events. Hence do this first and ignore the
	 * tiny window where we will lose hotplug notifications.
	 */
	intel_fbdev_initial_config(dev);

	/* Only enable hotplug handling once the fbdev is fully set up. */
	dev_priv->enable_hotplug_processing = true;

	drm_kms_helper_poll_init(dev);

	return 0;

cleanup_gem:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
	WARN_ON(dev_priv->mm.aliasing_ppgtt);
	drm_mm_takedown(&dev_priv->gtt.base.mm);
cleanup_power:
	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
	drm_irq_uninstall(dev);
cleanup_gem_stolen:
	intel_modeset_cleanup(dev);
	i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
#ifndef __NetBSD__ /* XXX vga */
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
#endif
	return ret;
}

int i915_master_create(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv;

	master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
	if (!master_priv)
		return -ENOMEM;

	master->driver_priv = master_priv;
	return 0;
}

void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv = master->driver_priv;

	if (!master_priv)
		return;

	kfree(master_priv);

	master->driver_priv = NULL;
}

#if IS_ENABLED(CONFIG_FB)
static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->dev->pdev;
	bool primary;

	ap = alloc_apertures(1);
	if (!ap)
		return;

	ap->ranges[0].base = dev_priv->gtt.mappable_base;
	ap->ranges[0].size = dev_priv->gtt.mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);
}
#else
static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
}
#endif

static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
	const struct intel_device_info *info = &dev_priv->info;

#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
			 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
			 info->gen,
			 dev_priv->dev->pdev->device,
			 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef PRINT_FLAG
#undef SEP_COMMA
}

/*
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 *   - it's judged too laborious to fill n static structures with the limit
 *     when a simple if statement does the job,
 *   - run-time checks (e.g. read fuse/strap registers) are needed.
 *
 * This function needs to be called:
 *   - after the MMIO has been setup as we are reading registers,
 *   - after the PCH has been detected,
 *   - before the first usage of the fields it can tweak.
 */
1554 | static void intel_device_info_runtime_init(struct drm_device *dev) |
1555 | { |
1556 | struct drm_i915_private *dev_priv = dev->dev_private; |
1557 | struct intel_device_info *info; |
1558 | enum i915_pipe pipe; |
1559 | |
1560 | info = (struct intel_device_info *)&dev_priv->info; |
1561 | |
1562 | if (IS_VALLEYVIEW(dev)) |
1563 | for_each_pipe(pipe) |
1564 | info->num_sprites[pipe] = 2; |
1565 | else |
1566 | for_each_pipe(pipe) |
1567 | info->num_sprites[pipe] = 1; |
1568 | |
1569 | if (i915.disable_display) { |
1570 | DRM_INFO("Display disabled (module parameter)\n" ); |
1571 | info->num_pipes = 0; |
1572 | } else if (info->num_pipes > 0 && |
1573 | (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) && |
1574 | !IS_VALLEYVIEW(dev)) { |
1575 | u32 fuse_strap = I915_READ(FUSE_STRAP); |
1576 | u32 sfuse_strap = I915_READ(SFUSE_STRAP); |
1577 | |
1578 | /* |
1579 | * SFUSE_STRAP is supposed to have a bit signalling the display |
1580 | * is fused off. Unfortunately it seems that, at least in |
1581 | * certain cases, fused off display means that PCH display |
1582 | * reads don't land anywhere. In that case, we read 0s. |
1583 | * |
1584 | * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK |
1585 | * should be set when taking over after the firmware. |
1586 | */ |
1587 | if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE || |
1588 | sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED || |
1589 | (dev_priv->pch_type == PCH_CPT && |
1590 | !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) { |
1591 | DRM_INFO("Display fused off, disabling\n" ); |
1592 | info->num_pipes = 0; |
1593 | } |
1594 | } |
1595 | } |
1596 | |
1597 | /** |
1598 | * i915_driver_load - setup chip and create an initial config |
1599 | * @dev: DRM device |
1600 | * @flags: startup flags |
1601 | * |
1602 | * The driver load routine has to do several things: |
1603 | * - drive output discovery via intel_modeset_init() |
1604 | * - initialize the memory manager |
1605 | * - allocate initial config memory |
1606 | * - setup the DRM framebuffer with the allocated memory |
1607 | */ |
1608 | int i915_driver_load(struct drm_device *dev, unsigned long flags) |
1609 | { |
1610 | struct drm_i915_private *dev_priv; |
1611 | struct intel_device_info *info, *device_info; |
1612 | int ret = 0, mmio_bar, mmio_size; |
1613 | uint32_t aperture_size; |
1614 | |
1615 | info = (struct intel_device_info *) flags; |
1616 | |
1617 | /* Refuse to load on gen6+ without kms enabled. */ |
1618 | if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) { |
1619 | DRM_INFO("Your hardware requires kernel modesetting (KMS)\n" ); |
1620 | DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n" ); |
1621 | return -ENODEV; |
1622 | } |
1623 | |
1624 | /* UMS needs agp support. */ |
1625 | if (!drm_core_check_feature(dev, DRIVER_MODESET) && !dev->agp) |
1626 | return -EINVAL; |
1627 | |
1628 | dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); |
1629 | if (dev_priv == NULL) |
1630 | return -ENOMEM; |
1631 | |
1632 | dev->dev_private = (void *)dev_priv; |
1633 | dev_priv->dev = dev; |
1634 | |
1635 | /* copy initial configuration to dev_priv->info */ |
1636 | device_info = (struct intel_device_info *)&dev_priv->info; |
1637 | *device_info = *info; |
1638 | |
1639 | spin_lock_init(&dev_priv->irq_lock); |
1640 | spin_lock_init(&dev_priv->gpu_error.lock); |
1641 | spin_lock_init(&dev_priv->backlight_lock); |
1642 | spin_lock_init(&dev_priv->uncore.lock); |
1643 | spin_lock_init(&dev_priv->mm.object_stat_lock); |
1644 | #ifdef __NetBSD__ |
1645 | linux_mutex_init(&dev_priv->dpio_lock); |
1646 | linux_mutex_init(&dev_priv->modeset_restore_lock); |
1647 | #else |
1648 | mutex_init(&dev_priv->dpio_lock); |
1649 | mutex_init(&dev_priv->modeset_restore_lock); |
1650 | #endif |
1651 | |
1652 | #ifdef __NetBSD__ |
1653 | spin_lock_init(&mchdev_lock); |
1654 | #endif |
1655 | |
1656 | intel_pm_setup(dev); |
1657 | |
1658 | intel_display_crc_init(dev); |
1659 | |
1660 | i915_dump_device_info(dev_priv); |
1661 | |
1662 | /* Not all pre-production machines fall into this category, only the |
1663 | * very first ones. Almost everything should work, except for maybe |
1664 | * suspend/resume. And we don't implement workarounds that affect only |
1665 | * pre-production machines. */ |
1666 | if (IS_HSW_EARLY_SDV(dev)) |
1667 | DRM_INFO("This is an early pre-production Haswell machine. " |
1668 | "It may not be fully functional.\n" ); |
1669 | |
1670 | if (i915_get_bridge_dev(dev)) { |
1671 | ret = -EIO; |
1672 | goto free_priv; |
1673 | } |
1674 | |
1675 | mmio_bar = IS_GEN2(dev) ? 1 : 0; |
1676 | /* Before gen4, the registers and the GTT are behind different BARs. |
1677 | * However, from gen4 onwards, the registers and the GTT are shared |
1678 | * in the same BAR, so we want to restrict this ioremap from |
1679 | * clobbering the GTT which we want ioremap_wc instead. Fortunately, |
1680 | * the register BAR remains the same size for all the earlier |
1681 | * generations up to Ironlake. |
1682 | */ |
1683 | if (info->gen < 5) |
1684 | mmio_size = 512*1024; |
1685 | else |
1686 | mmio_size = 2*1024*1024; |
1687 | |
1688 | dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size); |
1689 | if (!dev_priv->regs) { |
1690 | DRM_ERROR("failed to map registers\n" ); |
1691 | ret = -EIO; |
1692 | goto put_bridge; |
1693 | } |
1694 | |
1695 | #ifdef __NetBSD__ |
1696 | dev_priv->regs_bst = dev_priv->dev->pdev->pd_resources[mmio_bar].bst; |
1697 | dev_priv->regs_bsh = dev_priv->dev->pdev->pd_resources[mmio_bar].bsh; |
1698 | #endif |
1699 | |
1700 | /* This must be called before any calls to HAS_PCH_* */ |
1701 | intel_detect_pch(dev); |
1702 | |
1703 | intel_uncore_init(dev); |
1704 | |
1705 | ret = i915_gem_gtt_init(dev); |
1706 | if (ret) |
1707 | goto out_regs; |
1708 | |
1709 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
1710 | i915_kick_out_firmware_fb(dev_priv); |
1711 | |
1712 | pci_set_master(dev->pdev); |
1713 | |
1714 | #ifndef __NetBSD__ /* Handled in i915_gem_gtt. */ |
1715 | /* overlay on gen2 is broken and can't address above 1G */ |
1716 | if (IS_GEN2(dev)) |
1717 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); |
1718 | |
/* 965GM sometimes incorrectly writes to the hardware status page (HWS)
 * using 32-bit addressing: the address is truncated, so unrelated
 * memory is overwritten if the HWS is located above 4GB.
1722 | * |
1723 | * The documentation also mentions an issue with undefined |
1724 | * behaviour if any general state is accessed within a page above 4GB, |
1725 | * which also needs to be handled carefully. |
1726 | */ |
1727 | if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) |
1728 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); |
1729 | #endif |
1730 | |
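/* Map the CPU-visible ("mappable") part of the aperture write-combined;
 * CPU access to GTT-bound objects goes through this mapping. */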
1731 | aperture_size = dev_priv->gtt.mappable_end; |
1732 | |
1733 | #ifdef __NetBSD__ |
1734 | dev_priv->gtt.mappable = |
1735 | drm_io_mapping_create_wc(dev, dev_priv->gtt.mappable_base, |
1736 | aperture_size); |
1737 | /* Note: mappable_end is the size, not end paddr, of the aperture. */ |
1738 | pmap_pv_track(dev_priv->gtt.mappable_base, dev_priv->gtt.mappable_end); |
1739 | #else |
1740 | dev_priv->gtt.mappable = |
1741 | io_mapping_create_wc(dev_priv->gtt.mappable_base, |
1742 | aperture_size); |
1743 | #endif |
1744 | if (dev_priv->gtt.mappable == NULL) { |
1745 | ret = -EIO; |
1746 | goto out_gtt; |
1747 | } |
1748 | |
1749 | dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base, |
1750 | aperture_size); |
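/* arch_phys_wc_add() covers pre-PAT systems: it adds a write-combining
 * MTRR for the aperture and is a no-op where PAT is in use. */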
1751 | |
1752 | /* The i915 workqueue is primarily used for batched retirement of |
1753 | * requests (and thus managing bo) once the task has been completed |
1754 | * by the GPU. i915_gem_retire_requests() is called directly when we |
1755 | * need high-priority retirement, such as waiting for an explicit |
1756 | * bo. |
1757 | * |
1758 | * It is also used for periodic low-priority events, such as |
1759 | * idle-timers and recording error state. |
1760 | * |
1761 | * All tasks on the workqueue are expected to acquire the dev mutex |
1762 | * so there is no point in running more than one instance of the |
1763 | * workqueue at any time. Use an ordered one. |
1764 | */ |
dev_priv->wq = alloc_ordered_workqueue("i915", 0);
1766 | if (dev_priv->wq == NULL) { |
DRM_ERROR("Failed to create our workqueue.\n");
1768 | ret = -ENOMEM; |
1769 | goto out_mtrrfree; |
1770 | } |
1771 | |
1772 | intel_irq_init(dev); |
1773 | intel_uncore_sanitize(dev); |
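/* Sanitizing the uncore cleans up state the BIOS may have left behind
 * (e.g. RC6 enabled) before we program power management ourselves. */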
1774 | |
1775 | /* Try to make sure MCHBAR is enabled before poking at it */ |
1776 | intel_setup_mchbar(dev); |
1777 | intel_setup_gmbus(dev); |
1778 | intel_opregion_setup(dev); |
1779 | |
1780 | intel_setup_bios(dev); |
1781 | |
1782 | i915_gem_load(dev); |
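/* i915_gem_load() sets up the GEM object lists, the retire worker and
 * the shrinker that the out_gem_unload path below unregisters. */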
1783 | |
1784 | /* On the 945G/GM, the chipset reports the MSI capability on the |
1785 | * integrated graphics even though the support isn't actually there |
1786 | * according to the published specs. It doesn't appear to function |
1787 | * correctly in testing on 945G. |
1788 | * This may be a side effect of MSI having been made available for PEG |
1789 | * and the registers being closely associated. |
1790 | * |
1791 | * According to chipset errata, on the 965GM, MSI interrupts may |
 * be lost or delayed, but we use them anyway to avoid
1793 | * stuck interrupts on some machines. |
1794 | */ |
1795 | if (!IS_I945G(dev) && !IS_I945GM(dev)) |
1796 | pci_enable_msi(dev->pdev); |
1797 | |
1798 | intel_device_info_runtime_init(dev); |
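/* Fill in the device-info fields that can only be determined at run
 * time, such as the usable number of display pipes checked below. */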
1799 | |
1800 | if (INTEL_INFO(dev)->num_pipes) { |
1801 | ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes); |
1802 | if (ret) |
1803 | goto out_gem_unload; |
1804 | } |
1805 | |
1806 | intel_power_domains_init(dev_priv); |
1807 | |
1808 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
1809 | ret = i915_load_modeset_init(dev); |
1810 | if (ret < 0) { |
DRM_ERROR("failed to init modeset\n");
1812 | goto out_power_well; |
1813 | } |
1814 | } else { |
1815 | /* Start out suspended in ums mode. */ |
1816 | dev_priv->ums.mm_suspended = 1; |
1817 | } |
1818 | |
1819 | i915_setup_sysfs(dev); |
1820 | |
1821 | if (INTEL_INFO(dev)->num_pipes) { |
1822 | /* Must be done after probing outputs */ |
1823 | intel_opregion_init(dev); |
1824 | acpi_video_register(); |
1825 | } |
1826 | |
1827 | if (IS_GEN5(dev)) |
1828 | intel_gpu_ips_init(dev_priv); |
1829 | |
1830 | intel_init_runtime_pm(dev_priv); |
1831 | |
1832 | return 0; |
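
/* Error unwind: each label below undoes the setup steps that succeeded
 * before the corresponding failure point, in reverse order. */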
1833 | |
1834 | out_power_well: |
1835 | intel_power_domains_remove(dev_priv); |
1836 | drm_vblank_cleanup(dev); |
1837 | out_gem_unload: |
1838 | if (dev_priv->mm.inactive_shrinker.scan_objects) |
1839 | unregister_shrinker(&dev_priv->mm.inactive_shrinker); |
1840 | #ifdef __NetBSD__ |
1841 | DRM_DESTROY_WAITQUEUE(&dev_priv->pending_flip_queue); |
1842 | spin_lock_destroy(&dev_priv->pending_flip_lock); |
1843 | DRM_DESTROY_WAITQUEUE(&dev_priv->gpu_error.reset_queue); |
1844 | spin_lock_destroy(&dev_priv->gpu_error.reset_lock); |
1845 | #endif |
1846 | |
1847 | if (dev->pdev->msi_enabled) |
1848 | pci_disable_msi(dev->pdev); |
1849 | |
1850 | intel_teardown_gmbus(dev); |
1851 | intel_teardown_mchbar(dev); |
1852 | pm_qos_remove_request(&dev_priv->pm_qos); |
1853 | destroy_workqueue(dev_priv->wq); |
1854 | out_mtrrfree: |
1855 | arch_phys_wc_del(dev_priv->gtt.mtrr); |
1856 | #ifdef __NetBSD__ |
1857 | /* Note: mappable_end is the size, not end paddr, of the aperture. */ |
1858 | pmap_pv_untrack(dev_priv->gtt.mappable_base, |
1859 | dev_priv->gtt.mappable_end); |
1860 | #endif |
1861 | io_mapping_free(dev_priv->gtt.mappable); |
1862 | out_gtt: |
1863 | list_del(&dev_priv->gtt.base.global_link); |
1864 | drm_mm_takedown(&dev_priv->gtt.base.mm); |
1865 | dev_priv->gtt.base.cleanup(&dev_priv->gtt.base); |
1866 | out_regs: |
1867 | intel_uncore_fini(dev); |
1868 | intel_uncore_destroy(dev); |
1869 | pci_iounmap(dev->pdev, dev_priv->regs); |
1870 | put_bridge: |
1871 | pci_dev_put(dev_priv->bridge_dev); |
1872 | free_priv: |
1873 | /* XXX intel_pm_fini */ |
1874 | linux_mutex_destroy(&dev_priv->rps.hw_lock); |
1875 | if (dev_priv->slab) |
1876 | kmem_cache_destroy(dev_priv->slab); |
1877 | #ifdef __NetBSD__ |
1878 | spin_lock_destroy(&mchdev_lock); |
1879 | linux_mutex_destroy(&dev_priv->modeset_restore_lock); |
1880 | linux_mutex_destroy(&dev_priv->dpio_lock); |
1881 | spin_lock_destroy(&dev_priv->mm.object_stat_lock); |
1882 | spin_lock_destroy(&dev_priv->uncore.lock); |
1883 | spin_lock_destroy(&dev_priv->backlight_lock); |
1884 | spin_lock_destroy(&dev_priv->gpu_error.lock); |
1885 | spin_lock_destroy(&dev_priv->irq_lock); |
1886 | #endif |
1887 | kfree(dev_priv); |
1888 | return ret; |
1889 | } |
1890 | |
1891 | int i915_driver_unload(struct drm_device *dev) |
1892 | { |
1893 | struct drm_i915_private *dev_priv = dev->dev_private; |
1894 | int ret; |
1895 | |
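/* Idle the GPU before tearing anything down: unloading with requests
 * still in flight would leave them referencing freed state. */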
1896 | ret = i915_gem_suspend(dev); |
1897 | if (ret) { |
DRM_ERROR("failed to idle hardware: %d\n", ret);
1899 | return ret; |
1900 | } |
1901 | |
1902 | intel_fini_runtime_pm(dev_priv); |
1903 | |
1904 | intel_gpu_ips_teardown(); |
1905 | |
1906 | /* The i915.ko module is still not prepared to be loaded when |
1907 | * the power well is not enabled, so just enable it in case |
1908 | * we're going to unload/reload. */ |
1909 | intel_display_set_init_power(dev_priv, true); |
1910 | intel_power_domains_remove(dev_priv); |
1911 | |
1912 | i915_teardown_sysfs(dev); |
1913 | |
1914 | if (dev_priv->mm.inactive_shrinker.scan_objects) |
1915 | unregister_shrinker(&dev_priv->mm.inactive_shrinker); |
1916 | |
1917 | io_mapping_free(dev_priv->gtt.mappable); |
1918 | arch_phys_wc_del(dev_priv->gtt.mtrr); |
1919 | |
1920 | acpi_video_unregister(); |
1921 | |
1922 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
1923 | intel_fbdev_fini(dev); |
1924 | intel_modeset_cleanup(dev); |
1925 | cancel_work_sync(&dev_priv->console_resume_work); |
1926 | |
1927 | /* |
1928 | * free the memory space allocated for the child device |
1929 | * config parsed from VBT |
1930 | */ |
1931 | if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) { |
1932 | kfree(dev_priv->vbt.child_dev); |
1933 | dev_priv->vbt.child_dev = NULL; |
1934 | dev_priv->vbt.child_dev_num = 0; |
1935 | } |
1936 | |
1937 | #ifndef __NetBSD__ /* XXX vga */ |
1938 | vga_switcheroo_unregister_client(dev->pdev); |
1939 | vga_client_register(dev->pdev, NULL, NULL, NULL); |
1940 | #endif |
1941 | } |
1942 | |
1943 | /* Free error state after interrupts are fully disabled. */ |
1944 | del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); |
1945 | #ifdef __NetBSD__ |
1946 | teardown_timer(&dev_priv->gpu_error.hangcheck_timer); |
1947 | #endif |
1948 | cancel_work_sync(&dev_priv->gpu_error.work); |
1949 | i915_destroy_error_state(dev); |
1950 | |
1951 | if (dev->pdev->msi_enabled) |
1952 | pci_disable_msi(dev->pdev); |
1953 | |
1954 | intel_opregion_fini(dev); |
1955 | |
1956 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
1957 | /* Flush any outstanding unpin_work. */ |
1958 | flush_workqueue(dev_priv->wq); |
1959 | |
1960 | mutex_lock(&dev->struct_mutex); |
1961 | i915_gem_cleanup_ringbuffer(dev); |
1962 | i915_gem_context_fini(dev); |
1963 | WARN_ON(dev_priv->mm.aliasing_ppgtt); |
1964 | mutex_unlock(&dev->struct_mutex); |
1965 | i915_gem_cleanup_stolen(dev); |
1966 | |
1967 | if (!I915_NEED_GFX_HWS(dev)) |
1968 | i915_free_hws(dev); |
1969 | } |
1970 | |
1971 | list_del(&dev_priv->gtt.base.global_link); |
1972 | WARN_ON(!list_empty(&dev_priv->vm_list)); |
1973 | |
1974 | drm_vblank_cleanup(dev); |
1975 | |
1976 | intel_teardown_gmbus(dev); |
1977 | intel_teardown_mchbar(dev); |
1978 | |
1979 | destroy_workqueue(dev_priv->wq); |
1980 | pm_qos_remove_request(&dev_priv->pm_qos); |
1981 | |
1982 | dev_priv->gtt.base.cleanup(&dev_priv->gtt.base); |
1983 | |
1984 | intel_uncore_fini(dev); |
1985 | intel_uncore_destroy(dev); |
1986 | if (dev_priv->regs != NULL) |
1987 | pci_iounmap(dev->pdev, dev_priv->regs); |
1988 | |
1989 | if (dev_priv->slab) |
1990 | kmem_cache_destroy(dev_priv->slab); |
1991 | |
1992 | pci_dev_put(dev_priv->bridge_dev); |
1993 | /* XXX intel_pm_fini */ |
1994 | linux_mutex_destroy(&dev_priv->rps.hw_lock); |
1995 | #ifdef __NetBSD__ |
1996 | DRM_DESTROY_WAITQUEUE(&dev_priv->pending_flip_queue); |
1997 | spin_lock_destroy(&dev_priv->pending_flip_lock); |
1998 | DRM_DESTROY_WAITQUEUE(&dev_priv->gpu_error.reset_queue); |
1999 | spin_lock_destroy(&mchdev_lock); |
2000 | linux_mutex_destroy(&dev_priv->modeset_restore_lock); |
2001 | linux_mutex_destroy(&dev_priv->dpio_lock); |
2002 | spin_lock_destroy(&dev_priv->mm.object_stat_lock); |
2003 | spin_lock_destroy(&dev_priv->uncore.lock); |
2004 | spin_lock_destroy(&dev_priv->backlight_lock); |
2005 | spin_lock_destroy(&dev_priv->gpu_error.lock); |
2006 | spin_lock_destroy(&dev_priv->irq_lock); |
2007 | #endif |
2008 | kfree(dev->dev_private); |
2009 | |
2010 | return 0; |
2011 | } |
2012 | |
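/**
 * i915_driver_open - initialize state for a new file handle
 * @dev: DRM device
 * @file: DRM file private
 *
 * Sets up the per-open GEM state; the matching teardown happens in the
 * preclose/postclose hooks below.
 */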
2013 | int i915_driver_open(struct drm_device *dev, struct drm_file *file) |
2014 | { |
2015 | int ret; |
2016 | |
2017 | ret = i915_gem_open(dev, file); |
2018 | if (ret) |
2019 | return ret; |
2020 | |
2021 | return 0; |
2022 | } |
2023 | |
2024 | /** |
2025 | * i915_driver_lastclose - clean up after all DRM clients have exited |
2026 | * @dev: DRM device |
2027 | * |
2028 | * Take care of cleaning up after all DRM clients have exited. In the |
2029 | * mode setting case, we want to restore the kernel's initial mode (just |
2030 | * in case the last client left us in a bad state). |
2031 | * |
2032 | * Additionally, in the non-mode setting case, we'll tear down the GTT |
 * and DMA structures, since the kernel won't be using them, and clean
2034 | * up any GEM state. |
2035 | */ |
void i915_driver_lastclose(struct drm_device *dev)
2037 | { |
2038 | struct drm_i915_private *dev_priv = dev->dev_private; |
2039 | |
2040 | /* On gen6+ we refuse to init without kms enabled, but then the drm core |
2041 | * goes right around and calls lastclose. Check for this and don't clean |
2042 | * up anything. */ |
2043 | if (!dev_priv) |
2044 | return; |
2045 | |
2046 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
2047 | intel_fbdev_restore_mode(dev); |
2048 | #ifndef __NetBSD__ /* XXX vga */ |
2049 | vga_switcheroo_process_delayed_switch(); |
2050 | #endif |
2051 | return; |
2052 | } |
2053 | |
2054 | i915_gem_lastclose(dev); |
2055 | |
2056 | i915_dma_cleanup(dev); |
2057 | } |
2058 | |
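/**
 * i915_driver_preclose - clean up after a closing DRM client
 * @dev: DRM device
 * @file_priv: DRM file private
 *
 * Called on close before the file is destroyed: drops the client's GEM
 * contexts and disassociates its outstanding requests.
 */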
void i915_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
2060 | { |
2061 | mutex_lock(&dev->struct_mutex); |
2062 | i915_gem_context_close(dev, file_priv); |
2063 | i915_gem_release(dev, file_priv); |
2064 | mutex_unlock(&dev->struct_mutex); |
2065 | } |
2066 | |
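/**
 * i915_driver_postclose - free per-open file state
 * @dev: DRM device
 * @file: DRM file private
 *
 * Runs once nothing can reference the file any more and frees the
 * state allocated in i915_driver_open().
 */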
2067 | void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) |
2068 | { |
2069 | struct drm_i915_file_private *file_priv = file->driver_priv; |
2070 | |
2071 | #ifdef __NetBSD__ |
2072 | spin_lock_destroy(&file_priv->mm.lock); |
2073 | #endif |
2074 | |
2075 | kfree(file_priv); |
2076 | } |
2077 | |
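/*
 * Ioctl dispatch table. The flags gate access to each entry: DRM_AUTH
 * requires an authenticated client, DRM_MASTER and DRM_ROOT_ONLY
 * restrict the call to the DRM master or root, DRM_UNLOCKED skips the
 * global DRM lock, DRM_RENDER_ALLOW permits the ioctl on render nodes,
 * and DRM_CONTROL_ALLOW permits it on control nodes.
 */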
2078 | const struct drm_ioctl_desc i915_ioctls[] = { |
2079 | DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
2080 | DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH), |
2081 | DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH), |
2082 | DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), |
2083 | DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), |
2084 | DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), |
2085 | DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW), |
2086 | DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
2087 | DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), |
2088 | DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), |
2089 | DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
2090 | DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), |
2091 | DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
2092 | DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
2093 | DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH), |
2094 | DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), |
2095 | DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
2096 | DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), |
2097 | DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED), |
2098 | DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), |
2099 | DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), |
2100 | DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), |
2101 | DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), |
2102 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
2103 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
2104 | DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), |
2105 | DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), |
2106 | DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), |
2107 | DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
2108 | DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
2109 | DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
2110 | DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
2111 | DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
2112 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
2113 | DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
2114 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
2115 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
2116 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
2117 | DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED), |
2118 | DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
2119 | DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
2120 | DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
2121 | DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
2122 | DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
2123 | DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), |
2124 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
2125 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
2126 | DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
2127 | DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
2128 | }; |
2129 | |
2130 | int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); |
2131 | |
2132 | /* |
2133 | * This is really ugly: Because old userspace abused the linux agp interface to |
2134 | * manage the gtt, we need to claim that all intel devices are agp. For |
2135 | * otherwise the drm core refuses to initialize the agp support code. |
2136 | */ |
int i915_driver_device_is_agp(struct drm_device *dev)
2138 | { |
2139 | return 1; |
2140 | } |
2141 | |